Example #1
 def test_consistent_output_PE(self):
     test_primary_specific_outfile = io.StringIO()
     test_secondary_specific_outfile = io.StringIO()
     test_primary_multi_outfile = io.StringIO()
     test_secondary_multi_outfile = io.StringIO()
     test_unassigned_outfile = io.StringIO()
     test_unresolved_outfile = io.StringIO()
     sam1 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_human.sam'))
     sam2 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_mouse.sam'))
     process_headers(sam1,sam2,primary_specific=test_primary_specific_outfile, secondary_specific=test_secondary_specific_outfile)
     cat_counts = main_paired_end(getReadPairs(sam1,sam2),
                                  primary_specific=test_primary_specific_outfile,
                                  secondary_specific=test_secondary_specific_outfile,
                                  primary_multi=test_primary_multi_outfile,
                                  secondary_multi=test_secondary_multi_outfile,
                                  unresolved=test_unresolved_outfile,
                                  unassigned=test_unassigned_outfile,
                                  )
     self.assertEqual(sum([cat_counts[x] for x in cat_counts if 'primary_specific' in x])*2,
                      len(test_primary_specific_outfile.getvalue().split('\n'))-30) #29 lines of header in this file
     self.assertEqual(sum([cat_counts[x] for x in cat_counts if 'secondary_specific' in x and not 'primary_specific' in x])*2,
                      len(test_secondary_specific_outfile.getvalue().split('\n'))-27) #26 lines of header in this file
     self.assertEqual(sum([cat_counts[x] for x in cat_counts if 'primary_multi' in x and not 'primary_specific' in x and not 'secondary_specific' in x])*2,
                     len(test_primary_multi_outfile.getvalue().split('\n'))-1)
     self.assertEqual(sum([cat_counts[x] for x in cat_counts if 'secondary_multi' in x \
                     and not 'primary_multi' in x and not 'primary_specific' in x and not 'secondary_specific' in x])*2,
                     len(test_secondary_multi_outfile.getvalue().split('\n'))-1)
     self.assertEqual(hashlib.sha224(test_primary_specific_outfile.getvalue().encode('latin-1')).hexdigest(),'ecba2de3e3af9c7405a84ad2a4ebaf194ebfb4df76f45c311c0f681d')
     sam1.close()
     sam2.close()
     pass
Example #2
def build_program(main_file_data, library_header, shared_library, gcc_prefix, cflags, package=__name__):
    with tempfile.TemporaryDirectory() as tempdir:
        shead_name = os.path.basename(library_header)
        shlib_name = os.path.basename(shared_library)
        assert shlib_name.startswith("lib") and shlib_name.endswith(".so") and shead_name.endswith(".h")
        shlib_short = shlib_name[3:-3]  # strip "lib" and ".so"

        shared_lib_path = os.path.join(tempdir, shlib_name)
        lib_header_path = os.path.join(tempdir, shead_name)
        main_file_path = os.path.join(tempdir, "themis_main.c")
        main_output_path = os.path.join(tempdir, "themis_main")

        with open(shared_lib_path, "wb") as fout:
            with pkg_resources.resource_stream(package, shared_library) as fin:
                shutil.copyfileobj(fin, fout)
        with open(lib_header_path, "wb") as fout:
            with pkg_resources.resource_stream(package, library_header) as fin:
                shutil.copyfileobj(fin, fout)
        with open(main_file_path, "w") as fout:
            fout.write(main_file_data)

        subprocess.check_call([gcc_prefix + "gcc", *cflags.split(), "-I", tempdir, "-L", tempdir, "-l", shlib_short,
                               main_file_path, "-o", main_output_path])
        subprocess.check_call([gcc_prefix + "strip", main_output_path])
        with open(main_output_path, "rb") as fin:
            main_output_data = fin.read()
    return main_output_data
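A minimal usage sketch for the helper above, assuming a hypothetical package "mypkg.resources" that ships "themis.h" and "libthemis.so" as package data; the file names and flags are illustrative only.

binary = build_program(
    main_file_data='#include "themis.h"\nint main(void) { return 0; }',  # hypothetical C source
    library_header="themis.h",
    shared_library="libthemis.so",
    gcc_prefix="",              # e.g. "arm-linux-gnueabihf-" when cross-compiling
    cflags="-O2 -Wall",
    package="mypkg.resources",  # hypothetical package holding the data files
)
with open("themis_main", "wb") as out:
    out.write(binary)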
Example #3
 def test_consistent_output_conservative_PE(self):
     test_primary_specific_outfile = io.StringIO()
     test_secondary_specific_outfile = io.StringIO()
     test_primary_multi_outfile = io.StringIO()
     test_secondary_multi_outfile = io.StringIO()
     test_unassigned_outfile = io.StringIO()
     test_unresolved_outfile = io.StringIO()
     sam1 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_human.sam'))
     sam2 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_mouse.sam'))
     process_headers(sam1,sam2,primary_specific=test_primary_specific_outfile, secondary_specific=test_secondary_specific_outfile)
     cat_counts = conservative_main_paired_end(getReadPairs(sam1,sam2),
                                              primary_specific=test_primary_specific_outfile,
                                              secondary_specific=test_secondary_specific_outfile,
                                              primary_multi=test_primary_multi_outfile,
                                              secondary_multi=test_secondary_multi_outfile,
                                              unresolved=test_unresolved_outfile,
                                              unassigned=test_unassigned_outfile,
                                              )
     self.assertEqual(cat_counts[('primary_specific', 'secondary_specific')]*4 +\
                      cat_counts[('unresolved', 'unresolved')]*4, len(test_unresolved_outfile.getvalue().split('\n'))-1) #this test is not exhaustive; it only covers states present in the test data
     self.assertEqual(cat_counts[('primary_multi', 'primary_multi')]*2, len(test_primary_multi_outfile.getvalue().split('\n'))-1)
     self.assertEqual(cat_counts[('secondary_multi', 'secondary_multi')]*2, len(test_secondary_multi_outfile.getvalue().split('\n'))-1)
     self.assertEqual(cat_counts[('primary_specific', 'primary_specific')]*2 + cat_counts[('primary_specific', 'primary_multi')]*2 + cat_counts[('primary_multi', 'primary_specific')]*2,
                      len(test_primary_specific_outfile.getvalue().split('\n'))-30) #29 lines of header in this file
     self.assertEqual(cat_counts[('secondary_specific', 'secondary_specific')]*2 + cat_counts[('secondary_specific', 'secondary_multi')]*2 + cat_counts[('secondary_multi', 'secondary_specific')]*2,
                      len(test_secondary_specific_outfile.getvalue().split('\n'))-27)  #26 lines of header in this file
     self.assertEqual(cat_counts[('unassigned', 'unassigned')]*2, len(test_unassigned_outfile.getvalue().split('\n'))-1)
     
     self.assertEqual(hashlib.sha224(test_primary_specific_outfile.getvalue().encode('latin-1')).hexdigest(),'8f5349ac96f194a4600bf0542cb1a6ebf71ada14b8ee0986598d7f58')
     sam1.close()
     sam2.close()
     pass
Example #4
 def test_process_headers(self):
     test_primary_specific_outfile = io.StringIO()
     test_secondary_specific_outfile = io.StringIO()
     test_primary_multi_outfile = io.StringIO()
     test_secondary_multi_outfile = io.StringIO()
     test_unassigned_outfile = io.StringIO()
     test_unresolved_outfile = io.StringIO()
     sam1 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_human.sam'))
     sam2 = io.TextIOWrapper(resource_stream(__name__, 'data/paired_end_testdata_mouse.sam'))
     process_headers(sam1,sam2,
                  primary_specific=test_primary_specific_outfile,
                  secondary_specific=test_secondary_specific_outfile,
                  primary_multi=test_primary_multi_outfile,
                  secondary_multi=test_secondary_multi_outfile,
                  unresolved=test_unresolved_outfile,
                  unassigned=test_unassigned_outfile,
                  )
     self.assertEqual(len(test_primary_specific_outfile.getvalue()),695)
     self.assertEqual(len(test_secondary_specific_outfile.getvalue()),629)
     self.assertEqual(len(test_primary_multi_outfile.getvalue()),708)
     self.assertEqual(len(test_secondary_multi_outfile.getvalue()),642)
     self.assertEqual(len(test_unassigned_outfile.getvalue()),705)
     self.assertEqual(len(test_unresolved_outfile.getvalue()),705)
     sam1.close()
     sam2.close()
     pass
Example #5
def cdr_gff3(out_fp, fname='ighv_aligned.fasta', gffname='ighv_aligned.gff3'):
    """
    Calculate cysteine position in each sequence of the ighv alignment
    """

    from .. import gff3

    with resource_stream(_PKG, fname) as fp, resource_stream(_PKG, gffname) as gff_fp:
        sequences = ((name.split('|')[1], seq)
                     for name, seq, _ in util.readfq(fp))

        records = list(gff3.parse(gff_fp))

        out_fp.write('##gff-version\t3\n')
        w = csv.writer(out_fp, delimiter='\t', quoting=csv.QUOTE_NONE, lineterminator='\n')
        for name, s in sequences:
            pl = _position_lookup(s)

            for feature in records:
                if feature.start0 not in pl and feature.end - 1 not in pl:
                    continue

                start0 = pl.get(feature.start0, min(q for r, q in pl.items() if r >= feature.start0))
                end = pl.get(feature.end - 1, max(q for r, q in pl.items() if r <= feature.end - 1))

                f = feature._replace(seqid=name,
                                     start=start0 + 1,
                                     end=end + 1)
                w.writerow(f.update_attributes(ID=f.attribute_dict()['Name'] + '_' + name))
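A short usage sketch for cdr_gff3, relying on the default resource names bundled with the package; the output path is hypothetical.

with open("ighv_cdrs.gff3", "w") as out_fp:  # hypothetical output file
    cdr_gff3(out_fp)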
Example #6
def process_saml_md_about_sps(saml_md: bytes):
    saml_md_tree = XML(saml_md)
    parser = XMLParser(
        remove_blank_text=True, resolve_entities=False, remove_comments=False)
    with resource_stream(__name__,
                         REMOVE_NAMESPACE_PREFIXES_XSL_FILE_PATH) as \
            xslt_root1_file:
        xslt_root1 = parse(xslt_root1_file, parser=parser)

        transform1 = XSLT(xslt_root1)
        saml_md_tree_1 = transform1(saml_md_tree)

    with resource_stream(__name__,
                         REMOVE_KEY_WHITESPACE_XSL_FILE_PATH) as \
            xslt_root2_file:
        xslt_root2 = parse(xslt_root2_file, parser=parser)

    transform2 = XSLT(xslt_root2)
    saml_md_2 = transform2(saml_md_tree_1)

    canonicalized_saml_md_2 = BytesIO()
    saml_md_2.write_c14n(
        canonicalized_saml_md_2, exclusive=True, with_comments=False)

    parser = XMLParser(
        remove_blank_text=True, resolve_entities=False, remove_comments=False)
    saml_md_tree_3 = XML(canonicalized_saml_md_2.getvalue(),
                         parser).getroottree()

    return saml_md_tree_3
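A minimal invocation sketch, assuming a SAML metadata document on disk; the file name is hypothetical.

with open("sp_metadata.xml", "rb") as fh:  # hypothetical metadata file
    cleaned_tree = process_saml_md_about_sps(fh.read())
print(cleaned_tree.getroot().tag)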
Example #7
def main():
    try_import("scipy")
    try_import("scipy.interpolate.rbf")
    try_import("Crypto")
    try_import("serial")
    try_import("PIL")
    try_import("numpy")
    try_import("zipimport")

    try_import("fluxclient")
    try_import("fluxclient.fcode")
    try_import("fluxclient.hw_profile")
    try_import("fluxclient.laser")
    try_import("fluxclient.printer")
    try_import("fluxclient.printer._printer")
    try_import("fluxclient.robot")
    try_import("fluxclient.scanner")
    try_import("fluxclient.scanner._scanner")
    try_import("fluxclient.upnp")

    sys.stdout.write("Open resource fluxclient::assets/flux3dp-icon.png ... ")
    sys.stdout.flush()
    try:
        import pkg_resources
        pkg_resources.resource_stream("fluxclient", "assets/flux3dp-icon.png")
        sys.stdout.write("OK\n")
    except Exception as e:
        sys.stdout.write("ERROR: %s" % e)
        sys.stdout.flush()
Example #8
 def testscheme_open(self, req):
     try:
         selector = req.get_selector()
         if selector == u'/ws_newcompass.asmx?WSDL':
             return urllib.addinfourl(
                 pkg_resources.resource_stream(__name__, 'tests/testdata/wsdl.xml'),
                 httplib.HTTPMessage(open('/dev/null')),
                 req.get_full_url(),
                 200
             )
         elif selector == u'/ws_newcompass.asmx':
             soapResponse = urlparse.urlparse(req.get_header('Soapaction')).path.strip('"').split('/')[-1] + '.xml'
             return urllib.addinfourl(
                 pkg_resources.resource_stream(__name__, 'tests/testdata/' + soapResponse),
                 httplib.HTTPMessage(open('/dev/null')),
                 req.get_full_url(),
                 200
             )
         elif selector == u'/biomuta.tsv':
             return urllib2.addinfourl(
                 pkg_resources.resource_stream(__name__, 'tests/testdata/Biomuta.tsv'),
                 httplib.HTTPMessage(open('/dev/null')),
                 req.get_full_url(),
                 200
             )
         else:
             raise urllib2.URLError('Not found')
     except Exception:
         raise urllib2.URLError('Not found')
Example #9
def main():
    xls_file = resource_string('rdfconverters.resources', 'gaaps/xebrv7.xls')
    book = xlrd.open_workbook(file_contents=xls_file)

    xebr_concepts = XEBRV7Concepts(book)
    # RDF output graph
    xebr2xbrl = Graph()
    for key, ns in NS.items():
        xebr2xbrl.bind(key, ns)
    rdf_converter = RDFConverter(xebr2xbrl, xebr_concepts)

    def map_gaap(clazz_mapper, f, clazz_labels, base):
        mapper = clazz_mapper(book)
        graph = Graph().parse(f, format='n3')
        labels = clazz_labels(graph, mapper).get_labels()
        rdf_converter.add_mappings_to_graph(mapper, labels, base)


    f = resource_stream('rdfconverters.resources', 'gaaps/it.n3')
    map_gaap(ITMapper, f, ITGaapLabels, 'http://www.dfki.de/lt/xbrl_it.owl#')

    f = resource_stream('rdfconverters.resources', 'gaaps/pgc.n3')
    map_gaap(ESMapper, f, ESGaapLabels, 'http://www.dfki.de/lt/xbrl_es.owl#')

    f = resource_stream('rdfconverters.resources', 'gaaps/be.n3')
    map_gaap(BEMapper, f, BEGaapLabels, 'http://www.dfki.de/lt/xbrl_be.owl#')


    util.write_graph(xebr2xbrl, '/tmp/xebr2xbrl.n3')
Example #10
    def test_AmpliconData_align_to_reference(self):
        forward_file = resource_stream(__name__, 'data/testdata_R1.fastq')
        reverse_file = resource_stream(__name__, 'data/testdata_R2.fastq')
        manifest = io.TextIOWrapper(resource_stream(__name__, 'data/testdatamanifest.txt'))
        self.amplicons.process_twofile_readpairs(forward_file, reverse_file)
        self.amplicons.add_references_from_manifest(manifest)
        self.amplicons.merge_overlaps()
        
        self.amplicons.reference = {'002b0ade8cda6a7bdc15f09ed5812126':('BRCA1_Exon9_UserDefined_(9825051)_7473614_chr17_41243841_41244065', 'chr17', '41243841', '41244065', '-'),
                                    'randomfoo': ('MadeUp','chr99','1','100','+'),}
        self.amplicons.match_to_reference(min_score = 10, trim_primers=0, global_align=True)
        
        self.amplicons.align_to_reference()
        #print(str(sorted(list(self.amplicons.aligned.items()))))
        #print(str(sorted(list(self.amplicons.location.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.aligned.items())))).hexdigest(),'ffb3aa886476f377b485c9d967a844c7')
        self.assertEqual(md5(str(sorted(list(self.amplicons.location.items())))).hexdigest(),'cd58f75e64192227c8c2c6beca7564cf')

        self.amplicons.aligned = {}
        self.amplicons.location = {}
        self.amplicons.align_to_reference(global_align=False)
        #print(str(sorted(list(self.amplicons.aligned.items()))))
        #print(str(sorted(list(self.amplicons.location.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.aligned.items())))).hexdigest(),'0bec5d7ad6ca3e6f0a5f3bd366ab1c66')
        self.assertEqual(md5(str(sorted(list(self.amplicons.location.items())))).hexdigest(),'1058c4388032a963aec5a03679d5a899')
        
        pass
Example #11
def get_schema(version):
    # type: (str) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[unicode,Any], Loader]

    if version in SCHEMA_CACHE:
        return SCHEMA_CACHE[version]

    cache = {}
    version = version.split("#")[-1].split(".")[0]
    for f in cwl_files:
        try:
            res = resource_stream(__name__, 'schemas/%s/%s' % (version, f))
            cache["https://w3id.org/cwl/" + f] = res.read()
            res.close()
        except IOError:
            pass

    for f in salad_files:
        try:
            res = resource_stream(
                __name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
                % (version, f))
            cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
                  + f] = res.read()
            res.close()
        except IOError:
            pass

    SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
        "https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)

    global SCHEMA_FILE, SCHEMA_ANY  # pylint: disable=global-statement
    SCHEMA_FILE = SCHEMA_CACHE[version][3].idx["https://w3id.org/cwl/cwl#File"]
    SCHEMA_ANY = SCHEMA_CACHE[version][3].idx["https://w3id.org/cwl/salad#Any"]

    return SCHEMA_CACHE[version]
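A brief usage sketch; the version string is illustrative, and the four-tuple unpacking mirrors the type comment above.

document_loader, avsc_names, schema_metadata, metaschema_loader = get_schema("v1.0")  # hypothetical CWL version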
Example #12
 def read_table(self):
     self.tables = []
     for i in range(1,6):
         cols = [1,2,3,4,5,6]
         if i==4:
             cols = [1,2,3,4,5]
         stream = resource_stream(__name__,
                                  self.basefilename + str(i) + '.dat')
         table = numpy.loadtxt(stream,
                               #converters={0: self._CaLa_iso_converter},
                               usecols=cols
                               )
         stream.close()
         if i==4:
             table = numpy.hstack((table,
                                  numpy.nan * numpy.ones((len(table),1))))
         self.tables.append(table)
         #print "Table " + str(i)
         #print table.shape
     #import newio
     #table = newio.loadtxt(self.basefilename + '6.dat')
     stream = resource_stream(__name__, self.basefilename + '6.dat')
     table = numpy.loadtxt(stream)
     stream.close()
     self.tables.append(table)
     self.data = numpy.vstack(self.tables[0:5])
     self.remnant_data = self.tables[5]
     return self.tables
Example #13
def _zone_apl(depcom):
    """Look up the commune's APL zone (aide personnalisée au logement) from its depcom (INSEE code)."""
    global zone_apl_by_depcom
    if zone_apl_by_depcom is None:
        with pkg_resources.resource_stream(
            openfisca_france.__name__,
            'assets/apl/20110914_zonage.csv',
            ) as csv_file:
            csv_reader = csv.DictReader(csv_file)
            zone_apl_by_depcom = {
                # Keep only the first character of the Zonage column, because the value "1bis" is treated as equivalent to 1.
                row['CODGEO']: int(row['Zonage'][0])
                for row in csv_reader
                }
        # Add subcommunes (arrondissements and communes associées), use the same value as their parent commune.
        with pkg_resources.resource_stream(
            openfisca_france.__name__,
            'assets/apl/commune_depcom_by_subcommune_depcom.json',
            ) as json_file:
            commune_depcom_by_subcommune_depcom = json.load(json_file)
            for subcommune_depcom, commune_depcom in commune_depcom_by_subcommune_depcom.iteritems():
                zone_apl_by_depcom[subcommune_depcom] = zone_apl_by_depcom[commune_depcom]

    default_value = 2
    return fromiter(
        (
            zone_apl_by_depcom.get(depcom_cell, default_value)
            for depcom_cell in depcom
            ),
        dtype = int16,
        )
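A hypothetical call, assuming an array of INSEE depcom codes; the codes shown are illustrative, and unknown codes fall back to zone 2.

import numpy as np
zones = _zone_apl(np.array(['75101', '13055', '99999']))  # -> array of zone numbers (int16)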
Example #14
def loadSecretome(portal):
    if portal._p_jar is not None and isinstance(portal._p_jar.db()._storage, DemoStorage):
        # Don't bother if we're just testing
        return
    try:
        resources = portal['resources']
    except KeyError:
        resources = portal[portal.invokeFactory('Folder', 'resources')]
        resources.title = u'Resources'
    try:
        secretome = resources['secretome']
    except KeyError:
        secretome = createContentInContainer(resources, 'eke.secretome.secretomefolder', title=u'Secretome')
    ids = secretome.keys()
    if len(ids) > 0:
        secretome.manage_delObjects(list(ids))
    with pkg_resources.resource_stream(__name__, 'data/uniqueIDs.csv') as infile:
        rows = csv.DictReader(infile)
        for row in rows:
            probesetID, hgnc, timesMapped = row['hgu133plus2ID'], row['HGNC.symbol'], int(row['times.mapped.to'])
            databases = row['databases.foundin'].split(u'|')
            _logger.info('Creating probeset %s', probesetID)
            createContentInContainer(
                secretome,
                'eke.secretome.probeset',
                title=probesetID,
                hgncSymbol=hgnc,
                databaseNames=databases,
                timesMapped=timesMapped
            )
    transaction.commit()
    with pkg_resources.resource_stream(__name__, 'data/mappedIDs.csv') as infile:
        rows = csv.DictReader(infile)
        mappings = {}
        class Mapping(object):
            def __init__(self):
                self.databases, self.probesets = set(), set()
        for row in rows:
            database, gene, probesets = row['database'], row['beforemapping'], row['hgu133plus2.ID']
            if probesets == 'none':
                probesets = set()
            else:
                probesets = set(probesets.split(u'|'))
            mapping = mappings.get(gene, Mapping())
            mapping.databases.add(database)
            mapping.probesets = mapping.probesets | probesets
            mappings[gene] = mapping
        for gene, mapping in mappings.iteritems():
            _logger.info('Creating gene/protein %s', gene)
            createContentInContainer(
                secretome,
                'eke.secretome.geneprotein',
                title=gene,
                databaseNames=list(mapping.databases),
                probesetMappings=list(mapping.probesets)
            )
    transaction.commit()
    _logger.info('Publishing everything')
    publish(resources)
    transaction.commit()
Example #15
    def get(self, filename=None):
        """return a static file"""
        if self.module is not None:
            try:
                fp = pkg_resources.resource_stream(self.module.import_name, os.path.join(self.module.config.static_folder, filename))
            except IOError:
                raise werkzeug.exceptions.NotFound()
            config = self.module.config
        else:
            try:
                fp = pkg_resources.resource_stream(self.app.import_name, os.path.join(self.app.config.static_folder, filename))
            except IOError:
                raise werkzeug.exceptions.NotFound()
            config = self.app.config
        
        mimetype = mimetypes.guess_type(filename)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'

        headers = Headers()
        data = wrap_file(self.request.environ, fp)

        rv = self.app.response_class(data, mimetype=mimetype, headers=headers,
                                        direct_passthrough=True)

        rv.cache_control.public = True
        cache_timeout = self.config.static_cache_timeout
        if cache_timeout is not None:
            rv.cache_control.max_age = cache_timeout
            rv.expires = int(time.time() + cache_timeout)
        return rv
Example #16
    def setUp(self):
        """Setup tests."""
        from inspire.modules.workflows.receivers import precache_holdingpen_row
        from invenio_workflows.receivers import index_holdingpen_record
        from invenio_workflows.signals import workflow_halted, workflow_object_saved

        # Disable the holdingpen caching receiver.
        workflow_halted.disconnect(precache_holdingpen_row)
        workflow_object_saved.disconnect(index_holdingpen_record)

        self.create_registries()
        self.record_oai_arxiv_plots = pkg_resources.resource_string(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "oai_arxiv_record_with_plots.xml")
        )
        self.record_oai_arxiv_accept = pkg_resources.resource_string(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "oai_arxiv_record_to_accept.xml")
        )
        self.some_record = pkg_resources.resource_string(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "some_record.xml")
        )
        self.arxiv_tarball = pkg_resources.resource_stream(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "1407.7587v1")
        )
        self.arxiv_pdf = pkg_resources.resource_stream(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "1407.7587v1.pdf")
        )
        self.arxiv_tarball_accept = pkg_resources.resource_stream(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "1511.01097")
        )
        self.arxiv_pdf_accept = pkg_resources.resource_stream(
            "inspire.testsuite", os.path.join("workflows", "fixtures", "1511.01097v1.pdf")
        )
Example #17
def get_schema(version):
    # type: (Text) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text,Any], Loader]

    if version in SCHEMA_CACHE:
        return SCHEMA_CACHE[version]

    cache = {}
    version = version.split("#")[-1]
    if '.dev' in version:
        version = ".".join(version.split(".")[:-1])
    for f in cwl_files:
        try:
            res = resource_stream(__name__, 'schemas/%s/%s' % (version, f))
            cache["https://w3id.org/cwl/" + f] = res.read()
            res.close()
        except IOError:
            pass

    for f in salad_files:
        try:
            res = resource_stream(
                __name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
                % (version, f))
            cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
                  + f] = res.read()
            res.close()
        except IOError:
            pass

    SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
        "https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)

    return SCHEMA_CACHE[version]
Example #18
    def test_AmpliconData_match_to_reference(self):
        forward_file = resource_stream(__name__, 'data/testdata_R1.fastq')
        reverse_file = resource_stream(__name__, 'data/testdata_R2.fastq')
        manifest = io.TextIOWrapper(resource_stream(__name__, 'data/testdatamanifest.txt'))
        self.amplicons.process_twofile_readpairs(forward_file, reverse_file)
        self.amplicons.add_references_from_manifest(manifest)
        self.amplicons.merge_overlaps()
        
        self.amplicons.match_to_reference(min_score = 0.1, trim_primers=0, global_align=True)
        #print(str(sorted(list(self.amplicons.reference.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.reference.items())))).hexdigest(),'e08f4ac8d71a067547f43746972e86dc')
        
        self.amplicons.reference = {}
        self.amplicons.match_to_reference(min_score = 0.1, trim_primers=0, global_align=False)
        #print(str(sorted(list(self.amplicons.reference.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.reference.items())))).hexdigest(),'e08f4ac8d71a067547f43746972e86dc')

        self.amplicons.reference = {}
        self.amplicons.match_to_reference(min_score = 0.1, trim_primers=10, global_align=False)
        #print(str(sorted(list(self.amplicons.reference.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.reference.items())))).hexdigest(),'e08f4ac8d71a067547f43746972e86dc')
        
        self.amplicons.reference = {}
        self.amplicons.match_to_reference(min_score = 10, trim_primers=50, global_align=True)
        #print(str(sorted(list(self.amplicons.reference.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.reference.items())))).hexdigest(),'fbfdb58228dcdabe280e742a56127b91')
        
        self.amplicons.reference = {'002b0ade8cda6a7bdc15f09ed5812126':('BRCA1_Exon9_UserDefined_(9825051)_7473614_chr17_41243841_41244065', 'chr17', '41243841', '41244065', '-'),
                                    'randomfoo': ('MadeUp','chr99','1','100','+'),}
        self.amplicons.match_to_reference(min_score = 10, trim_primers=0, global_align=True)
        #print(str(sorted(list(self.amplicons.reference.items()))))
        self.assertEqual(md5(str(sorted(list(self.amplicons.reference.items())))).hexdigest(),'a963bb251be3c27f4780b8292cdc3e13')
        
        pass
Example #19
    def testUncompressed(self):
        khepburn = self.view.findPath('//CineGuide/KHepburn')
        self.assert_(khepburn is not None)

        input = resource_stream('tests', 'data/khepltr.jpg')
        binary = khepburn.getAttributeAspect('picture', 'type').makeValue(None, mimetype='image/jpg')
        outputStream = binary.getOutputStream(compression=None)
        
        while True:
            data = input.read(1048576)
            if len(data) > 0:
                outputStream.write(data)
            else:
                break

        input.close()
        outputStream.close()
        khepburn.picture = binary

        self._reopenRepository()

        khepburn = self.view.findPath('//CineGuide/KHepburn')
        self.assert_(khepburn is not None)

        input = resource_stream('tests', 'data/khepltr.jpg')
        inputStream = khepburn.picture.getInputStream()
        data = input.read()
        picture = inputStream.read()
        input.close()
        inputStream.close()

        self.assert_(data == picture)
Example #20
 def test_save_hash_table(self):
     from ambivert import ambivert
     forward_file = resource_stream(__name__, 'data/testdata_R1.fastq')
     reverse_file = resource_stream(__name__, 'data/testdata_R2.fastq')
     manifest = io.TextIOWrapper(resource_stream(__name__, 'data/testdatamanifest.txt'))
     amplicons = ambivert.process_amplicon_data(forward_file, reverse_file,
                               manifest=manifest, fasta=None,
                               threshold=50, overlap=20, 
                               savehashtable=None, hashtable=None,
                               )
     outfile = NamedTemporaryFile(delete=False,mode='wb')
     outfilename = outfile.name
     amplicons.save_hash_table(outfile)
     amplicons.reference = {}
     amplicons.load_hash_table(open(outfilename,mode='rb'))
     self.assertEqual(md5(str(sorted(amplicons.reference))).hexdigest(),'c5e174ffd7ee8cfbd47e162667c83796')
     outfile.close()
     
     #test case where references don't match the hash table
     amplicons.reference_sequences = {}
     amplicons.load_hash_table(open(outfilename,mode='rb'))
     self.assertWarns(UserWarning)
     
     outfile.close()
     os.unlink(outfile.name)
     pass
Example #21
 def test_AmpliconData_print_to_sam(self):
     from ambivert import ambivert
     forward_file = resource_stream(__name__, 'data/testdata_R1.fastq')
     reverse_file = resource_stream(__name__, 'data/testdata_R2.fastq')
     manifest = io.TextIOWrapper(resource_stream(__name__, 'data/testdatamanifest.txt'))
     amplicons = ambivert.process_amplicon_data(forward_file, reverse_file,
                               manifest=manifest, fasta=None,
                               threshold=50, overlap=20, 
                               savehashtable=None, hashtable=None,
                               )
     outfile = io.StringIO()
     amplicons.aligned = {}
     amplicons.print_to_sam('32ff3ea55601305b5e8b3bd266c4080a',samfile=outfile)
     #print(outfile.getvalue())
     self.assertEqual(md5(outfile.getvalue()).hexdigest(),'275aab1624c01fea59326e7889370fbd')
     outfile.close()
     
     outfile = io.StringIO()
     amplicons.printall_to_sam(samfile=outfile)
     #print(outfile.getvalue().split('\n')[:27])
     #print(outfile.getvalue().split('\n')[27])
     #print(outfile.getvalue().split('\n')[28:])
     self.assertEqual(md5(repr(outfile.getvalue().split('\n')[:27])).hexdigest(),'5c5ab03009ec9d805ef09979e5bae3c4')
     self.assertEqual(outfile.getvalue().split('\n')[27],'@PG\tID:AmBiVErT\tVN:{0}'.format(__version__))
     self.assertEqual(md5(repr(outfile.getvalue().split('\n')[28:])).hexdigest(),'462c330929084d6a88d51679318e37e2')
     #self.assertEqual(md5(outfile.getvalue()).hexdigest(),'0a879b5f8d996617f3f48da47ed18362')
     outfile.close()
Example #22
    def _get_tld_extractor(self):
        if self._extractor:
            return self._extractor

        cached_file = self.cache_file
        try:
            if self._extractor is None:
                with open(cached_file, "r") as f:
                    jsonfile = f.readlines()
                    self._extractor = _PublicSuffixListTLDExtractor(json.loads(jsonfile[0]))
                return self._extractor
        except IOError as ioe:
            file_not_found = ioe.errno == errno.ENOENT
            if not file_not_found:
                LOG.error("error reading TLD cache file %s: %s", cached_file, ioe)
        except Exception as ex:
            LOG.error("error reading TLD cache file %s: %s", cached_file, ex)

        tlds = []
        if self.fetch:
            tld_sources = (_PublicSuffixListSource,)
            tlds = [tld for tld_source in tld_sources for tld in tld_source()]

        if not tlds:
            with pkg_resources.resource_stream(__name__, ".tld_set_snapshot") as snapshot_file:
                self._extractor = _PublicSuffixListTLDExtractor(pickle.load(snapshot_file))
                return self._extractor

        LOG.info("computed TLDs: [%s, ...]", ", ".join(list(tlds)[:10]))
        if LOG.isEnabledFor(logging.DEBUG):
            import difflib

            with pkg_resources.resource_stream(__name__, ".tld_set_snapshot.json") as snapshot_file:
                snapshot = sorted(json.load(snapshot_file))
            new = sorted(tlds)
            for line in difflib.unified_diff(snapshot, new, fromfile=".tld_set_snapshot.json", tofile=cached_file):
                print(line)

        data_to_dump = {}
        try:
            with open(cached_file, "w") as f:
                for tld in tlds:
                    if tld.find(".") != -1:
                        token = tld.split(".")
                        tld_info = token[len(token) - 1]
                        if tld_info in data_to_dump:
                            list_tld = data_to_dump[tld_info]
                            list_tld.append(tld)
                        else:
                            data_to_dump[tld_info] = [tld]
                    else:
                        data_to_dump[tld] = [tld]
                if sys.version_info[0] == 2:
                    json.dump(data_to_dump, f, encoding="utf-8", ensure_ascii=False)
                else:
                    json.dump(data_to_dump, f)
        except IOError as e:
            LOG.warn("unable to cache TLDs in file %s: %s", cached_file, e)
        self._extractor = _PublicSuffixListTLDExtractor(data_to_dump)
        return self._extractor
Example #23
 def test_AmpliconData_process_twofile_readpairs(self):
     forward_file = resource_stream(__name__, 'data/testdata_R1.fastq')
     reverse_file = resource_stream(__name__, 'data/testdata_R2.fastq')
     self.amplicons.process_twofile_readpairs(forward_file, reverse_file)
     
     #print(str(self.amplicons.data))
     self.assertEqual(md5(str(sorted(self.amplicons.data))).hexdigest(),'37e10d52aa874a8fbdb40330e1294f02')
     pass
Example #24
    def _get_tld_extractor(self):

        if self._extractor:
            return self._extractor

        if self.cache_file:
            try:
                with open(self.cache_file, 'rb') as f:
                    self._extractor = _PublicSuffixListTLDExtractor(
                        pickle.load(f))
                    return self._extractor
            except IOError as ioe:
                file_not_found = ioe.errno == errno.ENOENT
                if not file_not_found:
                    LOG.error(
                        "error reading TLD cache file %s: %s", self.cache_file, ioe)
            except Exception as ex:
                LOG.error(
                    "error reading TLD cache file %s: %s", self.cache_file, ex)

        tlds = frozenset()
        if self.suffix_list_urls:
            raw_suffix_list_data = fetch_file(self.suffix_list_urls)
            tlds = get_tlds_from_raw_suffix_list_data(
                raw_suffix_list_data, self.include_psl_private_domains)

        if not tlds:
            if self.fallback_to_snapshot:
                with closing(pkg_resources.resource_stream(__name__, '.tld_set_snapshot')) as snapshot_file:
                    self._extractor = _PublicSuffixListTLDExtractor(
                        pickle.load(snapshot_file))
                    return self._extractor
            else:
                raise Exception("tlds is empty, but fallback_to_snapshot is set"
                                " to false. Cannot proceed without tlds.")

        LOG.info("computed TLDs: [%s, ...]", ', '.join(list(tlds)[:10]))
        if LOG.isEnabledFor(logging.DEBUG):
            import difflib
            with closing(pkg_resources.resource_stream(__name__, '.tld_set_snapshot')) as snapshot_file:
                snapshot = sorted(pickle.load(snapshot_file))
            new = sorted(tlds)
            for line in difflib.unified_diff(snapshot, new, fromfile=".tld_set_snapshot", tofile=self.cache_file):
                if sys.version_info < (3,):
                    sys.stderr.write(line.encode('utf-8') + "\n")
                else:
                    sys.stderr.write(line + "\n")

        if self.cache_file:
            try:
                with open(self.cache_file, 'wb') as f:
                    pickle.dump(tlds, f)
            except IOError as e:
                LOG.warn(
                    "unable to cache TLDs in file %s: %s", self.cache_file, e)

        self._extractor = _PublicSuffixListTLDExtractor(tlds)
        return self._extractor
Example #25
def findEntryPointPlugins(allPlugins):
    # look for plugins enabled via setuptools `entry_points`
    for entry_point in iter_entry_points(group='girder.plugin'):
        # set defaults
        allPlugins[entry_point.name] = {
            'name': entry_point.name,
            'description': '',
            'version': '',
            'dependencies': set()
        }
        configJson = os.path.join('girder', 'plugin.json')
        configYml = os.path.join('girder', 'plugin.yml')
        data = {}
        try:
            if pkg_resources.resource_exists(entry_point.name, configJson):
                with pkg_resources.resource_stream(
                        entry_point.name, configJson) as conf:
                    try:
                        data = json.load(codecs.getreader('utf8')(conf))
                    except ValueError:
                        _recordPluginFailureInfo(
                            plugin=entry_point.name,
                            traceback=traceback.format_exc())
                        logprint.exception(
                            'ERROR: Plugin "%s": plugin.json is not valid '
                            'JSON.' % entry_point.name)
            elif pkg_resources.resource_exists(entry_point.name, configYml):
                with pkg_resources.resource_stream(
                        entry_point.name, configYml) as conf:
                    try:
                        data = yaml.safe_load(conf)
                    except yaml.YAMLError:
                        _recordPluginFailureInfo(
                            plugin=entry_point.name,
                            traceback=traceback.format_exc())
                        logprint.exception(
                            'ERROR: Plugin "%s": plugin.yml is not valid '
                            'YAML.' % entry_point.name)
        except (ImportError, SystemError):
            # Fall through and just try to load the entry point below.  If
            # there is still an error, we'll log it there.
            pass
        if data == {}:
            try:
                data = getattr(entry_point.load(), 'config', {})
            except (ImportError, SystemError):
                # If the plugin failed to load via entrypoint, but is in the
                # plugins directory, it may still load.  We mark and report the
                # failure; if it loads later, the failure will be cleared, but
                # the report is still desired.
                _recordPluginFailureInfo(
                    plugin=entry_point.name, traceback=traceback.format_exc())
                logprint.exception(
                    'ERROR: Plugin "%s": could not be loaded by entrypoint.' % entry_point.name)
                continue
        allPlugins[entry_point.name].update(data)
        allPlugins[entry_point.name]['dependencies'] = set(
            allPlugins[entry_point.name]['dependencies'])
Example #26
    def create_binaries(
        self,
        binaries=[],
        zipmode=zipfile.ZIP_STORED,
        patch=None,
        compile_python=True,
    ):
        """Create binaries passed as argument (or all found in packages)

        :param binaries: a list of binaries to create or an empty list to
            create all binaries found in the packages.
        :param zipmode: a valid zipmode for the binary payload. By default
            use ZIP_STORED.
        :param patch: a file name where monkey patches for libraries live
        :param compile_python: a boolean which indicates if .py files should
            be compiled and added to the binary payload.
        """
        with directory() as tmp:
            # first install the required package in temporary directory
            self.install_packages(tmp)

            # extract base library
            with pkg_resources.resource_stream(
                'pyload.resources',
                'xlib.zip'
            ) as src_stream:
                with zipfile.ZipFile(src_stream) as z:
                    z.extractall(tmp)

            # for any script defined in project(s)
            for mod, fun, name in self.scripts(tmp, binaries):
                out_file = os.path.join(self.out_dir, name)

                if not os.path.exists(self.out_dir):
                    os.makedirs(self.out_dir)

                with tempfile.TemporaryFile() as f:
                    with zipfile.PyZipFile(f, 'a', zipmode) as lib:
                        self.add_libraries(tmp, lib, compile_python)
                        self.add_mainpy(lib, mod, fun, patch=patch)

                    # rewind stream to read from the beginning
                    f.seek(0)

                    # add pyload core
                    with pkg_resources.resource_stream(
                        'pyload.resources', 'pyload'
                    ) as src_stream:
                        with open(out_file, 'wb') as target_stream:
                            shutil.copyfileobj(src_stream, target_stream)

                    # fill binary with zip payload
                    with open(out_file, 'ab') as d:
                        d.write(f.read())

                os.chmod(out_file, 511)  # 0o777
                print("Successfully created binary %s" % (out_file,))
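A hypothetical invocation, assuming a builder instance already configured with out_dir and the packages to install; the binary name is illustrative.

builder.create_binaries(binaries=["mytool"], zipmode=zipfile.ZIP_DEFLATED, compile_python=True)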
Example #27
    def __init__(self):
        QDialog.__init__(self)
    
        self.__mapWidgetUi = Ui_MapWidget()
        self.__mapWidgetUi.setupUi(self)
        
        self.__view = self.__mapWidgetUi.graphicsView
        self.__view.setObjectName("ACS_Map_View")
        self.__scene = QGraphicsScene()
                
        self.__view.setScene(self.__scene)

        self.__current_lat = 35.720428
        self.__current_lon = -120.769924
        self.__current_ground_width = 41000000. #meters (start with a view of the whole earth)
        #TODO: don't hard code location
        self.__tiler = acs_map_tiler.ACS_MapTiler(35.720428, -120.769924)

        self.__current_detail_layer = 0 #corresponds to "zoom" in map_tiler module
        self.__detail_layers = []
        self.__rect_tiles = OrderedDict() #see rectKey method for how key works

        #detail layers are various levels of raster detail for the map.
        #0 is lowest level of detail, 20 highest.  0 loads fast and the 
        #entire world can fit on a single tile.  20 loads slow, and it is unwise
        #to try to show the entire world due to the number of tiles required 
        #(higher numbered detail layers are intended for "zooming in")
        self.setupDetailLayers()

        self.__plane_layer = QGraphicsItemGroup()
        self.__scene.addItem(self.__plane_layer)
        self.__plane_icons = {}
        self.__plane_labels = {}
        self.__plane_icon_pixmaps = {}
        img_bytes = pkg_resources.resource_stream("acs_dashboards", "data/images/flyingWingTiny.png").read()
        self.__plane_icon_pixmaps[0] = QPixmap()
        self.__plane_icon_pixmaps[0].loadFromData(img_bytes)
        img_bytes2 = pkg_resources.resource_stream("acs_dashboards", "data/images/flyingWingTiny2.png").read()
        self.__plane_icon_pixmaps[1] = QPixmap()
        self.__plane_icon_pixmaps[1].loadFromData(img_bytes2)

        #for drawing waypoints:
        self.__mission_layer = QGraphicsItemGroup()
        self.__scene.addItem(self.__mission_layer)
        self.__wp_diameter = 0.0001
        self.__prev_drawn_nav_wp = None  #needed when drawing lines between wps
        self.__wp_loiter_radius = None #NOTE: this is in meters; the map itself
                                       #is in degrees

        #for drawing fence:
        self.__fence_layer = QGraphicsItemGroup()
        self.__scene.addItem(self.__fence_layer)

        #slots
        self.__view.just_zoomed.connect(self.onZoom)
        self.__mapWidgetUi.zoom_sb.valueChanged.connect(self.onZoomSBValueChanged)
        self.__view.just_panned.connect(self.onPan)
Example #28
 def test_add_references_from_manifest_and_fasta(self):
     manifest = io.TextIOWrapper(resource_stream(__name__, 'data/testdatamanifest.txt'))
     self.amplicons.add_references_from_manifest(manifest)
     self.assertEqual(md5(str(sorted(self.amplicons.reference_sequences.items()))).hexdigest(),'c899019b1ac4ccdb6e3ebc19f40add63')
     fasta = io.TextIOWrapper(resource_stream(__name__, 'data/testdatareferences.fasta'))
     self.amplicons.reference_sequences = {}
     self.amplicons.add_references_from_fasta(fasta)
     self.assertEqual(md5(str(sorted(self.amplicons.reference_sequences.items()))).hexdigest(),'fc3c6701032dbd84c6f5731d344df060')
     pass
Example #29
def get_test_case(name):
    """Returns a test case file object by name.

    This works even if we're in a zip file!
    """
    expected_name = re.sub(r"\.txt", ".expected.txt", name)

    case = pkg_resources.resource_stream("tbget.tests", name)
    expected = pkg_resources.resource_stream("tbget.tests", expected_name)
    return case, expected
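A usage sketch, assuming hypothetical fixtures "simple.txt" and "simple.expected.txt" shipped with tbget.tests.

case, expected = get_test_case("simple.txt")
try:
    case_text = case.read().decode("utf-8")
    expected_text = expected.read().decode("utf-8")
finally:
    case.close()
    expected.close()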
Example #30
    def setUp(self):
        self.keys = {}
        for key, cert in KEY_FILE_PAIRS:
            cdict = rsa_pem.parse(pkg_resources.resource_stream(__name__, key).read())
            t = rsa_pem.dict_to_tuple(cdict)
            self.keys[key] = RSA.construct(t)

            cdict = x509_pem.parse(pkg_resources.resource_stream(__name__, cert).read())
            t = x509_pem.dict_to_tuple(cdict)
            self.keys[cert] = RSA.construct(t)
Example #31
def load_sql(file):
    return ''.join(x for x in TextIOWrapper(resource_stream('CGRdb.sql', file))
                   if not x.startswith(('#', '/*', '*/', '\n'))).replace(
                       '$', '$$')
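A minimal sketch of how this loader might be used, assuming a hypothetical init.sql resource inside the CGRdb.sql package and an existing DB-API cursor.

ddl = load_sql('init.sql')  # hypothetical resource name
cursor.execute(ddl)         # cursor is assumed to already exist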
Example #32
from base64 import encodestring
from pkg_resources import resource_stream
from itertools import product

import numpy as np
from IPython.core.display import HTML

from probability import WeightedBox, RandomVariable

height, width = 120, 120
host_html = ('<img height="%d" width="%d" src="data:image/png;base64,%s">' %
             (height,
              width,
              encodestring(resource_stream('stats_lectures', \
                           'data/host.png').read()).decode('ascii')))

student_html = ('<img height="%d" width="%d" src="data:image/png;base64,%s"/>' %
                (height,
                 width,
                 encodestring(resource_stream('stats_lectures', \
                              'data/user-student.png').read()).decode('ascii')))

student_win_html = ('<img height="%d" width="%d" src="data:image/png;base64,%s"/>' %
                    (height,
                     width,
                     encodestring(resource_stream('stats_lectures', \
                                  'data/student_win.png').read()).decode('ascii')))

student_lose_html = ('<img height="%d" width="%d" src="data:image/png;base64,%s"/>' %
                     (height,
                      width,
Example #33
def qc_fasta(arg_sequence, check_with_mimimap2=True):
    log.debug("Starting qc_fasta")
    schema_resource = pkg_resources.resource_stream(__name__, "validation/formats")
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(schema_resource.read())
        tmp.flush()
        val = magic.Magic(magic_file=tmp.name,
                          uncompress=False, mime=True)

    gz = ""
    if arg_sequence.name.endswith(".gz"):
        sequence = gzip.GzipFile(fileobj=arg_sequence, mode='rb')
        gz = ".gz"
    else:
        sequence = arg_sequence

    sequence = io.TextIOWrapper(sequence)
    r = sequence.read(4096)
    sequence.seek(0)

    seqlabel = r[1:r.index("\n")]
    seq_type = val.from_buffer(r).lower()

    if seq_type == "text/fasta":
        # ensure that the file contains only one entry
        submitlabel, submitseq = read_fasta(sequence)
        sequence.seek(0)
        sequence.detach()

        if check_with_mimimap2:
            with tempfile.NamedTemporaryFile() as tmp1:
                with tempfile.NamedTemporaryFile() as tmp2:
                    refstring = pkg_resources.resource_string(__name__, "SARS-CoV-2-reference.fasta")
                    tmp1.write(refstring)
                    tmp1.flush()
                    tmp2.write(submitlabel.encode("utf8"))
                    tmp2.write(("".join(submitseq)).encode("utf8"))
                    tmp2.flush()

                    similarity = 0
                    try:
                        log.debug("Trying to run minimap2")
                        cmd = ["minimap2", "-c", "-x", "asm20", tmp1.name, tmp2.name]
                        logging.info("QC checking similarity to reference")
                        logging.info(" ".join(cmd))
                        result = subprocess.run(cmd, stdout=subprocess.PIPE)
                        result.check_returncode()
                        res = result.stdout.decode("utf-8")
                        mm = res.split("\t")
                        if len(mm) >= 10:
                            # divide the number of matching bases in the mapping by the target sequence length
                            similarity = (float(mm[9]) / float(mm[6])) * 100.0
                        else:
                            similarity = 0
                    except Exception as e:
                        logging.warn("QC against reference sequence using 'minimap2': %s", e, exc_info=e)

                    if similarity < 70.0:
                        raise ValueError(
                            f"QC fail for {seqlabel}: alignment to reference was less than 70% (was {similarity})")

        return "sequence.fasta" + gz, seqlabel, seq_type
    elif seq_type == "text/fastq":
        sequence.seek(0)
        sequence.detach()
        return "reads.fastq" + gz, seqlabel, seq_type
    else:
        log.debug(seqlabel)
        log.debug(seq_type)
        raise ValueError("Sequence file ({}) does not look like a DNA FASTA or FASTQ".format(arg_sequence))
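A hypothetical call, assuming a local FASTA file opened in binary mode; the file name is illustrative and the second argument skips the minimap2 similarity check.

with open("sample.fasta", "rb") as fh:  # hypothetical input file
    target_name, label, mime = qc_fasta(fh, False)
print(target_name, label, mime)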
Example #34
 def get_schematrons(cls):
     with resource_stream(
             __name__,
             "xml/gemini2/gemini2-schematron-20110906-v1.2.sch") as schema:
         return [cls.schematron(schema)]
Example #35
__author__ = 'marcusmorgenstern'
__mail__ = ''

import pickle
import re
from abc import ABCMeta, abstractmethod
from copy import deepcopy, copy
from numbers import Number
from numpy import sqrt
from pkg_resources import resource_stream
from pyfluka.base import IllegalArgumentError
from pyfluka.utils import ureg

f = resource_stream(__name__, "../data/periodic_table.p")
_periodic_table = pickle.load(f)
f.close()


class AbsPhysicsQuantity(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self, val, unc, unit, symbol=None):
        if issubclass(type(val), AbsPhysicsQuantity):
            self.val = copy(val.val)
            self.unc = copy(val.unc)
        elif isinstance(val, str) and unc == 0:
            self.val = ureg.Quantity(val)
            self.unc = ureg.Quantity(0., self.val.units)
        elif not isinstance(val, ureg.Quantity):
            self.val = val * ureg.Quantity(unit)
Example #36
 def test_x509_parse(self):
     self.assertEqual(top.x509_parse, x509_pem.parse)
     data = pkg_resources.resource_stream(__name__,
                                          KEY_FILE_PAIRS[0][1]).read()
     x509_dict = top.parse(data)
     self.assertTrue(x509_dict)
Example #37
def data_stream(name):
    return resource_stream(__name__, 'test_weblogo/data/' + name)
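A small usage sketch; the data file name is hypothetical.

with data_stream("cap.fa") as fp:  # hypothetical file under test_weblogo/data/
    raw = fp.read()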
Example #38
unit_type_dict = {}
translations = {}
resource_package = __name__

directory = os.fsencode(
    pkg_resources.resource_filename(resource_package, "dicts"))
for f in os.listdir(directory):
    file_name = "dicts/" + os.fsdecode(f)
    new_dict, f_type = read_dictionary(file_name)
    if f_type == "translation_file":
        translations.update(new_dict)
    elif f_type == "mapping_file":
        unit_type_dict.update(new_dict)
    elif f_type == "definitions_file":
        try:
            with closing(pkg_resources.resource_stream(__name__,
                                                       file_name)) as f:
                rbytes = f.read()
                load_user_definitions(StringIO(rbytes.decode('utf-8')))
        except Exception as e:
            msg = getattr(e, 'message', '') or str(e)
            raise ValueError("While opening {}\n{}".format(f, msg))


def convert_attr_to_units(attr):
    """ Calculate unit conversions if passed a convertible attribute."""
    # get all preferred units that have not yet been derived from this attribute
    to_units_URIs = attr_incomplete_preferred_units(attr)
    if not to_units_URIs:
        return None
    # pull important values off attribute
    meas_type = attr_type_URI(attr)
Example #39
 def test_rsa_parse(self):
     self.assertEqual(top.rsa_parse, rsa_pem.parse)
     data = pkg_resources.resource_stream(__name__,
                                          KEY_FILE_PAIRS[0][0]).read()
     rsa_dict = top.parse(data)
     self.assertTrue(rsa_dict)
Example #40
def datafile(name, sep='\t'):
    "Read key,value pairs from file."
    fp = resource_stream(__name__, join('data', name))
    for line in fp:
        yield line.decode(encoding='latin-1').strip().split(sep)
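A usage sketch, assuming a hypothetical tab-separated resource codes.tsv with exactly two columns per line under the package's data/ directory.

lookup = dict(datafile('codes.tsv'))  # hypothetical two-column file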
Example #41
def main(argv=None):
    if argv is None:
        argv = sys.argv

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        description=
        """Read ham log and output GIS data for callsigns worked. Output files will be
prefixed with output path. E.g. given "foo/bar", the following files will be
created: "foo/bar_points.geojson", "foo/bar_lines.geojson", and "foo/bar.kml"
""")
    parser.add_argument('infile',
                        type=str,
                        help='Input log file (ADIF or Cabrillo)')
    parser.add_argument('outpath', type=str, help='Output path prefix')
    parser.add_argument('-c',
                        '--cfg',
                        type=str,
                        help='Config file path',
                        default=os.path.join(os.environ['HOME'], '.geologrc'))
    parser.add_argument('-v',
                        '--verbose',
                        type=bool,
                        help='Turn on additional output',
                        default=False)
    args = parser.parse_args(argv[1:])

    cfg = ConfigParser.SafeConfigParser()

    cfg.read(args.cfg)

    try:
        un = cfg.get('qrz', 'username')
    except ConfigParser.Error:
        un = raw_input("QRZ.com user name:")

    try:
        pw = cfg.get('qrz', 'password')
    except ConfigParser.Error:
        pw = raw_input("QRZ.com password (not stored):")

    try:
        cachepath = cfg.get('qrz', 'cachepath')
    except ConfigParser.Error:
        cachepath = CACHEPATH

    try:
        ctydatpath = cfg.get('geolog', 'cachepath')
        ctydatflo = open(ctydatpath)
    except ConfigParser.Error:
        ctydatflo = resource_stream(__name__, "ctydat/cty.dat")

    log.info("QRZ cache: %s" % cachepath)

    geolog(args.infile, args.outpath, un, pw, cachepath, ctydatflo)

    return 0
Example #42
0
 def get_stream(self, resource_name):
     path = self.get_path(resource_name)
     if pkg_resources.resource_exists(self.pkg_name, path):
         return pkg_resources.resource_stream(self.pkg_name, path)
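A hedged usage sketch for get_stream above; `loader` stands in for an instance of the class defining it, and the resource name is assumed:

# Hypothetical caller; get_stream() returns None when the resource is missing,
# so the caller has to check before reading.
stream = loader.get_stream('templates/default.txt')
if stream is None:
    raise FileNotFoundError('packaged resource not found')
with stream:
    text = stream.read().decode('utf-8')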
Example #43
0
def main(
        argsl=None,  # type: List[str]
        args=None,  # type: argparse.Namespace
        executor=None,  # type: Callable[..., Tuple[Dict[Text, Any], Text]]
        makeTool=workflow.defaultMakeTool,  # type: Callable[..., Process]
        selectResources=None,  # type: Callable[[Dict[Text, int]], Dict[Text, int]]
        stdin=sys.stdin,  # type: IO[Any]
        stdout=sys.stdout,  # type: IO[Any]
        stderr=sys.stderr,  # type: IO[Any]
        versionfunc=versionstring,  # type: Callable[[], Text]
        job_order_object=None,  # type: MutableMapping[Text, Any]
        make_fs_access=StdFsAccess,  # type: Callable[[Text], StdFsAccess]
        fetcher_constructor=None,  # type: FetcherConstructorType
        resolver=tool_resolver,
        logger_handler=None,
        custom_schema_callback=None  # type: Callable[[], None]
):
    # type: (...) -> int

    _logger.removeHandler(defaultStreamHandler)
    if logger_handler:
        stderr_handler = logger_handler
    else:
        stderr_handler = logging.StreamHandler(stderr)
    _logger.addHandler(stderr_handler)
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            args = arg_parser().parse_args(argsl)

        # If on Windows, a default Docker container is used if not explicitly
        # provided by the user
        if onWindows() and not args.default_container:
            # This Docker image is a minimal Alpine image with bash installed
            # (size 6 MB). Source: https://github.com/frol/docker-alpine-bash
            args.default_container = windows_default_container_id

        # If the caller provided custom arguments, not every expected option
        # may be set, so fill in no-op defaults to avoid crashing when
        # dereferencing them in args.
        for k, v in six.iteritems({
                'print_deps': False,
                'print_pre': False,
                'print_rdf': False,
                'print_dot': False,
                'relative_deps': False,
                'tmp_outdir_prefix': 'tmp',
                'tmpdir_prefix': 'tmp',
                'print_input_deps': False,
                'cachedir': None,
                'quiet': False,
                'debug': False,
                'timestamps': False,
                'js_console': False,
                'version': False,
                'enable_dev': False,
                'enable_ext': False,
                'strict': True,
                'skip_schemas': False,
                'rdf_serializer': None,
                'basedir': None,
                'tool_help': False,
                'workflow': None,
                'job_order': None,
                'pack': False,
                'on_error': 'continue',
                'relax_path_checks': False,
                'validate': False,
                'enable_ga4gh_tool_registry': False,
                'ga4gh_tool_registries': [],
                'find_default_container': None,
                'make_template': False,
                'overrides': None
        }):
            if not hasattr(args, k):
                setattr(args, k, v)

        if args.quiet:
            _logger.setLevel(logging.WARN)
        if args.debug:
            _logger.setLevel(logging.DEBUG)
        if args.timestamps:
            formatter = logging.Formatter("[%(asctime)s] %(message)s",
                                          "%Y-%m-%d %H:%M:%S")
            stderr_handler.setFormatter(formatter)

        if args.version:
            print(versionfunc())
            return 0
        else:
            _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supportedCWLversions(args.enable_dev)))
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                setattr(args, "workflow", "CWLFile")
            else:
                _logger.error("")
                _logger.error(
                    "CWL document required, no input file was provided")
                arg_parser().print_help()
                return 1
        if args.relax_path_checks:
            command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if custom_schema_callback:
            custom_schema_callback()
        elif args.enable_ext:
            res = pkg_resources.resource_stream(__name__, 'extensions.yml')
            use_custom_schema("v1.0", "http://commonwl.org/cwltool",
                              res.read())
            res.close()
        else:
            use_standard_schema("v1.0")

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow,
            resolver=resolver,
            fetcher_constructor=fetcher_constructor)

        overrides = []  # type: List[Dict[Text, Any]]

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args, stdin, fetcher_constructor, overrides, tool_file_uri)
        except Exception as e:
            _logger.error(Text(e), exc_info=args.debug)

        if args.overrides:
            overrides.extend(
                load_overrides(file_uri(os.path.abspath(args.overrides)),
                               tool_file_uri))

        try:
            document_loader, workflowobj, uri = fetch_document(
                uri,
                resolver=resolver,
                fetcher_constructor=fetcher_constructor)

            if args.print_deps:
                printdeps(workflowobj, document_loader, stdout,
                          args.relative_deps, uri)
                return 0

            document_loader, avsc_names, processobj, metadata, uri \
                = validate_document(document_loader, workflowobj, uri,
                                    enable_dev=args.enable_dev, strict=args.strict,
                                    preprocess_only=args.print_pre or args.pack,
                                    fetcher_constructor=fetcher_constructor,
                                    skip_schemas=args.skip_schemas,
                                    overrides=overrides)

            if args.print_pre:
                stdout.write(json.dumps(processobj, indent=4))
                return 0

            overrides.extend(metadata.get("cwltool:overrides", []))

            conf_file = getattr(args,
                                "beta_dependency_resolvers_configuration",
                                None)  # Text
            use_conda_dependencies = getattr(args, "beta_conda_dependencies",
                                             None)  # Text

            make_tool_kwds = vars(args)

            job_script_provider = None  # type: Callable[[Any, List[str]], Text]
            if conf_file or use_conda_dependencies:
                dependencies_configuration = DependenciesConfiguration(
                    args)  # type: DependenciesConfiguration
                make_tool_kwds[
                    "job_script_provider"] = dependencies_configuration

            make_tool_kwds["find_default_container"] = functools.partial(
                find_default_container, args)
            make_tool_kwds["overrides"] = overrides

            tool = make_tool(document_loader, avsc_names, metadata, uri,
                             makeTool, make_tool_kwds)
            if args.make_template:
                yaml.safe_dump(generate_input_template(tool),
                               sys.stdout,
                               default_flow_style=False,
                               indent=4,
                               block_seq_indent=2)
                return 0

            if args.validate:
                _logger.info("Tool definition is valid")
                return 0

            if args.pack:
                stdout.write(
                    print_pack(document_loader, processobj, uri, metadata))
                return 0

            if args.print_rdf:
                stdout.write(
                    printrdf(tool, document_loader.ctx, args.rdf_serializer))
                return 0

            if args.print_dot:
                printdot(tool, document_loader.ctx, stdout)
                return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Tool definition failed validation:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(u"Tool definition failed initialization:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(
                u"I'm sorry, I couldn't load this CWL file%s",
                ", try again with --debug for more information.\nThe error was: "
                "%s" % exc if not args.debug else ".  The error was:",
                exc_info=args.debug)
            return 1

        if isinstance(tool, int):
            return tool

        # If on MacOS platform, TMPDIR must be set to be under one of the shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            tmp_prefix = "tmp_outdir_prefix"
            default_mac_path = "/private/tmp/docker_tmp"
            if getattr(args, tmp_prefix) and getattr(
                    args, tmp_prefix) == DEFAULT_TMP_PREFIX:
                setattr(args, tmp_prefix, default_mac_path)

        for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
            if getattr(args, dirprefix) and getattr(
                    args, dirprefix) != DEFAULT_TMP_PREFIX:
                sl = "/" if getattr(args, dirprefix).endswith(
                    "/") or dirprefix == "cachedir" else ""
                setattr(args, dirprefix,
                        os.path.abspath(getattr(args, dirprefix)) + sl)
                if not os.path.exists(os.path.dirname(getattr(args,
                                                              dirprefix))):
                    try:
                        os.makedirs(os.path.dirname(getattr(args, dirprefix)))
                    except Exception as e:
                        _logger.error("Failed to create directory: %s", e)
                        return 1

        if args.cachedir:
            if args.move_outputs == "move":
                setattr(args, 'move_outputs', "copy")
            setattr(args, "tmp_outdir_prefix", args.cachedir)

        secret_store = SecretStore()

        try:
            job_order_object = init_job_order(
                job_order_object,
                args,
                tool,
                print_input_deps=args.print_input_deps,
                relative_deps=args.relative_deps,
                stdout=stdout,
                make_fs_access=make_fs_access,
                loader=jobloader,
                input_basedir=input_basedir,
                secret_store=secret_store)
        except SystemExit as e:
            return e.code

        if not executor:
            if args.parallel:
                executor = MultithreadedJobExecutor()
            else:
                executor = SingleJobExecutor()

        if isinstance(job_order_object, int):
            return job_order_object

        try:
            setattr(args, 'basedir', input_basedir)
            del args.workflow
            del args.job_order
            (out, status) = executor(tool,
                                     job_order_object,
                                     logger=_logger,
                                     makeTool=makeTool,
                                     select_resources=selectResources,
                                     make_fs_access=make_fs_access,
                                     secret_store=secret_store,
                                     **vars(args))

            # This is the workflow output; it needs to be written
            if out is not None:

                def locToPath(p):
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in p:
                            del p[field]
                    if p["location"].startswith("file://"):
                        p["path"] = uri_file_path(p["location"])

                visit_class(out, ("File", "Directory"), locToPath)

                # Unsetting the Generation from the final output object
                visit_class(out, ("File", ),
                            MutationManager().unset_generation)

                if isinstance(out, six.string_types):
                    stdout.write(out)
                else:
                    stdout.write(json.dumps(out, indent=4))
                stdout.write("\n")
                stdout.flush()

            if status != "success":
                _logger.warning(u"Final process status is %s", status)
                return 1
            else:
                _logger.info(u"Final process status is %s", status)
                return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Input object failed validation:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(u"Workflow or tool uses unsupported feature:\n%s",
                          exc,
                          exc_info=args.debug)
            return 33
        except WorkflowException as exc:
            _logger.error(
                u"Workflow error, try again with --debug for more "
                "information:\n%s",
                strip_dup_lineno(six.text_type(exc)),
                exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(
                u"Unhandled error, try again with --debug for more information:\n"
                "  %s",
                exc,
                exc_info=args.debug)
            return 1

    finally:
        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
Example #44
0
def serve_openapi_yaml():
    stream = pkg_resources.resource_stream('slivka',
                                           'data/openapi-docs/openapi.yaml')
    return flask.send_file(stream, 'application/yaml', as_attachment=False)
Example #45
0
def load_resource(filename):
    with pkg_resources.resource_stream(__name__, filename) as fd:
        return fd.read().decode("utf-8")
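load_resource above returns the whole packaged resource decoded as UTF-8 text; a small hedged sketch of a caller, with the file name purely illustrative:

# Hypothetical usage; 'data/template.txt' is an assumed resource name.
template = load_resource('data/template.txt')
print(template.splitlines()[0])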
Example #46
0
 def test_parse_steria_cert(self):
     haka = pkg_resources.resource_stream(__name__,
                                          'keys/steria.pem').read()
     self.assertTrue(haka)
     cdict = x509_pem.parse(haka)
     self.assertTrue(cdict)
Example #47
0
def main(args=None):
    """
    This is the main function called by the `ivelplot` script.

    """

    from astropy.utils.compat import argparse
    from astropy.extern.configobj import configobj, validate

    from pkg_resources import resource_stream

    parser = argparse.ArgumentParser(
        description='An interactive environment for absorption line '
                    'identification and Voigt profile \nfitting with VPFIT.\n'
                    '\nTo dump a default configuration file: ivelplot -d'
                    '\nTo dump an extended default configuration file: '
                    'ivelplot -dd',
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('config', help='path to the configuration file')
    parser.add_argument('-z', '--redshift', help='redshift')
    parser.add_argument('--search', action='store_true',
                        help='display a general search list of ions')
    parser.add_argument('--lyman', action='store_true',
                        help='display the Lyman series transitions')
    parser.add_argument('--galactic', action='store_true',
                        help='display the common Galactic transitions')
    parser.add_argument('--agn', action='store_true',
                        help='display the common AGN associated transitions')

    config = resource_stream(__name__, '/config/ivelplot.cfg')
    config_extended = resource_stream(
        __name__, '/config/ivelplot_extended.cfg')
    spec = resource_stream(__name__, '/config/ivelplot_specification.cfg')

    if len(sys.argv) > 1:

        if sys.argv[1] == '-d':
            cfg = configobj.ConfigObj(config)
            cfg.filename = '{0}/ivelplot.cfg'.format(os.getcwd())
            cfg.write()
            return

        elif sys.argv[1] == '-dd':
            cfg = configobj.ConfigObj(config_extended)
            cfg.filename = '{0}/ivelplot.cfg'.format(os.getcwd())
            cfg.write()
            return

    args = parser.parse_args(args)

    try:
        cfg = configobj.ConfigObj(args.config, configspec=spec)
        validator = validate.Validator()
        cfg.validate(validator)

    except Exception:
        raise IOError('Configuration file could not be read')

    fname = cfg['WINDOW'].pop('transitions')

    if args.search:
        fh = resource_stream(__name__, '/data/search.dat')
        transitions = list(fh)
        fh.close()

    elif args.lyman:
        fh = resource_stream(__name__, '/data/lyman.dat')
        transitions = list(fh)
        fh.close()

    elif args.galactic:
        fh = resource_stream(__name__, '/data/galactic.dat')
        transitions = list(fh)
        fh.close()

    elif args.agn:
        fh = resource_stream(__name__, '/data/agn.dat')
        transitions = list(fh)
        fh.close()

    else:
        print('Reading transitions from ', fname)
        fh = open(fname)
        transitions = list(fh)
        fh.close()

    transitions = [transition for transition in transitions
                   if not transition.startswith('#')]

    fname = cfg['DATA'].pop('filename')
    if not fname:
        raise IOError('no data to plot!')

    spectrum = Table.read(fname) if fname[-4:] == 'fits' else ascii.read(fname)
    wavelength = spectrum[cfg['DATA'].pop('wavelength_column')]
    flux = spectrum[cfg['DATA'].pop('flux_column')]
    error = spectrum[cfg['DATA'].pop('error_column')]
    continuum = spectrum[cfg['DATA'].pop('continuum_column')]
    redshift = float(args.redshift) if args.redshift is not None else 0

    cfg['MODEL']['system_width'] = (cfg['WINDOW']['vmax'] -
                                    cfg['WINDOW']['vmin'])
    cfg['MODEL']['absorbers'] = None

    print(info)

    app = QApplication(sys.argv)
    app.aboutToQuit.connect(app.deleteLater)

    desktop = app.desktop()
    screen = desktop.screenGeometry()
    width = screen.width() / desktop.physicalDpiX() * 0.88

    fontsize = 0.7 * width
    label_fontsize = 0.6 * width

    cfg['WINDOW']['width'] = width
    cfg['WINDOW']['fontsize'] = fontsize
    cfg['WINDOW']['label_fontsize'] = label_fontsize

    velocity_plot = InteractiveVelocityPlot(
        fname, transitions, wavelength, flux, error, continuum, redshift,
        **cfg)
    velocity_plot.window.show()

    output_stream = OutputStream()
    output_stream.text_written.connect(velocity_plot.on_output)

    sys.stdout = output_stream
    sys.exit(app.exec_())
Example #48
0
def new_js_proc():
    # type: () -> subprocess.Popen

    res = resource_stream(__name__, 'cwlNodeEngine.js')
    nodecode = res.read()

    nodejs = None
    trynodes = ("nodejs", "node")
    for n in trynodes:
        try:
            if subprocess.check_output(
                [n, "--eval", "process.stdout.write('t')"]) != "t":
                continue
            nodejs = subprocess.Popen([n, "--eval", nodecode],
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
            break
        except subprocess.CalledProcessError:
            pass
        except OSError as e:
            if e.errno == errno.ENOENT:
                pass
            else:
                raise

    if nodejs is None:
        try:
            nodeimg = "node:slim"
            global have_node_slim
            if not have_node_slim:
                dockerimgs = subprocess.check_output(
                    ["docker", "images", nodeimg])
                if len(dockerimgs.split("\n")) <= 1:
                    nodejsimg = subprocess.check_output(
                        ["docker", "pull", nodeimg])
                    _logger.info("Pulled Docker image %s %s", nodeimg,
                                 nodejsimg)
                have_node_slim = True
            nodejs = subprocess.Popen([
                "docker", "run", "--attach=STDIN", "--attach=STDOUT",
                "--attach=STDERR", "--sig-proxy=true", "--interactive", "--rm",
                nodeimg, "node", "--eval", nodecode
            ],
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        except OSError as e:
            if e.errno == errno.ENOENT:
                pass
            else:
                raise
        except subprocess.CalledProcessError:
            pass

    if nodejs is None:
        raise JavascriptException(
            u"cwltool requires Node.js engine to evaluate Javascript "
            "expressions, but couldn't find it.  Tried %s, docker run "
            "node:slim" % u", ".join(trynodes))

    return nodejs
Example #49
0
def load():
    path = 'etc/hook.yaml'
    with resource_stream(catchbot.__name__, path) as f:
        return yaml.safe_load(f)
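A hedged usage sketch for load() above; it assumes etc/hook.yaml parses to a mapping and contains a 'url' key, neither of which is guaranteed by the package:

# Hypothetical caller; the 'url' key is illustrative only.
hook_cfg = load()
print(hook_cfg.get('url'))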
Example #50
0
def main(
    argsl=None,  # type: List[str]
    args=None,  # type: argparse.Namespace
    job_order_object=None,  # type: MutableMapping[Text, Any]
    stdin=sys.stdin,  # type: IO[Any]
    stdout=None,  # type: Union[TextIO, codecs.StreamWriter]
    stderr=sys.stderr,  # type: IO[Any]
    versionfunc=versionstring,  # type: Callable[[], Text]
    logger_handler=None,  #
    custom_schema_callback=None,  # type: Callable[[], None]
    executor=None,  # type: Callable[..., Tuple[Dict[Text, Any], Text]]
    loadingContext=None,  # type: LoadingContext
    runtimeContext=None  # type: RuntimeContext
):  # type: (...) -> int
    if not stdout:  # force UTF-8 even if the console is configured differently
        if (hasattr(sys.stdout, "encoding")  # type: ignore
                and sys.stdout.encoding != 'UTF-8'):  # type: ignore
            if six.PY3 and hasattr(sys.stdout, "detach"):
                stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
            else:
                stdout = codecs.getwriter('utf-8')(sys.stdout)  # type: ignore
        else:
            stdout = cast(TextIO, sys.stdout)  # type: ignore

    _logger.removeHandler(defaultStreamHandler)
    if logger_handler:
        stderr_handler = logger_handler
    else:
        stderr_handler = logging.StreamHandler(stderr)
    _logger.addHandler(stderr_handler)
    # pre-declared for finally block
    workflowobj = None
    input_for_prov = None
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            args = arg_parser().parse_args(argsl)

        if runtimeContext is None:
            runtimeContext = RuntimeContext(vars(args))
        else:
            runtimeContext = runtimeContext.copy()

        # If on Windows platform, a default Docker container is used if not
        # explicitly provided by the user
        if onWindows() and not runtimeContext.default_container:
            # This docker image is a minimal alpine image with bash installed
            # (size 6 mb). source: https://github.com/frol/docker-alpine-bash
            runtimeContext.default_container = windows_default_container_id

        # If caller parsed its own arguments, it may not include every
        # cwltool option, so fill in defaults to avoid crashing when
        # dereferencing them in args.
        for key, val in six.iteritems(get_default_args()):
            if not hasattr(args, key):
                setattr(args, key, val)

        rdflib_logger = logging.getLogger("rdflib.term")
        rdflib_logger.addHandler(stderr_handler)
        rdflib_logger.setLevel(logging.ERROR)
        if args.quiet:
            _logger.setLevel(logging.WARN)
        if runtimeContext.debug:
            _logger.setLevel(logging.DEBUG)
            rdflib_logger.setLevel(logging.DEBUG)
        if args.timestamps:
            formatter = logging.Formatter("[%(asctime)s] %(message)s",
                                          "%Y-%m-%d %H:%M:%S")
            stderr_handler.setFormatter(formatter)

        if args.version:
            print(versionfunc())
            return 0
        else:
            _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supportedCWLversions(args.enable_dev)))
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                setattr(args, "workflow", "CWLFile")
            else:
                _logger.error("")
                _logger.error(
                    "CWL document required, no input file was provided")
                arg_parser().print_help()
                return 1
        if args.relax_path_checks:
            command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if custom_schema_callback:
            custom_schema_callback()
        elif args.enable_ext:
            res = pkg_resources.resource_stream(__name__, 'extensions.yml')
            use_custom_schema("v1.0", "http://commonwl.org/cwltool",
                              res.read())
            res.close()
        else:
            use_standard_schema("v1.0")
        # Call the function from provenance.py if the provenance flag is enabled.
        if args.provenance:
            if not args.compute_checksum:
                _logger.error(
                    "--provenance incompatible with --no-compute-checksum")
                return 1

            runtimeContext.research_obj = ResearchObject(
                temp_prefix_ro=args.tmpdir_prefix,
                # Optionals, might be None
                orcid=args.orcid,
                full_name=args.cwl_full_name)

        if loadingContext is None:
            loadingContext = LoadingContext(vars(args))
        else:
            loadingContext = loadingContext.copy()
        loadingContext.research_obj = runtimeContext.research_obj
        loadingContext.disable_js_validation = \
            args.disable_js_validation or (not args.do_validate)
        loadingContext.construct_tool_object = getdefault(
            loadingContext.construct_tool_object, workflow.default_make_tool)
        loadingContext.resolver = getdefault(loadingContext.resolver,
                                             tool_resolver)

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow,
            resolver=loadingContext.resolver,
            fetcher_constructor=loadingContext.fetcher_constructor)

        try_again_msg = "" if args.debug else ", try again with --debug for more information"

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args, stdin, loadingContext.fetcher_constructor,
                loadingContext.overrides_list, tool_file_uri)

            if args.overrides:
                loadingContext.overrides_list.extend(
                    load_overrides(file_uri(os.path.abspath(args.overrides)),
                                   tool_file_uri))

            document_loader, workflowobj, uri = fetch_document(
                uri,
                resolver=loadingContext.resolver,
                fetcher_constructor=loadingContext.fetcher_constructor)

            if args.print_deps:
                printdeps(workflowobj, document_loader, stdout,
                          args.relative_deps, uri)
                return 0

            document_loader, avsc_names, processobj, metadata, uri \
                = validate_document(document_loader, workflowobj, uri,
                                    enable_dev=loadingContext.enable_dev,
                                    strict=loadingContext.strict,
                                    preprocess_only=(args.print_pre or args.pack),
                                    fetcher_constructor=loadingContext.fetcher_constructor,
                                    skip_schemas=args.skip_schemas,
                                    overrides=loadingContext.overrides_list,
                                    do_validate=loadingContext.do_validate)
            if args.pack:
                stdout.write(
                    print_pack(document_loader, processobj, uri, metadata))
                return 0
            if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at the same time
                runtimeContext.research_obj.packed_workflow(
                    print_pack(document_loader, processobj, uri, metadata))

            if args.print_pre:
                stdout.write(json_dumps(processobj, indent=4))
                return 0

            loadingContext.overrides_list.extend(
                metadata.get("cwltool:overrides", []))

            tool = make_tool(document_loader, avsc_names, metadata, uri,
                             loadingContext)
            if args.make_template:
                yaml.safe_dump(generate_input_template(tool),
                               sys.stdout,
                               default_flow_style=False,
                               indent=4,
                               block_seq_indent=2)
                return 0

            if args.validate:
                _logger.info("Tool definition is valid")
                return 0

            if args.print_rdf:
                stdout.write(
                    printrdf(tool, document_loader.ctx, args.rdf_serializer))
                return 0

            if args.print_dot:
                printdot(tool, document_loader.ctx, stdout)
                return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Tool definition failed validation:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(u"Tool definition failed initialization:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(
                u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
                try_again_msg,
                exc if not args.debug else "",
                exc_info=args.debug)
            return 1

        if isinstance(tool, int):
            return tool
        # If on MacOS platform, TMPDIR must be set to be under one of the
        # shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            default_mac_path = "/private/tmp/docker_tmp"
            if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmp_outdir_prefix = default_mac_path

        for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
            if getattr(runtimeContext, dirprefix) and getattr(
                    runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX:
                sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \
                        else ""
                setattr(
                    runtimeContext, dirprefix,
                    os.path.abspath(getattr(runtimeContext, dirprefix)) + sl)
                if not os.path.exists(
                        os.path.dirname(getattr(runtimeContext, dirprefix))):
                    try:
                        os.makedirs(
                            os.path.dirname(getattr(runtimeContext,
                                                    dirprefix)))
                    except Exception as e:
                        _logger.error("Failed to create directory: %s", e)
                        return 1

        if args.cachedir:
            if args.move_outputs == "move":
                runtimeContext.move_outputs = "copy"
            runtimeContext.tmp_outdir_prefix = args.cachedir

        runtimeContext.secret_store = getdefault(runtimeContext.secret_store,
                                                 SecretStore())
        try:
            initialized_job_order_object, input_for_prov = init_job_order(
                job_order_object,
                args,
                tool,
                jobloader,
                stdout,
                print_input_deps=args.print_input_deps,
                provArgs=runtimeContext.research_obj,
                relative_deps=args.relative_deps,
                input_basedir=input_basedir,
                secret_store=runtimeContext.secret_store)
        except SystemExit as err:
            return err.code

        if not executor:
            if args.parallel:
                executor = MultithreadedJobExecutor()
            else:
                executor = SingleJobExecutor()
        assert executor is not None

        try:
            runtimeContext.basedir = input_basedir
            del args.workflow
            del args.job_order

            conf_file = getattr(args,
                                "beta_dependency_resolvers_configuration",
                                None)  # Text
            use_conda_dependencies = getattr(args, "beta_conda_dependencies",
                                             None)  # Text

            job_script_provider = None  # type: Optional[DependenciesConfiguration]
            if conf_file or use_conda_dependencies:
                runtimeContext.job_script_provider = DependenciesConfiguration(
                    args)

            runtimeContext.find_default_container = \
                functools.partial(find_default_container, args)
            runtimeContext.make_fs_access = getdefault(
                runtimeContext.make_fs_access, StdFsAccess)
            (out, status) = executor(tool,
                                     initialized_job_order_object,
                                     runtimeContext,
                                     logger=_logger)

            if out is not None:

                def loc_to_path(obj):
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in obj:
                            del obj[field]
                    if obj["location"].startswith("file://"):
                        obj["path"] = uri_file_path(obj["location"])

                visit_class(out, ("File", "Directory"), loc_to_path)

                # Unsetting the Generation from final output object
                visit_class(out, ("File", ),
                            MutationManager().unset_generation)

                if isinstance(out, string_types):
                    stdout.write(out)
                else:
                    stdout.write(
                        json_dumps(
                            out,
                            indent=4,  # type: ignore
                            ensure_ascii=False))
                stdout.write("\n")
                if hasattr(stdout, "flush"):
                    stdout.flush()  # type: ignore

            if status != "success":
                _logger.warning(u"Final process status is %s", status)
                return 1
            _logger.info(u"Final process status is %s", status)
            return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Input object failed validation:\n%s",
                          exc,
                          exc_info=args.debug)
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(u"Workflow or tool uses unsupported feature:\n%s",
                          exc,
                          exc_info=args.debug)
            return 33
        except WorkflowException as exc:
            _logger.error(u"Workflow error%s:\n%s",
                          try_again_msg,
                          strip_dup_lineno(six.text_type(exc)),
                          exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(u"Unhandled error%s:\n  %s",
                          try_again_msg,
                          exc,
                          exc_info=args.debug)
            return 1

    finally:
        if args and runtimeContext and runtimeContext.research_obj \
                and args.rm_tmpdir and workflowobj:
            # Add all related CWL files to the research object (RO)
            prov_dependencies = printdeps(workflowobj, document_loader, stdout,
                                          args.relative_deps, uri,
                                          runtimeContext.research_obj)
            prov_dep = prov_dependencies[1]
            assert prov_dep
            runtimeContext.research_obj.generate_snapshot(prov_dep)
            # For input file dependencies
            if input_for_prov:
                runtimeContext.research_obj.generate_snapshot(input_for_prov)
            # NOTE: keep these commented-out lines to evaluate tests later
            #if job_order_object:
            #runtimeContext.research_obj.generate_snapshot(job_order_object)

            runtimeContext.research_obj.close(args.provenance)

        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
Example #51
0
from sylvia import *
import pkg_resources
import sys

_debug = False

# get arguments
if len(sys.argv) < 2:
    print("Usage: python rhymeColors.py <input_file>")
    sys.exit(2)

inFile = sys.argv[1]

# instantiate phonetic dictionary class from sylvia
pd = PhoneticDictionary(
    binFile=pkg_resources.resource_stream("sylvia", "cmudict.sylviabin"))

colors = ['red', 'green', 'yellow', 'blue', 'cyan', 'white',
          'grey']  # define list of colors
omit = ["the", "and", "as", "of"]  # filler words to omit
output = ""  # initialized output text
word_list = []  # list of words from input w/o filler words
inp = []  # list of words from input w/ filler words
r_matrix = []  # list of lists of rhymes


# the function that does it all
def get_dem_rhymes(word_list, file):

    # get rhymes for each word
    for word in word_list:
Example #52
0
import logging.config
import os

import pkg_resources
import yaml
from enum import Enum
from functools import wraps
from reasoner_pydantic import Query as PDQuery, AsyncQuery as PDAsyncQuery, Response as PDResponse
from pydantic import BaseModel
from src.util import create_log_entry
from fastapi import Body, FastAPI, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.openapi.utils import get_openapi
from fastapi.middleware.cors import CORSMiddleware
from src.service_aggregator import entry

# set the app version
APP_VERSION = '2.0.23'

# Set up default logger.
with pkg_resources.resource_stream('src', 'logging.yml') as f:
    config = yaml.safe_load(f.read())

# declare the log directory
log_dir = './logs'

# make the directory if it does not exist
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# create a configuration for the log file
config['handlers']['file']['filename'] = os.path.join(log_dir, 'aragorn.log')

# load the log config
logging.config.dictConfig(config)
Example #53
0
 def get_schematrons(cls):
     with resource_stream(__name__,
                          "xml/gemini2/Gemini2_R1r3.sch") as schema:
         return [cls.schematron(schema)]
Example #54
0
                                   endpoint_override_path)

try:
    # Modification for compilation by py2exe:
    if _is_from_exe():
        # Load endpoints.json from the .exe directory, i.e. C:\Program Files\Amazon\cfn-bootstrap
        log.debug('Loading py2exe endpoints.json file')
        with open(
                os.path.join(os.path.dirname(sys.executable),
                             'endpoints.json'), 'r') as f:
            _endpoint_data = json.load(f)

    # end py2exe
    else:
        _endpoint_data = json.load(
            pkg_resources.resource_stream("cfnbootstrap.resources.documents",
                                          "endpoints.json"))

except ValueError:
    log.exception("Failed to load endpoints.json")
    raise

try:
    # Apply endpoint_override, if present
    if os.path.isfile(endpoint_override_path):
        log.debug('Loading existing endpoints override file %s' %
                  (endpoint_override_path, ))
        with open(endpoint_override_path, 'r') as f:
            _endpoint_data = json.load(f)
except ValueError:
    log.exception("Failed to load endpoints override file: %s" %
                  (endpoint_override_path, ))
Example #55
0
 def get_schematrons(cls):
     with resource_stream(
             __name__,
             "xml/medin/ISOTS19139A1Constraints_v1.4.sch") as schema:
         return [cls.schematron(schema)]
Example #56
0
def get_cli_spec(device, version):
    # pylint: disable-msg=E1101
    return pkg_resources.resource_stream(
        __name__, '%s-%s.xml' % device_type_alias(device, version))
Example #57
0
def get_test_image():
    return pkg_resources.resource_stream('sw.allotmentclub.browser.tests',
                                         'assyrian.gif').read()
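get_test_image above returns the raw bytes of a packaged GIF; a minimal hedged check a test might perform:

# Hypothetical assertion; only verifies the GIF87a/GIF89a magic bytes.
image_bytes = get_test_image()
assert image_bytes[:3] == b'GIF'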
Example #58
0
 def from_resource(cls, resource_name, output_dir=None, root_dir=None, requirement=REQ, defaults=None, printer=logger):
     conf = pkg_resources.resource_stream(requirement, resource_name)
     merger = cls(output_dir, root_dir, defaults=defaults, printer=printer)
     merger.readfp(conf)
     return merger
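A hedged sketch of calling the from_resource classmethod above; the class name ConfigMerger, the resource name, and the directories are all assumed for illustration:

# Hypothetical invocation; argument values are placeholders only.
merger = ConfigMerger.from_resource('defaults.cfg',
                                    output_dir='/tmp/out',
                                    root_dir='/tmp/root')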
Example #59
0
    def activate(self):
        InfiniteGlass.DEBUG("ghost", "SHADOW ACTIVATE %s\n" % (self, ))
        sys.stderr.flush()
        if self.properties.get("IG_GHOSTS_DISABLED", 0):
            return

        for name, value in self.properties.items():
            InfiniteGlass.DEBUG("ghost.properties",
                                "%s=%s\n" % (name, str(value)[:100]))
            sys.stderr.flush()

        self.window = self.manager.display.root.create_window(
            map=False, **self.properties.get("__attributes__", {}))
        self.window["IG_GHOST"] = "IG_GHOST"

        with pkg_resources.resource_stream("glass_ghosts", "ghost.svg") as f:
            ghost_image = f.read()

        for name, value in self.properties.items():
            pattern, value = self.format_pair(name, value)
            if pattern in ghost_image:
                ghost_image = ghost_image.replace(pattern, value)

        pattern, value = self.format_pair("key", self.key())
        if pattern in ghost_image:
            ghost_image = ghost_image.replace(pattern, value)

        if "SM_CLIENT_ID" in self.properties:
            if self.properties["SM_CLIENT_ID"] in self.manager.clients:
                for name, value in self.manager.clients[
                        self.properties["SM_CLIENT_ID"]].properties.items():
                    pattern, value = self.format_pair(name, value[1], b" ")
                    if pattern in ghost_image:
                        ghost_image = ghost_image.replace(pattern, value)
        else:
            pattern, value = self.format_pair("SM_CLIENT_ID", "No state saved")
            if pattern in ghost_image:
                ghost_image = ghost_image.replace(pattern, value)
            if "WM_COMMAND" in self.properties:
                pattern, value = self.format_pair(
                    "RestartCommand", self.properties["WM_COMMAND"])
                if pattern in ghost_image:
                    ghost_image = ghost_image.replace(pattern, value)

        self.window["IG_CONTENT"] = ("IG_SVG", ghost_image)
        self.window["WM_PROTOCOLS"] = ["WM_DELETE_WINDOW"]
        self.apply(self.window, action_type="ghost_set")

        @self.window.on(mask="StructureNotifyMask")
        def DestroyNotify(win, event):
            InfiniteGlass.DEBUG("ghost", "GHOST DESTROY %s\n" % (self, ))
            sys.stderr.flush()
            self.destroy()

        self.DestroyNotify = DestroyNotify

        @self.window.on(mask="StructureNotifyMask", client_type="IG_CLOSE")
        def ClientMessage(win, event):
            InfiniteGlass.DEBUG("ghost", "GHOST CLOSE %s\n" % (self, ))
            sys.stderr.flush()
            win.destroy()

        self.CloseMessage = ClientMessage

        @self.window.on(mask="StructureNotifyMask", client_type="IG_DELETE")
        def ClientMessage(win, event):
            InfiniteGlass.DEBUG("ghost", "GHOST DELETE %s\n" % (self, ))
            sys.stderr.flush()
            win.destroy()

        self.DeleteMessage = ClientMessage

        @self.window.on(mask="NoEventMask", client_type="WM_PROTOCOLS")
        def ClientMessage(win, event):
            InfiniteGlass.DEBUG("ghost",
                                "GHOST WM_DELETE_WINDOW %s\n" % (self, ))
            sys.stderr.flush()
            if event.parse("ATOM")[0] == "WM_DELETE_WINDOW":
                win.destroy()
            else:
                InfiniteGlass.DEBUG(
                    "ghost",
                    "%s: Unknown WM_PROTOCOLS message: %s\n" % (self, event))
                sys.stderr.flush()

        self.WMDelete = ClientMessage

        @self.window.on()
        def PropertyNotify(win, event):
            name = self.manager.display.get_atom_name(event.atom)
            if name not in self.manager.config["ghost_update"]: return
            try:
                self.properties.update(
                    glass_ghosts.helpers.expand_property(win, name))
                InfiniteGlass.DEBUG(
                    "ghost.update.property", "%s.%s=<%s>%s from %s\n" %
                    (self, name, type(
                        self.properties[name]), self.properties[name], win))
                sys.stderr.flush()
            except Exception:
                pass
            else:
                self.update_key()

        self.PropertyNotify = PropertyNotify

        @self.window.on()
        def ButtonPress(win, event):
            self.restart()

        self.ButtonPress = ButtonPress

        @self.window.on(mask="StructureNotifyMask", client_type="IG_RESTART")
        def ClientMessage(win, event):
            self.restart()

        self.RestartMessage = ClientMessage

        @self.window.on()
        def Expose(win, event):
            self.redraw()

        self.Expose = Expose

        self.window.map()
        self.redraw()
Example #60
0
from pkg_resources import resource_stream
import pandas as pd

print(__name__)
stream = resource_stream(__name__, "data/df_players.pkl")
df_players = pd.read_pickle(stream, compression=None)
df_players = df_players.dropna(subset=["goal_difference"])

stream = resource_stream(__name__, "data/df_matches.pkl")
df_matches = pd.read_pickle(stream, compression=None)