Example 1
def main():
    args = parse_args()
    configure_logging(args.log_config)
    config = load_config(args.config)

    stations_pf = load_stations(config)
    data_gridded, data_gridded_lt, timestamps = load_forecast(
        args.wrfout, args.wrfout_long_term, args.components,
        config.get('spinup', 0))

    for location in stations_pf:
        try:
            pf = timeseries_for_location(location, args, data_gridded,
                                         data_gridded_lt)
        except TargetOutsideGridError:
            LOG.info(
                'Location of station %s outside wrfout grid, ignoring the station.',
                location['ref'])
            continue

        filename = templated_filename(config,
                                      analysis_date=args.analysis,
                                      ref=location['ref'],
                                      create_dirs=True)

        metadata = OrderedDict([
            ('ref', location['ref']), ('name', location['name']),
            ('analysis_date', args.analysis.strftime('%Y-%m-%dT%H:%M:%S')),
            ('longitude', '{:.4f}'.format(location['lon'])),
            ('latitude', '{:.4f}'.format(location['lat']))
        ])

        save_timeseries(timestamps, pf, filename, metadata)
Example 2
def load_config(options):
    """Load data from the specified configuration file."""

    listify_string = lambda x: [item.strip() for item in x.split(",")]

    config = utilities.load_config(options.config)

    mykey = utilities.safe_load(config, "pgpprocessor", "keyid", 0)
    protocols = listify_string(
        utilities.safe_load(config, "connectors", "protocols"))
    connectors = {}
    force_sender = utilities.safe_load(config, "connectors", "force_sender")

    if protocols == ['']:
        raise RuntimeError("No protocols detected.  Have you run 'make'?")

    # loop through the protocols, finding connectors each protocol uses
    # load the settings for each connector.
    for protocol in protocols:
        protocol_connectors = listify_string(
            utilities.safe_load(config, protocol, "connectors"))

        if not protocol_connectors:
            continue

        for connector in protocol_connectors:
            connectors[connector] = dict(
                utilities.safe_load(config, connector, None, {}))

    return mykey, protocols, connectors, force_sender
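
For orientation, here is a hedged sketch of the configuration layout this parser appears to expect. The section and option names come from the safe_load calls above (and from connector settings used elsewhere in these examples); the concrete values and the choice of "https" as the protocol are illustrative assumptions.

# Hypothetical configuration contents, shown as a Python string for reference only.
EXAMPLE_CFG = """
[pgpprocessor]
keyid = <your PGP key id>

[connectors]
protocols = https
force_sender = https-sender

[https]
connectors = https-listener, https-sender

[https-listener]
socket_port = 8080

[https-sender]
proxy_host = localhost
proxy_port = 8118
"""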
Example 3
    def setUp(self):
        """Do a good bit of setup to make this a nicer test-class.

        Successful tests will call ``Santiago.outgoing_request``, so that's
        overridden to record that the method is called.

        """
        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')
        self.keyid = utilities.load_config().get("pgpprocessor", "keyid")

        self.santiago = santiago.Santiago(
            hosting = {self.keyid: {santiago.Santiago.SERVICE_NAME: [1] }},
            consuming = {self.keyid: {santiago.Santiago.SERVICE_NAME: [1] }},
            me = self.keyid,
            gpg = self.gpg)

        self.santiago.requested = False
        self.santiago.outgoing_request = (lambda *args, **kwargs:
                                              self.record_success())

        self.from_ = self.keyid
        self.to = self.keyid
        self.host = self.keyid
        self.client = self.keyid
        self.service = santiago.Santiago.SERVICE_NAME
        self.reply_to = [1]
        self.request_version = 1
        self.reply_versions = [1]
Example 4
    def __init__(self, client, loop):
        config = utilities.load_config()
        config["client"] = client
        self.client = client

        self.db = database.Discord_Database()

        self.commands = {}
        for i in Bot.INSTRUCTIONS:
            try:
                cmd = i(config)
                for c in cmd.commands:
                    self.commands[c] = cmd
            except AssertionError:
                utilities.log_message(f"{i} disabled.")

        self.patterns = []
        for p in Bot.PATTERNS:
            try:
                self.patterns.append(p(config))
            except AssertionError:
                utilities.log_message(f"{p} disabled.")

        self.reaction_handlers = []
        for h in self.patterns + list(self.commands.values()):
            if h.monitors_reactions:
                self.reaction_handlers.append(h)

        self.token = config["token"]

        loop.run_until_complete(database.init_db(config["db_file"]))
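
Bot.__init__ relies on each entry in Bot.INSTRUCTIONS and Bot.PATTERNS raising AssertionError from its constructor when it cannot run with the supplied config; the bot then logs it as disabled and moves on. Below is a minimal sketch of a hypothetical command class following that contract (the Ping name, the ping_enabled key, and the command list are assumptions, not part of the original code).

class Ping:
    # Inspected by Bot.__init__ when collecting reaction handlers.
    monitors_reactions = False

    def __init__(self, config):
        # A failed assertion makes Bot.__init__ skip this command and
        # log "<class> disabled." instead of aborting startup.
        assert config.get("ping_enabled"), "ping command not configured"
        # Bot maps each name in self.commands back to this handler object.
        self.commands = ["ping"]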
Example 5
    def setUp(self):
        """Create an encryptable request."""
        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')
        self.keyid = utilities.load_config().get("pgpprocessor", "keyid")

        self.santiago = santiago.Santiago(
            me = self.keyid,
            consuming = { self.keyid: { santiago.Santiago.SERVICE_NAME: ( "https://1", )}},
            gpg = self.gpg)

        self.request_sender = OutgoingRequest.TestRequestSender()
        self.santiago.senders = { "https": self.request_sender }

        self.host = self.keyid
        self.client = self.keyid
        self.service = santiago.Santiago.SERVICE_NAME
        self.reply_to = [ "https://1" ]
        self.locations = [1]
        self.request_version = 1
        self.reply_versions = [1]

        self.request = {
            "host": self.host, "client": self.client,
            "service": self.service,
            "reply_to": self.reply_to, "locations": self.locations,
            "request_version": self.request_version,
            "reply_versions": self.reply_versions }
Example 6
    def create_santiago(self):
        gpg_to_use = gnupg.GPG(gnupghome='../data/test_gpg_home')
        # get my key, if possible
        try:
            mykey = utilities.load_config("../data/test.cfg").get(
                "pgpprocessor", "keyid")
        except configparser.NoSectionError:
            mykey = 0

        # set up monitors, listeners, and senders
        cert = "../data/freedombuddy.crt"
        connector = "https"
        service = "freedombuddy"
        location = "https://localhost:"
        serving_port = 8080

        listeners = { connector: { "socket_port": serving_port,
                                 "ssl_certificate": cert,
                                 "ssl_private_key": cert
                                  }, }
        senders = { connector: { "proxy_host": "localhost",
                               "proxy_port": 8118} }
        monitors = { connector: {} }

        # services to host and consume
        hosting = { mykey: { service: [location + str(serving_port)] } }
        consuming = { mykey: { service: [location + str(serving_port)] } }

        # go!
        return santiago.Santiago(listeners, senders,
                                 hosting, consuming,
                                 me=mykey, monitors=monitors,
                                 gpg = gpg_to_use)
Example 7
def load_config(options):
    """Load data from the specified configuration file."""

    config = utilities.load_config(options.config)

    mykey = safe_load(config, "pgpprocessor", "keyid", 0)
    lang = safe_load(config, "general", "locale", "en")
    protocols = [safe_load(config, "connectors", "protocols", {})]
    connectors = {}

    if protocols == [{}]:
        raise RuntimeError("No protocols detected.  Have you run 'make'?")

    # loop through the protocols, finding connectors each protocol uses
    # load the settings for each connector.
    for protocol in protocols:
        protocol_connectors = safe_load(config, protocol, "connectors",
            [ protocol + "-listener", protocol + "-sender",
              protocol + "-monitor" ])

        # when we do load data from the file, it's a comma-delimited string
        if hasattr(protocol_connectors, "split"):
            protocol_connectors = protocol_connectors.split(", ")

        for connector in protocol_connectors:
            connectors[connector] = dict(safe_load(config, connector, None, {}))

    return mykey, lang, protocols, connectors
Example 8
    def __init__(self, port, host, request_handler, parameters):

        # Register configuration settings, request handler and logger
        self.config_dict = utilities.load_config(CONFIG_FILE)

        self.logger = logging.getLogger()
        utilities.init_logger(self.logger, self.config_dict)

        check_config(self.config_dict, self.logger)

        self.request_handler = request_handler
        self.parameters = parameters

        self.servSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.servSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.servSock.bind((host, port))
        self.servSock.listen(self.config_dict['listen_connections'])
        self.servSock.setblocking(0)

        if self.config_dict['tcp_nagle']:
            self.servSock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        # Initializing client dicts
        self.connections = {}
        self.responses = {}

        self.epoll = select.epoll()

        # Register the listening socket for future read events (edge-triggered)
        self.epoll.register(self.servSock.fileno(), select.EPOLLIN | select.EPOLLET)

        self.logger.info('[%s:%d] started' % (host, port))
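
The constructor above only registers the listening socket with epoll; the following is a hedged sketch of the edge-triggered accept/read/write loop such a server typically runs. The run method name and the handler wiring are assumptions; only self.epoll, self.servSock, self.connections, self.responses, self.request_handler and self.parameters come from the snippet, and a production EPOLLET loop would accept and recv repeatedly until EWOULDBLOCK.

    def run(self):
        # Hypothetical event loop paired with the registration done in __init__.
        while True:
            for fileno, event in self.epoll.poll(1):
                if fileno == self.servSock.fileno():
                    # New client: accept, go non-blocking, watch for reads.
                    conn, addr = self.servSock.accept()
                    conn.setblocking(0)
                    self.epoll.register(conn.fileno(), select.EPOLLIN | select.EPOLLET)
                    self.connections[conn.fileno()] = conn
                elif event & select.EPOLLIN:
                    # Readable client: pass (request, host, port) to the handler.
                    conn = self.connections[fileno]
                    request = conn.recv(4096)
                    host, port = conn.getpeername()
                    self.responses[fileno] = self.request_handler(
                        (request, host, port), self.parameters)
                    self.epoll.modify(fileno, select.EPOLLOUT)
                elif event & select.EPOLLOUT:
                    # Writable client: flush the response and clean up.
                    self.connections[fileno].send(self.responses.pop(fileno, b''))
                    self.epoll.unregister(fileno)
                    self.connections.pop(fileno).close()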
Example 9
    def setUp(self):

        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')
        config = utilities.load_config()
        self.key_id = utilities.safe_load(config, "pgpprocessor", "keyid", 0)
        self.recipient = "*****@*****.**"
        self.message = {'lol': 'cats'}
Example 10
    def setUp(self):
        self.keyid = utilities.load_config().get("pgpprocessor", "keyid")

        self.santiago = santiago.Santiago(me = self.keyid)

        self.host = 1
        self.service = 2
        self.location = 3
Example 11
    def setUp(self):
        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')
        self.keyid = utilities.load_config().get("pgpprocessor", "keyid")

        self.santiago = santiago.Santiago(me = self.keyid, gpg=self.gpg)

        self.host = 1
        self.service = 2
        self.location = 3
Example 12
    def setUp(self):

        self.iterations = 3
        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')
        config = utilities.load_config()
        self.key_id = utilities.safe_load(config, "pgpprocessor", "keyid", 0)
        self.messages = utilities.multi_sign(
            gpg = self.gpg,
            iterations = self.iterations,
            keyid = self.key_id)
Example 13
    def setUp(self):
        """Create a request."""

        self.gpg = gnupg.GPG(gnupghome='../data/test_gpg_home')

        self.keyid = utilities.load_config().get("pgpprocessor", "keyid")
        self.santiago = santiago.Santiago(me = self.keyid, gpg = self.gpg)

        self.request = { "host": self.keyid, "client": self.keyid,
                         "service": santiago.Santiago.SERVICE_NAME, "reply_to": [1],
                         "locations": [1],
                         "request_version": 1, "reply_versions": [1], }

        self.ALL_KEYS = set(("host", "client", "service",
                             "locations", "reply_to",
                             "request_version", "reply_versions"))
        self.REQUIRED_KEYS = set(("client", "host", "service",
                                  "request_version", "reply_versions"))
        self.OPTIONAL_KEYS = set(("locations", "reply_to"))
        self.LIST_KEYS = set(("reply_to", "locations", "reply_versions"))
Example 14
def spectra_paths(srcfile):
    """
    Return the configurations generated from srcfile, each annotated with the
    path to the pickle file produced for it by run_config().

    :param srcfile: path to the pybatchdict configuration JSON.
    :return: (dict, BatchDict): Dictionary containing each configuration, keyed
        by the hyphenated configuration name, where each configuration also
        has a key 'path' that denotes its file path. The BatchDict is from the
        original JSON configuration.
    """

    config = utilities.load_config(srcfile)
    dirname = os.path.dirname(srcfile)
    configurations = dict(zip(config.hyphenate_changes(), config.combos))
    for cname in configurations:
        configurations[cname]['path'] = os.path.join(
            dirname, 'spectra-%s.pickle' % cname
        )

    return configurations, config
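
A brief usage sketch follows; the srcfile path is an illustrative assumption, while spectra_paths and the 'path' key come from the function above.

configurations, batch = spectra_paths('experiments/batch.json')  # hypothetical path
pickle_files = [cfg['path'] for cfg in configurations.values()]  # .../spectra-<cname>.pickle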
Example 15
    def _prepare_collaborator(self):
        config = utilities.load_config()
        lastfm_config = config['lastfmConfig']
        spotify_config = config['spotifyConfig']
        # we shouldn't need to be authenticated. We just need their library, nothing else
        self.lastfmNetwork = pylast.LastFMNetwork(
            api_key=lastfm_config['lastfmAPIKey'],
            api_secret=lastfm_config['lastfmAPISecret'])
        self.lastfmUser = self.lastfmNetwork.get_user(self.lastfmUsername)
        self.lastfmUserLibrary = self.lastfmUser.get_library()

        if self.spotifyUsername is not None and self.spotify is None:
            try:
                # todo: work out if it's possible to automate this? I hate the spotify API authentication
                self.spotifyToken = spotipy.util.prompt_for_user_token(
                    self.spotifyUsername, self.spotifyScopes,
                    spotify_config['spotifyClientId'],
                    spotify_config['spotifyClientSecret'],
                    spotify_config['redirectURI'])
                self.spotify = spotipy.Spotify(auth=self.spotifyToken)
                self.spotifyUser = self.spotify.current_user()
            except SpotifyOauthError:
                print("Failed to set up spotify user")
Example 16
def test_find_equivalent_variant_whole_seq(fetch_seq_mock_data):
    with patch.object(bioutils.seqfetcher,
                      'fetch_seq',
                      side_effect=lambda ac, s, e: fetch_seq_mock_data[
                          (str(ac), str(s), str(e))]):
        gene_config_path = os.path.join(pwd, 'test_files',
                                        'gene_config_test.txt')

        cfg = load_config(gene_config_path)
        regions = extract_gene_regions_dict(cfg, 'start_hg38_legacy_variants',
                                            'end_hg38_legacy_variants').keys()
        seq_wrapper = seq_utils.SeqRepoWrapper(regions_preload=regions)

        # empty case
        assert [] == find_equivalent_variants_whole_seq({}, seq_wrapper)

        # a bunch of variants. If they appear in the same set, they are considered equivalent
        example_variants = [
            frozenset({'chr13:g.32355030:A>AA'}),
            frozenset({'chr13:g.32339774:GAT>G', 'chr13:g.32339776:TAT>T'}),
            frozenset({'chr17:g.43090921:G>GCA', 'chr17:g.43090921:GCA>GCACA'})
        ]

        # construct variant dict (flattening example_variants!)
        variant_dict = {
            v: VCFVariant(int(v.split(':')[0].lstrip('chr')),
                          int(v.split(':')[1].lstrip('g.')),
                          v.split(':')[2].split('>')[0],
                          v.split(':')[2].split('>')[1])
            for eq_variants in example_variants for v in eq_variants
        }

        whole_seq_provider = seq_utils.WholeSeqSeqProvider(seq_wrapper)

        assert frozenset(example_variants) == frozenset(
            find_equivalent_variants_whole_seq(variant_dict,
                                               whole_seq_provider))
Example 17
import os
import sys
import datetime as dt
import pandas as pd
import numpy as np
from pre_process_library import batting as bats
import utilities as util
CONFIG = util.load_config()


def add_batting_stats(data):
    """Aggregate batting statistics per game and batter."""

    game_batter_stats = data.groupby(
        by=['gameId', 'batterId'],
        as_index=False
    ).agg({
        # aggregation spec truncated in the original snippet


def process_date_games(path):
    """Read in the four standardized tables for a date of games."""

    # Read in 4 standardized tables
    df_batting = pd.read_parquet(path+"batting.parquet")
    df_pitching = pd.read_parquet(path+"pitching.parquet")
    df_boxscore = pd.read_parquet(path+"boxscore.parquet")
    df_innings = pd.read_parquet(path+"innings.parquet")
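
The aggregation dict in add_batting_stats above is truncated in the source; the following is a hedged sketch of how such a per-game, per-batter aggregation might be completed. The stat column names are hypothetical and not taken from the original data.

def add_batting_stats_sketch(data):
    """Hypothetical completion: sum each batter's counting stats per game."""
    stat_cols = ['ab', 'h', 'hr', 'rbi']  # assumed column names, not from the source
    return data.groupby(
        by=['gameId', 'batterId'],
        as_index=False
    ).agg({col: 'sum' for col in stat_cols})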
Example 18
    if not (sport and fport and log and host):
        logger.error('%s is not correctly configured' % CONFIG_FILE)
        sys.exit(-1)


def proxy_request_handler(epoll_context, parameters):
    startTime = time.time()
    request, host, port = epoll_context


def server_request_handler(epoll_context, parameters):
    startTime = time.time()
    request, host, port = epoll_context


def receive_signal(signum, stack):
    print 'Received:', signum


if __name__ == '__main__':
    opts = argparse.ArgumentParser(description=__description__)
    parse_config(opts)
    args = opts.parse_args()

    config_dict = utilities.load_config(args.config)
    utilities.init_logger(logger, config_dict)
    check_config(config_dict, logger)
    signal.signal(signal.SIGUSR1, receive_signal)

    #server = epoll.Server(int(args.port), args.host, request_handler, [])
    #thisserver.run()
Example 19
    def setUp(self):
        self.columns = ['Source', 'Gene_symbol_ENIGMA', 'Genomic_Coordinate',
                        'Chr', 'Pos', 'Ref', 'Alt', 'Reference_sequence_ENIGMA',
                        'HGVS_cDNA_ENIGMA', 'BIC_Nomenclature_ENIGMA',
                        'Abbrev_AA_change_ENIGMA', 'URL_ENIGMA',
                        'Condition_ID_type_ENIGMA', 'Condition_ID_value_ENIGMA',
                        'Condition_category_ENIGMA',
                        'Clinical_significance_ENIGMA',
                        'Date_last_evaluated_ENIGMA', 'Assertion_method_ENIGMA',
                        'Assertion_method_citation_ENIGMA',
                        'Clinical_significance_citations_ENIGMA',
                        'Comment_on_clinical_significance_ENIGMA',
                        'Collection_method_ENIGMA', 'Allele_origin_ENIGMA',
                        'ClinVarAccession_ENIGMA', 'HGVS_protein_ENIGMA',
                        'BX_ID_ENIGMA', 'Clinical_Significance_ClinVar',
                        'Date_Last_Updated_ClinVar', 'BX_ID_ClinVar',
                        'HGVS_ClinVar', 'Submitter_ClinVar', 'Protein_ClinVar',
                        'SCV_ClinVar', 'Allele_Origin_ClinVar',
                        'Method_ClinVar', 'Description_ClinVar',
                        'Summary_Evidence_ClinVar', 'Review_Status_ClinVar',
                        'Condition_Type_ClinVar', 'Condition_Value_ClinVar',
                        'Condition_DB_ID_ClinVar', 'Individuals_LOVD',
                        'BX_ID_LOVD', 'Variant_effect_LOVD', 'Variant_frequency_LOVD',
                        'HGVS_cDNA_LOVD', 'HGVS_protein_LOVD', 'Genetic_origin_LOVD',
                        'RNA_LOVD', 'Submitters_LOVD', 'DBID_LOVD',
                        'Functional_analysis_technique_LOVD',
                        'Functional_analysis_result_LOVD', 'Created_date_LOVD',
                        'Edited_date_LOVD', 'Submission_ID_LOVD',
                        'BX_ID_ESP', 'Minor_allele_frequency_percent_ESP',
                        'EA_Allele_Frequency_ESP', 'AA_Allele_Frequency_ESP',
                        'Allele_Frequency_ESP', 'polyPhen2_result_ESP',
                        'EUR_Allele_frequency_1000_Genomes',
                        'AFR_Allele_frequency_1000_Genomes',
                        'AMR_Allele_frequency_1000_Genomes',
                        'EAS_Allele_frequency_1000_Genomes',
                        'BX_ID_1000_Genomes', 'Allele_frequency_1000_Genomes',
                        'SAS_Allele_frequency_1000_Genomes',
                        'Allele_frequency_ExAC', 'BX_ID_ExAC', 'BX_ID_BIC',
                        'Patient_nationality_BIC', 'Clinical_importance_BIC',
                        'Clinical_classification_BIC', 'BIC_Designation_BIC',
                        'Literature_citation_BIC',
                        'Number_of_family_member_carrying_mutation_BIC',
                        'Germline_or_Somatic_BIC', 'Ethnicity_BIC',
                        'Mutation_type_BIC', 'IARC_class_exLOVD',
                        'BIC_Nomenclature_exLOVD', 'Sum_family_LR_exLOVD',
                        'Combined_prior_probablility_exLOVD', 'BX_ID_exLOVD',
                        'HGVS_cDNA_exLOVD', 'Literature_source_exLOVD',
                        'Co_occurrence_LR_exLOVD',
                        'Posterior_probability_exLOVD',
                        'Missense_analysis_prior_probability_exLOVD',
                        'Segregation_LR_exLOVD', 'HGVS_protein_exLOVD',
                        "Allele_count_AFR_ExAC", "Allele_number_AFR_ExAC",
                        "Homozygous_count_AFR_ExAC", "Allele_count_AMR_ExAC",
                        "Allele_number_AMR_ExAC", "Homozygous_count_AMR_ExAC",
                        "Allele_count_EAS_ExAC", "Allele_number_EAS_ExAC",
                        "Homozygous_count_EAS_ExAC", "Allele_count_FIN_ExAC",
                        "Allele_number_FIN_ExAC", "Homozygous_count_FIN_ExAC",
                        "Allele_count_NFE_ExAC", "Allele_number_NFE_ExAC",
                        "Homozygous_count_NFE_ExAC", "Allele_count_OTH_ExAC",
                        "Allele_number_OTH_ExAC", "Homozygous_count_OTH_ExAC",
                        "Allele_count_SAS_ExAC", "Allele_number_SAS_ExAC",
                        "Homozygous_count_SAS_ExAC",
                        "Allele_frequency_AFR_ExAC",
                        "Allele_frequency_AMR_ExAC",
                        "Allele_frequency_EAS_ExAC",
                        "Allele_frequency_FIN_ExAC",
                        "Allele_frequency_NFE_ExAC",
                        "Allele_frequency_OTH_ExAC",
                        "Allele_frequency_SAS_ExAC",
                        "DateSignificanceLastEvaluated_ClinVar",
                        "SCV_Version_ClinVar",
                        "Synonyms_ClinVar",
                        "HGVS_Nucleotide_Findlay_BRCA1_Ring_Function_Scores",
                        "Log_RNA_Depletion_Findlay_BRCA1_Ring_Function_Scores",
                        "Functional_Enrichment_Score_Findlay_BRCA1_Ring_Function_Scores",
                        "BX_ID_Findlay_BRCA1_Ring_Function_Scores",
                        "HGVS_cDNA_GnomAD",
                        "HGVS_protein_GnomAD",
                        "Flags_GnomAD",
                        "Consequence_GnomAD",
                        "Variant_id_GnomAD",
                        "Allele_count_genome_AFR_GnomAD",
                        "Allele_count_hemi_genome_AFR_GnomAD",
                        "Allele_count_hom_genome_AFR_GnomAD",
                        "Allele_number_genome_AFR_GnomAD",
                        "Allele_frequency_genome_AFR_GnomAD",
                        "Allele_count_genome_AMR_GnomAD",
                        "Allele_count_hemi_genome_AMR_GnomAD",
                        "Allele_count_hom_genome_AMR_GnomAD",
                        "Allele_number_genome_AMR_GnomAD",
                        "Allele_frequency_genome_AMR_GnomAD",
                        "Allele_count_genome_ASJ_GnomAD",
                        "Allele_count_hemi_genome_ASJ_GnomAD",
                        "Allele_count_hom_genome_ASJ_GnomAD",
                        "Allele_number_genome_ASJ_GnomAD",
                        "Allele_frequency_genome_ASJ_GnomAD",
                        "Allele_count_genome_EAS_GnomAD",
                        "Allele_count_hemi_genome_EAS_GnomAD",
                        "Allele_count_hom_genome_EAS_GnomAD",
                        "Allele_number_genome_EAS_GnomAD",
                        "Allele_frequency_genome_EAS_GnomAD",
                        "Allele_count_genome_FIN_GnomAD",
                        "Allele_count_hemi_genome_FIN_GnomAD",
                        "Allele_count_hom_genome_FIN_GnomAD",
                        "Allele_number_genome_FIN_GnomAD",
                        "Allele_frequency_genome_FIN_GnomAD",
                        "Allele_count_genome_NFE_GnomAD",
                        "Allele_count_hemi_genome_NFE_GnomAD",
                        "Allele_count_hom_genome_NFE_GnomAD",
                        "Allele_number_genome_NFE_GnomAD",
                        "Allele_frequency_genome_NFE_GnomAD",
                        "Allele_count_genome_OTH_GnomAD",
                        "Allele_count_hemi_genome_OTH_GnomAD",
                        "Allele_count_hom_genome_OTH_GnomAD",
                        "Allele_number_genome_OTH_GnomAD",
                        "Allele_frequency_genome_OTH_GnomAD",
                        "Allele_count_genome_SAS_GnomAD",
                        "Allele_count_hemi_genome_SAS_GnomAD",
                        "Allele_count_hom_genome_SAS_GnomAD",
                        "Allele_number_genome_SAS_GnomAD",
                        "Allele_frequency_genome_SAS_GnomAD",
                        "Allele_count_genome_GnomAD",
                        "Allele_number_genome_GnomAD",
                        "Allele_frequency_genome_GnomAD",
                        "Allele_count_exome_AFR_GnomAD",
                        "Allele_count_exome_AFR_GnomAD",
                        "Allele_count_hom_exome_AFR_GnomAD",
                        "Allele_number_exome_AFR_GnomAD",
                        "Allele_frequency_exome_AFR_GnomAD",
                        "Allele_count_exome_AMR_GnomAD",
                        "Allele_count_hemi_exome_AMR_GnomAD",
                        "Allele_count_hom_exome_AMR_GnomAD",
                        "Allele_number_exome_AMR_GnomAD",
                        "Allele_frequency_exome_AMR_GnomAD",
                        "Allele_count_exome_ASJ_GnomAD",
                        "Allele_count_hemi_exome_ASJ_GnomAD",
                        "Allele_count_hom_exome_ASJ_GnomAD",
                        "Allele_number_exome_ASJ_GnomAD",
                        "Allele_frequency_exome_ASJ_GnomAD",
                        "Allele_count_exome_EAS_GnomAD",
                        "Allele_count_hemi_exome_EAS_GnomAD",
                        "Allele_count_hom_exome_EAS_GnomAD",
                        "Allele_number_exome_EAS_GnomAD",
                        "Allele_frequency_exome_EAS_GnomAD",
                        "Allele_count_exome_FIN_GnomAD",
                        "Allele_count_hemi_exome_FIN_GnomAD",
                        "Allele_count_hom_exome_FIN_GnomAD",
                        "Allele_number_exome_FIN_GnomAD",
                        "Allele_frequency_exome_FIN_GnomAD",
                        "Allele_count_exome_NFE_GnomAD",
                        "Allele_count_hemi_exome_NFE_GnomAD",
                        "Allele_count_hom_exome_NFE_GnomAD",
                        "Allele_number_exome_NFE_GnomAD",
                        "Allele_frequency_exome_NFE_GnomAD",
                        "Allele_count_exome_OTH_GnomAD",
                        "Allele_count_hemi_exome_OTH_GnomAD",
                        "Allele_count_hom_exome_OTH_GnomAD",
                        "Allele_number_exome_OTH_GnomAD",
                        "Allele_frequency_exome_OTH_GnomAD",
                        "Allele_count_exome_SAS_GnomAD",
                        "Allele_count_hemi_exome_SAS_GnomAD",
                        "Allele_count_hom_exome_SAS_GnomAD",
                        "Allele_number_exome_SAS_GnomAD",
                        "Allele_frequency_exome_SAS_GnomAD",
                        "Allele_number_exome_GnomAD",
                        "Allele_count_exome_GnomAD",
                        "Allele_frequency_exome_GnomAD",
                        "BX_ID_GnomAD"]

        self.sources = aggregate_reports.FIELD_DICT.keys() + ["ENIGMA"]
        self.vcf_test_file = VCF_TESTDATA_FILENAME
        self.tsv_test_file = TSV_TESTDATA_FILENAME

        pwd = os.path.dirname(os.path.realpath(__file__))

        gene_config_df = utilities.load_config(os.path.join(pwd, 'test_files', 'gene_config_test.txt'))
        self.genome_regions_symbol_dict = utilities.get_genome_regions_symbol_dict(gene_config_df, 'start_hg38_legacy_variants', 'end_hg38_legacy_variants')
Example 20
async def main():

    if getattr(sys, "frozen", False):  # Check if program is compiled to exe
        main_folder_path = os.path.dirname(sys.executable)
    else:
        main_folder_path = os.path.dirname(os.path.abspath(__file__))

    config = load_config(os.path.join(main_folder_path, "horizon_config.ini"))
    setup_logging(main_folder_path, level=config['logging_level'])

    print_timestamp(
        f"Horizon Trade Notifier {version} - https://discord.gg/Xu8pqDWmgE - https://github.com/JartanFTW"
    )
    logger.log(
        49,
        f"Horizon Trade Notifier {version} - https://discord.gg/Xu8pqDWmgE - https://github.com/JartanFTW"
    )

    update = await check_for_update(version)
    if update:
        print_timestamp("A new update is available!")

    tasks = []
    user = await User.create(config["cookie"])
    if config['completed']['enabled']:
        worker = await TradeWorker.create(
            main_folder_path,
            user,
            config['completed']['webhook'],
            config['completed']['update_interval'],
            config['completed']['theme_name'],
            trade_type="Completed",
            add_unvalued_to_value=config['add_unvalued_to_value'],
            testing=config['testing'],
            webhook_content=config['completed']['webhook_content'])
        tasks.append(asyncio.create_task(worker.check_trade_loop()))
    if config['inbound']['enabled']:
        worker = await TradeWorker.create(
            main_folder_path,
            user,
            config['inbound']['webhook'],
            config['inbound']['update_interval'],
            config['inbound']['theme_name'],
            trade_type="Inbound",
            add_unvalued_to_value=config['add_unvalued_to_value'],
            testing=config['testing'],
            double_check=config['double_check'],
            webhook_content=config['inbound']['webhook_content'])
        tasks.append(asyncio.create_task(worker.check_trade_loop()))
    if config['outbound']['enabled']:
        worker = await TradeWorker.create(
            main_folder_path,
            user,
            config['outbound']['webhook'],
            config['outbound']['update_interval'],
            config['outbound']['theme_name'],
            trade_type="Outbound",
            add_unvalued_to_value=config['add_unvalued_to_value'],
            testing=config['testing'],
            webhook_content=config['outbound']['webhook_content'])
        tasks.append(asyncio.create_task(worker.check_trade_loop()))

    if tasks:
        await asyncio.wait(tasks)
    else:
        print_timestamp(
            "Looks like you don't have any trade types enabled in the config! There is nothing for me to do :("
        )
    await user.client.aclose()
    return
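
For reference, a hedged reconstruction of the configuration shape main() reads. The keys are taken from the lookups above; every value is a placeholder assumption, not a project default.

EXAMPLE_CONFIG = {
    "logging_level": "INFO",
    "cookie": "<account cookie>",
    "add_unvalued_to_value": False,
    "testing": False,
    "double_check": True,
    "completed": {
        "enabled": True,
        "webhook": "https://discord.com/api/webhooks/...",
        "update_interval": 60,
        "theme_name": "default",
        "webhook_content": "",
    },
    "inbound": {},   # same keys as "completed"
    "outbound": {},  # same keys as "completed"
}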
Example 21
def main():
    configs = load_config("game")
    map_data = generateFullMap(configs["map"], configs["tile"])
Example 22
        plt.title('Simulator Precision-Coverage Curves auc = {0:02f}'.format(auc))
        plt.xlabel('Coverage')
        plt.ylabel('Precision')
        plt.xlim([0, 0.6])
        plt.ylim([0, 1.0])
        # plt.show()

    return auc, {'precisions': precisions, 'recalls': recalls, 'auc': auc, 'cfg': None}

# pr_data = glob2.glob(os.path.join(sys.argv[1],'*','*','outfile.npy')) + glob2.glob(os.path.join(sys.argv[1],'*','outfile.npy')) + glob2.glob(os.path.join(sys.argv[1],'outfile.npy'))
pr_data = glob2.glob(os.path.join(sys.argv[1],'*','*','all_full_results.npz')) + glob2.glob(os.path.join(sys.argv[1],'*','all_full_results.npz')) + glob2.glob(os.path.join(sys.argv[1],'all_full_results.npz'))

default_compare = True
if default_compare:
    default_config = utilities.load_config('/home/msundermeyer/ngc_ws/6dof-graspnet/contact_graspnet')
else:
    default_config = np.load(pr_data[0], allow_pickle=True).item()['cfg']

legends = []
all_diff_dicts = {}
cfgs = {}
aucs_01 = {}
name_dict = {}

gt_grasps = []
for p in range(100):
    y=np.load('/home/msundermeyer/datasets/visibility_filtered_gt_grasp/{}_filtered_gt_grasps.npz'.format(p), allow_pickle=True)
    gt_grasps.append(y['gt_grasp_scene_trafos'])

Example 23
from http.server import HTTPServer, BaseHTTPRequestHandler

import utilities
from request_parser import NestAwayRequestParser
from nest_manager import NestAwayManager

## Configs
CONFIGS = utilities.load_config()
REQUEST_PARSER = NestAwayRequestParser()
NEST_MANAGER = NestAwayManager()


class RequestHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        content_len = int(self.headers.get('content-length', 0))
        body = self.rfile.read(content_len)

        state = REQUEST_PARSER.parse(body)  # Safe(ish), valid json
        if (state):
            status_code, response_json = NEST_MANAGER.update_state(state)
            self.send_response(status_code, response_json)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
        else:
            self.send_response(
                '400',
                '{\"error\": \"Malformed request, didn\'t contain valid \'user\' and \'away_state\' properties\"}'
            )
            self.send_header("Content-Type", "application/json")
            self.end_headers()
Example 24
def main():
    global DISCARDED_REPORTS_WRITER

    parser = argparse.ArgumentParser()
    options(parser)

    args = parser.parse_args()

    gene_config_df = utilities.load_config(args.config)

    gene_regions_dict = utilities.extract_gene_regions_dict(gene_config_df, 'start_hg38_legacy_variants', 'end_hg38_legacy_variants')

    gene_regions_trees = seq_utils.build_interval_trees_by_chr(gene_regions_dict.keys(), lambda c,s,e: None)

    genome_regions_symbol_dict = utilities.get_genome_regions_symbol_dict(gene_config_df, 'start_hg38_legacy_variants', 'end_hg38_legacy_variants')

    seq_provider = seq_utils.SeqRepoWrapper(regions_preload=gene_regions_dict.keys())

    if args.verbose:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.CRITICAL

    log_file_path = args.artifacts_dir + "variant_merging.log"
    logging.basicConfig(filename=log_file_path, filemode="w", level=logging_level,
                        format=' %(asctime)s %(filename)-15s %(message)s')

    discarded_reports_file = open(args.artifacts_dir + "discarded_reports.tsv", "w")

    fieldnames = ['Report_id', 'Source', 'Reason', 'Variant']

    DISCARDED_REPORTS_WRITER = csv.DictWriter(discarded_reports_file, delimiter="\t", fieldnames=fieldnames)
    DISCARDED_REPORTS_WRITER.writeheader()

    # merge repeats within data sources before merging between data sources
    source_dict, columns, variants = preprocessing(args.input, args.output, seq_provider, gene_regions_trees)

    # merges repeats from different data sources, adds necessary columns and data
    print "\n------------merging different datasets------------------------------"
    for source_name, file in source_dict.iteritems():
        (columns, variants) = add_new_source(columns, variants, source_name,
                                             file, FIELD_DICT[source_name], genome_regions_symbol_dict)

    # standardizes genomic coordinates for variants
    print "\n------------standardizing genomic coordinates-------------"
    variants = variant_standardize(columns, seq_provider, gene_regions_trees, variants=variants)

    # compare dna sequence results of variants and merge if equivalent
    print "------------dna sequence comparison merge-------------------------------"
    variants = string_comparison_merge(variants, seq_provider)

    # write final output to file
    write_new_tsv(args.output + "merged.tsv", columns, variants)

    # copy enigma file to artifacts directory along with other ready files
    copy(args.input + ENIGMA_FILE, args.output)

    # write reports to reports file
    aggregate_reports.write_reports_tsv(args.output + "reports.tsv", columns, args.output, genome_regions_symbol_dict)

    discarded_reports_file.close()

    print "final number of variants: %d" % len(variants)
    print "Done"
Example 25
fn_words_dict = root + model + 'tc_words_dict.p'
fn_embedding_words_dict = root + model + 'embedding_word_dict.p'
do_submission = True


fn_train_out = 'l2_train_data.csv'
fn_valid_out = "l2_valid_data.csv"
fn_test_out = "l2_test_data.csv"


class Config:
    pass

cfg = Config()
load_config(cfg, model_fp)

# should be removed
cfg.level = 'word'
cfg.glove = 'False'
cfg.tokenize_mode = 'twitter'
cfg.do_preprocess = 'True'


tc = ToxicComments(cfg)
epochs = [fn.split('.ckpt')[0] for fn in os.listdir(logs) if fn.endswith('.meta')]
results = pd.read_csv(root + model + 'results.csv')

def _get_score():
    rocs = np.zeros(len(epochs))
    rocs_t = np.zeros(len(epochs))
Example 26
def plot(configfile, separate=False):
    """
    Plot the cospectra for each model variant.

    :param configfile: (str) path to configuration file.
    :param separate: (bool) whether or not to plot each variant in its own plot.
    """

    # Load the configurations.
    configdir = os.path.dirname(configfile)
    configs = utilities.load_config(configfile)
    cnames = configs.hyphenate_changes()

    # Cache models if they are used more than once.
    modelcache = {}
    axpanes = None
    ax = None

    # Compute and plot each spectrum.
    for r, (config, cname) in enumerate(itertools.izip(configs.combos, cnames)):
        print 'Plotting %s.' % cname

        # Set up the figure. Outside axes are for the legend.
        if separate or r == 0:
            axpanes = None
            pp.figure(figsize=(15, 15))

            ax = pp.subplot(111)
            ax.patch.set_visible(False)
            pp.axis('off')

        # Fetch the model. If it's already been created, retrieve it from the
        # cache.
        modelname = pbdict.getkeypath(config, '/model/name', 'nbd2')

        if modelname in modelcache:
            model = modelcache[modelname]
        else:
            model = models.parasitism.get_model(modelname)

        # Set up model parameters.
        sym_params = {
            models.parasitism.params[name]: value for name, value in
            pbdict.getkeypath(config, '/model/params').iteritems()
        }

        noise = np.array(pbdict.getkeypath(config, '/model/noise'))

        # Get the spectrum.
        freqs = np.linspace(0, 0.5, config.get('nfreqs', 100))

        spectrum = np.array([
            model.calculate_spectrum(sym_params, noise, v)
            for v in freqs
        ]).T

        # Plot the spectrum.
        plotargs = dict(
            varnames=None if r < len(cnames) - 1 else model.vars,
            plotfun=pp.scatter,
            s=2,
            alpha=2**-5,
            axpanes=axpanes
        )

        if 'color' in config:
            plotargs['color'] = config['color']
        elif separate:
            plotargs['color'] = 'k'
        else:
            plotargs['color'] = cm.rainbow(float(r) / (len(cnames) - 1))

        axpanes = models.plotting.plot_cospectra(freqs, spectrum, **plotargs)

        if separate:
            figpath = os.path.join(configdir, 'spectra-%s.png' % cname)
            print 'Writing %s.' % figpath
            pp.savefig(figpath)

    if not separate:
        # Make a legend for each model using its hyphenated configuration name.
        ax.legend(axpanes[0][0]['top'].get_lines(), cnames)
        # Save the plot.
        figpath = os.path.join(configdir, 'spectra.png')
        print 'Writing %s.' % figpath
        pp.savefig(figpath)
Example 27
def run_config(srcfile, forceseries=False, forcespectra=False):
    """
    Simulate the model specified in srcfile (a JSON config) and compute its
    cross-spectra. Save results in pickle files in the same directory as
    srcfile.

    :param srcfile: (str) path to JSON configuration file.
    :param forceseries: (bool) whether or not to force [re]calculation of
        time series.
    :param forcespectra: (bool) whether or not to force [re]calculation of
        spectral matrices.
    """

    # Load each configuration specified by the pybatchdict JSON config.
    configs = utilities.load_config(srcfile)
    seriescache = {}

    for config, cname in izip(configs.combos, configs.hyphenate_changes()):
        print 'Running %s.' % cname

        # Simulate time series. Cache time series to avoid re-simulating for
        # the same parameter values.
        sim = config['simulation']
        model = models.parasitism.get_model(sim['model'])
        sym_params = models.parasitism.sym_params(sim['params'])
        nvars = len(model.vars)

        # If we don't actually change the simulation, just use the same series
        # each time.
        serieshash = cname if 'simulation' in cname else 1
        seriescache.setdefault(serieshash, {})
        series = seriescache[serieshash]

        noise = np.array([
            [sim['noise']['Sh'], sim['noise']['Shh'], 0, 0],
            [sim['noise']['Shh'], sim['noise']['Sh'], 0, 0],
            [0, 0, sim['noise']['Sp'], sim['noise']['Spp']],
            [0, 0, sim['noise']['Spp'], sim['noise']['Sp']]
        ])

        nsteps = pbdict.getkeypath(config, '/simulation/timesteps')

        # If we haven't already simulated for these parameters, do so.
        if 'linear' not in series:
            # Output file for simulation.
            outfile = os.path.join(
                os.path.dirname(srcfile), 'series-%s.pickle' % cname
            )

            if forceseries or not os.path.exists(outfile):
                # Simulate time series for linearized model.
                if 'linear' not in series:
                    print '\tSimulating linear model (%d steps).' % nsteps

                    series['linear'] = model.simulate_linear(
                        np.zeros(nvars), sym_params, noise, nsteps
                    )

                print '\tWriting %s.' % outfile
                cPickle.dump(series, open(outfile, 'w'))
            else:
                # If the series has already been simulated and saved, just
                # load it and use it for subsequent cross-spectra computations.

                print '\tLoading saved %s.' % outfile
                loadseries = cPickle.load(open(outfile))
                series['linear'] = loadseries['linear']

        # Output pickle file for cross-spectra (analytic and estimation from
        # simulated time series).
        outfile = os.path.join(
            os.path.dirname(srcfile), 'spectra-%s.pickle' % cname
        )

        if forcespectra or not os.path.exists(outfile):
            # Calculate spectral matrices.
            spectra = {}
            csdargs = pbdict.getkeypath(config, '/analysis/csd')
            smoothingargs = pbdict.getkeypath(config, '/analysis/smoothing')
            linear = series['linear']

            # If we have more time series data than required by our NFFT window
            # size, only use the last NFFT data points rather than using
            # Welch's method to average over periodograms. Done to simplify
            # spectral estimation using smoothing instead. The LAST NFFT data
            # points are used in order to ensure the data are sufficiently
            # random (which they won't be if NFFT=len(linear), since the
            # nonlinear time series always starts at the equilibrium and the
            # linear time series always starts at zero).
            if 'NFFT' in csdargs and csdargs['NFFT'] < linear.shape[1]:
                linear = linear[:, -csdargs['NFFT']:]

            # Compute cross-spectra.
            freqs, spectra['linear'] = models.utilities.spectrum(
                linear, **csdargs
            )

            # Smooth spectral matrices for better estimate.
            spectra['linear_smoothed'] = np.empty_like(spectra['linear'])
            spectra['linear_median'] = np.empty_like(spectra['linear'])

            magargs = dict(smoothingargs.iteritems())
            magargs['window'] = 'median'

            for i, j in combinations_with_replacement(range(nvars), 2):
                spectra['linear_smoothed'][i, j] = models.utilities.smooth(
                    spectra['linear'][i, j], **smoothingargs
                )

                spectra['linear_median'][i, j] = \
                    models.utilities.smooth_phasors(
                        spectra['linear'][i, j],
                        magargs=magargs,
                        phasorargs=smoothingargs
                    )

            # Evaluate analytic spectral matrix.
            spectra['analytic'] = np.array([
                model.calculate_spectrum(sym_params, noise, v)
                for v in freqs
            ]).T

            if len(spectra['analytic'].shape) == 1:
                spectra['analytic'] = spectra['analytic'].reshape(
                    spectra['linear'].shape
                )

            # Save everything.
            print '\tWriting %s.' % outfile

            cPickle.dump(
                dict(
                    config=config,
                    name=cname,
                    spectra=spectra,
                    freqs=freqs,
                    model=model
                ),
                open(outfile, 'w')
            )
        else:
            print '\tSkipping %s (already exists).' % outfile
Example 28
import os

import discord
from discord.ext import commands
from praw import Reddit

import utilities

## Config
CONFIG_OPTIONS = utilities.load_config()


class StupidQuestions:
    REDDIT_USER_AGENT = "discord:hawking:{} (by /u/hawking-py)".format(
        CONFIG_OPTIONS.get("version", "0.0.1"))

    def __init__(self, hawking, bot, *args, **kwargs):
        self.hawking = hawking
        self.bot = bot

        ## Load module specific configs from 'stupid_questions.json' located in modules folder
        modules_folder_name = CONFIG_OPTIONS.get("modules_folder", "modules")
        config = utilities.load_json(
            os.path.sep.join([
                utilities.get_root_path(), modules_folder_name,
                "stupid_questions.json"
            ]))
        reddit_client_id = config.get("reddit_client_id")
        reddit_secret = config.get("reddit_secret")

        subreddits = CONFIG_OPTIONS.get("stupid_question_subreddits",
Example 29
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl

from utilities import load_config
from networks.layers import conv_block, convT_block, linear_block, UPSoftmax

config = load_config()
n_z = config["model"]["n_z"]
n_gf = config["model"]["n_gf"]
n_df = config["model"]["n_df"]


class gen(nn.Module):
    # conv1 (100, 1, 1)   ->  (256, 5,  5)
    # conv2 (256, 5, 5)   ->  (128, 10, 10)
    # conv3 (128, 10, 10) ->  (64, 20, 20)
    # conv4 (32, 20, 20)  ->  (16, 38, 38)
    # conv5 (32, 38, 38)  ->  (16, 76, 76)
    # conv6 (16, 76, 76)  ->  (1, 150, 150)

    def __init__(self):
        super().__init__()
        self.up1 = convT_block(n_z, n_gf * 16, 5, 1, 0, activation="relu")

        self.up2 = convT_block(n_gf * 16, n_gf * 8, 4, 2, 1, activation="relu")

        self.up3 = convT_block(n_gf * 8, n_gf * 4, 4, 2, 1, activation="relu")