Example no. 1
def run_iteration(iter_id,
                  n_iters,
                  model_path='',
                  dir_names=None,
                  eval_only=False,
                  interactive=False):
    # Avoid a shared mutable default argument for dir_names.
    dir_names = dir_names if dir_names is not None else []
    clear_dir.main(model_path=model_path)
    rcnn_args = run_rcnn.convert_args()
    if iter_id > 0 and not eval_only:
        rcnn_args.train = True
        rcnn_args.test = False
        run_rcnn.main(rcnn_args)
    rcnn_args.train = False
    rcnn_args.test = True
    run_rcnn.main(rcnn_args)
    init_root_dirs.init_data('Entire_dataset', iter_id)
    generate_raw_ann_from_rcnn_results.main(model_path=model_path)
    generate_adaptive_detection.generate_ann()
    track_instances.main(videonames=dir_names)
    create_tubelets.init_tracklet(videonames=dir_names)
    create_tubelets.create_tubelet(videonames=dir_names)
    create_tubelets.smoothen_label(videonames=dir_names)
    filter_data.filter_data(videonames=dir_names)
    add_instances.run_trackers(videonames=dir_names)
    if iter_id > 0 and interactive:
        convert_tubelet_to_coco_format.create_train(only_use_true_gt=True)
    else:
        convert_tubelet_to_coco_format.create_train()
    convert_tubelet_to_coco_format.create_test()
    ulti.main(videonames=dir_names)
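A hypothetical driver loop for run_iteration above; the signature comes from the snippet, while the iteration count, checkpoint path, and directory names are purely illustrative.

if __name__ == '__main__':
    n_iters = 3
    video_dirs = ['video_0001', 'video_0002']  # assumed video directory names
    for it in range(n_iters):
        run_iteration(it,
                      n_iters,
                      model_path='output/latest_model.pth',  # assumed checkpoint path
                      dir_names=video_dirs,
                      eval_only=False,
                      interactive=False)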
Example no. 2
def read_thread(thread_id, board, passcode=''):
	try:
		posts = api.get_thread_posts(board, thread_id, passcode)

		for post in posts:
			post.comment = filter_data(post.comment)
		posts = list(filter(
			lambda post: len(post.comment) > 1,
			posts
		))

		pairs = thread_posts_to_pairs(posts)

		pairs = list(filter(
			lambda pair: pair[1].id not in readen_post_ids,
			pairs
		))
		if len(pairs) == 0:
			return

		ids = [post.id for post in posts]
		readen_post_ids.update(ids)
		print('posts readen:', len(readen_post_ids))

		comments = [pair[0].comment + '\n\n' + pair[1].comment for pair in pairs]
		return '\n\n'.join(comments) + '\n\n'
	except KeyboardInterrupt:
		raise
	except:
		print(traceback.format_exc(), thread_id)
Example no. 3
def get_euadr_relation_type(settings):
    """
    Return the EU-ADR gold-standard semantic relation type
    for every work unit.
    """
    all_data = filter_data(settings)

    euadr_relation_type = dict()
    for unit_id, group in all_data.groupby("_unit_id"):
        assert len(group["gold_std_association_type"].unique()) == 1
        euadr_relation_type[unit_id] = group["gold_std_association_type"].iloc[0]

    return euadr_relation_type
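A minimal, self-contained sketch of the grouping step above on a toy DataFrame; the column names follow the function, everything else is illustrative.

import pandas as pd

# Toy stand-in for filter_data(settings); column names match the function above.
all_data = pd.DataFrame({
    "_unit_id": [101, 101, 202],
    "gold_std_association_type": ["PA", "PA", "NA"],
})

# Each unit carries a single gold-standard type (the assert above checks this).
euadr_relation_type = {
    unit_id: group["gold_std_association_type"].iloc[0]
    for unit_id, group in all_data.groupby("_unit_id")
}
print(euadr_relation_type)  # {101: 'PA', 202: 'NA'}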
Example no. 4
def main():
    # Set up the input and output directory
    dir_name = os.path.dirname(os.path.realpath(__file__))

    # Call the function that downloads the DETER data
    import_deter()
    # Call the function that unzips the deter_amz file and moves it to the root folder
    unzipefile_raiz()

    # Check that the dates data was created
    #print("Vai printar o datai",datai)
    #print(datanomei)

    # Filter the data
    for i in range(len(datai)):
        print(i)
        filter_data(datai[i], datanomei[i])

    for shp in range(len(glob('*.shp')) - 1):
        print(shp)
        deleta_vazio(datanomei[shp])

    # Generate the zip archive
    zip_shapes(dir_name, dir_name)
Example no. 5
def main():
    while True:
        inp_dict = get_inputs()
        try:
            clear_dict = clear_inputs(inp_dict)
        except ValueError:
            print('________________________________')
            print('Value incorrect please try again')
            print('________________________________')
            continue
        else:
            final_list = filter_data(cards, clear_dict)
            final_str = show_item(final_list)
            print('\n'.join(final_str))
        break
Example no. 6
def SubsetSLEProjections(sample_dict):

    # SLE projection directory
    #sle_dir = "191220_emulated"
    #sle_dir = "201011_proj_TIMESERIES"
    sle_dir = "2lm_projections"

    # Initialize the output dictionary
    sle_dict = {"ice_source": [], "region": [], "year": [], "scenario-sample": [],\
       "GSAT": [], "SLE": []}

    # Loop over the required SSPs from the sample dictionary
    for this_scenario in sample_dict.keys():

        # Open this matched scenario file
        filename = os.path.join(
            sle_dir, "projections_FAIR_{0}.csv".format(this_scenario.upper()))
        this_sle_dict = import_data(filename, "FAIR")

        # Filter this data for the appropriate samples
        this_sle_dict = filter_data(this_sle_dict,
                                    "FAIR",
                                    sample=sample_dict[this_scenario],
                                    ice_source="Glaciers")

        # Append these data to the output structure
        sle_dict["ice_source"].extend(this_sle_dict["ice_source"])
        sle_dict["region"].extend(this_sle_dict["region"])
        sle_dict["year"].extend(this_sle_dict["year"])
        sle_dict["GSAT"].extend(this_sle_dict["GSAT"])
        sle_dict["SLE"].extend(this_sle_dict["SLE"])

        # Add a field called "scenario-sample" to the dictionary
        scenario_sample = [
            "{0}-{1}".format(this_scenario, x) for x in this_sle_dict["sample"]
        ]
        sle_dict["scenario-sample"].extend(scenario_sample)

    # Convert everything over into numpy arrays
    sle_dict["ice_source"] = np.array(sle_dict["ice_source"])
    sle_dict["region"] = np.array(sle_dict["region"])
    sle_dict["year"] = np.array(sle_dict["year"])
    sle_dict["GSAT"] = np.array(sle_dict["GSAT"])
    sle_dict["SLE"] = np.array(sle_dict["SLE"])
    sle_dict["scenario-sample"] = np.array(sle_dict["scenario-sample"])

    # Return the sea level projection dictionary
    return (sle_dict)
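A hypothetical call sketch for SubsetSLEProjections; the scenario keys and sample indices are illustrative, and each value is the array passed to filter_data(..., sample=...) for that scenario.

sample_dict = {
    "ssp126": np.array([3, 17, 42]),  # illustrative FAIR sample indices
    "ssp585": np.array([5, 8, 23]),
}
sle_dict = SubsetSLEProjections(sample_dict)
# sle_dict["scenario-sample"] entries then look like "ssp126-3", "ssp585-8", ...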
Example no. 7
    def genWords(self, doc):
        """
        Generate all candidate words with their frequency/entropy/aggregation informations
        @param doc the document used for words generation
        """
        # [\\s\\d,.·<>/?:;\'\"[\\]{}()\\|~!@#$%^&*\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]
        length = len(doc)

        count = 1
        pattern = r'\n|[a-zA-Z_0-9]|\s|\n|\(|\)|-|=|\_|\+|,|。|、|;|‘|’|【|】|·|!| |…|(|)|“|”|:|——|?|%|《|》'
        #pattern = r'。'
        #print(length)
        for sentence in filter_data(re.split(pattern, doc)):
            #print(count, sentence)
            count += 1
            if sentence:
                # doc = re.sub(pattern, '', str(doc))
                suffix_indexes = indexOfSortedSuffix(sentence,
                                                     self.max_word_len)
                #print(suffix_indexes)

                # compute frequency and neighbors
                for suf in suffix_indexes:
                    word = sentence[suf[0]:suf[1]]
                    # print(word)
                    if word not in self.word_cands:
                        self.word_cands[word] = WordInfo(word)
                    self.word_cands[word].update(sentence[suf[0] - 1:suf[0]],
                                                 sentence[suf[1]:suf[1] + 1])

                # compute probability and entropy
                #length = len(doc)
        for k in self.word_cands:
            self.word_cands[k].compute(length)
        #print('aaaa')
        #print('value', word_cands.values())

        # compute aggregation of words whose length > 1
        values = sorted(list(self.word_cands.values()),
                        key=lambda x: len(x.text),
                        reverse=True)
        #print('values', values)
        for v in values:
            if len(v.text) == 1: continue
            v.computeAggregation(self.word_cands)

        return sorted(values, key=lambda v: len(v.text), reverse=True)
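A quick, self-contained illustration of the sentence-splitting step above, using an abridged version of the pattern; filter_data applied to the split result is assumed to drop empty or otherwise unwanted fragments.

import re

# Abridged version of the delimiter pattern used in genWords.
pattern = r'\n|[a-zA-Z_0-9]|\s|\(|\)|-|=|_|\+|。|、|;|:|?|!|《|》'
doc = "自然语言处理。机器学习;深度学习"
print([s for s in re.split(pattern, doc) if s])
# ['自然语言处理', '机器学习', '深度学习']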
Example no. 8
def select_thread_posts(board, thread_id, max_posts, min_post_len,
                        max_post_len):
    selected_posts = []

    posts = api.get_thread_posts(board, thread_id)
    random.shuffle(posts)

    for post in posts:
        post.comment = filter_data(post.comment)

        seed_tokens = comment_to_tokens(post.comment)
        if len(seed_tokens) >= min_post_len \
          and len(seed_tokens) <= max_post_len:
            selected_posts.append(post)

            if len(selected_posts) == max_posts:
                break

    return selected_posts
Example no. 9
def select_threads(board, max_threads, min_post_len, max_post_len):
    selected_threads = []

    threads = api.get_threads(board)
    for thread_id in threads:
        posts = api.get_thread_posts(board, thread_id)
        if not posts:
            continue

        for post in posts:
            post.comment = filter_data(post.comment)

        seed_tokens = comment_to_tokens(posts[0].comment)
        if len(posts) >= 3 \
          and len(seed_tokens) >= min_post_len \
          and len(seed_tokens) <= max_post_len:
            selected_threads.append((thread_id, posts))

            if len(selected_threads) == max_threads:
                break

    return selected_threads
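A hypothetical call sketch combining select_threads with select_thread_posts from Example no. 8; the board name and length limits are illustrative.

threads = select_threads('b', max_threads=5, min_post_len=8, max_post_len=64)
for thread_id, posts in threads:
    sampled = select_thread_posts('b', thread_id, max_posts=10,
                                  min_post_len=8, max_post_len=64)
    print(thread_id, len(sampled))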
Example no. 10
def ipccar6_preprocess_ismipemuicesheet(pipeline_id, scenario, pyear_start,
                                        pyear_end, pyear_step, model_driver,
                                        baseyear):

    # Get the searchable RCP scenario format
    #scenario_dict = {'ssp585': "SSP585", 'ssp370': "SSP370", 'ssp245': "SSP245", 'ssp126': "SSP126", 'ssp119': "SSP119"}
    #scenario_search = scenario_dict[scenario]

    # Define the target years
    targyears = np.arange(pyear_start, pyear_end + 1, pyear_step)

    # Load the data
    if (model_driver == "CMIP6"):
        filename = os.path.join(os.path.dirname(__file__),
                                "191208_annual_CMIP6",
                                "projections_{}.csv".format(scenario.upper()))
        data_dict = import_data(filename, model_driver)
    else:
        filename = os.path.join(os.path.dirname(__file__),
                                "20210215_CLIMATE_FORCING_IPCC.csv")
        forcing_data = import_temp_data(filename)
        forcing_data_filtered = filter_temp_data(
            forcing_data,
            ensemble="FAIR",
            scenario=["SSP119", "SSP126", "SSP245", "SSP370", "SSP585"])
        sample_dict = FindFAIRInputSamples(forcing_data_filtered, scenario)
        data_dict = SubsetSLEProjections(sample_dict)

    # Filter the input data for ice sheet and target years
    eais_dict = filter_data(data_dict,
                            model_driver,
                            ice_source="AIS",
                            region="EAIS",
                            year=targyears)
    wais_dict = filter_data(data_dict,
                            model_driver,
                            ice_source="AIS",
                            region="WAIS",
                            year=targyears)
    pen_dict = filter_data(data_dict,
                           model_driver,
                           ice_source="AIS",
                           region="PEN",
                           year=targyears)
    gis_dict = filter_data(data_dict,
                           model_driver,
                           ice_source="GrIS",
                           region="ALL",
                           year=targyears)

    # Generate the sample data structures
    eais_samps = MakeDataStructure(eais_dict, model_driver)
    wais_samps = MakeDataStructure(wais_dict, model_driver)
    pen_samps = MakeDataStructure(pen_dict, model_driver)
    gis_samps = MakeDataStructure(gis_dict, model_driver)

    # Define the linear trend terms to be added to the samples
    #trend_mean = {"EAIS": -0.02, "WAIS": 0.28, "PEN": 0.06, "GIS": 0.46}	# SOD
    #trend_sd = {"EAIS": 0.05, "WAIS": 0.03, "PEN": 0.01, "GIS": 0.04}		# SOD
    trend_mean = {"EAIS": 0.09, "WAIS": 0.18, "PEN": 0.06, "GIS": 0.19}
    trend_sd = {"EAIS": 0.04, "WAIS": 0.09, "PEN": 0.03, "GIS": 0.1}

    # Populate the output dictionary
    outdata = {'eais_samps': eais_samps, 'wais_samps': wais_samps, 'pen_samps': pen_samps, \
       'gis_samps': gis_samps, 'scenario': scenario, 'targyears': targyears, \
       'trend_mean': trend_mean, 'trend_sd': trend_sd, 'baseyear': baseyear, \
       'model_driver': model_driver}

    # Define the data directory
    outdir = os.path.dirname(__file__)

    # Write the rates data to a pickle file
    outfile = open(os.path.join(outdir, "{}_data.pkl".format(pipeline_id)),
                   'wb')
    p.dump(outdata, outfile)
    outfile.close()
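For reference, a sketch of how the pickle written above would typically be read back in a later pipeline stage (assuming p is the standard pickle module, as the p.dump call suggests); the pipeline id is illustrative.

import os
import pickle

pipeline_id = "icesheets_ismipemu"  # illustrative id
datafile = os.path.join(os.path.dirname(__file__), "{}_data.pkl".format(pipeline_id))
with open(datafile, "rb") as f:
    outdata = pickle.load(f)

eais_samps = outdata["eais_samps"]  # keys match the dictionary written above
targyears = outdata["targyears"]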
Example no. 11
        'model': 'GTX',
        'model_number': 1000,
        'gpu_mgh': 1183,
        'memory_interface_bit': 128,
        'gpu_memory_mb': 2048,
        'price': 3500,
        'is_sold': False,
        'nvidia_graphics_family': True
    },
]
assert filter_data(
    cards, {
        'min_price': 1,
        'max_price': 10,
        'min_memory_interface_bit': None,
        'max_memory_interface_bit': None,
        'min_gpu_mgh': None,
        'max_gpu_mgh': None,
        'is_sold': None,
        'is_nvidia_family': None,
        'manufacturer': 'MSI',
    }) == []
assert filter_data(
    cards, {
        'min_price': 1,
        'max_price': 10000,
        'min_memory_interface_bit': None,
        'max_memory_interface_bit': 200,
        'min_gpu_mgh': None,
        'max_gpu_mgh': None,
        'is_sold': None,
        'is_nvidia_family': None,
Example no. 12
#! /usr/bin/env python3

import days, filter_data, graphs

days.split_days()
print("Wedi rhannu i ddyddiau")  # Welsh: "Split into days"
filter_data.filter_data()
print("Wedi ffiltro")  # Welsh: "Filtered"
graphs.all_graphs()
print("Wedi creu graffiau")  # Welsh: "Graphs created"
print("Wedi gorffen")  # Welsh: "Finished"
Example no. 13
def main():
    inp_dict = get_inputs()
    clear_dict = clear_inputs(inp_dict)
    final_list = filter_data(cards, clear_dict)
    pprint(final_list)
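A minimal sketch of a filter_data(cards, criteria) helper consistent with the call above and the asserts in Example no. 11; the real implementation is not shown and may differ.

def filter_data(cards, criteria):
    # Keep the cards whose fields satisfy every criterion that is not None.
    def in_range(value, lo, hi):
        return (lo is None or value >= lo) and (hi is None or value <= hi)

    def keep(card):
        return (in_range(card['price'], criteria['min_price'], criteria['max_price'])
                and in_range(card['memory_interface_bit'],
                             criteria['min_memory_interface_bit'],
                             criteria['max_memory_interface_bit'])
                and in_range(card['gpu_mgh'], criteria['min_gpu_mgh'], criteria['max_gpu_mgh'])
                and (criteria['is_sold'] is None or card['is_sold'] == criteria['is_sold'])
                and (criteria['is_nvidia_family'] is None
                     or card['nvidia_graphics_family'] == criteria['is_nvidia_family'])
                and (criteria['manufacturer'] is None
                     or card.get('manufacturer') == criteria['manufacturer']))

    return [card for card in cards if keep(card)]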
Example no. 14
def MakeDataStructure(data_dict, model_driver):

    # Initialize the data structure
    outdata = []

    # If this is CMIP6 data
    if (model_driver == "CMIP6"):

        # Get a list of the GCM, param_n, and emulator_n available
        all_gcm = np.unique(data_dict['GCM'])
        all_param_n = np.unique(data_dict['param_n'])
        all_emulator_n = np.unique(data_dict['emulator_n'])

        # Loop over the available param_n and emulator_n values
        for this_param_n in all_param_n:
            for this_emulator_n in all_emulator_n:

                # Get the available years
                filtered_dict = filter_data(data_dict,
                                            model_driver,
                                            param_n=this_param_n,
                                            emulator_n=this_emulator_n)
                all_years = np.unique(filtered_dict['year'])

                # Get the order of the years
                year_order = np.argsort(all_years)

                # Get the time series for this model
                ts_data = filtered_dict['SLE'][year_order]

                # Append this time series to the output data structure
                outdata.append(ts_data)

    # Otherwise, this is FAIR data
    else:

        # Get a list of the unique samples
        all_samples = np.unique(data_dict['scenario-sample'])

        # Loop over the available samples
        for this_sample in all_samples:

            # Get the available years
            filtered_dict = filter_data(data_dict,
                                        model_driver,
                                        scenario_sample=this_sample)
            all_years = np.unique(filtered_dict['year'])

            # Get the order of the years
            year_order = np.argsort(all_years)

            # Get the time series for this model
            ts_data = filtered_dict['SLE'][year_order]

            # Append this time series to the output data structure
            outdata.append(ts_data)

    # Make this a numpy array
    outdata = np.array(outdata)

    # Return the data structure
    return (outdata)
Example no. 15
# import torch.nn as nn
# import numpy as np
# import random
# from torch.cuda.amp import autocast as autocast, GradScaler
#
# cudnn.benchmark = True
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

from tools.train import train
from tools.test import test
from filter_data import filter_data

if __name__ == '__main__':

    if cfg.INPUT.FILTER:
        filter_data(cfg.INPUT.LABEL_PATH, cfg.INPUT.FILTER_DATA_PATH,
                    cfg.INPUT.FILTER_DATA_NUM)

    train_loader, val_loader, num_query, num_classes, query_name, gallery_name = make_dataloader(
        cfg)
    if cfg.MODEL.MODE == 'train':
        ######################################### resume model ###################################
        train(train_loader, num_classes)

        with torch.no_grad():
            test(val_loader, num_query, query_name, gallery_name, num_classes)

    if cfg.MODEL.MODE == 'evaluate':
        with torch.no_grad():
            test(val_loader, num_query, query_name, gallery_name, num_classes)
Example no. 16
    def test_on_rows(self):
        filters = ["GI_4585642-S", "GI_23312375-A", "GI_5453687-S"]
        data = [["GI_4585642-S", "1"], ["GI_23312375-A1", "1"], ["GI_5453687-S", "1"],
                ["GI_4585642-S2", "1"], ["GI_23312375-A", "1"], ["GI_5453687-S3", "1"]]
        results = filter_data.filter_data(data, filters)
        self.assertEqual(results, [["GI_4585642-S", "1"], ["GI_5453687-S", "1"],
                                   ["GI_23312375-A", "1"]])
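One possible filter_data implementation consistent with this test (exact-ID matching on the first column, preserving input row order); a sketch only, the tested module's real code is not shown here.

def filter_data(rows, filters):
    # Keep only the rows whose identifier (first column) exactly matches a filter entry.
    wanted = set(filters)
    return [row for row in rows if row[0] in wanted]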
Example no. 17
def ar5_preprocess_glaciersfair(scenario, startyr, pipeline_id):

    # Define the temperature input file name
    infilename = "CLIMATE_FORCING_1850.csv"
    infile = os.path.join(os.path.dirname(__file__), infilename)

    # Acceptable SSP scenarios
    ssp_scenarios = ['ssp585', 'ssp370', 'ssp245', 'ssp126', 'ssp119']

    # Import the temperature data
    temp_data = import_data(infile)

    # Test the provided scenario
    scenario_test = re.search(r"^tlim(\d*\.?\d+)win(\d*\.?\d+)$", scenario)
    if (scenario_test):

        # This is a temperature limit, so extract the limit from the scenario string
        temp_target = float(scenario_test.group(1))
        temp_target_window = float(scenario_test.group(2))

        # Produce a list of models and scenarios that match the criteria
        temp_data_filtered = tas_limit_filter(temp_data, temp_target,
                                              temp_target_window)

    elif (scenario in ssp_scenarios):

        # Filter the temperature data for this particular scenario
        temp_data_filtered = filter_data(temp_data,
                                         ensemble="FAIR",
                                         scenario=scenario.upper())

    else:

        # This is an invalid scenario
        raise Exception("Invalid scenario definition: {}".format(scenario))

    # The module is calibrated to use the temperature reference period for AR5, so center
    # the temperature data to the mean of that period
    ref_idx = np.flatnonzero(
        np.logical_and(temp_data_filtered["years"] >= 1986,
                       temp_data_filtered["years"] <= 2005))
    ref_tas = np.nanmean(temp_data_filtered["data"][:, ref_idx], axis=1)
    temp_data_filtered["data"] = temp_data_filtered["data"] - ref_tas[:, np.
                                                                      newaxis]

    # Find the mean and sd of the matched models/scenarios
    temp_mean = np.nanmean(temp_data_filtered['data'], axis=0)
    temp_sd = np.nanstd(temp_data_filtered['data'], axis=0)
    data_years = temp_data_filtered['years']

    # Find which year in the data years is the start year
    baseyear_idx = np.flatnonzero(data_years == startyr)

    # Integrate temperature to obtain K yr at ends of calendar years
    # Note: I believe the original code performs a cumulative sum of the standard
    # deviations, which is not correct. Below I provide a fix to that bug as well
    # as a replication of the bug for diagnostic purposes.
    inttemp_mean = np.cumsum(temp_mean)
    #inttemp_sd = np.sqrt(np.cumsum(temp_sd**2))  # Assume independence across models
    inttemp_sd = np.cumsum(temp_sd)  # Assume correlation
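    # Tiny illustration of the difference (assuming independence across years):
    # with temp_sd = [1, 1, 1], np.cumsum(temp_sd) gives [1, 2, 3], whereas
    # np.sqrt(np.cumsum(np.array([1, 1, 1]) ** 2)) gives about [1.0, 1.41, 1.73];
    # variances add under independence, standard deviations do not.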

    # Integrated quantities must be centered on the baseline year
    inttemp_mean -= inttemp_mean[baseyear_idx]
    inttemp_sd -= inttemp_sd[baseyear_idx]

    # Store preprocessed data in pickles
    output = {'temp_mean': temp_mean, 'temp_sd': temp_sd, 'inttemp_mean': inttemp_mean, \
       'inttemp_sd': inttemp_sd, 'data_years': data_years, 'startyr': startyr, \
       'scenario': scenario}

    # Write the configuration to a file
    outdir = os.path.dirname(__file__)
    outfile = open(os.path.join(outdir, "{}_data.pkl".format(pipeline_id)),
                   'wb')
    pickle.dump(output, outfile)
    outfile.close()

    return (0)
Example no. 18
import filter_data as fd
from importance_sampler import *

PATH = "C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\feats.csv"
OUT_PATH = "C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\filtered_feats.csv"
labels_path = "C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\filtered_feats_labels.csv"
target_action = "Q315"
fd.filter_data(PATH, OUT_PATH, target_action, labels_path=labels_path)
Example no. 19
                  mode='r+',
                  dtype=dt,
                  shape=(int(nsamples), int(nchannels)))
    data = d.T
    # The filtered data should be an independent copy of the raw data,
    # otherwise the in-place filtering below overwrites the original array.
    fdata = data.copy()
    print('Data Imported. Starting to filter ...')
    time = np.arange(0, len(fdata[ichan, :]) / 30000, 1 / 30000)

    # check a test channel of the data
    plt.plot(data[ichan, :])
    thresh = np.zeros((nchannels, 1), dtype=float)

    #Data Filtering
    for i in range(nchannels):
        fdata[i, :] = filter_data(data[i, :], 500, 8000, 30000)
        thresh[i] = find_spike_threshold(fdata[i, :], 6)

    print('Data filtered!!')
    print('Now plot the filtered data')
    plt.plot(time, fdata[ichan, :])
    plt.xlabel('Time(sec)')
    plt.ylabel('Voltage(mv)')
    print("Notice that you have to close the window to continue the code!")
    plt.show()

    # Extract extracellular spikes
    print('Starting Extracellular spikes extraction ...')
    spk, spk_w, t_spk_w, spk_idx, threshold = extract_extracellular_spikes(
        fdata, 6, None, 1, 1, 2, 'neg')
    ###Save the work space File
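The filter_data(signal, low, high, fs) helper called above is not shown; the following is a sketch of one common way such a band-pass step is implemented (assuming a Butterworth filter, which may differ from the original).

from scipy.signal import butter, filtfilt

def bandpass_filter(signal, low_hz, high_hz, fs, order=3):
    # Normalize the cut-off frequencies by the Nyquist frequency.
    nyq = fs / 2.0
    b, a = butter(order, [low_hz / nyq, high_hz / nyq], btype='band')
    # Zero-phase filtering so spike timing is not shifted.
    return filtfilt(b, a, signal)

# e.g. bandpass_filter(data[i, :], 500, 8000, 30000) for a 30 kHz recording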
Example no. 20
def start():
    flag_main = True
    while flag_main:
        print("""Comands:
        1-filter,
        2-analyse 
        3-read_data
        4-gen_data
        5-exit
         """)
        a = int(input("Enter command:"))
        if a == 2:
            print(""" Analyze
                        1 - Total receipt of a specific year
                        2 - Total receipt of range of years
                        3 - full data of certain duty
                        4 - Receipt of a duty in a certain year
                        5 - Receipt of a duty in a range of years
                        6 - Total sum of receipts of a duty
                        7 - Total Betting
                        8 - Total Gaming
                        9 - quit

                        """)
            b = int(input("Enter command: "))
            if b == 1:
                c = int((input("Enter year: ")))
                ad.total_receipt(c)
            if b == 2:
                u = int(input("enter number of years you want to compare: "))
                years = [int(input("Enter year")) for i in range(u)]
                ad.total_receipt_years(years)
            if b == 3:
                print(""" Analyze
                        1 - general_betting_duty
                        2 - pool_betting_duty
                        3 - gaming_duty
                        4 - amusement_machine_licence
                        5 - bingo
                        6 - machine_games_duty
                        7 - lottery_duty
                                        """)
                j = int(input("enter command: "))
                if j == 1:
                    ad.column_full("general_betting_duty")
                if j == 2:
                    ad.column_full("pool_betting_duty")
                if j == 3:
                    ad.column_full("gaming_duty")
                if j == 4:
                    ad.column_full("amusement_machine_licence")
                if j == 5:
                    ad.column_full("bingo")
                if j == 6:
                    ad.column_full("machine_games_duty")
                if j == 7:
                    ad.column_full("lottery_duty")
            if b == 4:
                yy = int(input("enter year: "))
                print(""" Analyze
                        1 - general_betting_duty
                        2 - pool_betting_duty
                        3 - gaming_duty
                        4 - amusement_machine_licence
                        5 - bingo
                        6 - machine_games_duty
                        7 - lottery_duty
                                                       """)
                j = int(input("enter command: "))
                if j == 1:
                    ad.column_year("general_betting_duty", yy)
                if j == 2:
                    ad.column_year("pool_betting_duty", yy)
                if j == 3:
                    ad.column_year("gaming_duty", yy)
                if j == 4:
                    ad.column_year("amusement_machine_licence", yy)
                if j == 5:
                    ad.column_year("bingo", yy)
                if j == 6:
                    ad.column_year("machine_games_duty", yy)
                if j == 7:
                    ad.column_year("lottery_duty", yy)
            if b == 5:
                u = int(input("enter number of years you want to compare: "))
                years = [int(input("Enter year: ")) for i in range(u)]
                print(""" Analyze
                        1 - general_betting_duty
                        2 - pool_betting_duty
                        3 - gaming_duty
                        4 - amusement_machine_licence
                        5 - bingo
                        6 - machine_games_duty
                        7 - lottery_duty
                                                                       """)
                j = int(input("enter command: "))
                if j == 1:
                    ad.column_multiple_years("general_betting_duty", years)
                if j == 2:
                    ad.column_multiple_years("pool_betting_duty", years)
                if j == 3:
                    ad.column_multiple_years("gaming_duty", years)
                if j == 4:
                    ad.column_multiple_years("amusement_machine_licence",
                                             years)
                if j == 5:
                    ad.column_multiple_years("bingo", years)
                if j == 6:
                    ad.column_multiple_years("machine_games_duty", years)
                if j == 7:
                    ad.column_multiple_years("lottery_duty", years)
            if b == 6:
                print(""" Analyze
                        1 - general_betting_duty
                        2 - pool_betting_duty
                        3 - gaming_duty
                        4 - amusement_machine_licence
                        5 - bingo
                        6 - machine_games_duty
                        7 - lottery_duty
                                                                                       """
                      )
                j = int(input("enter command: "))
                if j == 1:
                    ad.annual_receipt("general_betting_duty")
                if j == 2:
                    ad.annual_receipt("pool_betting_duty")
                if j == 3:
                    ad.annual_receipt("gaming_duty")
                if j == 4:
                    ad.annual_receipt("amusement_machine_licence")
                if j == 5:
                    ad.annual_receipt("bingo")
                if j == 6:
                    ad.annual_receipt("machine_games_duty")
                if j == 7:
                    ad.annual_receipt("lottery_duty")
            if b == 7:
                ad.annual_betting()
            if b == 8:
                ad.annual_gaming()
        elif a == 1:
            print(""" Filter
            1 - Year
            2 - Year >
            3 - Year <
            4 - Month
            5 - Month >
            6 - Month <
            7 - Date
            8 - Other fields
            9 - quit
            
            """)

            b = int(input("Enter command:"))

            if b == 1:
                c = int((input("Enter year:")))
                fd.filter_date("year", c)
            if b == 2:
                c = int((input("Enter year:")))
                fd.filter_date_gt("year", c)
            if b == 3:
                c = input("Enter year:")
                fd.filter_date_ls("year", c)
            if b == 4:
                c = int((input("Enter month:")))
                fd.filter_date("month", c)
            if b == 5:
                c = int((input("Enter month:")))
                fd.filter_date_gt("month", c)
            if b == 6:
                c = input("Enter month:")
                fd.filter_date_ls("month", c)
            if b == 7:
                c = input("Enter date:(yyyy-mm)")
                fullDate = "%s-01" % c
                print(fullDate)
                fd.filter_data("date", fullDate)
            if b == 8:

                print(""" operation
                         1 - =
                         2 - >
                         3 - <
                              """)
                op = int(input("Enter operation: "))
                val = int(input("Enter Value: "))
                print(""" Field
                        1 - general_betting_duty
                        2 - pool_betting_duty
                        3 - gaming_duty
                        4 - amusement_machine_licence
                        5 - bingo
                        6 - machine_games_duty
                        7 - lottery_duty
                                                       """)
                j = int(input("enter command"))
                s = ""
                if j == 1:
                    s = "general_betting_duty"
                if j == 2:
                    s = "pool_betting_duty"
                if j == 3:
                    s = "gaming_duty"
                if j == 4:
                    s = "amusement_machine_licence"
                if j == 5:
                    s = "bingo"
                if j == 6:
                    s = "machine_games_duty"
                if j == 7:
                    s = "lottery_duty"
                if op == 1:
                    fd.filter_data(s, val)
                if op == 2:
                    fd.filter_data_gt(s, val)
                if op == 3:
                    fd.filter_data_ls(s, val)
            if b == 9:
                break
            if (b < 1 or b > 9):
                print("Error number")
                flag_main = True

        elif a == 3:
            gd.read_data()
        elif a == 4:
            c = int(input("Enter start year: "))
            gd.generate_data(c)
        elif a == 5:
            break
Example no. 21
def ipccar6_preprocess_gmipemuglaciers(pipeline_id, scenario, pyear_start,
                                       pyear_end, pyear_step, model_driver,
                                       baseyear):

    # Get the searchable RCP scenario format
    #scenario_dict = {'ssp585': "SSP585", 'ssp370': "SSP370", 'ssp245': "SSP245", 'ssp126': "SSP126", 'ssp119': "SSP119"}
    #scenario_search = scenario_dict[scenario]

    # Define data years
    data_years = np.arange(2016, 2101)

    # Define the target years
    targyears = np.arange(pyear_start, pyear_end + 1, pyear_step)

    # Load the data
    if (model_driver == "CMIP6"):
        filename = os.path.join(os.path.dirname(__file__),
                                "191208_annual_CMIP6",
                                "projections_{}.csv".format(scenario.upper()))
        data_dict = import_data(filename, model_driver)
    else:
        filename = os.path.join(os.path.dirname(__file__),
                                "20210215_CLIMATE_FORCING_IPCC.csv")
        forcing_data = import_temp_data(filename)
        forcing_data_filtered = filter_temp_data(
            forcing_data,
            ensemble="FAIR",
            scenario=["SSP119", "SSP126", "SSP245", "SSP370", "SSP585"])
        sample_dict = FindFAIRInputSamples(forcing_data_filtered, scenario)
        data_dict = SubsetSLEProjections(sample_dict)

    # Filter the input data for glacier regions and target years
    glacier_regions = ["region_{}".format(x) for x in np.arange(19) + 1]
    gic_dict = filter_data(data_dict,
                           model_driver,
                           ice_source="Glaciers",
                           region=glacier_regions)  #, year=targyears)

    # Initialize samples array
    gic_samps = []

    # Loop over the glacier regions
    for this_region in glacier_regions:

        # Extract only the samples for this region
        gic_temp_dict = filter_data(gic_dict,
                                    model_driver,
                                    ice_source="Glaciers",
                                    region=this_region)

        # Generate the sample data structures
        gic_samps.append(MakeDataStructure(gic_temp_dict, model_driver))

    # Make the samples a numpy array
    gic_samps = np.array(gic_samps)

    # Load the already stored
    #with open("gic_samps.pkl", "rb") as f:
    #	stored_data = p.load(f)
    #
    #gic_samps = stored_data["gic_samps"]
    '''
	# Temporarily store the gic samps
	outdata = {"gic_samps": gic_samps}
	outdir = os.path.dirname(__file__)
	outfile = open(os.path.join(outdir, "gic_samps.pkl".format(pipeline_id)), 'wb')
	p.dump(outdata, outfile)
	outfile.close()
	'''

    # Find the ratio of melt over the first decade of projection years
    region_melt = []
    total_melt = 0.0
    for x in np.arange(gic_samps.shape[0]):
        this_melt = np.nanmean(gic_samps[x, :, 9]) - np.nanmean(gic_samps[x, :, 0])
        total_melt += this_melt
        region_melt.append(this_melt)
    region_melt = np.array(region_melt)
    melt_ratio = region_melt / total_melt

    # Create a pool of baseline adjustments to apply to the samples
    np.random.seed(8071)
    trend_q = np.random.random_sample(gic_samps.shape[1])
    trend_mean = 0.7
    trend_sd = 0.1
    glac_trend = norm.ppf(trend_q, trend_mean,
                          trend_sd) * (data_years[0] - baseyear)

    # Filter for the target projection years and apply the baseline adjustment
    targyears_idx = np.flatnonzero(np.isin(data_years, targyears))
    gic_samps = gic_samps[:, :, targyears_idx]
    for x in np.arange(gic_samps.shape[0]):
        n_glac_samps = gic_samps.shape[2]
        this_trend = glac_trend * melt_ratio[x]
        gic_samps[x, :, :] += this_trend[np.newaxis, :n_glac_samps]

    # Populate the output dictionary
    outdata = {'gic_samps': gic_samps, 'scenario': scenario, 'targyears': targyears, \
       'baseyear': baseyear, 'model_driver': model_driver}

    # Define the data directory
    outdir = os.path.dirname(__file__)

    # Write the rates data to a pickle file
    outfile = open(os.path.join(outdir, "{}_data.pkl".format(pipeline_id)),
                   'wb')
    p.dump(outdata, outfile)
    outfile.close()