Code example #1
    def test_register_return_trip(self):
        '''register_return_trip should succeed and return None.'''

        params = Parameters(args=['2018-10-01'])
        params.parse()
        self.assertEqual(register_return_trip(params=params), None)

        logging.info('Test passed for: register_return_trip')
Code example #2
    def test_register_ride(self):
        '''register_ride should succeed and return None.'''

        params = Parameters(args=['Odense', 'Copenhagen', '2018-10-01', '4'])
        params.parse()
        self.assertEqual(register_ride(params=params), None)

        logging.info('Test passed for: register_ride')
Code example #3
def register_return_trip(params: Parameters) -> None:
    ''' Register return-ride in DB.'''

    logging.info('Create return-trip.')

    last_ride = RidesTable.get_last_inserted()

    params.from_city = last_ride.to_city
    params.to_city = last_ride.from_city
    params.num_seats = last_ride.num_seats

    register_ride(params=params)
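
A minimal driver sketch tying the snippets above together, assuming only the calls they already show (Parameters(args=...).parse(), register_ride, register_return_trip); the argument values are illustrative and taken from the tests.

# Hedged usage sketch: register a ride, then a return trip for it.
params = Parameters(args=['Odense', 'Copenhagen', '2018-10-01', '4'])
params.parse()
register_ride(params=params)  # forward trip

return_params = Parameters(args=['2018-10-01'])
return_params.parse()
register_return_trip(params=return_params)  # reuses the last inserted ride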
Code example #4
    def test_retrieve_rides(self):
        '''retrieve_rides should return the matching rows for each query.'''

        args = [
            [Parameters(args=[]),
             f'Odense Copenhagen 2018-10-01 4 {linesep}'],  # S
            [Parameters(args=['2018-10-01']),
             f'Odense Copenhagen 2018-10-01 4 {linesep}'],  # S 2018-10-01
            [Parameters(args=['2018-10-01', '4']),
             f'Odense Copenhagen 2018-10-01 4 {linesep}'],  # S 2018-10-01 4
            [Parameters(args=['2018-10-01', '7']), ''],  # S 2018-10-01 7
            [Parameters(args=['2018-10-01', '2', 'Odense']),
             f'Odense Copenhagen 2018-10-01 4 {linesep}'],  # S Odense 2018-10-01 2
            [Parameters(args=['2018-10-01', 'Aarhus']), ''],  # S 2018-10-01 Aarhus
            [Parameters(args=['2020-10-01', 'Aarhus']), ''],  # S 2020-10-01 Aarhus
            [Parameters(args=['2020-10-01', 'Odense']), '']  # S 2020-10-01 Odense
        ]

        for params, expected in args:
            params.parse()
            self.assertEqual(retrieve_rides(params), expected)

        logging.info('Test passed for: retrieve_rides')
Code example #5
File: loading_txt.py Project: tuuznik/genetic_rumble
def load_parametry(alpha: int = 4) -> Parameters:
    temp_employee_number = 0
    rating = []
    skills_matrix = []
    employee_time = []
    company_time = []
    population_size = 0
    with open("parametry.txt", "r") as f:
        f_lines = f.readlines()
    for i, line in enumerate(f_lines):
        if line == 'EMPLOYEE_NUMBER\n':
            temp_employee_number = int(f_lines[i + 1])
        if line == 'POPULATION_NUMBER\n':
            population_size = int(f_lines[i + 1])
        if line == 'RATING\n':
            rating = line2list(f_lines[i + 1])
        if line == 'SKILLS_MATRIX\n':
            for j in range(temp_employee_number):
                skills_matrix.append(line2list(f_lines[i + 1 + j]))
        if line == 'EMPLOYEE_TIME\n':
            employee_time = line2list(f_lines[i + 1])
        if line == 'COMPANY_TIME\n':
            company_time = line2list(f_lines[i + 1])
    return Parameters(rating,
                      skills_matrix,
                      employee_time,
                      company_time,
                      alpha=alpha,
                      population_count=population_size)
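
A hypothetical parametry.txt in the layout load_parametry expects (each keyword on its own line, followed by its value line or lines), assuming line2list splits a whitespace-separated line into numbers; the values mirror the test data elsewhere on this page.

# Hedged sketch of a parametry.txt file, written from Python for illustration.
sample = (
    "EMPLOYEE_NUMBER\n3\n"
    "POPULATION_NUMBER\n10\n"
    "RATING\n3 2 1 3\n"
    "SKILLS_MATRIX\n1 1 2 2\n2 1 1 2\n1 1 2 1\n"
    "EMPLOYEE_TIME\n1.5 2 1\n"
    "COMPANY_TIME\n1 1 0.5 0.5\n"
)
with open("parametry.txt", "w") as f:
    f.write(sample)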
Code example #6
def handle_request(req: str):
    ''' Handles user requests.'''

    cmd, *args = req.split(' ')  # First token is the cmd indicator, [S, C, R].

    params = Parameters(args=args)
    params.parse()

    # Create ride.
    if cmd in CMDs.create_ride:
        
        try:
            register_ride(params=params)

        except Exception as e:
            logging.error(
                f'Registering trip failed: {e}'
            )

    # Retrieve ride(s).
    elif cmd in CMDs.retrieve_ride:

        try:
            res = retrieve_rides(params=params)
            print_out(txt=res)
            
        except Exception as e:
            logging.error(
                f'Retrieving data failed: {e}'
            )

    # Create return ride.
    elif cmd in CMDs.create_return_ride:

        try:
            register_return_trip(params=params)

        except Exception as e:
            logging.error(
                f'Registering return trip failed: {e}'
            )
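
handle_request dispatches on a CMDs lookup that is not shown on this page. Below is a minimal stand-in consistent with the '[S, C, R]' comment above; the letter-to-command mapping is an assumption (only S, used for searches in the retrieve_rides test, is attested).

# Hypothetical CMDs stand-in; each attribute is a collection of command letters.
class CMDs:
    create_ride = ('C',)         # assumed: C = create a ride
    retrieve_ride = ('S',)       # S = search/retrieve, as in the test comments
    create_return_ride = ('R',)  # assumed: R = create a return ride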
Code example #7
def train_model(args, model_path=None):
    loader, num_words, embedding_matrix = create_new_data_loader(args)

    parameters = Parameters(num_of_words=num_words, use_cuda=args.use_cuda)
    cur_avb = AVB(params=parameters, embedding_matrix=embedding_matrix,
                  noise_size=args.noise_size)

    curr_discriminator = Discriminator(params=parameters)
    if args.use_cuda:
        cur_avb = cur_avb.cuda()
        curr_discriminator = curr_discriminator.cuda()

    optimizer_vae = Adam(cur_avb.learnable_parameters(), args.learning_rate)
    optimizer_discriminator = Adam(curr_discriminator.parameters(), args.learning_rate)

    current_trainer = cur_avb.trainer(optimizer_vae=optimizer_vae,
                                      discriminator=curr_discriminator,
                                      optimizer_discriminator=optimizer_discriminator,
                                      use_cuda=args.use_cuda,
                                      dropout=args.dropout)

    current_validator = cur_avb.validater(use_cuda=args.use_cuda, dropout=args.dropout)
    valid_loader, _, _ = create_new_data_loader(args, path='data/nips_valid_sorted.txt')

    if model_path:
        check_point = torch.load(model_path)
        cur_avb.load_state_dict(check_point['vae_state_dict'])
        curr_discriminator.load_state_dict(check_point['discriminator_state_dict'])
        optimizer_vae.load_state_dict(check_point['optimizer_vae'])
        optimizer_discriminator.load_state_dict(check_point['optimizer_discriminator'])

    epoch = 0
    for iteration in range(args.num_iterations):
        for data_tuple in loader:
            kld, loss, d_cost, wasserstein_d = current_trainer(data_tuple=data_tuple)
            
            if epoch % 15 == 14:
                ppl_list = []
                for valid_data_batch in valid_loader:
                    perplexity = current_validator(valid_data_batch)
                    print(perplexity)
                    ppl_list.append(perplexity)

                print(np.average(ppl_list))

            if epoch % 100 == 99:
                save_checkpoint({
                    'epoch': epoch + 1,
                    'vae_state_dict': cur_avb.state_dict(),
                    'discriminator_state_dict': curr_discriminator.state_dict(),
                    'optimizer_vae': optimizer_vae.state_dict(),
                    'optimizer_discriminator': optimizer_discriminator.state_dict()
                })

            epoch += 1
Code example #8
    def test_solution_functions(self):
        rating1 = [3, 2, 1, 3]
        skill1 = [[1, 1, 2, 2], [2, 1, 1, 2], [1, 1, 2, 1]]
        sol1 = [2, 2, 0, 0]
        solution1 = Solution(employee_number=3,
                             company_number=len(sol1),
                             premade_list=sol1)
        self.assertEqual(solution1.f_target(rating1, skill1), 13)
        emp_time = [1.5, 2, 1]
        comp_time = [1, 1, 0.5, 0.5]
        parameters = Parameters(rating1, skill1, emp_time, comp_time)
        self.assertEqual(solution1.f_penalty(emp_time, comp_time), 4)
        self.assertEqual(solution1.adaptation(parameters), 9)
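
The three assertions are mutually consistent if adaptation is the target value minus the penalty; this is a hedged reading of the test, not confirmed against the project source.

# Hedged inference from the assertions above: adaptation = f_target - f_penalty.
f_target_value, f_penalty_value = 13, 4
assert f_target_value - f_penalty_value == 9  # the asserted adaptation value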
Code example #9
File: decoder.py Project: ischeinfeld/py_nlp
    def __init__(self, corpus_name, rarity=5):
        """Initialize parameters using a corpus

		Parameters q (transition) and e (emission) are calculated by eponymous
		functions from the class Parameters using counts over the corpus.
		See parameters_class.py for details.

		The corpus must be from the Penn WSJ treebank

		corpus_name can be one of three names:
			"train" uses /Users/ischeinfeld/Documents/Code/WSJ/train.txt
			"develop" uses /Users/ischeinfeld/Documents/Code/WSJ/develop.txt"
			"test" uses /Users/ischeinfeld/Documents/Code/WSJ/test.txt"

		or corpus_name can be a complete path to the corpus text:
			ex. /dir/dir/dir/file(.txt)
		"""

        ### Create instance of Parameters with the given corpus
        # Note that this completes all the counts for q and e

        self.corpus = import_wsj(corpus_name)
        self.params = Parameters(self.corpus)
Code example #10
    def set_up(self, data_settings, params):
        self.params_dict = params  # set_up method of the Runner object

        # time some things, like epoch time
        start_time = time.time()

        # ---------------------------------------- #
        #     DEFINE XVAL DATASETS                 #
        # ---------------------------------------- #

        # Create self.dataset_pair: DatasetPair containing train and val Datasets.
        self._prepare_data(data_settings)
        # Number of instances to put in a training batch.
        self.n_batch = min(self.params_dict['n_batch'],
                           self.dataset_pair.n_train)  # if the training set is smaller than one batch, train on all of it at once

        # This is already a model object because of the use of "!!python/object:..." in the YAML file.
        model = self.params_dict["model"]
        # Set various attributes of the model
        model.init_with_params(self.params_dict, self.procdata)

        # Import priors from YAML
        parameters = Parameters()  # instantiate a class
        parameters.load(self.params_dict)

        print("----------------------------------------------")
        if self.args.verbose:
            print("parameters:")
            parameters.pretty_print()
        n_vals = LocalAndGlobal.from_list(
            parameters.get_parameter_counts())  # e.g. (10, 0, 0, 4): dimensions of the constant, global, global-conditioned and local parameters
        self.n_theta = n_vals.sum()  # total number of parameters (14 in this example)

        #     Pytorch PARTS        #

        # DEFINE THE OBJECTIVE
        print("Set up model")
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        #device = torch.device('cpu')

        self.device = device
        # create feed dictionaries for training dataset and validation dataset respectively
        self._create_feed_dicts()

        self.objective = Objective(parameters, self.params_dict, model, self.dataset_pair.times, self.procdata, self.device, self.args.dreg)  # , self.args.verbose
        # Initialize the weight of neural networks
        self.weight_orthogonal_initialization()
        # feed the model to the GPU (if GPU is available)
        self.objective.to(device)

        self.optimizer = torch.optim.Adam(self.objective.parameters(), lr=5e-3)
Code example #11
def test_model(args):
    model_path = "/home/tristan/Documents/pattern_recognition_final/resultscheckpoint_ecpch21.tar"
    check_point = torch.load(model_path)
    loader, num_words, embedding_matrix = create_new_data_loader(args)

    parameters = Parameters(num_of_words=num_words, use_cuda=args.use_cuda)

    cur_avb = AVB(params=parameters, embedding_matrix=embedding_matrix,
                  noise_size=args.noise_size)
    if args.use_cuda:
        cur_avb = cur_avb.cuda()

    cur_avb.load_state_dict(check_point['vae_state_dict'])

    validater = cur_avb.validater(data_loader=loader)
    validater(use_cuda=args.use_cuda,
              dropout=args.dropout)
Code example #12
    def test_population_init(self):
        rating1 = [1, 2, 3.5, 4, 5]  # 5 companies
        skill1 = [[2, 2, 1, 3, 2], [1, 1, 1, 2, 1],
                  [2, 3, 2, 1, 1]]  # 3 employees
        emp_time1 = [1.8, 2, 0.7]
        comp_time1 = [1, 2, 2.5, 1.5, 0.8]
        params = Parameters(rating=rating1,
                            skills_matrix=skill1,
                            employee_time=emp_time1,
                            company_time=comp_time1)
        current_population = Population(parameters=params)
        self.assertEqual(len(current_population.main_list), 10)
        custom_list = [
            Solution(employee_number=len(params.skills_matrix),
                     company_number=len(params.skills_matrix[0]))
            for _ in range(5)
        ]
        with self.assertRaises(MeasuresError):
            Population(parameters=params, test_list=custom_list)
Code example #13
def train_model(args, model_path=None, save=True):
    loader, num_words, embedding_matrix = create_new_data_loader(args)
    valid_loader, _, _ = create_new_data_loader(
        args, path='data/nips_valid_sorted.txt')

    parameters = Parameters(num_of_words=num_words, use_cuda=args.use_cuda)
    rvae = RVAE_dilated(params=parameters, embedding_matrix=embedding_matrix)
    adam_optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
    if args.use_cuda:
        rvae = rvae.cuda()

    if model_path:
        check_point = torch.load(model_path)
        rvae.load_state_dict(check_point['state_dict'])
        adam_optimizer.load_state_dict(check_point['optimizer'])

    current_trainer = rvae.trainer(optimizer=adam_optimizer,
                                   use_cuda=args.use_cuda,
                                   dropout=args.dropout)

    current_validater = rvae.validater(use_cuda=args.use_cuda,
                                       dropout=args.dropout)
    epoch = 0
    for train_data_batch in loader:
        ppl_list = []
        kld, loss = current_trainer(data_tuple=train_data_batch)
        if epoch % 15 == 14:
            for valid_data_batch in valid_loader:
                perplexity = current_validater(valid_data_batch)
                ppl_list.append(perplexity)

            print(np.average(ppl_list))
        epoch += 1

        if save and epoch % 100 == 99:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': rvae.state_dict(),
                'optimizer': adam_optimizer.state_dict(),
            })
Code example #14
    def __init__(self, experiment: str, scenario: str, run: str):

        self.cdi_map = {
            'SUSCEPTIBLE': 'Susceptible',
            'COLONIZED': 'Asymptomatically Colonized',
            'CDI': 'CDI',
            'DEAD': 'Dead'
        }

        self.experiment_dir = Path(experiment)
        self.output_dir = Path(experiment, scenario, run, 'model_output')
        self.params = Parameters(
            Path(experiment, scenario, run, "parameters.json"))
        self.daily_counts = pd.read_csv(
            Path(self.output_dir, 'daily_counts.csv'))
        self.locations = NcLocations(self.experiment_dir)

        if 'cdi' == self.params.base['disease_model']:
            self.cdi_cases = pd.read_csv(Path(self.output_dir,
                                              'CDI_cases.csv'))
        if 'cre' == self.params.base['disease_model']:
            self.cre_cases = pd.read_csv(Path(self.output_dir,
                                              'CRE_cases.csv'))

        # ---- Some files may be compressed
        try:
            self.events = pd.read_csv(Path(self.output_dir,
                                           'model_events.csv'))
        except UnicodeDecodeError:
            self.events = pd.read_csv(Path(self.output_dir,
                                           'model_events.csv'),
                                      compression='gzip')

        self.catchment_counties = [
            1, 21, 23, 27, 35, 37, 49, 51, 57, 61, 63, 65, 67, 69, 79, 81, 83,
            85, 89, 101, 103, 105, 107, 125, 127, 129, 133, 135, 145, 147, 149,
            151, 155, 161, 163, 175, 183, 189, 191, 193, 195]
Code example #15
    def setUp(self):
        parameters = Parameters()
        parameters.STRENGTH_THRESHOLD = STRENGTH_THRESHOLD
        self.strategy = Strategy(None, parameters)
Code example #16
    if optimisation_type == 'AD':
        if np.array_equal(set_of_angles, np.array([-45, 0, 45, 90], int)):
            lampam_to_be_optimised = np.array(
                [1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0])
        else:
            lampam_to_be_optimised = np.array(
                [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1])

    # Lamination parameters sensitivities from the first-level optimiser
    first_level_sensitivities = np.ones((12, ), float)

    parameters = Parameters(
        constraints=constraints,
        p_A=p_A,
        n_D1=n_D1,
        n_D2=n_D2,
        n_D3=n_D3,
        first_level_sensitivities=first_level_sensitivities,
        lampam_to_be_optimised=lampam_to_be_optimised,
        repair_membrane_switch=repair_membrane_switch,
        repair_flexural_switch=repair_flexural_switch)

    ss, completed, n_obj_func_D_calls = repair_ss(ss,
                                                  constraints,
                                                  parameters,
                                                  lampam_target,
                                                  count_obj=True)
    print('Repair successful?', completed)
    print_ss(ss, 20)
    print('n_obj_func_D_calls', n_obj_func_D_calls)
    check_ss_manufacturability(ss, constraints)
Code example #17
import src.hlt as hlt
from src.strategy import Strategy
from src.hlt import NORTH, EAST, SOUTH, WEST, STILL, Move, Square
from src.parameters import Parameters
import random

myID, game_map = hlt.get_init()
hlt.send_init("Level 3")
params = Parameters()

while True:
    game_map.get_frame()
    strategy = Strategy(game_map, params)
    moves = [
        strategy.decide_move(square) for square in game_map
        if square.owner == myID
    ]
    hlt.send_frame(moves)
Code example #18
File: run_repair.py Project: noemiefedon/RELAY
table_param = pd.DataFrame()
table_param.loc[0, 'in_plane_coeffs'] = ' '.join(
    np.array(in_plane_coeffs, dtype=str))
table_param.loc[0, 'out_of_plane_coeffs'] = ' '.join(
    np.array(out_of_plane_coeffs, dtype=str))
table_param.loc[0, 'p_A'] = p_A
table_param.loc[0, 'n_D1'] = n_D1
table_param.loc[0, 'n_D2'] = n_D2
table_param.loc[0, 'n_D3'] = n_D3
table_param = table_param.transpose()

parameters = Parameters(
    constraints=constraints,
    p_A=p_A,
    n_D1=n_D1,
    n_D2=n_D2,
    n_D3=n_D3,
    repair_membrane_switch=True,
    repair_flexural_switch=True)
#==============================================================================
# Tests
#==============================================================================
table_10_bal = pd.DataFrame()
table_membrane = pd.DataFrame()
table_diso_contig = pd.DataFrame()
table_flexural = pd.DataFrame()

data = pd.read_excel(file_to_open, sheet_name='stacks', index_col=0)
#print(data)

t_cummul_10_bal = 0
Code example #19
File: decoder.py Project: ischeinfeld/py_nlp
class Decoder:
    def __init__(self, corpus_name, rarity=5):
        """Initialize parameters using a corpus

		Parameters q (transition) and e (emission) are calculated by eponymous
		functions from the class Parameters using counts over the corpus.
		See parameters_class.py for details.

		The corpus must be from the Penn WSJ treebank

		corpus_name can be one of three names:
			"train" uses /Users/ischeinfeld/Documents/Code/WSJ/train.txt
			"develop" uses /Users/ischeinfeld/Documents/Code/WSJ/develop.txt"
			"test" uses /Users/ischeinfeld/Documents/Code/WSJ/test.txt"

		or corpus_name can be a complete path to the corpus text:
			ex. /dir/dir/dir/file(.txt)
		"""

        ### Create instance of Parameters with the given corpus
        # Note that this completes all the counts for q and e

        self.corpus = import_wsj(corpus_name)
        self.params = Parameters(self.corpus)

    def decode(self, sentence):
        """Decode a sentence

		Input: a sentence (str)

		Output: a tuple with lists of tokens and tags
		"""

        if isinstance(sentence, str):
            token_seq = self.prep_sentence(sentence)  # Tokenize sentence
            #print("decoder_class_log, token_seq from string", token_seq)
        else:
            token_seq = sentence[0]  # Already tokenized
            #print("decoder_class_log, token_seq from list", token_seq)

        for i in range(len(token_seq)):  # Replace rare words
            token_seq[i] = self.params.rep_rare_input(token_seq[i])

        print("Token sequence after replacing rarities:", token_seq)

        ### Calculate pi values (log probabilities) and store back pointers

        pi = []
        bp = []
        tags = self.params.tags

        pi.append({})
        pi[0]['<START>'] = {}
        pi[0]['<START>']['<START>'] = 0  # pi[k][u][v] = 0 because log1 = 0

        bp.append({})
        bp[0]['<START>'] = {}
        bp[0]['<START>']['<START>'] = None  # bp[k][u][v]

        for k in range(1, len(token_seq) + 1):
            pi.append({})  # pi[k] = {}
            bp.append({})  # bp[k] = {}
            for u in tags:
                pi[k][u] = {}
                bp[k][u] = {}
                for v in tags:
                    max_log_prob = float('-inf')
                    pi[k][u][v] = max_log_prob  # Pi value
                    bp[k][u][v] = None  # back pointer value
                    for w in pi[k - 1].keys():
                        try:
                            log_prob = (
                                pi[k - 1][w][u] + log(self.params.q(v, w, u)) +
                                log(self.params.e(token_seq[k - 1], v)))
                            # token_seq[k-1] is the token at v
                        except KeyError:
                            log_prob = float('-inf')
                        except ValueError:
                            log_prob = float('-inf')

                        if log_prob >= max_log_prob:  # Explicit
                            max_log_prob = log_prob
                            pi[k][u][v] = log_prob  # New max log probability
                            bp[k][u][v] = w  # Backpointer to w

        ### Find last two tags, using <STOP> transition probability

        max_log_prob = float('-inf')
        for u in tags:
            for v in tags:
                try:
                    log_prob = (pi[len(token_seq)][u][v] +
                                log(self.params.q('<STOP>', u, v)))
                except KeyError:
                    log_prob = float('-inf')
                except ValueError:
                    log_prob = float('-inf')

                if log_prob >= max_log_prob:  # Explicit
                    max_log_prob = log_prob
                    yn, yn_1 = v, u  #  | yn is y sub n |  yn_1 is y sub (n-1)

        ### Get tag sequence using backtracking

        tag_seq = []  # Replace with ['<START>', '<START>'] if desired
        for word in token_seq:
            tag_seq.append(None)

        tag_seq[-1] = yn
        tag_seq[-2] = yn_1

        for i in range(len(tag_seq) - 3, -1, -1):
            tag_seq[i] = bp[i + 3][tag_seq[i + 1]][tag_seq[i + 2]]

        print("Log probability is:", max)
        print("Probability is:", exp(max))
        return (token_seq, tag_seq)

    def prep_sentence(self, sentence):
        """Tokenizes a sentence string"""
        sentence_list = TreebankWordTokenizer().tokenize(sentence)
        return sentence_list
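
A short usage sketch for the Decoder class above, assuming the WSJ corpus paths named in the docstring are available; the sentence is illustrative.

# Hedged usage sketch for Decoder.decode.
decoder = Decoder('train')  # builds Parameters counts over the training corpus
tokens, tags = decoder.decode("The cat sat on the mat .")
for token, tag in zip(tokens, tags):
    print(token, tag)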
Code example #20
class Environment(object):

    #
    # VARIABLES
    #
    identifier = ""

    parameters = Parameters()
    state = State()
    banks = []
    network = Network("")

    #
    # METHODS
    #
    # -------------------------------------------------------------------------
    # __init__
    # -------------------------------------------------------------------------
    def __init__(self):
        pass

    # -------------------------------------------------------------------------
    # initialize
    # -------------------------------------------------------------------------
    def initialize(self, environment_directory, identifier):
        self.identifier = identifier
        # first, read in the environment file
        environment_filename = environment_directory + identifier + ".xml"
        self.read_environment_file(environment_filename)
        logging.info("  environment file read: %s", environment_filename)

        # then read in all the banks
        if (self.parameters.bankDirectory != ""):
            # print("oooo", self.parameters.bankDirectory)
            if (self.parameters.bankDirectory != "none"):  # none is used for tests only
                self.initialize_banks_from_files(self.parameters.bankDirectory, self.get_state(0), 0)
                logging.info("  banks read from directory: %s", self.parameters.bankDirectory)
        else:
            logging.error("ERROR: no bankDirectory given in %s\n", environment_filename)

        self.initial_assets = 0.0  # the initial assets are needed to determine the fire-sale price in bank.liquidate_assets
        for bank in self.banks:
            self.initial_assets += bank.get_account("I")

        # finally, create the network
        # note: this has to be done after creating the banks, as they are
        # passed to the network as node objects
        self.network.identifier = self.identifier
        self.network.initialize_networks(self)

        # when there is a SIFI surcharge, implement it now on the banking capital
        self.apply_sifi_surcharge()

    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # read_environment_file
    # -------------------------------------------------------------------------
    def read_environment_file(self, environmentFilename):
        from xml.etree import ElementTree
        xmlText = open(environmentFilename).read()

        element = ElementTree.XML(xmlText)
        self.identifier = element.attrib['title']

        self.parameters.identifier = self.identifier

        # loop over all entries in the xml file
        for subelement in element:
            # the first set of parameters will be valid for the whole simulation
            if (subelement.attrib['type'] == 'numSweeps'):
                self.parameters.numSweeps = int(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'numSimulations'):
                self.parameters.numSimulations = int(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'numBanks'):
                self.parameters.numBanks = int(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'bankDirectory'):
                self.parameters.bankDirectory = str(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'graphType'):
                self.parameters.graphType = str(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'graphParameter1'):
                self.parameters.graphParameter1 = float(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'graphParameter2'):
                self.parameters.graphParameter2 = float(subelement.attrib['value'])
            if (subelement.attrib['type'] == 'contractsNetworkFile'):
                self.parameters.contractsNetworkFile = str(subelement.attrib['value'])
            # now also read in the parameters that can change during the simulation
            if (subelement.attrib['type'] == 'changing'):
                name = subelement.attrib['name']
                value = float(subelement.attrib['value'])
                validFrom = subelement.attrib['validity'].rsplit("-")[0]
                validTo = subelement.attrib['validity'].rsplit("-")[1]
                self.parameters.add_parameter(name, value, validFrom, validTo)

    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # write_environment_file(file_name)
    # -------------------------------------------------------------------------
    def write_environment_file(self, file_name):
        out_file = open(file_name + "-check.xml", 'w')

        text = "<environment title='" + self.identifier + "'>\n"
        text += "    <parameter type='numSweeps' value='" + str(self.parameters.numSweeps) + "'></parameter>\n"
        text += "    <parameter type='numSimulations' value='" + str(
            self.parameters.numSimulations) + "'></parameter>\n"
        text += "    <parameter type='numBanks' value='" + str(self.parameters.numBanks) + "'></parameter>\n"
        text += "    <parameter type='bankDirectory' value='" + str(self.parameters.bankDirectory) + "'></parameter>\n"
        text += "    <parameter type='graphType' value='" + str(self.parameters.graphType) + "'></parameter>\n"
        text += "    <parameter type='contractsNetworkFile' value='" + str(
            self.parameters.contractsNetworkFile) + "'></parameter>\n"

        for entry in self.parameters.parameters:
            text += "    <parameter type='changing' name='" + str(entry['type']) + "' value='" + str(
                entry['value']) + "' validity='" + str(entry['validity'][0]) + "-" + str(
                entry['validity'][1]) + "'></parameter>\n"

        text += "</environment>\n"

        out_file.write(text)
        out_file.close()

    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # initialize_banks_from_files
    # banks have to be initialized for each simulation as a number of banks might become inactive
    # in the previous simulation
    # -------------------------------------------------------------------------
    def initialize_banks_from_files(self, bankDirectory, state, time):
        # this routine is called more than once, so we have to reset the list of banks each time
        self.banks = []

        listing = os.listdir(bankDirectory)
        # print('listing', listing)
        if len(listing) != self.parameters.numBanks:
            logging.error("    ERROR: number of configuration files in %s (=%s) does not match numBanks (=%s)",
                          bankDirectory, str(len(listing)), str(self.parameters.numBanks))

        for infile in listing:
            # print("infile", infile)
            bank = Bank()
            bank.get_parameters_from_file(bankDirectory + infile, self.get_state(0), self.parameters.numBanks, time)
            # print("bank", bank)
            self.banks.append(bank)
            bank.__del__()  # TODO not sure if this is really safe, but it is better than doing nothing about all those created instances...

    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # get_state
    # -------------------------------------------------------------------------
    def get_state(self, time):  # TODO bring parameters in same order as in environment file and in state.__str__()
        # for each time t in the simulation return the actual set of parameters
        for parameter in self.parameters.parameters:
            validFrom = int(parameter['validity'][0])
            validTo = int(parameter['validity'][1])
            if (int(time) >= int(validFrom)) and (int(time) <= int(validTo)):  # we have a valid parameter set
                if parameter['type'] == 'rb':
                    self.state.rb = float(parameter['value'])
                if parameter['type'] == 'rd':
                    self.state.rd = float(parameter['value'])
                if parameter['type'] == 'r':
                    self.state.r = float(parameter['value'])
                if parameter['type'] == 'collateralQuality':
                    self.state.collateralQuality = float(parameter['value'])
                if parameter['type'] == 'successProbabilityFirms':
                    self.state.successProbabilityFirms = float(parameter['value'])
                if parameter['type'] == 'positiveReturnFirms':
                    self.state.positiveReturnFirms = float(parameter['value'])
                if parameter['type'] == 'scaleFactorHouseholds':
                    self.state.scaleFactorHouseholds = float(parameter['value'])
                if parameter['type'] == 'dividendLevel':
                    self.state.dividendLevel = float(parameter['value'])
                if parameter['type'] == 'pFinancial':
                    self.state.pFinancial = float(parameter['value'])
                if parameter['type'] == 'rhoFinancial':
                    self.state.rhoFinancial = float(parameter['value'])
                if parameter['type'] == 'pReal':
                    self.state.pReal = float(parameter['value'])
                if parameter['type'] == 'rhoReal':
                    self.state.rhoReal = float(parameter['value'])
                if parameter['type'] == 'xiBank':
                    self.state.xiBank = float(parameter['value'])
                if parameter['type'] == 'thetaBank':
                    self.state.thetaBank = float(parameter['value'])
                if parameter['type'] == 'rhoBank':
                    self.state.rhoBank = float(parameter['value'])
                if parameter['type'] == 'shockType':
                    self.state.shockType = int(parameter['value'])
                if parameter['type'] == 'gammaBank':
                    self.state.gammaBank = float(parameter['value'])
                if parameter['type'] == 'assetNumber':
                    self.state.assetNumber = float(parameter['value'])
                if parameter['type'] == 'liquidationDiscountFactor':
                    self.state.liquidationDiscountFactor = float(parameter['value'])
                if parameter['type'] == 'riskAversionDiscountFactor':
                    self.state.riskAversionDiscountFactor = float(parameter['value'])
                if parameter['type'] == 'riskAversionAmplificationFactor':
                    self.state.riskAversionAmplificationFactor = float(parameter['value'])
                if parameter['type'] == 'interbankLoanMaturity':
                    self.state.interbankLoanMaturity = float(parameter['value'])
                if parameter['type'] == 'firmLoanMaturity':
                    self.state.firmLoanMaturity = float(parameter['value'])
                if parameter['type'] == 'sifiSurchargeFactor':
                    self.state.sifiSurchargeFactor = float(parameter['value'])
                if parameter['type'] == 'requiredCapitalRatio':
                    self.state.requiredCapitalRatio = float(parameter['value'])
                if parameter['type'] == 'liquidityCoverageRatio':
                    self.state.liquidityCoverageRatio = float(parameter['value'])
                if parameter['type'] == 'netStableFundingRatio':
                    self.state.netStableFundingRatio = float(parameter['value'])
                if parameter['type'] == 'leverageRatio':
                    self.state.leverageRatio = float(parameter['value'])

        #
        # at this point we have all the variables from the parameters[] list
        # now we need to update them to incorporate past defaults to calculate
        # new return and volatility for real and financial assets
        self.state.update_state(time)

        return self.state

    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # apply_sifi_surcharge
    # -------------------------------------------------------------------------
    def apply_sifi_surcharge(self):
        degree_sum = 0
        for bank in self.network.contracts:
            degree_sum += float(nx.degree(self.network.contracts)[bank])
        average_degree = float(degree_sum / len(self.network.contracts.nodes()))

        for bank in self.network.contracts:
            # the sifi surcharge is the product of the sifiSurchargeFactor and the connectedness as measured
            # by degree/average_degree
            # the maximum ensures that no bank has to hold less than 1.0 times their banking capital
            sifiSurcharge = max(self.get_state(0).sifiSurchargeFactor * (
                        float(nx.degree(self.network.contracts)[bank]) / average_degree), 1.0)
            bank.apply_sifi_surcharge(sifiSurcharge)
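
read_environment_file expects an XML layout mirroring what write_environment_file emits. Below is a minimal hypothetical environment file with illustrative values; the parameter names and the "from-to" validity format follow the parsing code above.

# Hypothetical <identifier>.xml in the layout read_environment_file parses.
sample_xml = """<environment title='test_env'>
    <parameter type='numSweeps' value='10'></parameter>
    <parameter type='numSimulations' value='1'></parameter>
    <parameter type='numBanks' value='3'></parameter>
    <parameter type='bankDirectory' value='none'></parameter>
    <parameter type='graphType' value='random'></parameter>
    <parameter type='contractsNetworkFile' value=''></parameter>
    <parameter type='changing' name='rb' value='0.02' validity='0-100'></parameter>
</environment>
"""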
Code example #21
"""Tests the functionality of parameters_class"""

from src.preprocessing import import_wsj
from src.parameters import Parameters

sentences = import_wsj('train')

params = Parameters(sentences)

## examples of parameters
print("q(<STOP>|NN,.): ", params.q('<STOP>', 'NN', '.'))

print("e(the|DT): ", params.e("the", "DT"))

print("If the time between now ...")
params.q('NN', 'DT', 'NN')
params.q('NN', 'DT', 'NN')
params.q('NN', 'DT', 'NN')
params.q('NN', 'DT', 'NN')
params.q('NN', 'DT', 'NN')
print("... and now is not long, than the class is preserving counts.")
Code example #22
#    print_ss(ss[:ss_ini.size//2])
#    print('objD', objD)

    if constraints.sym:
        ss[ss.size // 2 + ss.size % 2:] = np.flip(ss[:ss.size // 2])

    if count_obj:
        return ss, n_obj_func_D_calls
    return ss

if __name__ == "__main__":

    print('\n*** Test for the function repair_flexural_4 ***')
    constraints = Constraints(sym=True, dam_tol=True)
    parameters = Parameters(repair_flexural_switch=True,
                            n_D2=2,
                            constraints=constraints)
    ss = np.array([
        0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45,
        45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0,
        45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0,
        0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45,
        45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0,
        0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45,
        45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0,
        45, 45, 45, 45, 45, 0, 0, 0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0,
        0, 0, 0, 0, 0, 0, 45, 45, 45, 45, 45, 0, 0, 0, 0
    ], int)
    if constraints.sym:
        ss = np.hstack((ss, 0, np.flip(ss)))
Code example #23
File: ldm.py Project: RTIInternational/NCMInD
    def __init__(self, experiment: str, scenario: str, run: str):
        """ Ldm: Location & Disease model - a class built to run agent-based simulations.

        Current disease models implemented:
            - cdi: Use: "disease_model": "cdi" in the parameters.json
            - cre: Use: "disease_model": "cre"
        """

        # ----- Setup the model directory structure
        self.experiment_dir = Path(experiment)
        self.scenario_dir = Path(self.experiment_dir, scenario)
        self.run_dir = Path(self.scenario_dir, run)
        self.output_dir = Path(self.run_dir, "model_output")
        self.run_dir.mkdir(exist_ok=True)
        self.output_dir.mkdir(exist_ok=True)

        # ----- Setup the model parameters
        self.params = Parameters(Path(self.run_dir, "parameters.json"))
        self.seed = self.params.base["seed"]
        self.rng = np.random.RandomState(self.seed)

        self.time = 0
        self.daily_counts = pd.DataFrame()

        # ----- Population
        self.population = self.read_population()
        self.unique_ids = np.array(self.population.index.values, dtype=np.int32)
        self.county_codes = self.population.County_Code.values
        self.age_groups = self.population.Age.values
        self.logrecnos = self.population.logrecno.values
        self.population = self.population.drop(
            ["County_Code", "Age", "logrecno"], axis=1
        )

        self.concurrent_conditions = assign_conditions(
            self.age_groups, self.rng.rand(len(self.population))
        )

        # ----- For Event Tracking: Create the SQL Connection
        self.event_columns = [
            "Unique_ID",
            "Time",
            "State",
            "Location",
            "LOS",
            "Old",
            "New",
            "County",
        ]
        self.conn, self.cur = None, None
        self.create_sql_connection()

        self.location = location_models[self.params.base["location_model"]](
            model=self, params=self.params.location
        )
        self.disease = disease_models[self.params.base["disease_model"]](
            model=self, params=self.params.disease
        )

        life_dict = dict()
        for k1, v1 in self.params.life["death_probabilities"].items():
            for k2, v2 in self.params.life["death_multipliers"].items():
                life_dict[
                    AgeGroup[k1].value, self.location.locations.category_enum[k2].value
                ] = (v1 * v2)

        self.life = Life(
            model=self,
            params=self.params.life,
            enum=LifeState,
            transition_dict=life_dict,
            key_types=[AgeGroup, self.location.locations.category_enum],
        )
        # --- Death probability is based on Age + Location
        locations = [
            self.location.locations.convert_int(loc, "int_category")
            for loc in self.location.location.values
        ]
        self.life.probabilities = self.life.find_probabilities(
            list(zip(self.age_groups, locations))
        )

        self.disease.collect_agents(initiate=True)
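
The life_dict loop above combines an age-group base death probability with a location multiplier. A tiny self-contained illustration follows; the key names and values are hypothetical, not taken from the project.

# Hypothetical illustration of the life_dict construction above.
death_probabilities = {'AGE0': 0.001, 'AGE1': 0.010}     # per age group
death_multipliers = {'COMMUNITY': 1.0, 'HOSPITAL': 2.5}  # per location category
life_dict = {
    (age, location): p * m
    for age, p in death_probabilities.items()
    for location, m in death_multipliers.items()
}
print(life_dict[('AGE1', 'HOSPITAL')])  # 0.025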