def simulate(init_file):
    """This function simulates a user-specified version of the generalized Roy model."""
    init_dict = read(init_file)

    # Basic consistency checks on the user's request.
    check_initialization_dict(init_dict)

    # Seed the RNG so simulation results can be reproduced.
    np.random.seed(init_dict['SIMULATION']['seed'])

    # Draw the unobservable components of the model.
    U, V = simulate_unobservables(init_dict)

    # Draw the observable covariates.
    X = simulate_covariates(init_dict)

    # Determine the endogenous outcomes implied by the draws.
    Y, D, Y_1, Y_0 = simulate_outcomes(init_dict, X, U, V)

    # Persist the simulated sample to the output file.
    df = write_output(init_dict, Y, D, X, Y_1, Y_0, U, V)

    # Evaluate the criterion function at the initialization values; this is
    # only meaningful when the model contains unobservables.
    if not init_dict['DETERMINISTIC']:
        x0 = start_values(init_dict, df, 'init')
        init_dict['AUX']['criteria_value'] = calculate_criteria(
            init_dict, df, x0)

    # Write the log file.
    print_info(init_dict, df)

    return df
def simulate(init_file):
    """This function simulates a user-specified version of the generalized Roy model."""
    init_dict = read(init_file)

    # Distribute information
    seed = init_dict['SIMULATION']['seed']

    # Set random seed to ensure reproducibility
    np.random.seed(seed)

    # Simulate unobservables of the model
    U, V = simulate_unobservables(init_dict)

    # Simulate observables of the model
    X = simulate_covariates(init_dict, 'TREATED')
    Z = simulate_covariates(init_dict, 'COST')

    # Simulate endogeneous variables of the model
    Y, D, Y_1, Y_0 = simulate_outcomes(init_dict, X, Z, U)

    # Write output file
    df = write_output(init_dict, Y, D, X, Z, Y_1, Y_0, U, V)

    # Calculate criterion function value. Use idiomatic truthiness instead of
    # the anti-pattern `is False` identity comparison.
    if not init_dict['DETERMINISTIC']:
        x0 = start_values(init_dict, df, 'init')
        init_dict['AUX']['criteria_value'] = calculate_criteria(
            init_dict, df, x0)

    # Print log file
    print_info(init_dict, df)

    return df
def par_fit(init_file):
    """The function estimates the coefficients of the simulated data set."""
    check_presence_init(init_file)

    dict_ = read(init_file)
    np.random.seed(dict_["SIMULATION"]["seed"])

    # We perform some basic consistency checks regarding the user's request.
    check_presence_estimation_dataset(dict_)

    # Distribute initialization information.
    data = read_data(dict_["ESTIMATION"]["file"])
    num_treated = dict_["AUX"]["num_covars_treated"]
    num_untreated = num_treated + dict_["AUX"]["num_covars_untreated"]

    _, X1, X0, Z1, Z0, Y1, Y0 = process_data(data, dict_)

    # With maxiter set to zero we only evaluate at the initialization values.
    if dict_["ESTIMATION"]["maxiter"] == 0:
        option = "init"
    else:
        option = dict_["ESTIMATION"]["start"]

    # Define starting values.
    x0 = start_values(dict_, data, option)
    opts, method = optimizer_options(dict_)
    dict_["AUX"]["criteria"] = calculate_criteria(dict_, X1, X0, Z1, Z0, Y1, Y0, x0)
    dict_["AUX"]["starting_values"] = backward_transformation(x0)
    rslt_dict = bfgs_dict()
    if opts["maxiter"] == 0:
        rslt = adjust_output(None, dict_, x0, X1, X0, Z1, Z0, Y1, Y0, rslt_dict)
    else:
        opt_rslt = minimize(
            minimizing_interface,
            x0,
            args=(dict_, X1, X0, Z1, Z0, Y1, Y0, num_treated, num_untreated,
                  rslt_dict),
            method=method,
            options=opts,
        )
        rslt = adjust_output(opt_rslt, dict_, opt_rslt["x"], X1, X0, Z1, Z0,
                             Y1, Y0, rslt_dict)

    # Print output files.
    print_logfile(dict_, rslt)

    # Write the comparison file unless the user explicitly disabled it by
    # setting the 'comparison' flag to zero.
    if dict_["ESTIMATION"].get("comparison", 1) != 0:
        write_comparison(data, rslt)

    return rslt
def estimate(init_file):
    """The function estimates the coefficients of the simulated data set."""
    check_presence_init(init_file)

    dict_ = read(init_file)
    np.random.seed(dict_['SIMULATION']['seed'])

    # We perform some basic consistency checks regarding the user's request.
    check_presence_estimation_dataset(dict_)
    check_initialization_dict(dict_)
    check_init_file(dict_)

    # Distribute initialization information.
    data_file = dict_['ESTIMATION']['file']

    # With maxiter set to zero we only evaluate at the initialization values.
    if dict_['ESTIMATION']['maxiter'] == 0:
        option = 'init'
    else:
        option = dict_['ESTIMATION']['start']

    # Read data frame.
    data = read_data(data_file)

    # Define starting values.
    x0 = start_values(dict_, data, option)
    opts, method = optimizer_options(dict_)
    dict_['AUX']['criteria'] = calculate_criteria(dict_, data, x0)
    dict_['AUX']['starting_values'] = backward_transformation(x0)
    rslt_dict = bfgs_dict()
    if opts['maxiter'] == 0:
        rslt = adjust_output(None, dict_, x0, data, rslt_dict)
    else:
        opt_rslt = minimize(minimizing_interface, x0,
                            args=(dict_, data, rslt_dict), method=method,
                            options=opts)
        rslt = adjust_output(opt_rslt, dict_, opt_rslt['x'], data, rslt_dict)

    # Print output files.
    print_logfile(dict_, rslt)

    # Write the comparison file unless the user explicitly disabled it by
    # setting the 'comparison' flag to zero.
    if dict_['ESTIMATION'].get('comparison', 1) != 0:
        write_comparison(dict_, data, rslt)

    return rslt
def test3():
    """Check that the simulation process works when there are only treated or only
    untreated agents (number of agents set to one), and that perfect separation
    forces the estimation start values back to the initialization-file values.
    """
    constr = {'AGENTS': 1, 'DETERMINISTIC': False}
    for _ in range(10):
        generate_random_dict(constr)
        dict_ = read('test.grmpy.ini')
        df = simulate('test.grmpy.ini')
        start = start_values(dict_, df, 'auto')
        np.testing.assert_equal(dict_['AUX']['init_values'][:-6], start[:-4])
def test10():
    """Verify that the start_values function returns the initialization-file
    coefficients when the start option is set to 'init'.
    """
    for _ in range(10):
        generate_random_dict({'DETERMINISTIC': False})
        dict_ = read('test.grmpy.ini')
        expected = []
        for section in ('TREATED', 'UNTREATED', 'CHOICE'):
            expected += list(dict_[section]['all'])
        df = simulate('test.grmpy.ini')
        x0 = start_values(dict_, df, 'init')[:-4]
        np.testing.assert_array_equal(expected, x0)
def test11():
    """Verify that the refactor auxiliary function leaves the init-file values
    untouched when the maximum number of iterations is set to zero.
    """
    for _ in range(10):
        constr = {
            'DETERMINISTIC': False,
            'AGENTS': 1000,
            'MAXITER': 0,
            'START': 'init',
        }
        generate_random_dict(constr)
        init_dict = read('test.grmpy.ini')
        df = simulate('test.grmpy.ini')
        expected = backward_transformation(start_values(init_dict, df, 'init'))
        rslt = estimate('test.grmpy.ini')
        np.testing.assert_equal(expected, rslt['AUX']['x_internal'])
def test2():
    """This test runs a random selection of five regression tests from the
    package's regression test vault.
    """
    fname = os.path.dirname(
        grmpy.__file__) + '/test/resources/regression_vault.grmpy.json'
    # Close the vault file explicitly instead of leaking the handle that the
    # original `json.load(open(fname))` left dangling.
    with open(fname) as vault:
        tests = json.load(vault)

    for i in np.random.choice(range(len(tests)), size=5):
        stat, dict_, criteria = tests[i]
        print_dict(dict_)
        df = simulate('test.grmpy.ini')
        init_dict = read('test.grmpy.ini')
        start = start_values(init_dict, df, 'init')
        criteria_ = calculate_criteria(init_dict, df, start)
        np.testing.assert_array_almost_equal(criteria, criteria_)
        np.testing.assert_almost_equal(np.sum(df.sum()), stat)
def test2():
    """This test runs a random selection of five regression tests from the
    package's regression test vault.
    """
    fname = TEST_RESOURCES_DIR + '/regression_vault.grmpy.json'
    # Close the vault file explicitly instead of leaking the handle that the
    # original `json.load(open(fname))` left dangling.
    with open(fname) as vault:
        tests = json.load(vault)
    random_choice = np.random.choice(range(len(tests)), 5)
    tests = [tests[i] for i in random_choice]

    for test in tests:
        stat, dict_, criteria = test
        print_dict(dict_)
        df = simulate('test.grmpy.ini')
        init_dict = read('test.grmpy.ini')
        start = start_values(init_dict, df, 'init')
        criteria_ = calculate_criteria(init_dict, df, start)
        np.testing.assert_almost_equal(np.sum(df.sum()), stat)
        np.testing.assert_array_almost_equal(criteria, criteria_)
def test3():
    """Ensure that the criterion-function value of the simulated sample and the
    'estimated' sample agree when both include an identical number of
    individuals.
    """
    for _ in range(5):
        constr = {
            'DETERMINISTIC': False,
            'AGENTS': 1000,
            'START': 'init',
            'OPTIMIZER': 'SCIPY-BFGS',
            'SAME_SIZE': True,
        }
        generate_random_dict(constr)
        df1 = simulate('test.grmpy.ini')
        rslt = estimate('test.grmpy.ini')
        init_dict = read('test.grmpy.ini')
        df2 = simulate_estimation(init_dict, rslt)
        start = start_values(init_dict, df1, 'init')
        criteria = [calculate_criteria(init_dict, sample, start)
                    for sample in (df1, df2)]
        np.testing.assert_allclose(criteria[1], criteria[0], rtol=0.1)
def test3():
    """Ensure that the criterion-function value of the simulated sample and the
    'estimated' sample agree when both include an identical number of
    individuals.
    """
    for _ in range(5):
        constr = constraints(probability=0.0, agents=10000, start='init',
                             optimizer='SCIPY-BFGS')
        dict_ = generate_random_dict(constr)
        print_dict(dict_)
        df1 = simulate('test.grmpy.ini')
        rslt = estimate('test.grmpy.ini')
        init_dict = read('test.grmpy.ini')
        df2 = simulate_estimation(init_dict, rslt, df1)
        start = start_values(init_dict, df1, 'init')
        criteria = [calculate_criteria(init_dict, sample, start)
                    for sample in (df1, df2)]
        np.testing.assert_allclose(criteria[1], criteria[0], rtol=0.1)
def estimate(init_file):
    """The function estimates the coefficients of the simulated data set."""
    # Import init file as dictionary. Raise instead of `assert` so the check
    # is not stripped when Python runs with -O.
    if not os.path.isfile(init_file):
        raise FileNotFoundError(init_file)
    dict_ = read(init_file)

    # Check if the initialization file specifications are appropriate for the
    # estimation process.
    check_init_file(dict_)

    data_file = dict_['ESTIMATION']['file']
    if not os.path.isfile(data_file):
        raise FileNotFoundError(data_file)

    # Start value option.
    option = dict_['ESTIMATION']['start']

    # Read data frame.
    data = pd.read_table(data_file, delim_whitespace=True, header=0)

    # Define starting values.
    x0 = start_values(dict_, data, option)
    opts, method = optimizer_options(dict_)
    dict_['AUX']['criteria'] = calculate_criteria(dict_, data, x0)

    if opts['maxiter'] == 0:
        rslt = adjust_output_maxiter_zero(dict_, x0)
    else:
        rslt_dict = bfgs_dict()
        opt_rslt = minimize(minimizing_interface, x0,
                            args=(dict_, data, rslt_dict), method=method,
                            options=opts)
        rslt = adjust_output(opt_rslt, dict_, opt_rslt['x'], rslt_dict)

    # Print output files.
    print_logfile(dict_, rslt)
    write_descriptives(dict_, data, rslt)

    return rslt
np.random.seed(1234235)
seeds = np.random.randint(0, 1000, size=NUM_TESTS)
directory = os.path.dirname(__file__)
file_dir = os.path.join(directory, 'regression_vault.grmpy.json')

# Create the regression vault: for every seed, simulate a random model and
# record the sample statistic plus the criterion value at the start values.
# (The pointless `if True:` guards around both phases were removed.)
tests = []
for seed in seeds:
    np.random.seed(seed)
    constr = constraints(0.0)
    dict_ = generate_random_dict(constr)
    df = simulate('test.grmpy.ini')
    stat = np.sum(df.sum())
    init_dict = read('test.grmpy.ini')
    start = start_values(init_dict, df, 'init')
    criteria = calculate_criteria(init_dict, df, start)
    tests += [(stat, dict_, criteria)]
# Use a context manager so the file handle is closed instead of leaked.
with open(file_dir, 'w') as outfile:
    json.dump(tests, outfile)

# Replay the vault and check that the criterion values are reproducible.
with open(file_dir, 'r') as infile:
    tests = json.load(infile)
for test in tests:
    stat, dict_, criteria = test
    print_dict(dict_)
    init_dict = read('test.grmpy.ini')
    df = simulate('test.grmpy.ini')
    start = start_values(init_dict, df, 'init')
    criteria_ = calculate_criteria(init_dict, df, start)
    np.testing.assert_array_almost_equal(criteria, criteria_)