def get_wealth_data(bin_weights, J, flag_graphs, output_dir):
    '''
    Inputs:
        bin_weights = ability weights (Jx1 array)
        J           = number of ability groups (scalar)
        flag_graphs = whether or not to graph distribution (bool)
        output_dir  = path to the starting data
    Output:
        Saves a pickle of the desired wealth percentiles.  Graphs those levels.
    '''
    if flag_graphs:
        wealth_data_graphs(output_dir)
    # convert bin_weights to integers to index the array of data moments
    bins2 = (bin_weights * 100).astype(int)
    perc_array = np.cumsum(bins2)
    perc_array -= 1
    wealth_data_array = np.zeros((78, J))
    # data2 is the module-level array of wealth data moments
    wealth_data_array[:, 0] = data2[:, :perc_array[0]].mean(axis=1)
    # pull out the data moments for each percentile
    for j in xrange(1, J):
        wealth_data_array[:, j] = data2[
            :, perc_array[j - 1]:perc_array[j]].mean(axis=1)
    var_names = ['wealth_data_array']
    dictionary = {}
    for key in var_names:
        dictionary[key] = locals()[key]
    saved_moments_dir = os.path.join(output_dir, "Saved_moments")
    utils.mkdirs(saved_moments_dir)
    pkl_path = os.path.join(saved_moments_dir, "wealth_data_moments.pkl")
    pickle.dump(dictionary, open(pkl_path, "wb"))
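# A minimal sketch (not part of the original module) of the percentile
# indexing used above: cumulative-summing the integer bin weights yields the
# upper column index of each ability group's slice of the data moments.
# The weights below are hypothetical.
import numpy as np

bin_weights = np.array([0.25, 0.25, 0.5])   # hypothetical ability weights
bins2 = (bin_weights * 100).astype(int)     # -> [25, 25, 50]
perc_array = np.cumsum(bins2) - 1           # -> [24, 49, 99]
# group 0 averages columns 0..23, group 1 columns 24..48, group 2 columns 49..98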
def test(vid_path="../examples/videos/sample_video1.avi"):
    mkdirs("test_results")
    start = time.time()
    test_bgsub_fd(vid_path)
    test_bgsub_es(vid_path)
    test_bgsub_mog(vid_path)
    time_taken = time.time() - start
    print "Tested all Background Subtraction methods ... [DONE] in " + str(time_taken) + " seconds"
def showEvent(self, evt):
    self.first_run = True
    mkdirs("data")
    create_history_db()
    self.message_loop_task.start()
    self.urlComboBox.addItem("http://zc.trade.500.com/sfc/index.php")
    urls = get_last_access_urls()
    for url in urls:
        self.urlComboBox.addItem(url)
def visualize_rep(VAE, data_loader, limit=3, inter=2 / 3, loc=-1, z_dim=128,
                  output_dir='traverse_result'):
    decoder = VAE.decode
    encoder = VAE.encode
    interpolation = torch.arange(-limit, limit + 0.1, inter)
    fixed_idx = 0
    fixed_img = data_loader.dataset.__getitem__(fixed_idx)
    fixed_img = fixed_img.to('cpu').unsqueeze(0)
    fixed_img_z = encoder(fixed_img)[:, :z_dim]
    random_z = torch.rand(1, z_dim, 1, 1, device='cpu')
    Z = {'fixed_img': fixed_img_z, 'random_z': random_z}

    # compute the output directory once; joining inside the key loop (as the
    # old code did) would nest the path on every iteration
    output_dir = os.path.join(output_dir, str(1))
    mkdirs(output_dir)

    gifs = []
    for key in Z:
        z_ori = Z[key]
        samples = []
        for row in range(z_dim):
            if loc != -1 and row != loc:
                continue
            z = z_ori.clone()
            for val in interpolation:
                z[:, row] = val
                sample = F.sigmoid(decoder(z)).data
                samples.append(sample)
                gifs.append(sample)
        samples = torch.cat(samples, dim=0).cpu()
        title = '{}_latent_traversal(iter:{})'.format(key, 1)

    gifs = torch.cat(gifs)
    print("gif size is {}".format(gifs.shape))
    gifs = gifs.view(len(Z), z_dim, len(interpolation), 1, 256, 256).transpose(1, 2)
    for i, key in enumerate(Z.keys()):
        for j, val in enumerate(interpolation):
            save_image(tensor=gifs[i][j].cpu(),
                       filename=os.path.join(output_dir, '{}_{}.jpg'.format(key, j)),
                       nrow=z_dim, pad_value=1)
def main():
    mkdirs(INSTALLDIR)
    os.environ.update(make_build_env())
    args = parse_args()
    if on_travis() and not args.test_only:
        fetch_and_build()
    if on_travis():
        dbs = ('sqlite3', 'mysql')
    else:
        dbs = ('sqlite3', )
    for db in dbs:
        shell('rm -rf {}/*'.format(INSTALLDIR))
        start_and_test_with_db(db)
def save(self):
    cp = ConfigParser.RawConfigParser()
    cp.add_section(self.name)
    if self._extensions:
        cp.set(self.name, 'extensions', self._extensions)
    if self._cache_size_limit:
        cp.set(self.name, 'cache_size_limit', self._cache_size_limit)
    if self._cache_key:
        cp.set(self.name, 'cache_key', self._cache_key)
    if self._proxy_ip:
        cp.set(self.name, 'proxy_ip', self._proxy_ip)
    if not os.path.exists(os.path.dirname(self._path)):
        mkdirs(os.path.dirname(self._path))
    with open(self._path, 'wb') as f:
        cp.write(f)
def run(summ_path, ref_path, rouge_args=None, verbose=False, saveto=None,
        eos=".", ignore_empty=False, stemming=True):
    s = settings.Settings()
    s._load()
    stime = time()
    with tempfile.TemporaryDirectory() as dirpath:
        sys_root, model_root = [
            os.path.join(dirpath, _) for _ in ["system", "model"]
        ]
        print("Preparing documents...", end=" ")
        utils.mkdirs([sys_root, model_root])
        ignored = utils.split_files(model_file=ref_path,
                                    system_file=summ_path,
                                    model_dir=model_root,
                                    system_dir=sys_root,
                                    eos=eos,
                                    ignore_empty=ignore_empty)
        print("%d line(s) ignored" % len(ignored))
        print("Running ROUGE...")
        log_level = logging.ERROR if not verbose else None
        r = pyrouge.Rouge155(rouge_dir=os.path.dirname(s.data['ROUGE_path']),
                             log_level=log_level,
                             stemming=stemming)
        r.system_dir = sys_root
        r.model_dir = model_root
        r.system_filename_pattern = r's.(\d+).txt'
        r.model_filename_pattern = 'm.[A-Z].#ID#.txt'
        data_arg = "-e %s" % s.data['ROUGE_data']
        if not rouge_args:
            rouge_args = ['-c', 95, '-r', 1000, '-n', 2, '-a']
            rouge_args_str = " ".join([str(_) for _ in rouge_args])
        else:
            rouge_args_str = rouge_args
        rouge_args_str = "%s %s" % (data_arg, rouge_args_str)
        output = r.convert_and_evaluate(rouge_args=rouge_args_str)
        if saveto is not None:
            saveto = open(saveto, 'w')
        utils.tee(saveto, output)
        print("Elapsed time: %.3f seconds" % (time() - stime))
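# Hedged usage sketch for run() above; the file paths are hypothetical and
# the one-summary-per-line layout is an assumption based on the
# split_files() call inside run().
if __name__ == '__main__':
    run(summ_path='outputs/summaries.txt',    # hypothetical system output file
        ref_path='outputs/references.txt',    # hypothetical reference file
        verbose=False,
        saveto='outputs/rouge_scores.txt')    # scores also echoed to stdout via utils.tee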
def save_checkpoint(self, iteration):
    encoderMx_path = os.path.join(self.ckpt_dir, 'iter_%s_encoderMx.pt' % iteration)
    encoderMy_path = os.path.join(self.ckpt_dir, 'iter_%s_encoderMy.pt' % iteration)
    decoderMy_path = os.path.join(self.ckpt_dir, 'iter_%s_decoderMy.pt' % iteration)
    mkdirs(self.ckpt_dir)
    torch.save(self.encoderMx, encoderMx_path)
    torch.save(self.encoderMy, encoderMy_path)
    torch.save(self.decoderMy, decoderMy_path)
def plot_result(self, data_plot, save_fig_dir, is_plot=True):
    utils.mkdirs(save_fig_dir)
    columns = list(data_plot.columns)
    print("Start to plot and save {} figures to {} ...".format(
        len(columns) - 1, save_fig_dir))
    print("Head of data plot")
    print(data_plot.head())
    x_offset = -0.07
    y_offset = 0.01
    mpl.style.use("seaborn")
    model_column = columns[0]
    for score_column in columns[1:]:
        # Sort by ascending score
        data_plot.sort_values(score_column, ascending=True, inplace=True)
        ax = data_plot.plot(kind="bar", x=model_column, y=score_column,
                            legend=None, color='C1',
                            figsize=(len(self.models) + 1, 4), width=0.3)
        title = "Mean {} score - {} cross validation".format(score_column, self.cv)
        ax.set(title=title, xlabel=model_column, ylabel=score_column)
        ax.tick_params(axis='x', rotation=0)
        # Set lower and upper limit of y-axis
        min_score = data_plot.loc[:, score_column].min()
        max_score = data_plot.loc[:, score_column].max()
        y_lim_min = (min_score - 0.2) if min_score > 0.2 else 0
        y_lim_max = (max_score + 1) if max_score > 1 else 1
        ax.set_ylim([y_lim_min, y_lim_max])
        # Annotate each bar with its value so it can be read clearly
        for p in ax.patches:
            b = p.get_bbox()
            text_value = "{:.4f}".format(b.y1)
            ax.annotate(text_value, xy=(b.x0 + x_offset, b.y1 + y_offset))
        save_fig_path = os.path.join(save_fig_dir, "{}.png".format(score_column))
        plt.savefig(save_fig_path, dpi=800)
    print("Plot and save {} figures to {} done".format(
        len(columns) - 1, save_fig_dir))
    if is_plot:
        plt.show()
def __init__(self, datadir, db='sqlite3'):
    self.db = db
    self.datadir = datadir
    self.central_conf_dir = join(datadir, 'conf')
    self.seafile_conf_dir = join(datadir, 'seafile-data')
    self.ccnet_conf_dir = join(datadir, 'ccnet')
    self.log_dir = join(datadir, 'logs')
    mkdirs(self.log_dir)
    self.ccnet_log = join(self.log_dir, 'ccnet.log')
    self.seafile_log = join(self.log_dir, 'seafile.log')
    self.ccnet_proc = None
    self.seafile_proc = None
def get_train_data_generator(data_dir, logger, save=False):
    """
    Function to return data generators for train and valid data sets.
    """
    train_dir = pjoin(data_dir, "train")
    valid_dir = pjoin(data_dir, "valid")
    logger.info("Summarizing sample info in train set.")
    train_num = log_data_info(train_dir, logger)
    logger.info("Summarizing sample info in valid set.")
    valid_num = log_data_info(valid_dir, logger)
    logger.info(
        "Total number of training data: {}; Total number of validation data: {}."
        .format(train_num, valid_num))
    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,       # degree range for random rotations
        width_shift_range=0.2,   # fraction of width for random horizontal shifts
        height_shift_range=0.2,  # fraction of height for random vertical shifts
        shear_range=0.2,         # counter-clockwise shear angle
        zoom_range=0.2,          # range for random zoom
        horizontal_flip=True,    # randomly flip images horizontally
        vertical_flip=True       # randomly flip images vertically
    )
    valid_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    save_dir = None
    if save:
        save_dir = pjoin(data_dir, "gen_train")
        utils.mkdirs(save_dir)
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=PATCH_BATCH_SIZE,
        class_mode='categorical',
        save_to_dir=save_dir)
    valid_generator = valid_datagen.flow_from_directory(
        valid_dir,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=PATCH_BATCH_SIZE,
        class_mode='categorical')
    return (train_generator, train_num), (valid_generator, valid_num)
def run(iterator, args=None):
    sindarin = args.folder + '/run.sin'
    runfolder = args.folder + '-' + str(iterator)
    mkdirs(runfolder)
    shutil.copyfile(sindarin, os.path.join(runfolder, 'run.sin'))
    with cd(runfolder):
        replace_file('run.sin', args.scan_object, iterator)
        subprocess.call('rm -f *grid', shell=True)
        if (not args.dryrun and not os.path.isfile('done')):
            whizard_run('whizard -r', 'run.sin')
            with open('done', 'a'):
                os.utime('done', None)
            print 'done with ' + runfolder
        else:
            print 'skipping ' + runfolder
def run(self):
    mkdirs("cache_web_pages/html")
    send_msg("Starting fundamentals analysis..")
    matchs = self.extract_matchs(str(self.targetUrl))
    self.batch_download_data_pages(matchs)
    for match in matchs:
        msg = "Fetching opening odds %s" % match["odds_url"]
        send_msg(msg)
        self.parse_odds(match, get_cache_web_file_name(match["odds_url"]))
        msg = "Analyzing fundamentals %s" % match["base_face_url"]
        send_msg(msg)
        self.parse_baseface(match, get_cache_web_file_name(match["base_face_url"]))
    self.dataset = matchs
    send_msg("refresh_match_grid")
    send_msg("Done.")
def save_checkpoint(self, iteration):
    encoder_path = os.path.join(self.ckpt_dir, 'iter_%s_encoder.pt' % iteration)
    decoder_path = os.path.join(self.ckpt_dir, 'iter_%s_decoder.pt' % iteration)
    rvec_path = os.path.join(self.ckpt_dir, 'iter_%s_rvec.pt' % iteration)
    D_path = os.path.join(self.ckpt_dir, 'iter_%s_D.pt' % iteration)
    mkdirs(self.ckpt_dir)
    torch.save(self.encoder, encoder_path)
    torch.save(self.decoder, decoder_path)
    torch.save(self.rvec, rvec_path)
    torch.save(self.D, D_path)
def set_up_dir(self):
    project_path = pjoin("Experiments/%s_%s" % (self.args.project_name, self.ExpID))
    if hasattr(self.args, 'resume_ExpID') and self.args.resume_ExpID:
        project_path = get_project_path(self.args.resume_ExpID)
    if self.args.debug:
        # debug has the highest priority; if debug, everything is saved in Debug_Dir
        project_path = "Debug_Dir"

    self.weights_path = pjoin(project_path, "weights")
    self.gen_img_path = pjoin(project_path, "gen_img")
    self.cache_path = pjoin(project_path, ".caches")
    self.log_path = pjoin(project_path, "log")
    self.logplt_path = pjoin(project_path, "log", "plot")
    self.logtxt_path = pjoin(project_path, "log", "log.txt")
    mkdirs(self.weights_path, self.gen_img_path, self.logplt_path, self.cache_path)

    self.logtxt = open(self.logtxt_path, "a+")
    # save local script history, for convenience of check
    self.script_hist = open('.script_history', 'a+')
def download_jdk():
    if not search_jdk() or settings["force_install_jdk"]:
        jdk_link = settings["jdk_link_x64"] if utils.is_x64() else settings["jdk_link_x86"]
        jdk_zip_name = settings["jdk_zip_name"]
        extracted = utils.download_file_and_extract(jdk_link, jdk_zip_name)
        if not extracted:
            print("Extracting JDK has failed. Redownloading...")
            download_jdk()
            return  # don't fall through after the recursive retry
        if os.path.exists("jdk-14.0.2+12"):
            os.rename("jdk-14.0.2+12", "jdk-14.0.2")
        default_directory = os.environ.get("APPDATA") if os.environ.get("APPDATA") is not None else os.getcwd()
        created = utils.mkdirs(default_directory)
        if not created:
            directory = os.getcwd()  # failed to create directory, can't do much here
        else:
            directory = default_directory
        new_directory = os.path.join(directory, "jdk-14.0.2")
        if os.path.exists(new_directory):
            shutil.rmtree(new_directory)
        try:
            shutil.move("jdk-14.0.2", directory)
        except:
            directory = os.getcwd()
        # Set JAVA_HOME and PATH
        extend_path(os.path.join(directory, "jdk-14.0.2"))
        settings.setKey("jdk_installation_path", directory, False)
def load_feature(config, train: bool) -> Union[Tuple[np.ndarray], np.ndarray]:
    """
    Load feature data from the "{config.feature_folder}/*.csv" files.

    Args:
        config: configuration options
        train (bool): whether this is training data

    Returns:
        - X (Tuple[np.ndarray]): training features, test features and their labels
        - X (np.ndarray): prediction features
    """
    feature_path = os.path.join(
        config.feature_folder, "train.csv" if train else "predict.csv")

    # load the feature data
    df = pd.read_csv(feature_path)
    features = [
        str(i) for i in range(1, FEATURE_NUM[config.opensmile_config] + 1)
    ]

    X = df.loc[:, features].values
    Y = df.loc[:, 'label'].values

    # path of the scaler (standardization) model
    scaler_path = os.path.join(config.checkpoint_path, 'SCALER_OPENSMILE.m')

    if train:
        # standardize the data
        scaler = StandardScaler().fit(X)
        # save the scaler model
        utils.mkdirs(config.checkpoint_path)
        joblib.dump(scaler, scaler_path)
        X = scaler.transform(X)

        # split into training and test sets
        x_train, x_test, y_train, y_test = train_test_split(
            X, Y, test_size=0.2, random_state=42)
        return x_train, x_test, y_train, y_test
    else:
        # standardize the data with the saved scaler model
        scaler = joblib.load(scaler_path)
        X = scaler.transform(X)
        return X
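# Hedged usage sketch for load_feature() above. The config object is
# hypothetical; only the attributes that load_feature() actually reads
# (feature_folder, opensmile_config, checkpoint_path) are filled in, and it
# assumes features/train.csv has already been generated.
class _Config:
    feature_folder = 'features'
    opensmile_config = 'IS10_paraling'   # assumed to be a key of FEATURE_NUM
    checkpoint_path = 'checkpoints'

x_train, x_test, y_train, y_test = load_feature(_Config(), train=True)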
def handle_results(self, runfolder, purpose):
    done = os.path.isfile(os.path.join(runfolder, 'done'))
    if (done and purpose == 'events'):
        os.rename(
            os.path.join(runfolder, runfolder) + '.hepmc',
            os.path.join("../rivet", runfolder + '.hepmc'))
    if (done and purpose == 'test_soft'):
        ut.mkdirs("../scan-results")
        soft_log = os.path.join(runfolder, 'soft.log')
        if os.path.isfile(soft_log):
            # note: str.strip('--1') removes leading/trailing '-' and '1'
            # characters, not the literal suffix '--1'
            os.rename(
                soft_log,
                os.path.join("../scan-results",
                             runfolder.strip('--1') + '.soft.dat'))
        else:
            return FAIL
    return SUCCESS
def save_checkpoint(self, iteration):
    encoderA_path = os.path.join(self.ckpt_dir, 'iter_%s_encoderA.pt' % iteration)
    encoderB_path = os.path.join(self.ckpt_dir, 'iter_%s_encoderB.pt' % iteration)
    decoderA_path = os.path.join(self.ckpt_dir, 'iter_%s_decoderA.pt' % iteration)
    decoderB_path = os.path.join(self.ckpt_dir, 'iter_%s_decoderB.pt' % iteration)
    mkdirs(self.ckpt_dir)
    torch.save(self.encoderA, encoderA_path)
    torch.save(self.encoderB, encoderB_path)
    torch.save(self.decoderA, decoderA_path)
    torch.save(self.decoderB, decoderB_path)
def __init__(self, query='', lang='', top_tweet=False):
    self.query = query
    self.url = "https://twitter.com/i/search/timeline?l={}".format(lang)
    self.lang = lang
    if not top_tweet:
        self.url = self.url + "&f=tweets"
    self.url = self.url + "&q=%s&src=typed&max_position=%s"
    path_here = os.path.abspath(os.path.dirname(__file__))
    cache_directory = path_here + '/../../.caches'
    mkdirs(cache_directory)
    self.cache_filename = '%s/%s%s.cache' % (cache_directory, query, lang)
    self.min_position = ''
    if os.path.exists(self.cache_filename):
        with open(self.cache_filename, 'r') as f:
            self.min_position = f.read()
def __init__(self, excelName, sheetName, dropboxDir, accessToken,
             batchSize_GB=1000, sleepTime_min=10, batchTimeLimit_hour=200):
    """
    Creates attribute variables and makes a log file dropboxApp.log in the
    logs directory.  Uploads files to Dropbox using the desktop APP in
    batches.
    """
    self.excelName = excelName
    self.sheetName = sheetName
    self.dropboxDir = dropboxDir
    self.accessToken = accessToken
    self.batchSize = batchSize_GB * 1024 * 1024 * 1024
    self.sleepTime_min = sleepTime_min
    self.batchTimeLimit_hour = batchTimeLimit_hour
    self.names = ['inputFile', 'outputDir']
    self.df = pandas.read_excel(self.excelName, sheet_name=self.sheetName,
                                names=self.names)
    self.dbx = dropbox.Dropbox(self.accessToken)

    physicalDriveName = 'PhysicalDrive3'
    intervalBetweenProcessUsageCheck_s = 15
    minDataIOSpeed_mbps = 5
    maximumProcessStopDuration_min = 30
    resourceMonitorThread = Thread(target=utils.resourceUsage,
                                   args=[physicalDriveName,
                                         intervalBetweenProcessUsageCheck_s,
                                         minDataIOSpeed_mbps,
                                         maximumProcessStopDuration_min],
                                   daemon=True)
    resourceMonitorThread.start()

    print('Stage 2 - Data upload using APP')
    self.logFileName = './logs/upload/' + datetime.datetime.now().strftime(
        "%Y%m%d_%H%M%S") + '_DropboxAPP.log'
    self.getFileList()
    utils.mkdirs(self.dropboxDirList)
    self.makeBatches()
    self.uploadFiles()
def run( common_args, cmd_argv ):
    args = docopt(scm.mount.USAGE, argv=cmd_argv)

    # Success Msg
    if ( args['get-success-msg'] ):
        print( "Repo mounted and committed to your repo" )
        return

    # Error Msg
    if ( args['get-error-msg'] ):
        print( "" )   # No message
        return

    # Check if there are pending repo changes
    cmd = f'git diff-index HEAD --exit-code --quiet'
    t = utils.run_shell( cmd, False )
    cmd = f'git diff-index --cached HEAD --exit-code --quiet'
    t2 = utils.run_shell( cmd, False )
    utils.check_results( t, "ERROR: Your local repo has pending tree modification (i.e. need to do a commit/revert)." )
    utils.check_results( t2, "ERROR: Your local repo has pending index modification (i.e. need to do a commit/revert)." )

    # -b option is not supported/needed
    if ( args['-b'] != None ):
        sys.exit( "The '-b' option is not supported/needed. Use a 'remote-ref' as the <id> argument" )

    # Default Package name
    pkg = args['<repo>']
    if ( args['-p'] ):
        pkg = args['-p']

    # Make sure the Parent destination directory exists
    dst = args['<dst>']
    utils.mkdirs( dst )

    # Set directory for the subtree directory
    dst = os.path.join( dst, pkg )
    dst = utils.force_unix_dir_sep(dst)
    utils.print_verbose( f"Destination for the copy: {dst}" )

    # Create a 'subtree'
    cmd = f'git subtree add --prefix {dst} {args["<origin>"]}/_git/{args["<repo>"]} {args["<id>"]} --squash'
    t = utils.run_shell( cmd, common_args['-v'] )
    if ( utils.is_error(t) ):   # Clean-up dst dir if there was failure
        utils.remove_tree( dst )
        utils.check_results( t, "ERROR: Failed to create a subtree for the specified package/repository." )
def get_data(config, data_path: str, train: bool) -> Union[Tuple[np.ndarray], np.ndarray]:
    """
    Extract features for all audio files: walk every folder, read each audio
    file in it and extract its features, then save all the features in the
    "{config.feature_folder}/*.p" file.

    Args:
        config: configuration options
        data_path (str): path of the dataset folder / test file
        train (bool): whether this is training data

    Returns:
        - train = True: training features, test features and their labels
        - train = False: prediction features
    """
    if train:
        files = get_data_path(data_path, config.class_labels)
        max_, min_ = get_max_min(files)

        mfcc_data = []
        for file in files:
            label = re.findall(".*-(.*)-.*", file)[0]
            # three-class variant
            # if label == "sad" or label == "neutral":
            #     label = "neutral"
            # elif label == "angry" or label == "fear":
            #     label = "negative"
            # elif label == "happy" or label == "surprise":
            #     label = "positive"
            features = extract_features(file, max_)
            mfcc_data.append([file, features, config.class_labels.index(label)])
    else:
        features = extract_features(data_path)
        mfcc_data = [[data_path, features, -1]]

    # create config.feature_folder if it does not exist yet
    utils.mkdirs(config.feature_folder)
    # feature storage path
    feature_path = os.path.join(config.feature_folder,
                                "train.p" if train else "predict.p")
    # save the features
    pickle.dump(mfcc_data, open(feature_path, 'wb'))

    return load_feature(config, train=train)
def run(common_args, cmd_argv):
    args = docopt(scm.copy.USAGE, argv=cmd_argv)

    # Use the mount command so as to have consistent pre/post GIT behavior
    # with adopting non-integrated packages
    if (not args['--force']):
        cmd_argv[0] = 'mount'
        cmd_argv.insert(1, '--noro')
        scm.git.mount.run(common_args, cmd_argv)

    # Do a brute force copy
    else:
        # -b option is not supported/needed
        if (args['-b'] != None):
            sys.exit(
                "The '-b' option is not supported/needed. Use a 'remote-ref' as the <id> argument")

        # Default Package name
        pkg = args['<repo>']
        if (args['-p']):
            pkg = args['-p']

        # Make sure the destination directory exists
        dst = os.path.join(os.getcwd(), args['<dst>'])
        utils.print_verbose(f"Destination for the copy: {dst}")
        utils.mkdirs(dst)

        # Create a clone of the repo
        # NOTE: I hate cloning the entire repo - but I have not found a way
        #       to get JUST a snapshot by a remote-ref
        cmd = f'git clone --branch {args["<id>"]} --depth=1 {args["<origin>"]}/_git/{args["<repo>"]} {pkg}'
        utils.push_dir(dst)
        t = utils.run_shell(cmd, common_args['-v'])
        utils.pop_dir()
        if (utils.is_error(t)):   # Clean-up dst dir if there was failure
            utils.remove_tree(dst)
            utils.check_results(
                t,
                f"ERROR: Failed to retrieve/clone the specified package/repository. Note: the <id> ({args['<id>']}) MUST be a git TAG.")

        # Remove the .git directory since this is a non-tracked copy
        gitdir = os.path.join(dst, pkg, ".git")
        utils.remove_tree(
            gitdir,
            warn_msg="Not able to remove the .git directory for local copy")
def __new__(cls, name, type_name, when='M', interval=1440, backupCount=10):
    logger_name = '%s_%s' % (name, type_name)
    logger = cls.__loggers.get(logger_name, None)
    if not logger:
        logger = logging.getLogger(name)
        the_game_log_dir = os.path.join(LOGS_DIR, name)
        mkdirs(the_game_log_dir)
        the_game_log_file_name = os.path.join(the_game_log_dir,
                                              '%s.log' % type_name)
        fileTimeHandler = MultiProcessTimedRotatingFileHandler(
            the_game_log_file_name, when, interval, backupCount)
        formatter = logging.Formatter(
            '%(asctime)s pid:%(process)d %(name)s:%(lineno)d %(levelname)s %(message)s',
            datefmt='[%Y-%m-%d %H:%M:%S %z]')
        fileTimeHandler.setFormatter(formatter)
        logger.addHandler(fileTimeHandler)
        cls.__loggers[logger_name] = logger
    return logger
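# Hedged usage sketch for the logger factory above: __new__ returns a cached
# logging.Logger, so constructing it again with the same (name, type_name)
# pair reuses the same logger and its rotating file handler. The class name
# GameLogger and the existence of a class-level __loggers dict are assumptions.
log = GameLogger('poker', 'error')
log.error('something went wrong')
assert GameLogger('poker', 'error') is log   # cached instance is reused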
def save_synth(self, iters, howmany=100):
    decoder = self.decoder
    Z = torch.randn(howmany, self.z_dim)
    if self.use_cuda:
        Z = Z.cuda()

    # do synthesis
    X = torch.sigmoid(decoder(Z)).data.cpu()

    # save the results as image
    fname = os.path.join(self.output_dir_synth, 'synth_%s.jpg' % iters)
    mkdirs(self.output_dir_synth)
    save_image(tensor=X, filename=fname, nrow=int(np.sqrt(howmany)),
               pad_value=1)
def print_options(opt):
    message = ''
    message += '----------------- Options ---------------\n'
    for k, v in sorted(vars(opt).items()):
        comment = ''
        default = parser.get_default(k)   # `parser` is presumably the module-level argument parser
        if v != default:
            comment = '\t[default: %s]' % str(default)
        message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
    message += '----------------- End -------------------'
    print(message)

    # save to the disk
    expr_dir = opt.save_dir / opt.name
    mkdirs(expr_dir)
    file_name = expr_dir / 'opt.txt'
    with open(file_name, 'wt') as opt_file:
        opt_file.write(message)
        opt_file.write('\n')
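# Hedged usage sketch for print_options() above; the option object and the
# module-level parser are stand-ins for whatever argparse setup the real
# code uses.
from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser()
parser.add_argument('--name', default='experiment')
parser.add_argument('--save_dir', type=Path, default=Path('./checkpoints'))
opt = parser.parse_args([])
print_options(opt)   # prints the table and writes <save_dir>/<name>/opt.txt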
def save_predicted_data(df, save_dir):
    predicted_data = []
    for index, row in df.iterrows():
        predicted_data.append({
            "id": row["id"],
            "label": row["LabelID_Pred"],
            "content": row["content"]
        })

    # Save data to file
    utils.mkdirs(save_dir)
    save_path = os.path.join(
        save_dir,
        "predicted_data_{}_{}.txt".format(df.shape[0],
                                          utils.get_format_time_now()))
    with open(save_path, 'w') as f:
        json.dump(predicted_data, f)
    print("Save predicted data to {} done".format(save_path))
def _install(self, aid, retrieve, msg=None, archive=True):
    d = self.path(aid)
    if is_existing_directory(d):
        return d
    a = retrieve(aid)
    if a == None:
        return None
    utils.mkdirs(os.path.dirname(d))
    f = tempfile.mkdtemp(suffix='.' + self._base(aid), dir=self._root)
    try:
        self._notify(aid, d, a, msg)
        if not archive:
            base = os.path.basename(a)
            ix = base.rfind('.')
            suf = base[ix:] if ix >= 0 else ''
            if suf == '.gz':
                base = base[:ix]
                ix = base.rfind('.')
                suf = (base[ix:] if ix >= 0 else '') + suf
            shutil.copy(a, join(f, 'artifact' + suf))
        else:
            utils.expandArchive(a, f)
        if self._finalizer != None:
            self._finalizer(aid, f)
        err = 0
        while not is_existing_directory(d):
            try:
                os.rename(f, d)
            except OSError:
                if not is_existing_directory(d):
                    err = err + 1
                else:
                    log.error('cannot rename directory ' + f + ':' +
                              str(sys.exc_info()), log.INFRA)
                    break
        utils.rmtree(f)
        if is_existing_directory(d):
            return d
        log.error('no folder ' + d, log.INFRA)
        return None
    except:
        log.error('cannot expand archive ' + a + ':' + str(sys.exc_info()),
                  log.INFRA)
        utils.rmtree(f)
        raise XmakeException('ERR: cannot expand archive ' + a + ': ' +
                             str(sys.exc_info()))
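# A minimal sketch (independent of the class above) of the suffix handling
# in _install(): for a '.gz' artifact the next-inner extension is kept too,
# so a file like 'pkg-1.0.tar.gz' is copied as 'artifact.tar.gz'.
base = 'pkg-1.0.tar.gz'                            # hypothetical artifact name
ix = base.rfind('.')
suf = base[ix:] if ix >= 0 else ''                 # '.gz'
if suf == '.gz':
    base = base[:ix]                               # 'pkg-1.0.tar'
    ix = base.rfind('.')
    suf = (base[ix:] if ix >= 0 else '') + suf     # '.tar.gz'
print(suf)   # .tar.gz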
def __init__(self, topdir, datadir, db='sqlite3',
             seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
    self.db = db
    self.datadir = datadir
    self.central_conf_dir = join(datadir, 'conf')
    self.seafile_conf_dir = join(datadir, 'seafile-data')
    self.ccnet_conf_dir = join(datadir, 'ccnet')
    self.log_dir = join(datadir, 'logs')
    mkdirs(self.log_dir)
    self.ccnet_log = join(self.log_dir, 'ccnet.log')
    self.seafile_log = join(self.log_dir, 'seafile.log')
    self.ccnet_server_bin = ccnet_server_bin
    self.seaf_server_bin = seaf_server_bin
    self.sql_dir = join(topdir, 'seafile-server', 'scripts', 'sql')
    self.ccnet_proc = None
    self.seafile_proc = None
def save_checkpoint(self, iteration):
    encoder_path = os.path.join(self.ckpt_dir, 'iter_%s_encoder.pt' % iteration)
    decoder_path = os.path.join(self.ckpt_dir, 'iter_%s_decoder.pt' % iteration)
    prior_alpha_path = os.path.join(self.ckpt_dir, 'iter_%s_prior_alpha.pt' % iteration)
    post_alpha_path = os.path.join(self.ckpt_dir, 'iter_%s_post_alpha.pt' % iteration)
    D_path = os.path.join(self.ckpt_dir, 'iter_%s_D.pt' % iteration)
    mkdirs(self.ckpt_dir)
    torch.save(self.encoder, encoder_path)
    torch.save(self.decoder, decoder_path)
    torch.save(self.prior_alpha, prior_alpha_path)
    torch.save(self.post_alpha, post_alpha_path)
    torch.save(self.D, D_path)
def __init__(self, db_dir):
    mkdirs(db_dir)
    self.path = os.path.join(db_dir, 'logdata.sqlite')
    con = sqlite3.connect(self.path)
    con.row_factory = sqlite3.Row
    self.cursor = con.cursor()
    create_script = relative_file(__file__, 'create.sql')
    with open(create_script) as script:
        self.cursor.executescript(script.read())
    for host_info in [
        HostInfo(shortname='bb', name='bitbucket',
                 urnpattern='https://bitbucket.org', vcs='hg',
                 lister_module='repoblick.lister.bitbucket'),
        HostInfo(shortname='gh', name='github',
                 urnpattern='https://github.com/%s.git', vcs='git'),
        HostInfo(shortname='gc-hg', name='googlecode-mercurial',
                 urnpattern='https://%s.googlecode.com/hg/', vcs='hg'),
        HostInfo(shortname='gc-svn', name='googlecode-subversion',
                 urnpattern='http://%s.googlecode.com/svn/trunk/', vcs='svn'),
    ]:
        self.add_host(host_info)
    self.commit()
def plot_multi_functions(functions, output_path="./Ex5_Output/plot.jpg"):
    for func_name, (x, y) in functions.items():
        print("Plot Func name : {}".format(func_name))
        plt.plot(x, y, label=func_name)
    plt.legend()
    plt.title("Plot multi functions")
    plt.xlabel("x")
    plt.ylabel("y")

    # Save figure to output path
    dir_path = output_path[:output_path.rfind("/")]
    utils.mkdirs(dir_path)
    output_path = os.path.abspath(output_path)
    plt.savefig(output_path, dpi=200)
    print("Save file to {} done".format(output_path))
    plt.show()
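# Hedged usage sketch for plot_multi_functions() above: each dict entry maps
# a curve label to an (x, y) pair. The curves below are illustrative.
import numpy as np

x = np.linspace(-2, 2, 100)
plot_multi_functions({
    "y = x^2": (x, x ** 2),
    "y = x^3": (x, x ** 3),
})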
def load_feature(config, train: bool) -> Union[Tuple[np.ndarray], np.ndarray]:
    """
    Load feature data from the "{config.feature_folder}/*.p" files.

    Args:
        config: configuration options
        train (bool): whether this is training data

    Returns:
        - X (Tuple[np.ndarray]): training features, test features and their labels
        - X (np.ndarray): prediction features
    """
    feature_path = os.path.join(config.feature_folder,
                                "train.p" if train else "predict.p")

    features = pd.DataFrame(
        data=joblib.load(feature_path),
        columns=['file_name', 'features', 'emotion']
    )

    X = list(features['features'])
    Y = list(features['emotion'])

    # path of the scaler (standardization) model
    scaler_path = os.path.join(config.checkpoint_path, 'SCALER_LIBROSA.m')

    if train:
        # standardize the data
        scaler = StandardScaler().fit(X)
        # save the scaler model
        utils.mkdirs(config.checkpoint_path)
        joblib.dump(scaler, scaler_path)
        X = scaler.transform(X)

        # split into training and test sets
        x_train, x_test, y_train, y_test = train_test_split(
            X, Y, test_size=0.2, random_state=42)
        return x_train, x_test, y_train, y_test
    else:
        # standardize the data with the saved scaler model
        scaler = joblib.load(scaler_path)
        X = scaler.transform(X)
        return X
def compareWithGroundtruth(sal, datasetPath, imageName):
    print "Processing " + imageName + " ...."
    confusion = getNewConfusion()
    srcImgPath = os.path.join(datasetPath, imageName, "src_color", imageName + ".png")
    image = cv2.imread(srcImgPath)
    mask = getSalienyMap(sal, image)
    # write results
    mask_image = image * mask[:, :, None]
    mkdirs(os.path.join(datasetPath, imageName, "results"))
    file_name = os.path.join(datasetPath, imageName, "results",
                             imageName + "_" + sal.method + ".png")
    cv2.imwrite(file_name, mask_image)
    humanSegPath = os.path.join(datasetPath, imageName, "human_seg")
    for bgImg in os.listdir(humanSegPath):
        if bgImg.endswith(".png"):
            bgMask = getMask(cv2.imread(os.path.join(humanSegPath, bgImg)))
            updateConfusion(confusion, comparator(bgMask, mask))
    print "Confusion : ", confusion
    return confusion
def save_recon(self, iters, true_images, recon_images):
    # make a merge of true and recon, eg,
    #   merged[0,...] = true[0,...],
    #   merged[1,...] = recon[0,...],
    #   merged[2,...] = true[1,...],
    #   merged[3,...] = recon[1,...], ...
    n = true_images.shape[0]
    perm = torch.arange(0, 2 * n).view(2, n).transpose(1, 0)
    perm = perm.contiguous().view(-1)
    merged = torch.cat([true_images, recon_images], dim=0)
    merged = merged[perm, :].cpu()

    # save the results as image
    fname = os.path.join(self.output_dir_recon, 'recon_%s.jpg' % iters)
    mkdirs(self.output_dir_recon)
    save_image(tensor=merged, filename=fname, nrow=2 * int(np.sqrt(n)),
               pad_value=1)
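# A minimal sketch (independent of the class above) of the interleaving trick
# used in save_recon(): arange(2n).view(2, n).transpose(1, 0).view(-1) gives
# [0, n, 1, n+1, ...], so indexing the concatenated [true; recon] batch with
# it alternates true and reconstructed images.
import torch

n = 3
perm = torch.arange(0, 2 * n).view(2, n).transpose(1, 0).contiguous().view(-1)
print(perm)   # tensor([0, 3, 1, 4, 2, 5])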
def run(self):
    if DownloadHistoryMatch.START_DATE == "":
        self.backup_data()
    create_history_db()
    mkdirs("cache_web_pages/html")
    send_msg("Analyzing matches..")
    urls = self.last_days()
    for url in urls:
        # write_file("cache_history_data_startdate.log", day_format)
        self.targetUrl = url  # "http://live.500.com/wanchang.php?e=" + day_format
        send_msg("Fetching match links " + self.targetUrl)
        matchs = self.extract_matchs(self.targetUrl)
        self.batch_download_data_pages(matchs)
        for match in matchs:
            msg = "Analyzing odds %s" % match["odds_url"]
            send_msg(msg)
            self.parse_odds(match, get_cache_web_file_name(match["odds_url"]))
            msg = "Analyzing fundamentals %s" % match["base_face_url"]
            send_msg(msg)
            self.parse_baseface(match, get_cache_web_file_name(match["base_face_url"]))
        self.dataset = matchs
        send_msg("cache_history_data")
    send_msg("completed")
    send_msg("Done.")
def run_TPI(income_tax_params, tpi_params, iterative_params, initial_values,
            SS_values, output_dir="./OUTPUT"):

    # unpack tuples of parameters
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params
    maxiter, mindist_SS, mindist_TPI = iterative_params
    J, S, T, BQ_dist, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
        g_n_vector, tau_payroll, tau_bq, rho, omega, N_tilde, lambdas, e,\
        retire, mean_income_data, factor, h_wealth, p_wealth, m_wealth,\
        b_ellipse, upsilon, chi_b, chi_n = tpi_params
    K0, b_sinit, b_splus1init, L0, Y0,\
        w0, r0, BQ0, T_H_0, factor, tax0, c0, initial_b, initial_n = initial_values
    Kss, Lss, rss, wss, BQss, T_Hss, bssmat_splus1, nssmat = SS_values

    TPI_FIG_DIR = output_dir

    # Initialize guesses at time paths
    domain = np.linspace(0, T, T)
    K_init = (-1 / (domain + 1)) * (Kss - K0) + Kss
    K_init[-1] = Kss
    K_init = np.array(list(K_init) + list(np.ones(S) * Kss))
    L_init = np.ones(T + S) * Lss

    K = K_init
    L = L_init
    Y_params = (alpha, Z)
    Y = firm.get_Y(K, L, Y_params)
    w = firm.get_w(Y, L, alpha)
    r_params = (alpha, delta)
    r = firm.get_r(Y, K, r_params)
    BQ = np.zeros((T + S, J))
    for j in xrange(J):
        BQ[:, j] = list(np.linspace(BQ0[j], BQss[j], T)) + [BQss[j]] * S
    BQ = np.array(BQ)
    if T_Hss < 1e-13 and T_Hss > 0.0:
        # sometimes SS is very small but not zero, even if taxes are zero;
        # this gets rid of the approximation error, which affects the
        # percentage changes below
        T_Hss2 = 0.0
    else:
        T_Hss2 = T_Hss
    T_H = np.ones(T + S) * T_Hss2

    # Make array of initial guesses for labor supply and savings
    domain2 = np.tile(domain.reshape(T, 1, 1), (1, S, J))
    ending_b = bssmat_splus1
    guesses_b = (-1 / (domain2 + 1)) * (ending_b - initial_b) + ending_b
    ending_b_tail = np.tile(ending_b.reshape(1, S, J), (S, 1, 1))
    guesses_b = np.append(guesses_b, ending_b_tail, axis=0)
    domain3 = np.tile(np.linspace(0, 1, T).reshape(T, 1, 1), (1, S, J))
    guesses_n = domain3 * (nssmat - initial_n) + initial_n
    ending_n_tail = np.tile(nssmat.reshape(1, S, J), (S, 1, 1))
    guesses_n = np.append(guesses_n, ending_n_tail, axis=0)
    b_mat = np.zeros((T + S, S, J))
    n_mat = np.zeros((T + S, S, J))
    ind = np.arange(S)

    TPIiter = 0
    TPIdist = 10
    PLOT_TPI = False

    euler_errors = np.zeros((T, 2 * S, J))
    TPIdist_vec = np.zeros(maxiter)

    while (TPIiter < maxiter) and (TPIdist >= mindist_TPI):
        # Plot TPI for K for each iteration, so we can see if there is a
        # problem
        if PLOT_TPI is True:
            K_plot = list(K) + list(np.ones(10) * Kss)
            L_plot = list(L) + list(np.ones(10) * Lss)
            plt.figure()
            plt.axhline(y=Kss, color='black', linewidth=2,
                        label=r"Steady State $\hat{K}$", ls='--')
            # plot K_plot (the old code referenced an undefined Kpath_plot)
            plt.plot(np.arange(T + 10), K_plot[:T + 10], 'b', linewidth=2,
                     label=r"TPI time path $\hat{K}_t$")
            plt.savefig(os.path.join(TPI_FIG_DIR, "TPI_K"))

        # Uncomment the following print statements to make sure all euler
        # equations are converging. If they don't, then you'll have negative
        # consumption or consumption spikes. If they don't, it is the initial
        # guesses. You might need to scale them differently. It is rather
        # delicate for the first few periods and high ability groups.
        # theta_params = (e[-1, j], 1, omega[0].reshape(S, 1), lambdas[j])
        # theta = tax.replacement_rate_vals(n, w, factor, theta_params)
        theta = np.zeros((J,))

        guesses = (guesses_b, guesses_n)
        outer_loop_vars = (r, w, K, BQ, T_H)
        inner_loop_params = (income_tax_params, tpi_params, initial_values,
                             theta, ind)

        # Solve HH problem in inner loop
        euler_errors, b_mat, n_mat = inner_loop(guesses, outer_loop_vars,
                                                inner_loop_params)

        # if euler_errors.max() > 1e-6:
        #     print 't-loop:', euler_errors.max()

        # Force the initial distribution of capital to be as given above.
        b_mat[0, :, :] = initial_b
        K_params = (omega[:T].reshape(T, S, 1), lambdas.reshape(1, 1, J),
                    g_n_vector[:T], 'TPI')
        K[:T] = household.get_K(b_mat[:T], K_params)
        L_params = (e.reshape(1, S, J), omega[:T, :].reshape(T, S, 1),
                    lambdas.reshape(1, 1, J), 'TPI')
        L[:T] = firm.get_L(n_mat[:T], L_params)
        Y_params = (alpha, Z)
        Ynew = firm.get_Y(K[:T], L[:T], Y_params)
        wnew = firm.get_w(Ynew[:T], L[:T], alpha)
        r_params = (alpha, delta)
        rnew = firm.get_r(Ynew[:T], K[:T], r_params)

        BQ_params = (omega[:T].reshape(T, S, 1), lambdas.reshape(1, 1, J),
                     rho.reshape(1, S, 1), g_n_vector[:T].reshape(T, 1), 'TPI')
        BQnew = household.get_BQ(rnew[:T].reshape(T, 1), b_mat[:T, :, :],
                                 BQ_params)
        bmat_s = np.zeros((T, S, J))
        bmat_s[:, 1:, :] = b_mat[:T, :-1, :]
        bmat_splus1 = np.zeros((T, S, J))
        bmat_splus1[:, :, :] = b_mat[1:T + 1, :, :]

        TH_tax_params = np.zeros((T, S, J, etr_params.shape[2]))
        for i in range(etr_params.shape[2]):
            TH_tax_params[:, :, :, i] = np.tile(
                np.reshape(np.transpose(etr_params[:, :T, i]), (T, S, 1)),
                (1, 1, J))

        T_H_params = (np.tile(e.reshape(1, S, J), (T, 1, 1)), BQ_dist,
                      lambdas.reshape(1, 1, J), omega[:T].reshape(T, S, 1),
                      'TPI', TH_tax_params, theta, tau_bq, tau_payroll,
                      h_wealth, p_wealth, m_wealth, retire, T, S, J)
        T_H_new = np.array(list(tax.get_lump_sum(
            np.tile(rnew[:T].reshape(T, 1, 1), (1, S, J)),
            np.tile(wnew[:T].reshape(T, 1, 1), (1, S, J)),
            bmat_s, n_mat[:T, :, :], BQnew[:T].reshape(T, 1, J), factor,
            T_H_params)) + [T_Hss] * S)

        w[:T] = utils.convex_combo(wnew[:T], w[:T], nu)
        r[:T] = utils.convex_combo(rnew[:T], r[:T], nu)
        BQ[:T] = utils.convex_combo(BQnew[:T], BQ[:T], nu)
        T_H[:T] = utils.convex_combo(T_H_new[:T], T_H[:T], nu)
        guesses_b = utils.convex_combo(b_mat, guesses_b, nu)
        guesses_n = utils.convex_combo(n_mat, guesses_n, nu)
        if T_H.all() != 0:
            TPIdist = np.array(
                list(utils.pct_diff_func(rnew[:T], r[:T])) +
                list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) +
                list(utils.pct_diff_func(wnew[:T], w[:T])) +
                list(utils.pct_diff_func(T_H_new[:T], T_H[:T]))).max()
        else:
            # the old code called np.abs(T_H_new[:T], T_H[:T]), which passes
            # T_H as an output buffer; an absolute difference was intended
            TPIdist = np.array(
                list(utils.pct_diff_func(rnew[:T], r[:T])) +
                list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) +
                list(utils.pct_diff_func(wnew[:T], w[:T])) +
                list(np.abs(T_H_new[:T] - T_H[:T]))).max()

        TPIdist_vec[TPIiter] = TPIdist
        # After T=10, if cycling occurs, drop the value of nu
        # wait til after T=10 or so, because sometimes there is a jump up
        # in the first couple iterations
        # if TPIiter > 10:
        #     if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter - 1] > 0:
        #         nu /= 2
        #         print 'New Value of nu:', nu
        TPIiter += 1
        print '\tIteration:', TPIiter
        print '\t\tDistance:', TPIdist

    if ((TPIiter >= maxiter) or (np.absolute(TPIdist) > mindist_TPI)) and ENFORCE_SOLUTION_CHECKS:
        raise RuntimeError("Transition path equilibrium not found")

    Y[:T] = Ynew

    # Solve HH problem in inner loop
    guesses = (guesses_b, guesses_n)
    outer_loop_vars = (r, w, K, BQ, T_H)
    inner_loop_params = (income_tax_params, tpi_params, initial_values,
                         theta, ind)
    euler_errors, b_mat, n_mat = inner_loop(guesses, outer_loop_vars,
                                            inner_loop_params)

    b_mat[0, :, :] = initial_b
    K_params = (omega[:T].reshape(T, S, 1), lambdas.reshape(1, 1, J),
                g_n_vector[:T], 'TPI')
    K[:T] = household.get_K(b_mat[:T], K_params)
    # this is what the old code does, but it's strange - why use
    # b_mat -- what is going on with the initial period, etc.

    etr_params_path = np.zeros((T, S, J, etr_params.shape[2]))
    for i in range(etr_params.shape[2]):
        etr_params_path[:, :, :, i] = np.tile(
            np.reshape(np.transpose(etr_params[:, :T, i]), (T, S, 1)),
            (1, 1, J))
    tax_path_params = (np.tile(e.reshape(1, S, J), (T, 1, 1)), BQ_dist,
                       lambdas, 'TPI', retire, etr_params_path, h_wealth,
                       p_wealth, m_wealth, tau_payroll, theta, tau_bq, J, S)
    tax_path = tax.total_taxes(
        np.tile(r[:T].reshape(T, 1, 1), (1, S, J)),
        np.tile(w[:T].reshape(T, 1, 1), (1, S, J)),
        bmat_s, n_mat[:T, :, :], BQ[:T, :].reshape(T, 1, J), factor,
        T_H[:T].reshape(T, 1, 1), None, False, tax_path_params)

    cons_params = (e.reshape(1, S, J), BQ_dist, lambdas.reshape(1, 1, J), g_y)
    c_path = household.get_cons(
        omega[:T].reshape(T, S, 1), r[:T].reshape(T, 1, 1),
        w[:T].reshape(T, 1, 1), bmat_s, bmat_splus1, n_mat[:T, :, :],
        BQ[:T].reshape(T, 1, J), tax_path, cons_params)

    C_params = (omega[:T].reshape(T, S, 1), lambdas, 'TPI')
    C = household.get_C(c_path, C_params)
    I_params = (delta, g_y, g_n_vector[:T])
    I = firm.get_I(K[1:T + 1], K[:T], I_params)
    print 'Resource Constraint Difference:', Y[:T] - C[:T] - I[:T]

    print 'Checking time path for violations of constraints.'
    for t in xrange(T):
        household.constraint_checker_TPI(b_mat[t], n_mat[t], c_path[t], t,
                                         ltilde)

    eul_savings = euler_errors[:, :S, :].max(1).max(1)
    eul_laborleisure = euler_errors[:, S:, :].max(1).max(1)

    print 'Max Euler error, savings: ', eul_savings
    print 'Max Euler error labor supply: ', eul_laborleisure

    if ((np.any(np.absolute(eul_savings) >= mindist_TPI) or
         (np.any(np.absolute(eul_laborleisure) > mindist_TPI))) and
            ENFORCE_SOLUTION_CHECKS):
        raise RuntimeError("Transition path equilibrium not found")

    '''
    ------------------------------------------------------------------------
    Save variables/values so they can be used in other modules
    ------------------------------------------------------------------------
    '''
    output = {'Y': Y, 'K': K, 'L': L, 'C': C, 'I': I, 'BQ': BQ, 'T_H': T_H,
              'r': r, 'w': w, 'b_mat': b_mat, 'n_mat': n_mat,
              'c_path': c_path, 'tax_path': tax_path,
              'eul_savings': eul_savings,
              'eul_laborleisure': eul_laborleisure}

    tpi_dir = os.path.join(output_dir, "TPI")
    utils.mkdirs(tpi_dir)
    tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
    pickle.dump(output, open(tpi_vars, "wb"))

    macro_output = {'Y': Y, 'K': K, 'L': L, 'C': C, 'I': I, 'BQ': BQ,
                    'T_H': T_H, 'r': r, 'w': w, 'tax_path': tax_path}

    # Non-stationary output
    # macro_ns_output = {'K_ns_path': K_ns_path, 'C_ns_path': C_ns_path,
    #                    'I_ns_path': I_ns_path, 'L_ns_path': L_ns_path,
    #                    'BQ_ns_path': BQ_ns_path, 'rinit': rinit,
    #                    'Y_ns_path': Y_ns_path, 'T_H_ns_path': T_H_ns_path,
    #                    'w_ns_path': w_ns_path}

    return output, macro_output
def TP_solutions(winit, rinit, T_H_init, BQinit2, Kss, Lss, Yss, BQss, theta,
                 income_tax_params, wealth_tax_params, ellipse_params,
                 parameters, g_n_vector, omega_stationary, K0, b_sinit,
                 b_splus1init, L0, Y0, r0, BQ0, T_H_0, tax0, c0, initial_b,
                 initial_n, factor_ss, tau_bq, chi_b, chi_n,
                 output_dir="./OUTPUT", **kwargs):
    '''
    This function returns the solutions for all variables along the time path.
    '''
    J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y, g_n_ss,\
        tau_payroll, retire, mean_income_data, h_wealth, p_wealth, m_wealth,\
        b_ellipse, upsilon = parameters
    analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params

    print 'Computing final solutions'

    # Extend time paths past T
    winit = np.array(list(winit) + list(np.ones(S) * wss))
    rinit = np.array(list(rinit) + list(np.ones(S) * rss))
    T_H_init = np.array(list(T_H_init) + list(np.ones(S) * T_Hss))
    BQinit = np.zeros((T + S, J))
    for j in xrange(J):
        BQinit[:, j] = list(BQinit2[:, j]) + [BQss[j]] * S
    BQinit = np.array(BQinit)

    # Make array of initial guesses
    domain = np.linspace(0, T, T)
    domain2 = np.tile(domain.reshape(T, 1, 1), (1, S, J))
    ending_b = bssmat_splus1
    guesses_b = (-1 / (domain2 + 1)) * (ending_b - initial_b) + ending_b
    ending_b_tail = np.tile(ending_b.reshape(1, S, J), (S, 1, 1))
    guesses_b = np.append(guesses_b, ending_b_tail, axis=0)
    domain3 = np.tile(np.linspace(0, 1, T).reshape(T, 1, 1), (1, S, J))
    guesses_n = domain3 * (nssmat - initial_n) + initial_n
    ending_n_tail = np.tile(nssmat.reshape(1, S, J), (S, 1, 1))
    guesses_n = np.append(guesses_n, ending_n_tail, axis=0)
    b_mat = np.zeros((T + S, S, J))
    n_mat = np.zeros((T + S, S, J))
    ind = np.arange(S)

    # initialize array of Euler errors
    euler_errors = np.zeros((T, 2 * S, J))

    # As in SS, you need the final distributions of b and n to match the
    # final w, r, BQ, etc. Otherwise the euler errors are large. You need
    # one more fsolve.
    for j in xrange(J):
        b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(
            SS_TPI_firstdoughnutring,
            [guesses_b[1, -1, j], guesses_n[0, -1, j]],
            args=(winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b,
                  factor_ss, j, income_tax_params, parameters, theta, tau_bq),
            xtol=1e-13))
        for s in xrange(S - 2):  # Upper triangle
            ind2 = np.arange(s + 2)
            b_guesses_to_use = np.diag(guesses_b[1:S + 1, :, j], S - (s + 2))
            n_guesses_to_use = np.diag(guesses_n[:S, :, j], S - (s + 2))

            # initialize array of diagonal elements
            length_diag = (np.diag(np.transpose(etr_params[:S, :, 0]),
                                   S - (s + 2))).shape[0]
            etr_params_to_use = np.zeros((length_diag, etr_params.shape[2]))
            mtrx_params_to_use = np.zeros((length_diag, mtrx_params.shape[2]))
            mtry_params_to_use = np.zeros((length_diag, mtry_params.shape[2]))
            for i in range(etr_params.shape[2]):
                etr_params_to_use[:, i] = np.diag(
                    np.transpose(etr_params[:S, :, i]), S - (s + 2))
                mtrx_params_to_use[:, i] = np.diag(
                    np.transpose(mtrx_params[:S, :, i]), S - (s + 2))
                mtry_params_to_use[:, i] = np.diag(
                    np.transpose(mtry_params[:S, :, i]), S - (s + 2))

            inc_tax_params_upper = (analytical_mtrs, etr_params_to_use,
                                    mtrx_params_to_use, mtry_params_to_use)

            solutions = opt.fsolve(
                Steady_state_TPI_solver,
                list(b_guesses_to_use) + list(n_guesses_to_use),
                args=(winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s,
                      0, inc_tax_params_upper, parameters, theta, tau_bq,
                      rho, lambdas, e, initial_b, chi_b, chi_n),
                xtol=1e-13)
            b_vec = solutions[:len(solutions) / 2]
            b_mat[1 + ind2, S - (s + 2) + ind2, j] = b_vec
            n_vec = solutions[len(solutions) / 2:]
            n_mat[ind2, S - (s + 2) + ind2, j] = n_vec

        for t in xrange(0, T):
            b_guesses_to_use = .75 * np.diag(guesses_b[t + 1:t + S + 1, :, j])
            n_guesses_to_use = np.diag(guesses_n[t:t + S, :, j])

            # initialize array of diagonal elements (index 0 is used only to
            # get the diagonal length; the old code reused a stale loop index)
            length_diag = (np.diag(np.transpose(etr_params[:, t:t + S, 0]))).shape[0]
            etr_params_to_use = np.zeros((length_diag, etr_params.shape[2]))
            mtrx_params_to_use = np.zeros((length_diag, mtrx_params.shape[2]))
            mtry_params_to_use = np.zeros((length_diag, mtry_params.shape[2]))
            for i in range(etr_params.shape[2]):
                etr_params_to_use[:, i] = np.diag(np.transpose(etr_params[:, t:t + S, i]))
                mtrx_params_to_use[:, i] = np.diag(np.transpose(mtrx_params[:, t:t + S, i]))
                mtry_params_to_use[:, i] = np.diag(np.transpose(mtry_params[:, t:t + S, i]))

            inc_tax_params_TP = (analytical_mtrs, etr_params_to_use,
                                 mtrx_params_to_use, mtry_params_to_use)

            solutions = opt.fsolve(
                Steady_state_TPI_solver,
                list(b_guesses_to_use) + list(n_guesses_to_use),
                args=(winit, rinit, BQinit[:, j], T_H_init, factor_ss, j,
                      None, t, inc_tax_params_TP, parameters, theta, tau_bq,
                      rho, lambdas, e, None, chi_b, chi_n),
                xtol=1e-13)
            b_vec = solutions[:S]
            b_mat[t + 1 + ind, ind, j] = b_vec
            n_vec = solutions[S:]
            n_mat[t + ind, ind, j] = n_vec
            inputs = list(solutions)
            euler_errors[t, :, j] = np.abs(Steady_state_TPI_solver(
                inputs, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j,
                None, t, inc_tax_params_TP, parameters, theta, tau_bq, rho,
                lambdas, e, None, chi_b, chi_n))

    b_mat[0, :, :] = initial_b

    '''
    ------------------------------------------------------------------------
    Generate variables/values so they can be used in other modules
    ------------------------------------------------------------------------
    '''
    Kinit = household.get_K(b_mat[:T], omega_stationary[:T].reshape(T, S, 1),
                            lambdas.reshape(1, 1, J), g_n_vector[:T], 'TPI')
    Linit = firm.get_L(e.reshape(1, S, J), n_mat[:T],
                       omega_stationary[:T, :].reshape(T, S, 1),
                       lambdas.reshape(1, 1, J), 'TPI')
    Kpath_TPI = np.array(list(Kinit) + list(np.ones(10) * Kss))
    Lpath_TPI = np.array(list(Linit) + list(np.ones(10) * Lss))
    BQpath_TPI = np.array(list(BQinit) + list(np.ones((10, J)) * BQss))

    b_s = np.zeros((T, S, J))
    b_s[:, 1:, :] = b_mat[:T, :-1, :]
    b_splus1 = np.zeros((T, S, J))
    b_splus1[:, :, :] = b_mat[1:T + 1, :, :]

    # initialize array
    etr_params_path = np.zeros((T, S, J, etr_params.shape[2]))
    for i in range(etr_params.shape[2]):
        etr_params_path[:, :, :, i] = np.tile(
            np.reshape(np.transpose(etr_params[:, :T, i]), (T, S, 1)),
            (1, 1, J))
    tax_path_params = J, S, retire, etr_params_path, h_wealth, p_wealth,\
        m_wealth, tau_payroll
    tax_path = tax.total_taxes(
        np.tile(rinit[:T].reshape(T, 1, 1), (1, S, J)), b_s,
        np.tile(winit[:T].reshape(T, 1, 1), (1, S, J)),
        np.tile(e.reshape(1, S, J), (T, 1, 1)), n_mat[:T, :, :],
        BQinit[:T, :].reshape(T, 1, J), lambdas, factor_ss,
        T_H_init[:T].reshape(T, 1, 1), None, 'TPI', False, tax_path_params,
        theta, tau_bq)

    c_path = household.get_cons(
        rinit[:T].reshape(T, 1, 1), b_s, winit[:T].reshape(T, 1, 1),
        e.reshape(1, S, J), n_mat[:T], BQinit[:T].reshape(T, 1, J),
        lambdas.reshape(1, 1, J), b_splus1, parameters, tax_path)

    Y_path = firm.get_Y(Kpath_TPI[:T], Lpath_TPI[:T], parameters)
    C_path = household.get_C(c_path, omega_stationary[:T].reshape(T, S, 1),
                             lambdas, 'TPI')
    I_path = firm.get_I(Kpath_TPI[1:T + 1], Kpath_TPI[:T], delta, g_y,
                        g_n_vector[:T])
    print 'Resource Constraint Difference:', Y_path - C_path - I_path

    print 'Checking time path for violations of constraints.'
    hh_constraint_params = ltilde
    for t in xrange(T):
        household.constraint_checker_TPI(b_mat[t], n_mat[t], c_path[t], t,
                                         hh_constraint_params)

    eul_savings = euler_errors[:, :S, :].max(1).max(1)
    eul_laborleisure = euler_errors[:, S:, :].max(1).max(1)

    print 'Max Euler error, savings: ', eul_savings
    print 'Max Euler error labor supply: ', eul_laborleisure

    '''
    ------------------------------------------------------------------------
    Create the unstationarized versions of the paths of macro aggregates
    ------------------------------------------------------------------------
    '''
    # tvec = np.linspace(0, len(C_path), len(C_path))
    # growth_path = np.exp(g_y * tvec)
    # pop_path = np.zeros(len(C_path))
    # for i in range(0, len(C_path)):
    #     # note that this normalizes the pop in the initial period to one
    #     pop_path[i] = np.exp(g_n_vector[:i].sum())
    # growth_pop_path = growth_path * pop_path
    # C_ns_path = C_path * growth_pop_path
    # K_ns_path = Kinit * growth_pop_path
    # BQ_ns_path = growth_pop_path * BQinit[:T]
    # L_ns_path = Linit * pop_path
    # T_H_ns_path = T_H_init[:T] * growth_pop_path
    # w_ns_path = winit * growth_path
    # I_ns_path = I_path * growth_pop_path
    # Y_ns_path = Y_path * growth_pop_path

    '''
    ------------------------------------------------------------------------
    Save variables/values so they can be used in other modules
    ------------------------------------------------------------------------
    '''
    output = {'Kpath_TPI': Kpath_TPI, 'b_mat': b_mat, 'c_path': c_path,
              'eul_savings': eul_savings,
              'eul_laborleisure': eul_laborleisure, 'Lpath_TPI': Lpath_TPI,
              'BQpath_TPI': BQpath_TPI, 'n_mat': n_mat, 'rinit': rinit,
              'Y_path': Y_path, 'T_H_init': T_H_init, 'tax_path': tax_path,
              'winit': winit}

    macro_output = {'Kpath_TPI': Kpath_TPI, 'C_path': C_path,
                    'I_path': I_path, 'Lpath_TPI': Lpath_TPI,
                    'BQpath_TPI': BQpath_TPI, 'rinit': rinit,
                    'Y_path': Y_path, 'T_H_init': T_H_init, 'winit': winit,
                    'tax_path': tax_path}

    # macro_ns_output = {'K_ns_path': K_ns_path, 'C_ns_path': C_ns_path,
    #                    'I_ns_path': I_ns_path, 'L_ns_path': L_ns_path,
    #                    'BQ_ns_path': BQ_ns_path, 'rinit': rinit,
    #                    'Y_ns_path': Y_ns_path, 'T_H_ns_path': T_H_ns_path,
    #                    'w_ns_path': w_ns_path}
    tpi_dir = os.path.join(output_dir, "TPI")
    utils.mkdirs(tpi_dir)
    tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl")
    pickle.dump(output, open(tpi_vars, "wb"))

    tpi_vars = os.path.join(tpi_dir, "TPI_macro_vars.pkl")
    pickle.dump(macro_output, open(tpi_vars, "wb"))
def graph_income(ages, abil_midp, abil_pcts, emat, filesuffix=""):
    '''
    --------------------------------------------------------------------
    This function graphs ability matrix in 3D, 2D, log, and nolog
    --------------------------------------------------------------------
    INPUTS:
    ages       = (S,) vector, ages represented in sample
    abil_midp  = (J,) vector, midpoints of income percentile bins in each
                 ability group
    abil_pcts  = (J,) vector, percent of population in each ability bin
    emat       = (S, J) matrix, lifetime ability paths
    filesuffix = string, suffix to be added to plot files

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION: utils.mkdirs()

    OBJECTS CREATED WITHIN FUNCTION:
    J          = integer >= 1
    abil_mesh  = (S, J) matrix, meshgrid of abil_midp across the columns,
                 copied down each row
    age_mesh   = (S, J) matrix, meshgrid of ages down the rows, copied
                 across each column
    cmap1      = matplotlib colormap for 3D plots
    cmap2      = matplotlib colormap for 3D plots
    output_dir = string, output directory to which figures are saved
    filename   = string, filename of figure file
    fullpath   = string, full path of output_dir and filename for figure
                 being saved
    linestyles = (4,) string vector, line styles for plotting
    markers    = (6,) string vector, marker types for plotting
    this_label = string, label for particular 2D line plot
    pct_lb     = scalar in [0, 100], lower bound of ability percentile bin

    FILES CREATED AND SAVED BY THIS FUNCTION:
        .OUTPUT/ability/ability_2D_lev{filesuffix}.png
        .OUTPUT/ability/ability_2D_log{filesuffix}.png
        .OUTPUT/ability/ability_3D_lev{filesuffix}.png
        .OUTPUT/ability/ability_3D_log{filesuffix}.png

    Returns: None
    --------------------------------------------------------------------
    '''
    J = abil_midp.shape[0]
    abil_mesh, age_mesh = np.meshgrid(abil_midp, ages)
    cmap1 = matplotlib.cm.get_cmap('summer')
    cmap2 = matplotlib.cm.get_cmap('winter')
    # Make sure that the "./OUTPUT/ability" directory is created
    output_dir = "./OUTPUT/ability"
    utils.mkdirs(output_dir)
    if J == 1:
        # Plot of 2D, J=1 in levels
        plt.figure()
        plt.plot(ages, emat)
        filename = "ability_2D_lev" + filesuffix
        fullpath = os.path.join(output_dir, filename)
        plt.savefig(fullpath)
        plt.close()
        # Plot of 2D, J=1 in logs
        plt.figure()
        plt.plot(ages, np.log(emat))
        filename = "ability_2D_log" + filesuffix
        fullpath = os.path.join(output_dir, filename)
        plt.savefig(fullpath)
        plt.close()
    else:
        # Plot of 3D, J>1 in levels
        fig10 = plt.figure()
        ax10 = fig10.gca(projection='3d')
        ax10.plot_surface(age_mesh, abil_mesh, emat, rstride=8, cstride=1,
                          cmap=cmap1)
        ax10.set_xlabel(r'age-$s$')
        ax10.set_ylabel(r'ability type -$j$')
        ax10.set_zlabel(r'ability $e_{j,s}$')
        filename = "ability_3D_lev" + filesuffix
        fullpath = os.path.join(output_dir, filename)
        plt.savefig(fullpath)
        plt.close()
        # Plot of 3D, J>1 in logs
        fig11 = plt.figure()
        ax11 = fig11.gca(projection='3d')
        ax11.plot_surface(age_mesh, abil_mesh, np.log(emat), rstride=8,
                          cstride=1, cmap=cmap1)
        ax11.set_xlabel(r'age-$s$')
        ax11.set_ylabel(r'ability type -$j$')
        ax11.set_zlabel(r'log ability $log(e_{j,s})$')
        filename = "ability_3D_log" + filesuffix
        fullpath = os.path.join(output_dir, filename)
        plt.savefig(fullpath)
        plt.close()
        if J <= 10:  # Restricted because of line and marker types
            # Plot of 2D lines from 3D version in logs
            fig112 = plt.figure()
            ax = plt.subplot(111)
            linestyles = np.array(["-", "--", "-.", ":", ])
            markers = np.array(["x", "v", "o", "d", ">", "|"])
            pct_lb = 0
            for j in range(J):
                this_label = (str(int(np.rint(pct_lb))) + " - " +
                              str(int(np.rint(pct_lb + 100 * abil_pcts[j]))) +
                              "%")
                pct_lb += 100 * abil_pcts[j]
                if j <= 3:
                    ax.plot(ages, np.log(emat[:, j]), label=this_label,
                            linestyle=linestyles[j], color='black')
                elif j > 3:
                    ax.plot(ages, np.log(emat[:, j]), label=this_label,
                            marker=markers[j - 4], color='black')
            ax.axvline(x=80, color='black', linestyle='--')
            box = ax.get_position()
            ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
            ax.set_xlabel(r'age-$s$')
            ax.set_ylabel(r'log ability $log(e_{j,s})$')
            filename = "ability_2D_log" + filesuffix
            fullpath = os.path.join(output_dir, filename)
            plt.savefig(fullpath)
            plt.close()
def __init__(self, props):
    self.method = None
    self.props = props
    if self.props.doProfile:
        self.PROFILE_PATH = os.environ.get("PROFILE_PATH") + os.sep
        mkdirs(self.PROFILE_PATH)
import utils
utils.mkdirs()  # Let's create all needed directories before anything else!
import requests
import os
import enlist
import time
from bs4 import BeautifulSoup

welcome = \
"""
AMU B.Tech Results Downloader

This Python script downloads B.Tech Results of the whole class based on
information in an attendance Excel file.

First you need to put the Excel file in the Input/ folder.
Then type the name of the file when asked (Default : store.xlsx).
This will load the information about students from the Excel file and store
it in students.db for future faster access.

Then you will be prompted with 3 options:

The first option downloads the result of the whole class and stores the
results as html pages in the Store/ folder.
Note : This option should be run at least once to download all necessary
result files for further options.
If there are no result files in the Store/ folder, then the script will not
run properly.

The second option loads CPI and SPI from downloaded
def run_time_path_iteration(Kss, Lss, Yss, BQss, theta, parameters,
                            g_n_vector, omega_stationary, K0, b_sinit,
                            b_splus1init, L0, Y0, r0, BQ0, T_H_0, tax0, c0,
                            initial_b, initial_n, factor_ss, tau_bq, chi_b,
                            chi_n, get_baseline=False, output_dir="./OUTPUT",
                            **kwargs):

    TPI_FIG_DIR = output_dir
    # Initialize Time paths
    domain = np.linspace(0, T, T)
    Kinit = (-1 / (domain + 1)) * (Kss - K0) + Kss
    Kinit[-1] = Kss
    Kinit = np.array(list(Kinit) + list(np.ones(S) * Kss))
    Linit = np.ones(T + S) * Lss
    Yinit = firm.get_Y(Kinit, Linit, parameters)
    winit = firm.get_w(Yinit, Linit, parameters)
    rinit = firm.get_r(Yinit, Kinit, parameters)
    BQinit = np.zeros((T + S, J))
    for j in xrange(J):
        BQinit[:, j] = list(np.linspace(BQ0[j], BQss[j], T)) + [BQss[j]] * S
    BQinit = np.array(BQinit)
    T_H_init = np.ones(T + S) * T_Hss

    # Make array of initial guesses
    domain2 = np.tile(domain.reshape(T, 1, 1), (1, S, J))
    ending_b = bssmat_splus1
    guesses_b = (-1 / (domain2 + 1)) * (ending_b - initial_b) + ending_b
    ending_b_tail = np.tile(ending_b.reshape(1, S, J), (S, 1, 1))
    guesses_b = np.append(guesses_b, ending_b_tail, axis=0)
    domain3 = np.tile(np.linspace(0, 1, T).reshape(T, 1, 1), (1, S, J))
    guesses_n = domain3 * (nssmat - initial_n) + initial_n
    ending_n_tail = np.tile(nssmat.reshape(1, S, J), (S, 1, 1))
    guesses_n = np.append(guesses_n, ending_n_tail, axis=0)
    b_mat = np.zeros((T + S, S, J))
    n_mat = np.zeros((T + S, S, J))
    ind = np.arange(S)

    TPIiter = 0
    TPIdist = 10

    euler_errors = np.zeros((T, 2 * S, J))
    TPIdist_vec = np.zeros(maxiter)

    while (TPIiter < maxiter) and (TPIdist >= mindist_TPI):
        Kpath_TPI = list(Kinit) + list(np.ones(10) * Kss)
        Lpath_TPI = list(Linit) + list(np.ones(10) * Lss)
        # Plot TPI for K for each iteration, so we can see if there is a
        # problem
        if PLOT_TPI is True:
            plt.figure()
            plt.axhline(y=Kss, color='black', linewidth=2,
                        label=r"Steady State $\hat{K}$", ls='--')
            plt.plot(np.arange(T + 10), Kpath_TPI[:T + 10], 'b', linewidth=2,
                     label=r"TPI time path $\hat{K}_t$")
            plt.savefig(os.path.join(TPI_FIG_DIR, "TPI_K"))

        # Uncomment the following print statements to make sure all euler
        # equations are converging. If they don't, then you'll have negative
        # consumption or consumption spikes. If they don't, it is the initial
        # guesses. You might need to scale them differently. It is rather
        # delicate for the first few periods and high ability groups.
        for j in xrange(J):
            b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(
                SS_TPI_firstdoughnutring,
                [guesses_b[1, -1, j], guesses_n[0, -1, j]],
                args=(winit[1], rinit[1], BQinit[1, j], T_H_init[1],
                      initial_b, factor_ss, j, parameters, theta, tau_bq),
                xtol=1e-13))
            # if np.array(SS_TPI_firstdoughnutring(
            #         [b_mat[1, -1, j], n_mat[0, -1, j]], winit[1], rinit[1],
            #         BQinit[1, j], T_H_init[1], initial_b, factor_ss, j,
            #         parameters, theta, tau_bq)).max() > 1e-6:
            #     print 'minidoughnut:', np.array(SS_TPI_firstdoughnutring(
            #         [b_mat[1, -1, j], n_mat[0, -1, j]], winit[1], rinit[1],
            #         BQinit[1, j], T_H_init[1], initial_b, factor_ss, j,
            #         parameters, theta, tau_bq)).max()
            for s in xrange(S - 2):  # Upper triangle
                ind2 = np.arange(s + 2)
                b_guesses_to_use = np.diag(guesses_b[1:S + 1, :, j], S - (s + 2))
                n_guesses_to_use = np.diag(guesses_n[:S, :, j], S - (s + 2))
                solutions = opt.fsolve(
                    Steady_state_TPI_solver,
                    list(b_guesses_to_use) + list(n_guesses_to_use),
                    args=(winit, rinit, BQinit[:, j], T_H_init, factor_ss,
                          j, s, 0, parameters, theta, tau_bq, rho, lambdas,
                          e, initial_b, chi_b, chi_n),
                    xtol=1e-13)
                b_vec = solutions[:len(solutions) / 2]
                b_mat[1 + ind2, S - (s + 2) + ind2, j] = b_vec
                n_vec = solutions[len(solutions) / 2:]
                n_mat[ind2, S - (s + 2) + ind2, j] = n_vec
                # if abs(np.array(Steady_state_TPI_solver(
                #         solutions, winit, rinit, BQinit[:, j], T_H_init,
                #         factor_ss, j, s, 0, parameters, theta, tau_bq, rho,
                #         lambdas, e, initial_b, chi_b, chi_n))).max() > 1e-6:
                #     print 's-loop:', abs(np.array(Steady_state_TPI_solver(
                #         solutions, winit, rinit, BQinit[:, j], T_H_init,
                #         factor_ss, j, s, 0, parameters, theta, tau_bq, rho,
                #         lambdas, e, initial_b, chi_b, chi_n))).max()
            for t in xrange(0, T):
                b_guesses_to_use = .75 * np.diag(guesses_b[t + 1:t + S + 1, :, j])
                n_guesses_to_use = np.diag(guesses_n[t:t + S, :, j])
                solutions = opt.fsolve(
                    Steady_state_TPI_solver,
                    list(b_guesses_to_use) + list(n_guesses_to_use),
                    args=(winit, rinit, BQinit[:, j], T_H_init, factor_ss,
                          j, None, t, parameters, theta, tau_bq, rho,
                          lambdas, e, None, chi_b, chi_n),
                    xtol=1e-13)
                b_vec = solutions[:S]
                b_mat[t + 1 + ind, ind, j] = b_vec
                n_vec = solutions[S:]
                n_mat[t + ind, ind, j] = n_vec
                inputs = list(solutions)
                euler_errors[t, :, j] = np.abs(Steady_state_TPI_solver(
                    inputs, winit, rinit, BQinit[:, j], T_H_init, factor_ss,
                    j, None, t, parameters, theta, tau_bq, rho, lambdas, e,
                    None, chi_b, chi_n))
        # if euler_errors.max() > 1e-6:
        #     print 't-loop:', euler_errors.max()
        # Force the initial distribution of capital to be as given above.
b_mat[0, :, :] = initial_b Kinit = household.get_K(b_mat[:T], omega_stationary[:T].reshape( T, S, 1), lambdas.reshape(1, 1, J), g_n_vector[:T], 'TPI') Linit = firm.get_L(e.reshape(1, S, J), n_mat[:T], omega_stationary[ :T, :].reshape(T, S, 1), lambdas.reshape(1, 1, J), 'TPI') Ynew = firm.get_Y(Kinit, Linit, parameters) wnew = firm.get_w(Ynew, Linit, parameters) rnew = firm.get_r(Ynew, Kinit, parameters) # the following needs a g_n term BQnew = household.get_BQ(rnew.reshape(T, 1), b_mat[:T], omega_stationary[:T].reshape( T, S, 1), lambdas.reshape(1, 1, J), rho.reshape(1, S, 1), g_n_vector[:T].reshape(T, 1), 'TPI') bmat_s = np.zeros((T, S, J)) bmat_s[:, 1:, :] = b_mat[:T, :-1, :] T_H_new = np.array(list(tax.get_lump_sum(rnew.reshape(T, 1, 1), bmat_s, wnew.reshape( T, 1, 1), e.reshape(1, S, J), n_mat[:T], BQnew.reshape(T, 1, J), lambdas.reshape( 1, 1, J), factor_ss, omega_stationary[:T].reshape(T, S, 1), 'TPI', parameters, theta, tau_bq)) + [T_Hss] * S) winit[:T] = utils.convex_combo(wnew, winit[:T], parameters) rinit[:T] = utils.convex_combo(rnew, rinit[:T], parameters) BQinit[:T] = utils.convex_combo(BQnew, BQinit[:T], parameters) T_H_init[:T] = utils.convex_combo( T_H_new[:T], T_H_init[:T], parameters) guesses_b = utils.convex_combo(b_mat, guesses_b, parameters) guesses_n = utils.convex_combo(n_mat, guesses_n, parameters) if T_H_init.all() != 0: TPIdist = np.array(list(utils.perc_dif_func(rnew, rinit[:T])) + list(utils.perc_dif_func(BQnew, BQinit[:T]).flatten()) + list( utils.perc_dif_func(wnew, winit[:T])) + list(utils.perc_dif_func(T_H_new, T_H_init))).max() else: TPIdist = np.array(list(utils.perc_dif_func(rnew, rinit[:T])) + list(utils.perc_dif_func(BQnew, BQinit[:T]).flatten()) + list( utils.perc_dif_func(wnew, winit[:T])) + list(np.abs(T_H_new - T_H_init))).max() TPIdist_vec[TPIiter] = TPIdist # After T=10, if cycling occurs, drop the value of nu # wait til after T=10 or so, because sometimes there is a jump up # in the first couple iterations if TPIiter > 10: if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter - 1] > 0: nu /= 2 print 'New Value of nu:', nu TPIiter += 1 print '\tIteration:', TPIiter print '\t\tDistance:', TPIdist print 'Computing final solutions' # As in SS, you need the final distributions of b and n to match the final # w, r, BQ, etc. Otherwise the euler errors are large. You need one more # fsolve.
for j in xrange(J): b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(SS_TPI_firstdoughnutring, [guesses_b[1, -1, j], guesses_n[0, -1, j]], args=(winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b, factor_ss, j, parameters, theta, tau_bq), xtol=1e-13)) for s in xrange(S - 2): # Upper triangle ind2 = np.arange(s + 2) b_guesses_to_use = np.diag(guesses_b[1:S + 1, :, j], S - (s + 2)) n_guesses_to_use = np.diag(guesses_n[:S, :, j], S - (s + 2)) solutions = opt.fsolve(Steady_state_TPI_solver, list( b_guesses_to_use) + list(n_guesses_to_use), args=( winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s, 0, parameters, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n), xtol=1e-13) b_vec = solutions[:len(solutions) / 2] b_mat[1 + ind2, S - (s + 2) + ind2, j] = b_vec n_vec = solutions[len(solutions) / 2:] n_mat[ind2, S - (s + 2) + ind2, j] = n_vec for t in xrange(0, T): b_guesses_to_use = .75 * np.diag(guesses_b[t + 1:t + S + 1, :, j]) n_guesses_to_use = np.diag(guesses_n[t:t + S, :, j]) solutions = opt.fsolve(Steady_state_TPI_solver, list( b_guesses_to_use) + list(n_guesses_to_use), args=( winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n), xtol=1e-13) b_vec = solutions[:S] b_mat[t + 1 + ind, ind, j] = b_vec n_vec = solutions[S:] n_mat[t + ind, ind, j] = n_vec inputs = list(solutions) euler_errors[t, :, j] = np.abs(Steady_state_TPI_solver( inputs, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n)) b_mat[0, :, :] = initial_b ''' ------------------------------------------------------------------------ Generate variables/values so they can be used in other modules ------------------------------------------------------------------------ ''' Kpath_TPI = np.array(list(Kinit) + list(np.ones(10) * Kss)) Lpath_TPI = np.array(list(Linit) + list(np.ones(10) * Lss)) BQpath_TPI = np.array(list(BQinit) + list(np.ones((10, J)) * BQss)) b_s = np.zeros((T, S, J)) b_s[:, 1:, :] = b_mat[:T, :-1, :] b_splus1 = np.zeros((T, S, J)) b_splus1[:, :, :] = b_mat[1:T + 1, :, :] tax_path = tax.total_taxes(rinit[:T].reshape(T, 1, 1), b_s, winit[:T].reshape(T, 1, 1), e.reshape( 1, S, J), n_mat[:T], BQinit[:T, :].reshape(T, 1, J), lambdas, factor_ss, T_H_init[:T].reshape(T, 1, 1), None, 'TPI', False, parameters, theta, tau_bq) c_path = household.get_cons(rinit[:T].reshape(T, 1, 1), b_s, winit[:T].reshape(T, 1, 1), e.reshape( 1, S, J), n_mat[:T], BQinit[:T].reshape(T, 1, J), lambdas.reshape(1, 1, J), b_splus1, parameters, tax_path) Y_path = firm.get_Y(Kpath_TPI[:T], Lpath_TPI[:T], parameters) C_path = household.get_C(c_path, omega_stationary[ :T].reshape(T, S, 1), lambdas, 'TPI') I_path = firm.get_I(Kpath_TPI[1:T + 1], Kpath_TPI[:T], delta, g_y, g_n_vector[:T]) print 'Resource Constraint Difference:', Y_path - C_path - I_path print 'Checking time path for violations of constraints.'
for t in xrange(T): household.constraint_checker_TPI( b_mat[t], n_mat[t], c_path[t], t, parameters) eul_savings = euler_errors[:, :S, :].max(1).max(1) eul_laborleisure = euler_errors[:, S:, :].max(1).max(1) ''' ------------------------------------------------------------------------ Save variables/values so they can be used in other modules ------------------------------------------------------------------------ ''' output = {'Kpath_TPI': Kpath_TPI, 'b_mat': b_mat, 'c_path': c_path, 'eul_savings': eul_savings, 'eul_laborleisure': eul_laborleisure, 'Lpath_TPI': Lpath_TPI, 'BQpath_TPI': BQpath_TPI, 'n_mat': n_mat, 'rinit': rinit, 'Yinit': Yinit, 'T_H_init': T_H_init, 'tax_path': tax_path, 'winit': winit} if get_baseline: tpi_init_dir = os.path.join(output_dir, "TPIinit") utils.mkdirs(tpi_init_dir) tpi_init_vars = os.path.join(tpi_init_dir, "TPIinit_vars.pkl") pickle.dump(output, open(tpi_init_vars, "wb")) else: tpi_dir = os.path.join(output_dir, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl") pickle.dump(output, open(tpi_vars, "wb"))
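# The while-loop in run_time_path_iteration above is a damped fixed-point
# iteration: each pass recomputes prices and transfers, steps only a fraction
# nu of the way toward the new values via utils.convex_combo, and halves nu
# whenever the distance metric starts rising (cycling). Below is a minimal
# self-contained sketch of that update scheme, assuming a generic update map
# f; the names here are illustrative, not the model's actual functions.
import numpy as np


def convex_combo(new, old, nu):
    # Step only a fraction nu of the way toward the new guess.
    return nu * new + (1 - nu) * old


def damped_fixed_point(f, x0, nu=0.4, maxiter=250, tol=1e-9):
    x = np.asarray(x0, dtype=float)
    dist_vec = np.zeros(maxiter)
    for it in range(maxiter):
        x_new = f(x)
        dist = np.max(np.abs(x_new - x))
        dist_vec[it] = dist
        # Mirror the nu-halving above: if the distance rose, damp harder.
        if it > 10 and dist_vec[it] - dist_vec[it - 1] > 0:
            nu /= 2.0
        x = convex_combo(x_new, x, nu)
        if dist < tol:
            break
    return x


# Toy contraction whose fixed point is x = 2.0:
print(damped_fixed_point(lambda x: 0.5 * x + 1.0, np.array([10.0])))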
def run_TPI(income_tax_params, tpi_params, iterative_params, small_open_params, initial_values, SS_values, fiscal_params, biz_tax_params, output_dir="./OUTPUT", baseline_spending=False): # unpack tuples of parameters analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_params maxiter, mindist_SS, mindist_TPI = iterative_params J, S, T, BW, beta, sigma, alpha, gamma, epsilon, Z, delta, ltilde, nu, g_y,\ g_n_vector, tau_payroll, tau_bq, rho, omega, N_tilde, lambdas, imm_rates, e, retire, mean_income_data,\ factor, h_wealth, p_wealth, m_wealth, b_ellipse, upsilon, chi_b, chi_n, theta = tpi_params # K0, b_sinit, b_splus1init, L0, Y0,\ # w0, r0, BQ0, T_H_0, factor, tax0, c0, initial_b, initial_n, omega_S_preTP = initial_values small_open, tpi_firm_r, tpi_hh_r = small_open_params B0, b_sinit, b_splus1init, factor, initial_b, initial_n, omega_S_preTP, initial_debt = initial_values Kss, Bss, Lss, rss, wss, BQss, T_Hss, revenue_ss, bssmat_splus1, nssmat, Yss, Gss = SS_values tau_b, delta_tau = biz_tax_params if baseline_spending==False: budget_balance, ALPHA_T, ALPHA_G, tG1, tG2, rho_G, debt_ratio_ss = fiscal_params else: budget_balance, ALPHA_T, ALPHA_G, tG1, tG2, rho_G, debt_ratio_ss, T_Hbaseline, Gbaseline = fiscal_params print 'Government spending breakpoints are tG1: ', tG1, '; and tG2:', tG2 TPI_FIG_DIR = output_dir # Initialize guesses at time paths # Make array of initial guesses for labor supply and savings domain = np.linspace(0, T, T) domain2 = np.tile(domain.reshape(T, 1, 1), (1, S, J)) ending_b = bssmat_splus1 guesses_b = (-1 / (domain2 + 1)) * (ending_b - initial_b) + ending_b ending_b_tail = np.tile(ending_b.reshape(1, S, J), (S, 1, 1)) guesses_b = np.append(guesses_b, ending_b_tail, axis=0) domain3 = np.tile(np.linspace(0, 1, T).reshape(T, 1, 1), (1, S, J)) guesses_n = domain3 * (nssmat - initial_n) + initial_n ending_n_tail = np.tile(nssmat.reshape(1, S, J), (S, 1, 1)) guesses_n = np.append(guesses_n, ending_n_tail, axis=0) b_mat = guesses_b#np.zeros((T + S, S, J)) n_mat = guesses_n#np.zeros((T + S, S, J)) ind = np.arange(S) L_init = np.ones((T+S,))*Lss B_init = np.ones((T+S,))*Bss L_params = (e.reshape(1, S, J), omega[:T, :].reshape(T, S, 1), lambdas.reshape(1, 1, J), 'TPI') L_init[:T] = firm.get_L(n_mat[:T], L_params) B_params = (omega[:T-1].reshape(T-1, S, 1), lambdas.reshape(1, 1, J), imm_rates[:T-1].reshape(T-1,S,1), g_n_vector[1:T], 'TPI') B_init[1:T] = household.get_K(b_mat[:T-1], B_params) B_init[0] = B0 if small_open == False: if budget_balance: K_init = B_init else: K_init = B_init * Kss/Bss else: K_params = (Z, gamma, epsilon, delta, tau_b, delta_tau) K_init = firm.get_K(L_init, tpi_firm_r, K_params) K = K_init # if np.any(K < 0): # print 'K_init has negative elements. Setting them positive to prevent NAN.' 
# K[:T] = np.fmax(K[:T], 0.05*B[:T]) L = L_init B = B_init Y_params = (Z, gamma, epsilon) Y = firm.get_Y(K, L, Y_params) w_params = (Z, gamma, epsilon) w = firm.get_w(Y, L, w_params) if small_open == False: r_params = (Z, gamma, epsilon, delta, tau_b, delta_tau) r = firm.get_r(Y, K, r_params) else: r = tpi_hh_r BQ = np.zeros((T + S, J)) BQ0_params = (omega_S_preTP.reshape(S, 1), lambdas, rho.reshape(S, 1), g_n_vector[0], 'SS') BQ0 = household.get_BQ(r[0], initial_b, BQ0_params) for j in xrange(J): BQ[:, j] = list(np.linspace(BQ0[j], BQss[j], T)) + [BQss[j]] * S BQ = np.array(BQ) if budget_balance: if np.abs(T_Hss) < 1e-13: T_Hss2 = 0.0 # sometimes SS is very small but not zero, even if taxes are zero; this gets rid of the approximation error, which affects the perc changes below else: T_Hss2 = T_Hss T_H = np.ones(T + S) * T_Hss2 REVENUE = T_H G = np.zeros(T + S) elif baseline_spending==False: T_H = ALPHA_T * Y elif baseline_spending==True: T_H = T_Hbaseline T_H_new = T_H # Need to set T_H_new for later reference G = Gbaseline G_0 = Gbaseline[0] # Initialize some inputs # D = np.zeros(T + S) D = debt_ratio_ss*Y omega_shift = np.append(omega_S_preTP.reshape(1,S),omega[:T-1,:],axis=0) BQ_params = (omega_shift.reshape(T, S, 1), lambdas.reshape(1, 1, J), rho.reshape(1, S, 1), g_n_vector[:T].reshape(T, 1), 'TPI') tax_params = np.zeros((T,S,J,etr_params.shape[2])) for i in range(etr_params.shape[2]): tax_params[:,:,:,i] = np.tile(np.reshape(np.transpose(etr_params[:,:T,i]),(T,S,1)),(1,1,J)) REVENUE_params = (np.tile(e.reshape(1, S, J),(T,1,1)), lambdas.reshape(1, 1, J), omega[:T].reshape(T, S, 1), 'TPI', tax_params, theta, tau_bq, tau_payroll, h_wealth, p_wealth, m_wealth, retire, T, S, J, tau_b, delta_tau) # print 'D/Y:', D[:T]/Y[:T] # print 'T/Y:', T_H[:T]/Y[:T] # print 'G/Y:', G[:T]/Y[:T] # print 'Int payments to GDP:', (r[:T]*D[:T])/Y[:T] # quit() TPIiter = 0 TPIdist = 10 PLOT_TPI = False report_tG1 = False euler_errors = np.zeros((T, 2 * S, J)) TPIdist_vec = np.zeros(maxiter) print 'analytical mtrs in tpi = ', analytical_mtrs while (TPIiter < maxiter) and (TPIdist >= mindist_TPI): # Plot TPI for K for each iteration, so we can see if there is a # problem if PLOT_TPI is True: #K_plot = list(K) + list(np.ones(10) * Kss) D_plot = list(D) + list(np.ones(10) * Yss * debt_ratio_ss) plt.figure() plt.axhline( y=Kss, color='black', linewidth=2, label=r"Steady State $\hat{K}$", ls='--') plt.plot(np.arange( T + 10), D_plot[:T + 10], 'b', linewidth=2, label=r"TPI time path $\hat{K}_t$") plt.savefig(os.path.join(TPI_FIG_DIR, "TPI_D")) if report_tG1 is True: print '\tAt time tG1-1:' print '\t\tG = ', G[tG1-1] print '\t\tK = ', K[tG1-1] print '\t\tr = ', r[tG1-1] print '\t\tD = ', D[tG1-1] guesses = (guesses_b, guesses_n) outer_loop_vars = (r, w, K, BQ, T_H) inner_loop_params = (income_tax_params, tpi_params, initial_values, ind) # Solve HH problem in inner loop euler_errors, b_mat, n_mat = inner_loop(guesses, outer_loop_vars, inner_loop_params) bmat_s = np.zeros((T, S, J)) bmat_s[0, 1:, :] = initial_b[:-1, :] bmat_s[1:, 1:, :] = b_mat[:T-1, :-1, :] bmat_splus1 = np.zeros((T, S, J)) bmat_splus1[:, :, :] = b_mat[:T, :, :] #L_params = (e.reshape(1, S, J), omega[:T, :].reshape(T, S, 1), lambdas.reshape(1, 1, J), 'TPI') # defined above L[:T] = firm.get_L(n_mat[:T], L_params) #B_params = (omega[:T-1].reshape(T-1, S, 1), lambdas.reshape(1, 1, J), imm_rates[:T-1].reshape(T-1,S,1), g_n_vector[1:T], 'TPI') # defined above B[1:T] = household.get_K(bmat_splus1[:T-1], B_params) if np.any(B < 0): print 'B has
negative elements. B[0:9]:', B[0:9] print 'B[T-2:T]:', B[T-2:T] if small_open == False: if budget_balance: K[:T] = B[:T] else: if baseline_spending == False: Y = T_H/ALPHA_T #SBF 3/3: This seems totally unnecessary as both these variables are defined above. # tax_params = np.zeros((T,S,J,etr_params.shape[2])) # for i in range(etr_params.shape[2]): # tax_params[:,:,:,i] = np.tile(np.reshape(np.transpose(etr_params[:,:T,i]),(T,S,1)),(1,1,J)) # REVENUE_params = (np.tile(e.reshape(1, S, J),(T,1,1)), lambdas.reshape(1, 1, J), omega[:T].reshape(T, S, 1), 'TPI', # tax_params, theta, tau_bq, tau_payroll, h_wealth, p_wealth, m_wealth, retire, T, S, J, tau_b, delta_tau) # defined above REVENUE = np.array(list(tax.revenue(np.tile(r[:T].reshape(T, 1, 1),(1,S,J)), np.tile(w[:T].reshape(T, 1, 1),(1,S,J)), bmat_s, n_mat[:T,:,:], BQ[:T].reshape(T, 1, J), Y[:T], L[:T], K[:T], factor, REVENUE_params)) + [revenue_ss] * S) D_0 = initial_debt * Y[0] other_dg_params = (T, r, g_n_vector, g_y) if baseline_spending==False: G_0 = ALPHA_G[0] * Y[0] dg_fixed_values = (Y, REVENUE, T_H, D_0,G_0) Dnew, G = fiscal.D_G_path(dg_fixed_values, fiscal_params, other_dg_params, baseline_spending=baseline_spending) K[:T] = B[:T] - Dnew[:T] if np.any(K < 0): print 'K has negative elements. Setting them positive to prevent NAN.' K[:T] = np.fmax(K[:T], 0.05*B[:T]) else: # K_params previously set to = (Z, gamma, epsilon, delta, tau_b, delta_tau) K[:T] = firm.get_K(L[:T], tpi_firm_r[:T], K_params) Y_params = (Z, gamma, epsilon) Ynew = firm.get_Y(K[:T], L[:T], Y_params) Y = Ynew w_params = (Z, gamma, epsilon) wnew = firm.get_w(Ynew[:T], L[:T], w_params) if small_open == False: r_params = (Z, gamma, epsilon, delta, tau_b, delta_tau) rnew = firm.get_r(Ynew[:T], K[:T], r_params) else: rnew = r.copy() print 'Y and T_H: ', Y[3], T_H[3] # omega_shift = np.append(omega_S_preTP.reshape(1,S),omega[:T-1,:],axis=0) # defined above # BQ_params = (omega_shift.reshape(T, S, 1), lambdas.reshape(1, 1, J), rho.reshape(1, S, 1), # g_n_vector[:T].reshape(T, 1), 'TPI') # defined above b_mat_shift = np.append(np.reshape(initial_b,(1,S,J)),b_mat[:T-1,:,:],axis=0) BQnew = household.get_BQ(rnew[:T].reshape(T, 1), b_mat_shift, BQ_params) # tax_params = np.zeros((T,S,J,etr_params.shape[2])) # for i in range(etr_params.shape[2]): # tax_params[:,:,:,i] = np.tile(np.reshape(np.transpose(etr_params[:,:T,i]),(T,S,1)),(1,1,J)) # REVENUE_params = (np.tile(e.reshape(1, S, J),(T,1,1)), lambdas.reshape(1, 1, J), omega[:T].reshape(T, S, 1), 'TPI', # tax_params, theta, tau_bq, tau_payroll, h_wealth, p_wealth, m_wealth, retire, T, S, J, tau_b, delta_tau) # defined above REVENUE = np.array(list(tax.revenue(np.tile(rnew[:T].reshape(T, 1, 1),(1,S,J)), np.tile(wnew[:T].reshape(T, 1, 1),(1,S,J)), bmat_s, n_mat[:T,:,:], BQnew[:T].reshape(T, 1, J), Y[:T], L[:T], K[:T], factor, REVENUE_params)) + [revenue_ss] * S) if budget_balance: T_H_new = REVENUE elif baseline_spending==False: T_H_new = ALPHA_T[:T] * Y[:T] # If baseline_spending==True, no need to update T_H, which remains fixed. if small_open==True and budget_balance==False: # Loop through years to calculate debt and gov't spending. This is done earlier when small_open=False.
D_0 = initial_debt * Y[0] other_dg_params = (T, r, g_n_vector, g_y) if baseline_spending==False: G_0 = ALPHA_G[0] * Y[0] dg_fixed_values = (Y, REVENUE, T_H, D_0,G_0) Dnew, G = fiscal.D_G_path(dg_fixed_values, fiscal_params, other_dg_params, baseline_spending=baseline_spending) w[:T] = utils.convex_combo(wnew[:T], w[:T], nu) r[:T] = utils.convex_combo(rnew[:T], r[:T], nu) BQ[:T] = utils.convex_combo(BQnew[:T], BQ[:T], nu) # D[:T] = utils.convex_combo(Dnew[:T], D[:T], nu) D = Dnew Y[:T] = utils.convex_combo(Ynew[:T], Y[:T], nu) if baseline_spending==False: T_H[:T] = utils.convex_combo(T_H_new[:T], T_H[:T], nu) guesses_b = utils.convex_combo(b_mat, guesses_b, nu) guesses_n = utils.convex_combo(n_mat, guesses_n, nu) print 'r diff: ', (rnew[:T]-r[:T]).max(), (rnew[:T]-r[:T]).min() print 'w diff: ', (wnew[:T]-w[:T]).max(), (wnew[:T]-w[:T]).min() print 'BQ diff: ', (BQnew[:T]-BQ[:T]).max(), (BQnew[:T]-BQ[:T]).min() print 'T_H diff: ', (T_H_new[:T]-T_H[:T]).max(), (T_H_new[:T]-T_H[:T]).min() if baseline_spending==False: if T_H.all() != 0: TPIdist = np.array(list(utils.pct_diff_func(rnew[:T], r[:T])) + list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) + list( utils.pct_diff_func(wnew[:T], w[:T])) + list(utils.pct_diff_func(T_H_new[:T], T_H[:T]))).max() else: TPIdist = np.array(list(utils.pct_diff_func(rnew[:T], r[:T])) + list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) + list( utils.pct_diff_func(wnew[:T], w[:T])) + list(np.abs(T_H[:T]))).max() else: # TPIdist = np.array(list(utils.pct_diff_func(rnew[:T], r[:T])) + list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) + list( # utils.pct_diff_func(wnew[:T], w[:T])) + list(utils.pct_diff_func(Dnew[:T], D[:T]))).max() TPIdist = np.array(list(utils.pct_diff_func(rnew[:T], r[:T])) + list(utils.pct_diff_func(BQnew[:T], BQ[:T]).flatten()) + list( utils.pct_diff_func(wnew[:T], w[:T])) + list(utils.pct_diff_func(Ynew[:T], Y[:T]))).max() TPIdist_vec[TPIiter] = TPIdist # After T=10, if cycling occurs, drop the value of nu # wait til after T=10 or so, because sometimes there is a jump up # in the first couple iterations # if TPIiter > 10: # if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter - 1] > 0: # nu /= 2 # print 'New Value of nu:', nu TPIiter += 1 print 'Iteration:', TPIiter print '\tDistance:', TPIdist # print 'D/Y:', (D[:T]/Ynew[:T]).max(), (D[:T]/Ynew[:T]).min(), np.median(D[:T]/Ynew[:T]) # print 'T/Y:', (T_H_new[:T]/Ynew[:T]).max(), (T_H_new[:T]/Ynew[:T]).min(), np.median(T_H_new[:T]/Ynew[:T]) # print 'G/Y:', (G[:T]/Ynew[:T]).max(), (G[:T]/Ynew[:T]).min(), np.median(G[:T]/Ynew[:T]) # print 'Int payments to GDP:', ((r[:T]*D[:T])/Ynew[:T]).max(), ((r[:T]*D[:T])/Ynew[:T]).min(), np.median((r[:T]*D[:T])/Ynew[:T]) # # print 'D/Y:', (D[:T]/Ynew[:T]) # print 'T/Y:', (T_H_new[:T]/Ynew[:T]) # print 'G/Y:', (G[:T]/Ynew[:T]) # # print 'deficit: ', REVENUE[:T] - T_H_new[:T] - G[:T] # Loop through years to calculate debt and gov't spending. The re-assignment of G0 & D0 is necessary because Y0 may change in the TPI loop. 
if budget_balance == False: D_0 = initial_debt * Y[0] other_dg_params = (T, r, g_n_vector, g_y) if baseline_spending==False: G_0 = ALPHA_G[0] * Y[0] dg_fixed_values = (Y, REVENUE, T_H, D_0,G_0) D, G = fiscal.D_G_path(dg_fixed_values, fiscal_params, other_dg_params, baseline_spending=baseline_spending) # Solve HH problem in inner loop guesses = (guesses_b, guesses_n) outer_loop_vars = (r, w, K, BQ, T_H) inner_loop_params = (income_tax_params, tpi_params, initial_values, ind) euler_errors, b_mat, n_mat = inner_loop(guesses, outer_loop_vars, inner_loop_params) bmat_s = np.zeros((T, S, J)) bmat_s[0, 1:, :] = initial_b[:-1, :] bmat_s[1:, 1:, :] = b_mat[:T-1, :-1, :] bmat_splus1 = np.zeros((T, S, J)) bmat_splus1[:, :, :] = b_mat[:T, :, :] #L_params = (e.reshape(1, S, J), omega[:T, :].reshape(T, S, 1), lambdas.reshape(1, 1, J), 'TPI') # defined above L[:T] = firm.get_L(n_mat[:T], L_params) #B_params = (omega[:T-1].reshape(T-1, S, 1), lambdas.reshape(1, 1, J), imm_rates[:T-1].reshape(T-1,S,1), g_n_vector[1:T], 'TPI') # defined above B[1:T] = household.get_K(bmat_splus1[:T-1], B_params) if small_open == False: K[:T] = B[:T] - D[:T] else: # K_params previously set to = (Z, gamma, epsilon, delta, tau_b, delta_tau) K[:T] = firm.get_K(L[:T], tpi_firm_r[:T], K_params) # Y_params previously set to = (Z, gamma, epsilon) Ynew = firm.get_Y(K[:T], L[:T], Y_params) # testing for change in Y ydiff = Ynew[:T] - Y[:T] ydiff_max = np.amax(np.abs(ydiff)) print 'ydiff_max = ', ydiff_max w_params = (Z, gamma, epsilon) wnew = firm.get_w(Ynew[:T], L[:T], w_params) if small_open == False: # r_params previously set to = (Z, gamma, epsilon, delta, tau_b, delta_tau) rnew = firm.get_r(Ynew[:T], K[:T], r_params) else: rnew = r # Note: previously, Y was not reassigned to equal Ynew at this point. 
Y = Ynew[:] # omega_shift = np.append(omega_S_preTP.reshape(1,S),omega[:T-1,:],axis=0) # BQ_params = (omega_shift.reshape(T, S, 1), lambdas.reshape(1, 1, J), rho.reshape(1, S, 1), # g_n_vector[:T].reshape(T, 1), 'TPI') b_mat_shift = np.append(np.reshape(initial_b,(1,S,J)),b_mat[:T-1,:,:],axis=0) BQnew = household.get_BQ(rnew[:T].reshape(T, 1), b_mat_shift, BQ_params) # tax_params = np.zeros((T,S,J,etr_params.shape[2])) # for i in range(etr_params.shape[2]): # tax_params[:,:,:,i] = np.tile(np.reshape(np.transpose(etr_params[:,:T,i]),(T,S,1)),(1,1,J)) # REVENUE_params = (np.tile(e.reshape(1, S, J),(T,1,1)), lambdas.reshape(1, 1, J), omega[:T].reshape(T, S, 1), 'TPI', # tax_params, theta, tau_bq, tau_payroll, h_wealth, p_wealth, m_wealth, retire, T, S, J, tau_b, delta_tau) REVENUE = np.array(list(tax.revenue(np.tile(rnew[:T].reshape(T, 1, 1),(1,S,J)), np.tile(wnew[:T].reshape(T, 1, 1),(1,S,J)), bmat_s, n_mat[:T,:,:], BQnew[:T].reshape(T, 1, J), Ynew[:T], L[:T], K[:T], factor, REVENUE_params)) + [revenue_ss] * S) etr_params_path = np.zeros((T,S,J,etr_params.shape[2])) for i in range(etr_params.shape[2]): etr_params_path[:,:,:,i] = np.tile(np.reshape(np.transpose(etr_params[:,:T,i]),(T,S,1)),(1,1,J)) tax_path_params = (np.tile(e.reshape(1, S, J),(T,1,1)), lambdas, 'TPI', retire, etr_params_path, h_wealth, p_wealth, m_wealth, tau_payroll, theta, tau_bq, J, S) tax_path = tax.total_taxes(np.tile(r[:T].reshape(T, 1, 1),(1,S,J)), np.tile(w[:T].reshape(T, 1, 1),(1,S,J)), bmat_s, n_mat[:T,:,:], BQ[:T, :].reshape(T, 1, J), factor, T_H[:T].reshape(T, 1, 1), None, False, tax_path_params) cons_params = (e.reshape(1, S, J), lambdas.reshape(1, 1, J), g_y) c_path = household.get_cons(r[:T].reshape(T, 1, 1), w[:T].reshape(T, 1, 1), bmat_s, bmat_splus1, n_mat[:T,:,:], BQ[:T].reshape(T, 1, J), tax_path, cons_params) C_params = (omega[:T].reshape(T, S, 1), lambdas, 'TPI') C = household.get_C(c_path, C_params) if budget_balance==False: D_0 = initial_debt * Y[0] other_dg_params = (T, r, g_n_vector, g_y) if baseline_spending==False: G_0 = ALPHA_G[0] * Y[0] dg_fixed_values = (Y, REVENUE, T_H, D_0,G_0) D, G = fiscal.D_G_path(dg_fixed_values, fiscal_params, other_dg_params, baseline_spending=baseline_spending) if small_open == False: I_params = (delta, g_y, omega[:T].reshape(T, S, 1), lambdas, imm_rates[:T].reshape(T, S, 1), g_n_vector[1:T+1], 'TPI') I = firm.get_I(bmat_splus1[:T], K[1:T+1], K[:T], I_params) rc_error = Y[:T] - C[:T] - I[:T] - G[:T] else: #InvestmentPlaceholder = np.zeros(bmat_splus1[:T].shape) #I_params = (delta, g_y, omega[:T].reshape(T, S, 1), lambdas, imm_rates[:T].reshape(T, S, 1), g_n_vector[1:T+1], 'TPI') I = (1+g_n_vector[:T])*np.exp(g_y)*K[1:T+1] - (1.0 - delta) * K[:T] #firm.get_I(InvestmentPlaceholder, K[1:T+1], K[:T], I_params) BI_params = (0.0, g_y, omega[:T].reshape(T, S, 1), lambdas, imm_rates[:T].reshape(T, S, 1), g_n_vector[1:T+1], 'TPI') BI = firm.get_I(bmat_splus1[:T], B[1:T+1], B[:T], BI_params) new_borrowing = D[1:T]*(1+g_n_vector[1:T])*np.exp(g_y) - D[:T-1] rc_error = Y[:T-1] + new_borrowing - (C[:T-1] + BI[:T-1] + G[:T-1] ) + (tpi_hh_r[:T-1] * B[:T-1] - (delta + tpi_firm_r[:T-1])*K[:T-1] - tpi_hh_r[:T-1]*D[:T-1]) #print 'Y(T-1):', Y[T-1], '\n','C(T-1):', C[T-1], '\n','K(T-1):', K[T-1], '\n','B(T-1):', B[T-1], '\n','BI(T-1):', BI[T-1], '\n','I(T-1):', I[T-1] rce_max = np.amax(np.abs(rc_error)) print 'Max absolute value resource constraint error:', rce_max print 'Checking time path for violations of constraints.'
for t in xrange(T): household.constraint_checker_TPI( b_mat[t], n_mat[t], c_path[t], t, ltilde) eul_savings = euler_errors[:, :S, :].max(1).max(1) eul_laborleisure = euler_errors[:, S:, :].max(1).max(1) # print 'Max Euler error, savings: ', eul_savings # print 'Max Euler error labor supply: ', eul_laborleisure ''' ------------------------------------------------------------------------ Save variables/values so they can be used in other modules ------------------------------------------------------------------------ ''' output = {'Y': Y, 'K': K, 'L': L, 'C': C, 'I': I, 'BQ': BQ, 'REVENUE': REVENUE, 'T_H': T_H, 'G': G, 'D': D, 'r': r, 'w': w, 'b_mat': b_mat, 'n_mat': n_mat, 'c_path': c_path, 'tax_path': tax_path, 'eul_savings': eul_savings, 'eul_laborleisure': eul_laborleisure} tpi_dir = os.path.join(output_dir, "TPI") utils.mkdirs(tpi_dir) tpi_vars = os.path.join(tpi_dir, "TPI_vars.pkl") pickle.dump(output, open(tpi_vars, "wb")) macro_output = {'Y': Y, 'K': K, 'L': L, 'C': C, 'I': I, 'BQ': BQ, 'T_H': T_H, 'r': r, 'w': w, 'tax_path': tax_path} growth = (1+g_n_vector)*np.exp(g_y) with open('TPI_output.csv', 'wb') as csvfile: tpiwriter = csv.writer(csvfile) tpiwriter.writerow(Y) tpiwriter.writerow(D) tpiwriter.writerow(REVENUE) tpiwriter.writerow(G) tpiwriter.writerow(T_H) tpiwriter.writerow(C) tpiwriter.writerow(K) tpiwriter.writerow(I) tpiwriter.writerow(r) if small_open == True: tpiwriter.writerow(B) tpiwriter.writerow(BI) tpiwriter.writerow(new_borrowing) tpiwriter.writerow(growth) tpiwriter.writerow(rc_error) tpiwriter.writerow(ydiff) if np.any(G < 0): print 'Government spending is negative along transition path to satisfy budget' if ((TPIiter >= maxiter) or (np.absolute(TPIdist) > mindist_TPI)) and ENFORCE_SOLUTION_CHECKS: raise RuntimeError("Transition path equilibrium not found (TPIdist)") if ((np.any(np.absolute(rc_error) >= mindist_TPI)) and ENFORCE_SOLUTION_CHECKS): raise RuntimeError("Transition path equilibrium not found (rc_error)") if ((np.any(np.absolute(eul_savings) >= mindist_TPI) or (np.any(np.absolute(eul_laborleisure) > mindist_TPI))) and ENFORCE_SOLUTION_CHECKS): raise RuntimeError("Transition path equilibrium not found (eulers)") # Non-stationary output # macro_ns_output = {'K_ns_path': K_ns_path, 'C_ns_path': C_ns_path, 'I_ns_path': I_ns_path, # 'L_ns_path': L_ns_path, 'BQ_ns_path': BQ_ns_path, # 'rinit': rinit, 'Y_ns_path': Y_ns_path, 'T_H_ns_path': T_H_ns_path, # 'w_ns_path': w_ns_path} return output, macro_output
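# run_TPI repeatedly calls fiscal.D_G_path to roll government debt and
# spending forward. Below is a minimal sketch of the one-period budget
# identity such a path update is built on: new borrowing covers spending G,
# transfers T_H, and interest on existing debt, net of tax REVENUE, and next
# period's debt is restated in stationarized units by dividing by
# (1 + g_n) * exp(g_y). This illustrates only the identity; all names are
# assumptions, and the actual spending rules (tG1, tG2, rho_G, debt_ratio_ss)
# live inside fiscal.D_G_path.
import numpy as np


def debt_path_sketch(Y, REVENUE, T_H, G, r, g_n, g_y, D0):
    T = len(Y)
    D = np.zeros(T)
    D[0] = D0
    for t in range(T - 1):
        # Primary deficit plus interest on existing debt.
        deficit = G[t] + T_H[t] + r[t] * D[t] - REVENUE[t]
        # Stationarize next period's debt by the growth adjustment.
        D[t + 1] = (D[t] + deficit) / ((1 + g_n[t + 1]) * np.exp(g_y))
    return D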
# @Time : 2019-03-14 17:59 # @Author : xzr # @Contact : [email protected] # @Desc : logging helper class import logging import os import sys import time from utils import mkdirs from utils.multiprocesslogging import MultiProcessTimedRotatingFileHandler LOGS_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'logs') mkdirs(LOGS_DIR) class Logger(object): '''Uses Django-style logging. @name: the logger name to record; it must be configured in settings. ''' __loggers = {} def __new__(cls, name, type_name, when='M', interval=1440, backupCount=10): logger_name = '%s_%s' % (name, type_name) logger = cls.__loggers.get(logger_name, None) if not logger: logger = logging.getLogger(name) the_game_log_dir = os.path.join(LOGS_DIR, name) mkdirs(the_game_log_dir)
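# The Logger class above memoizes one logger per (name, type_name) key by
# overriding __new__. A minimal standalone sketch of that caching pattern
# follows; the class and key names here are illustrative, not the project's
# API.
import logging


class CachedLogger(object):
    _cache = {}

    def __new__(cls, name, type_name):
        key = '%s_%s' % (name, type_name)
        if key not in cls._cache:
            inst = super(CachedLogger, cls).__new__(cls)
            inst.logger = logging.getLogger(key)
            cls._cache[key] = inst
        return cls._cache[key]


# Repeated construction with the same key returns the same object:
a = CachedLogger('game', 'error')
b = CachedLogger('game', 'error')
assert a is b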
def run_steady_state(ss_parameters, iterative_params, get_baseline=False, calibrate_model=False, output_dir="./OUTPUT"): ''' ------------------------------------------------------------------------ Run SS ------------------------------------------------------------------------ ''' if get_baseline: # Generate initial guesses for chi^b_j and chi^n_s chi_params = np.zeros(S + J) chi_params[:J] = chi_b_guess chi_params[J:] = chi_n_guess # First run SS simulation with guesses at initial values for b, n, w, r, etc. # For initial guesses of b and n, we choose very small b, and medium n b_guess = np.ones((S, J)).flatten() * .01 n_guess = np.ones((S, J)).flatten() * .5 * ltilde # For initial guesses of w, r, T_H, and factor, we use values that are close # to some steady state values. wguess = 1.2 rguess = .06 T_Hguess = 0 factorguess = 100000 solutions = SS_solver(b_guess.reshape(S, J), n_guess.reshape(S, J), wguess, rguess, T_Hguess, factorguess, chi_params[ J:], chi_params[:J], ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e) if calibrate_model: outputs = {'solutions': solutions, 'chi_params': chi_params} ss_init_path = os.path.join( output_dir, "Saved_moments/SS_init_solutions.pkl") pickle.dump(outputs, open(ss_init_path, "wb")) function_to_minimize_X = lambda x: function_to_minimize( x, chi_params, ss_parameters, iterative_params, omega_SS, rho, lambdas, tau_bq, e, output_dir) bnds = tuple([(1e-6, None)] * (S + J)) # In order to scale all the parameters to estimate in the minimizer, we have the minimizer fit a vector of ones that # will be multiplied by the chi initial guesses inside the function. Otherwise, if chi^b_j=1e5 for some j, and the # minimizer perturbs that value by 1e-8, the % difference will be extremely small, outside of the tolerance of the # minimizer, and it will not change that parameter.
chi_params_scalars = np.ones(S + J) chi_params_scalars = opt.minimize(function_to_minimize_X, chi_params_scalars, method='TNC', tol=MINIMIZER_TOL, bounds=bnds, options=MINIMIZER_OPTIONS).x chi_params *= chi_params_scalars print 'The final scaling params', chi_params_scalars print 'The final bequest parameter values:', chi_params solutions_dict = pickle.load(open(ss_init_path, "rb")) solutions = solutions_dict['solutions'] b_guess = solutions[:S * J] n_guess = solutions[S * J:2 * S * J] wguess, rguess, factorguess, T_Hguess = solutions[2 * S * J:] solutions = SS_solver(b_guess.reshape(S, J), n_guess.reshape(S, J), wguess, rguess, T_Hguess, factorguess, chi_params[ J:], chi_params[:J], ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e) else: solutions_dict = pickle.load(open(ss_init_path, "rb")) solutions = solutions_dict['solutions'] chi_params = solutions_dict['chi_params'] b_guess = solutions[:S * J] n_guess = solutions[S * J:2 * S * J] wguess, rguess, factorguess, T_Hguess = solutions[2 * S * J:] solutions = SS_solver(b_guess.reshape(S, J), n_guess.reshape(S, J), wguess, rguess, T_Hguess, factorguess, chi_params[ J:], chi_params[:J], ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e) ''' ------------------------------------------------------------------------ Generate the SS values of variables, including euler errors ------------------------------------------------------------------------ ''' if get_baseline: outputs = {'solutions': solutions, 'chi_params': chi_params} ss_init_dir = os.path.join( output_dir, "Saved_moments/SS_init_solutions.pkl") pickle.dump(outputs, open(ss_init_dir, "wb")) else: outputs = {'solutions': solutions, 'chi_params': chi_params} ss_exp_dir = os.path.join( output_dir, "Saved_moments/SS_experiment_solutions.pkl") pickle.dump(outputs, open(ss_exp_dir, "wb")) bssmat = solutions[0:(S - 1) * J].reshape(S - 1, J) bq = solutions[(S - 1) * J:S * J] bssmat_s = np.array(list(np.zeros(J).reshape(1, J)) + list(bssmat)) bssmat_splus1 = np.array(list(bssmat) + list(bq.reshape(1, J))) nssmat = solutions[S * J:2 * S * J].reshape(S, J) wss, rss, factor_ss, T_Hss = solutions[2 * S * J:] Kss = household.get_K(bssmat_splus1, omega_SS.reshape( S, 1), lambdas, g_n_ss, 'SS') Lss = firm.get_L(e, nssmat, omega_SS.reshape(S, 1), lambdas, 'SS') Yss = firm.get_Y(Kss, Lss, ss_parameters) Iss = firm.get_I(Kss, Kss, delta, g_y, g_n_ss) theta = tax.replacement_rate_vals( nssmat, wss, factor_ss, e, J, omega_SS.reshape(S, 1), lambdas) BQss = household.get_BQ(rss, bssmat_splus1, omega_SS.reshape( S, 1), lambdas, rho.reshape(S, 1), g_n_ss, 'SS') b_s = np.array(list(np.zeros(J).reshape((1, J))) + list(bssmat)) taxss = tax.total_taxes(rss, b_s, wss, e, nssmat, BQss, lambdas, factor_ss, T_Hss, None, 'SS', False, ss_parameters, theta, tau_bq) cssmat = household.get_cons(rss, b_s, wss, e, nssmat, BQss.reshape( 1, J), lambdas.reshape(1, J), bssmat_splus1, ss_parameters, taxss) Css = household.get_C(cssmat, omega_SS.reshape(S, 1), lambdas, 'SS') resource_constraint = Yss - (Css + Iss) print 'Resource Constraint Difference:', resource_constraint household.constraint_checker_SS(bssmat, nssmat, cssmat, ss_parameters) b_s = np.array(list(np.zeros(J).reshape((1, J))) + list(bssmat)) b_splus1 = bssmat_splus1 b_splus2 = np.array( list(bssmat_splus1[1:]) + list(np.zeros(J).reshape((1, J)))) chi_b = np.tile(chi_params[:J].reshape(1, J), (S, 1)) chi_n = np.array(chi_params[J:]) euler_savings = np.zeros((S, J)) euler_labor_leisure = np.zeros((S, J)) for j in xrange(J):
euler_savings[:, j] = household.euler_savings_func(wss, rss, e[:, j], nssmat[:, j], b_s[:, j], b_splus1[:, j], b_splus2[ :, j], BQss[j], factor_ss, T_Hss, chi_b[:, j], ss_parameters, theta[j], tau_bq[j], rho, lambdas[j]) euler_labor_leisure[:, j] = household.euler_labor_leisure_func(wss, rss, e[:, j], nssmat[:, j], b_s[ :, j], b_splus1[:, j], BQss[j], factor_ss, T_Hss, chi_n, ss_parameters, theta[j], tau_bq[j], lambdas[j]) ''' ------------------------------------------------------------------------ Save the values in various ways, depending on the stage of the simulation, to be used in TPI or graphing functions ------------------------------------------------------------------------ ''' # Pickle variables output = {'Kss': Kss, 'bssmat': bssmat, 'Lss': Lss, 'nssmat': nssmat, 'Yss': Yss, 'wss': wss, 'rss': rss, 'theta': theta, 'BQss': BQss, 'factor_ss': factor_ss, 'bssmat_s': bssmat_s, 'cssmat': cssmat, 'bssmat_splus1': bssmat_splus1, 'T_Hss': T_Hss, 'euler_savings': euler_savings, 'euler_labor_leisure': euler_labor_leisure, 'chi_n': chi_n, 'chi_b': chi_b} if get_baseline: utils.mkdirs(os.path.join(output_dir, "SSinit")) ss_init_dir = os.path.join(output_dir, "SSinit/ss_init_vars.pkl") pickle.dump(output, open(ss_init_dir, "wb")) bssmat_init = bssmat_splus1 nssmat_init = nssmat # Pickle variables for TPI initial values output2 = {'bssmat_init': bssmat_init, 'nssmat_init': nssmat_init} ss_init_tpi = os.path.join(output_dir, "SSinit/ss_init_tpi_vars.pkl") pickle.dump(output2, open(ss_init_tpi, "wb")) else: utils.mkdirs(os.path.join(output_dir, "SS")) ss_vars = os.path.join(output_dir, "SS/ss_vars.pkl") pickle.dump(output, open(ss_vars, "wb")) return output
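# Both run_steady_state variants calibrate chi by having the minimizer fit a
# vector of ones that rescales the initial guesses inside the objective (see
# the comment above about chi^b_j = 1e5 and tiny relative perturbations). A
# minimal sketch of that rescaling trick with a toy objective; chi_guess,
# chi_target, and the objective below are stand-ins, not the model's
# function_to_minimize.
import numpy as np
import scipy.optimize as opt

chi_guess = np.array([1e5, 2.0, 0.5])      # wildly different magnitudes
chi_target = np.array([1.3e5, 1.5, 0.8])   # pretend these fit the data


def objective(scalars):
    # Rescale inside the objective so the minimizer works on unit-scale
    # multipliers rather than on the raw, badly scaled parameters.
    chi = scalars * chi_guess
    return np.sum(((chi - chi_target) / chi_target) ** 2)


res = opt.minimize(objective, np.ones(3), method='TNC',
                   bounds=[(1e-6, None)] * 3, tol=1e-12)
chi_hat = res.x * chi_guess
print(chi_hat)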
def run_steady_state(income_tax_parameters, ss_parameters, iterative_params, get_baseline=False, calibrate_model=False, output_dir="./OUTPUT"): ''' ------------------------------------------------------------------------ Run SS ------------------------------------------------------------------------ ''' J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\ g_n_ss, tau_payroll, retire, mean_income_data,\ h_wealth, p_wealth, m_wealth, b_ellipse, upsilon = ss_parameters analytical_mtrs, etr_params, mtrx_params, mtry_params = income_tax_parameters # Generate initial guesses for chi^b_j and chi^n_s chi_params = np.zeros(S + J) chi_params[:J] = chi_b_guess chi_params[J:] = chi_n_guess # First run SS simulation with guesses at initial values for b, n, w, r, etc. # For initial guesses of b and n, we choose very small b, and medium n b_guess = np.ones((S, J)).flatten() * 0.05 n_guess = np.ones((S, J)).flatten() * .4 * ltilde # For initial guesses of w, r, T_H, and factor, we use values that are close # to some steady state values. wguess = 1.2 rguess = .06 T_Hguess = 0.12 factorguess = 70000.0 guesses = [wguess, rguess, T_Hguess, factorguess] args_ = (b_guess.reshape(S, J), n_guess.reshape(S, J), chi_params[J:], chi_params[:J], income_tax_parameters, ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e) [solutions, infodict, ier, message] = opt.fsolve(SS_fsolve, guesses, args=args_, xtol=mindist_SS, full_output=True) [wguess, rguess, T_Hguess, factorguess] = solutions fsolve_flag = True solutions = SS_solver(b_guess.reshape(S, J), n_guess.reshape(S, J), wguess, rguess, T_Hguess, factorguess, chi_params[ J:], chi_params[:J], income_tax_parameters, ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e, fsolve_flag) if calibrate_model: global Nfeval, value_all, chi_params_all Nfeval = 1 value_all = np.zeros((10000)) chi_params_all = np.zeros((S+J,10000)) outputs = {'solutions': solutions, 'chi_params': chi_params} ss_init_path = os.path.join( output_dir, "Saved_moments/SS_init_solutions.pkl") pickle.dump(outputs, open(ss_init_path, "wb")) function_to_minimize_X = lambda x: function_to_minimize( x, chi_params, income_tax_parameters, ss_parameters, iterative_params, omega_SS, rho, lambdas, tau_bq, e, output_dir) bnds = tuple([(1e-6, None)] * (S + J)) # In order to scale all the parameters to estimate in the minimizer, we have the minimizer fit a vector of ones that # will be multiplied by the chi initial guesses inside the function. Otherwise, if chi^b_j=1e5 for some j, and the # minimizer perturbs that value by 1e-8, the % difference will be extremely small, outside of the tolerance of the # minimizer, and it will not change that parameter.
chi_params_scalars = np.ones(S + J) #chi_params_scalars = opt.minimize(function_to_minimize_X, chi_params_scalars, # method='TNC', tol=MINIMIZER_TOL, bounds=bnds, callback=callbackF(chi_params_scalars), options=MINIMIZER_OPTIONS).x # chi_params_scalars = opt.minimize(function_to_minimize, chi_params_scalars, # args=(chi_params, income_tax_parameters, ss_parameters, iterative_params, # omega_SS, rho, lambdas, tau_bq, e, output_dir), # method='TNC', tol=MINIMIZER_TOL, bounds=bnds, # callback=callbackF(chi_params_scalars,chi_params, income_tax_parameters, # ss_parameters, iterative_params, omega_SS, rho, lambdas, tau_bq, e, output_dir), # options=MINIMIZER_OPTIONS).x chi_params_scalars = opt.minimize(function_to_minimize, chi_params_scalars, args=(chi_params, income_tax_parameters, ss_parameters, iterative_params, omega_SS, rho, lambdas, tau_bq, e, output_dir), method='TNC', tol=MINIMIZER_TOL, bounds=bnds, options=MINIMIZER_OPTIONS).x chi_params *= chi_params_scalars print 'The final scaling params', chi_params_scalars print 'The final bequest parameter values:', chi_params solutions_dict = pickle.load(open(ss_init_path, "rb")) solutions = solutions_dict['solutions'] b_guess = solutions[:S * J] n_guess = solutions[S * J:2 * S * J] wguess, rguess, factorguess, T_Hguess = solutions[2 * S * J:] guesses = [wguess, rguess, T_Hguess, factorguess] args_ = (b_guess.reshape(S, J), n_guess.reshape(S, J), chi_params[J:], chi_params[:J], income_tax_parameters, ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e) [solutions, infodict, ier, message] = opt.fsolve(SS_fsolve, guesses, args=args_, xtol=mindist_SS, full_output=True) [wguess, rguess, T_Hguess, factorguess] = solutions fsolve_flag = True solutions = SS_solver(b_guess.reshape(S, J), n_guess.reshape(S, J), wguess, rguess, T_Hguess, factorguess, chi_params[ J:], chi_params[:J], income_tax_parameters, ss_parameters, iterative_params, tau_bq, rho, lambdas, omega_SS, e, fsolve_flag) ''' ------------------------------------------------------------------------ Generate the SS values of variables, including euler errors ------------------------------------------------------------------------ ''' if get_baseline: outputs = {'solutions': solutions, 'chi_params': chi_params} ss_init_dir = os.path.join( output_dir, "Saved_moments/SS_baseline_solutions.pkl") pickle.dump(outputs, open(ss_init_dir, "wb")) else: outputs = {'solutions': solutions, 'chi_params': chi_params} ss_exp_dir = os.path.join( output_dir, "Saved_moments/SS_reform_solutions.pkl") pickle.dump(outputs, open(ss_exp_dir, "wb")) bssmat = solutions[0:(S - 1) * J].reshape(S - 1, J) bq = solutions[(S - 1) * J:S * J] # technically, this is just the intentional bequests - wealth of those with max age bssmat_s = np.array(list(np.zeros(J).reshape(1, J)) + list(bssmat)) bssmat_splus1 = np.array(list(bssmat) + list(bq.reshape(1, J))) nssmat = solutions[S * J:2 * S * J].reshape(S, J) wss, rss, factor_ss, T_Hss = solutions[2 * S * J:] Kss = household.get_K(bssmat_splus1, omega_SS.reshape( S, 1), lambdas, g_n_ss, 'SS') Lss = firm.get_L(e, nssmat, omega_SS.reshape(S, 1), lambdas, 'SS') Yss = firm.get_Y(Kss, Lss, ss_parameters) Iss = firm.get_I(Kss, Kss, delta, g_y, g_n_ss) theta = np.zeros(J) #tax.replacement_rate_vals( #nssmat, wss, factor_ss, e, J, omega_SS.reshape(S, 1), lambdas) BQss = household.get_BQ(rss, bssmat_splus1, omega_SS.reshape( S, 1), lambdas, rho.reshape(S, 1), g_n_ss, 'SS') b_s = np.array(list(np.zeros(J).reshape((1, J))) + list(bssmat)) etr_params_3D = 
np.tile(np.reshape(etr_params,(S,1,etr_params.shape[1])),(1,J,1)) mtrx_params_3D = np.tile(np.reshape(mtrx_params,(S,1,mtrx_params.shape[1])),(1,J,1)) etr_params_extended = np.append(etr_params,np.reshape(etr_params[-1,:],(1,etr_params.shape[1])),axis=0)[1:,:] etr_params_extended_3D = np.tile(np.reshape(etr_params_extended,(S,1,etr_params_extended.shape[1])),(1,J,1)) mtry_params_extended = np.append(mtry_params,np.reshape(mtry_params[-1,:],(1,mtry_params.shape[1])),axis=0)[1:,:] mtry_params_extended_3D = np.tile(np.reshape(mtry_params_extended,(S,1,mtry_params_extended.shape[1])),(1,J,1)) e_extended = np.array(list(e) + list(np.zeros(J).reshape(1, J))) nss_extended = np.array(list(nssmat) + list(np.zeros(J).reshape(1, J))) mtry_ss = tax.MTR_capital(rss, bssmat_splus1, wss, e_extended[1:,:], nss_extended[1:,:], factor_ss, analytical_mtrs, etr_params_extended_3D, mtry_params_extended_3D) mtrx_ss = tax.MTR_labor(rss, bssmat_s, wss, e, nssmat, factor_ss, analytical_mtrs, etr_params_3D, mtrx_params_3D) #np.savetxt("mtr_ss_capital.csv", mtry_ss, delimiter=",") #np.savetxt("mtr_ss_labor.csv", mtrx_ss, delimiter=",") taxss_params = (J,S, retire, np.tile(np.reshape(etr_params,(S,1,etr_params.shape[1])),(1,J,1)), h_wealth, p_wealth, m_wealth, tau_payroll) taxss = tax.total_taxes(rss, b_s, wss, e, nssmat, BQss, lambdas, factor_ss, T_Hss, None, 'SS', False, taxss_params, theta, tau_bq) cssmat = household.get_cons(rss, b_s, wss, e, nssmat, BQss.reshape( 1, J), lambdas.reshape(1, J), bssmat_splus1, ss_parameters, taxss) Css = household.get_C(cssmat, omega_SS.reshape(S, 1), lambdas, 'SS') resource_constraint = Yss - (Css + Iss) print 'Resource Constraint Difference:', resource_constraint constraint_params = ltilde household.constraint_checker_SS(bssmat, nssmat, cssmat, constraint_params) b_s = np.array(list(np.zeros(J).reshape((1, J))) + list(bssmat)) b_splus1 = bssmat_splus1 b_splus2 = np.array(list(bssmat_splus1[1:]) + list(np.zeros(J).reshape((1, J)))) chi_b = np.tile(chi_params[:J].reshape(1, J), (S, 1)) chi_n = np.array(chi_params[J:]) euler_savings = np.zeros((S, J)) euler_labor_leisure = np.zeros((S, J)) for j in xrange(J): euler_savings[:, j] = household.euler_savings_func(wss, rss, e[:, j], nssmat[:, j], b_s[:, j], b_splus1[:, j], b_splus2[:, j], BQss[j], factor_ss, T_Hss, chi_b[:, j], income_tax_parameters, ss_parameters, theta[j], tau_bq[j], rho, lambdas[j]) euler_labor_leisure[:, j] = household.euler_labor_leisure_func(wss, rss, e[:, j], nssmat[:, j], b_s[:, j], b_splus1[:, j], BQss[j], factor_ss, T_Hss, chi_n, income_tax_parameters, ss_parameters, theta[j], tau_bq[j], lambdas[j]) ''' ------------------------------------------------------------------------ Save the values in various ways, depending on the stage of the simulation, to be used in TPI or graphing functions ------------------------------------------------------------------------ ''' # Pickle variables output = {'Kss': Kss, 'bssmat': bssmat, 'Lss': Lss, 'Css':Css, 'nssmat': nssmat, 'Yss': Yss, 'wss': wss, 'rss': rss, 'theta': theta, 'BQss': BQss, 'factor_ss': factor_ss, 'bssmat_s': bssmat_s, 'cssmat': cssmat, 'bssmat_splus1': bssmat_splus1, 'T_Hss': T_Hss, 'euler_savings': euler_savings, 'euler_labor_leisure': euler_labor_leisure, 'chi_n': chi_n, 'chi_b': chi_b} utils.mkdirs(os.path.join(output_dir, "SSinit")) ss_init_dir = os.path.join(output_dir, "SSinit/ss_init_vars.pkl") pickle.dump(output, open(ss_init_dir, "wb")) bssmat_init = bssmat_splus1 nssmat_init = nssmat # Pickle variables for TPI initial values output2 = 
{'bssmat_init': bssmat_init, 'nssmat_init': nssmat_init} ss_init_tpi = os.path.join(output_dir, "SSinit/ss_init_tpi_vars.pkl") pickle.dump(output2, open(ss_init_tpi, "wb")) return output
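# The SS code above broadcasts (S, P) tax-function parameter arrays across
# the J ability types with reshape + tile (e.g. etr_params_3D,
# mtrx_params_3D). A small illustration of that pattern with toy shapes;
# the values of S, J, and P below are assumptions for the example only.
import numpy as np

S, J, P = 4, 3, 2
etr_params = np.arange(S * P, dtype=float).reshape(S, P)
# Insert a singleton ability axis, then repeat it J times: (S, P) -> (S, J, P).
etr_params_3D = np.tile(etr_params.reshape(S, 1, P), (1, J, 1))
assert etr_params_3D.shape == (S, J, P)
# Every ability type j sees the same age-s parameter vector:
assert np.allclose(etr_params_3D[:, 0, :], etr_params_3D[:, 2, :])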