def get_metrics(config, algos, labels=None, format="raw", indicators=list(INDICATORS.keys())):
    results = []
    labels = list(labels) if labels else []
    for i, algo in enumerate(algos):
        if algo.isdigit():
            portfolio_changes = _load_from_summary(algo, config)
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            portfolio_changes = execute_backtest(algo, config)
            logging.info("finish executing " + algo)
        indicator_result = {}
        for indicator in indicators:
            indicator_result[indicator] = INDICATORS[indicator](portfolio_changes)
        results.append(indicator_result)
        if len(labels) <= i:
            labels.append(NAMES[algo])
    dataframe = pd.DataFrame(results, index=labels)
    metrics = {}
    sharpe_ratio = dataframe.iloc[0]['sharpe ratio']
    max_drawdown = dataframe.iloc[0]['max drawdown']  # not sure if it's this attribute
    fapv = dataframe.iloc[0]['portfolio value']
    # fapv = dataframe.iloc[0]['average']
    metrics['fapv'] = fapv
    metrics['sharpe'] = sharpe_ratio
    metrics['mdd'] = max_drawdown
    return metrics
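# Hedged usage sketch for get_metrics(): load_config() taking no argument and the
# algorithm name "crp" are assumptions for illustration, not guaranteed by this file.
def _example_get_metrics():
    config = load_config()
    metrics = get_metrics(config, algos=["crp"], labels=["CRP"])
    # expected shape: {'fapv': float, 'sharpe': float, 'mdd': float}
    print(metrics)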
def plot_backtest(config, algos, labels=None):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    """
    results = []
    for i, algo in enumerate(algos):
        if algo.isdigit():
            results.append(np.cumprod(_load_from_summary(algo, config)))
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            results.append(np.cumprod(execute_backtest(algo, config)))
            logging.info("finish executing " + algo)

    labels = list(labels) if labels else []
    start, end = _extract_test(config)
    timestamps = np.linspace(start, end, len(results[0]))
    dates = [datetime.datetime.fromtimestamp(int(ts) - int(ts) % config["input"]["global_period"])
             for ts in timestamps]
    weeks = mdates.WeekdayLocator()
    days = mdates.DayLocator()
    rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 8})
    """
    styles = [("-", None), ("--", None), ("", "+"), (":", None),
              ("", "o"), ("", "v"), ("", "*")]
    """
    fig, ax = plt.subplots()
    fig.set_size_inches(9, 5)
    for i, pvs in enumerate(results):
        if len(labels) > i:
            label = labels[i]
        else:
            label = NAMES[algos[i]]
        ax.semilogy(dates, pvs, linewidth=1, label=label)
        # ax.plot(dates, pvs, linewidth=1, label=label)
    plt.ylabel("portfolio value $p_t/p_0$", fontsize=12)
    plt.xlabel("time", fontsize=12)
    xfmt = mdates.DateFormatter("%m-%d %H:%M")
    ax.xaxis.set_major_locator(weeks)
    ax.xaxis.set_minor_locator(days)
    datemin = dates[0]
    datemax = dates[-1]
    ax.set_xlim(datemin, datemax)
    ax.xaxis.set_major_formatter(xfmt)
    plt.grid(True)
    plt.tight_layout()
    ax.legend(loc="upper left", prop={"size": 10})
    fig.autofmt_xdate()
    plt.savefig("result.eps", bbox_inches='tight', pad_inches=0)
    plt.show()
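# Hedged usage sketch for plot_backtest(): "1" refers to a pgportfolio training
# result index and "olmar" to a traditional algorithm name; both are assumptions
# for illustration.
def _example_plot_backtest():
    config = load_config()
    # underscores in labels normally arrive via the CLI; here we pass labels directly
    plot_backtest(config, algos=["1", "olmar"], labels=["nnagent", "OLMAR"])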
def table_backtest(config, algos, labels=None, format="raw", indicators=list(INDICATORS.keys())):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    @:param format: "raw", "html", "latex" or "csv". If it is "csv", the result will be saved
    in a csv file; otherwise it is only printed out.
    @:return: a string of html or latex code
    """
    results = []
    labels = list(labels) if labels else []
    for i, algo in enumerate(algos):
        logging.debug("algo: %s", algo)
        if algo.isdigit():
            portfolio_changes, turn_over = _load_from_summary(algo, config)
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            portfolio_changes, turn_over = execute_backtest(algo, config)
            logging.info("finish executing " + algo)
        indicator_result = {}
        for indicator in indicators:
            indicator_result[indicator] = INDICATORS[indicator](portfolio_changes)
        indicator_result["turn over"] = turn_over
        results.append(indicator_result)
        if len(labels) <= i:
            labels.append(NAMES[algo])
    dataframe = pd.DataFrame(results, index=labels)
    start, end = _extract_test(config)
    start = datetime.datetime.fromtimestamp(start - start % config["input"]["global_period"])
    end = datetime.datetime.fromtimestamp(end - end % config["input"]["global_period"])
    print("backtest start from " + str(start) + " to " + str(end))
    if format == "html":
        print(dataframe.to_html())
    elif format == "latex":
        print(dataframe.to_latex())
    elif format == "raw":
        print(dataframe.to_string())
    elif format == "csv":
        dataframe.to_csv("./compare" + end.strftime("%Y-%m-%d") + ".csv")
    else:
        raise ValueError("The format " + format + " is not supported")
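# Hedged usage sketch for table_backtest(): with format="csv" the table is written
# to ./compare<end-date>.csv in the working directory, per the branch above; the
# algorithm names are assumptions for illustration.
def _example_table_backtest():
    config = load_config()
    table_backtest(config, algos=["1", "olmar", "ons"], format="csv")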
def main():
    parser = build_parser()
    options = parser.parse_args()
    if not os.path.exists("./train_package"):
        os.makedirs("./train_package")
    if not os.path.exists("./database"):
        os.makedirs("./database")

    if options.mode == "train":
        import pgportfolio.autotrain.training
        pgportfolio.autotrain.training.train_all(int(options.processes), options.device,
                                                 options.initial_asset)
    elif options.mode == "generate":
        import pgportfolio.autotrain.generate as generate
        logging.basicConfig(level=logging.INFO)
        generate.add_packages(load_config(), int(options.repeat))
    elif options.mode == "backtest":
        config = _config_by_algo(options.algo)
        _set_logging_by_algo(logging.DEBUG, logging.DEBUG, options.algo, "backtestlog")
        execute_backtest(options.algo, config)
    elif options.mode == "save_test_data":
        # This is used to export the test data
        save_test_data(load_config(options.folder))
    elif options.mode == "plot":
        logging.basicConfig(level=logging.INFO)
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.plot_backtest(load_config(), algos, labels)
    elif options.mode == "table":
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.table_backtest(load_config(), algos, labels, format=options.format)
def table_backtest(config, algos, labels=None, format="raw", indicators=list(INDICATORS.keys())):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    @:param format: "raw", "html", "latex" or "csv". If it is "csv", the result will be saved
    in a csv file; otherwise it is only printed out.
    @:return: a string of html or latex code
    """
    results = []
    labels = list(labels) if labels else []
    for i, algo in enumerate(algos):
        if algo.isdigit():
            portfolio_changes = _load_from_summary(algo, config)
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            portfolio_changes = execute_backtest(algo, config)
            logging.info("finish executing " + algo)
        indicator_result = {}
        for indicator in indicators:
            indicator_result[indicator] = INDICATORS[indicator](portfolio_changes)
        results.append(indicator_result)
        if len(labels) <= i:
            labels.append(NAMES[algo])
    dataframe = pd.DataFrame(results, index=labels)
    start, end = _extract_test(config)
    start = datetime.datetime.fromtimestamp(start - start % config["input"]["global_period"])
    end = datetime.datetime.fromtimestamp(end - end % config["input"]["global_period"])
    print("backtest start from " + str(start) + " to " + str(end))
    if format == "html":
        print(dataframe.to_html())
    elif format == "latex":
        print(dataframe.to_latex())
    elif format == "raw":
        print(dataframe.to_string())
    elif format == "csv":
        dataframe.to_csv("./compare" + end.strftime("%Y-%m-%d") + ".csv")
    else:
        raise ValueError("The format " + format + " is not supported")
def main():
    parser = build_parser()
    options = parser.parse_args()
    if not os.path.exists("./train_package"):
        os.makedirs("./train_package")
    if not os.path.exists("./database"):
        os.makedirs("./database")

    if options.mode == "train":
        import pgportfolio.autotrain.training
        if not options.algo:
            pgportfolio.autotrain.training.train_all(int(options.processes), options.device)
        else:
            for folder in options.folder:
                raise NotImplementedError()
    elif options.mode == "generate":
        import pgportfolio.autotrain.generate as generate
        logging.basicConfig(level=logging.INFO)
        generate.add_packages(load_config(), int(options.repeat))
    elif options.mode == "download_data":
        from pgportfolio.marketdata.datamatrices import DataMatrices
        with open("./pgportfolio/net_config.json") as file:
            config = json.load(file)
        config = preprocess_config(config)
        start = time.mktime(datetime.strptime(config["input"]["start_date"], "%Y/%m/%d").timetuple())
        end = time.mktime(datetime.strptime(config["input"]["end_date"], "%Y/%m/%d").timetuple())
        DataMatrices(start=start,
                     end=end,
                     feature_number=config["input"]["feature_number"],
                     window_size=config["input"]["window_size"],
                     online=True,
                     period=config["input"]["global_period"],
                     volume_average_days=config["input"]["volume_average_days"],
                     coin_filter=config["input"]["coin_number"],
                     is_permed=config["input"]["is_permed"],
                     test_portion=config["input"]["test_portion"],
                     portion_reversed=config["input"]["portion_reversed"])
    elif options.mode == "backtest":
        config = _config_by_algo(options.algo)
        _set_logging_by_algo(logging.DEBUG, logging.DEBUG, options.algo, "backtestlog")
        execute_backtest(options.algo, config)
    elif options.mode == "save_test_data":
        # This is used to export the test data
        save_test_data(load_config(options.folder))
    elif options.mode == "plot":
        logging.basicConfig(level=logging.INFO)
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.plot_backtest(load_config(), algos, labels)
    elif options.mode == "table":
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.table_backtest(load_config(), algos, labels, format=options.format)
def backtest(algo):
    config = config_by_algo(algo)
    set_logging_by_algo(logging.DEBUG, logging.DEBUG, algo, "backtestlog")
    portfolio_change_arr = execute_backtest(algo, config)  # per-period change mu * w * y
    dic = {'portfolio_return': portfolio_change_arr}
    return dic
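# Hedged usage sketch for backtest(): the per-period changes it returns multiply up
# to the accumulated portfolio value, matching the np.cumprod calls in the plotting
# functions above; "crp" is an assumed algorithm name.
def _example_backtest():
    result = backtest("crp")
    changes = result['portfolio_return']  # per-period change mu * w * y
    final_value = np.prod(changes)        # accumulated portfolio value p_T / p_0
    print(final_value)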
def main():
    parser = build_parser()
    options = parser.parse_args()
    if not os.path.exists("./train_package"):
        os.makedirs("./train_package")
    if not os.path.exists("./database"):
        os.makedirs("./database")

    if options.mode == "train":
        import pgportfolio.autotrain.training
        if not options.algo:
            pgportfolio.autotrain.training.train_all(int(options.processes), options.device)
        else:
            for folder in options.folder:
                raise NotImplementedError()
    elif options.mode == "generate":
        import pgportfolio.autotrain.generate as generate
        logging.basicConfig(level=logging.INFO)
        generate.add_packages(load_config(), int(options.repeat))
    elif options.mode == "download_data":
        from pgportfolio.marketdata.datamatrices import DataMatrices
        with open("./pgportfolio/net_config.json") as file:
            config = json.load(file)
        config = preprocess_config(config)
        start = time.mktime(datetime.strptime(config["input"]["start_date"], "%Y/%m/%d").timetuple())
        end = time.mktime(datetime.strptime(config["input"]["end_date"], "%Y/%m/%d").timetuple())
        DataMatrices(start=start,
                     end=end,
                     feature_number=config["input"]["feature_number"],
                     window_size=config["input"]["window_size"],
                     online=True,
                     period=config["input"]["global_period"],
                     volume_average_days=config["input"]["volume_average_days"],
                     coin_filter=config["input"]["coin_number"],
                     is_permed=config["input"]["is_permed"],
                     test_portion=config["input"]["test_portion"],
                     portion_reversed=config["input"]["portion_reversed"])
    elif options.mode == "backtest":
        config = _config_by_algo(options.algo)
        _set_logging_by_algo(logging.DEBUG, logging.DEBUG, options.algo, "backtestlog")
        execute_backtest(options.algo, config)
    elif options.mode == "save_test_data":
        # This is used to export the test data
        save_test_data(load_config(options.folder))
    elif options.mode == "plot":
        logging.basicConfig(level=logging.INFO)
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.plot_backtest(load_config(), algos, labels)
    elif options.mode == "table":
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.table_backtest(load_config(), algos, labels, format=options.format)
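# Hedged CLI sketch covering the modes dispatched above; the entry-point file name
# main.py is an assumption for illustration:
#   python main.py --mode=download_data
#   python main.py --mode=generate --repeat=1
#   python main.py --mode=train --processes=1 --device=cpu
#   python main.py --mode=backtest --algo=crp
#   python main.py --mode=plot --algos=crp,olmar,1 --labels=crp,olmar,nnagent
#   python main.py --mode=table --algos=1,olmar,ons --format=csv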
def plot_backtest(config, algos, labels=None, datess=None, coinlist=None):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    """
    results = []
    # go through all the named algos
    for i, algo in enumerate(algos):
        if algo.isdigit():
            # _load_from_summary (in plot.py) yields per-period changes; cumprod gives portfolio value
            results.append(np.cumprod(_load_from_summary(algo, config, "result")))
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            results.append(np.cumprod(execute_backtest(algo, config)))
            logging.info("finish executing " + algo)

    labels = list(labels) if labels else []
    start, end = _extract_test(config)
    # evenly spaced timestamps between start and end
    timestamps = np.linspace(start, end, len(results[0]))
    dates = [datetime.datetime.fromtimestamp(int(ts) - int(ts) % config["input"]["global_period"])
             for ts in timestamps]
    weeks = mdates.WeekdayLocator()
    days = mdates.DayLocator()
    rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 8})
    """
    styles = [("-", None), ("--", None), ("", "+"), (":", None),
              ("", "o"), ("", "v"), ("", "*")]
    """
    fig, ax = plt.subplots()
    fig.set_size_inches(9, 5)
    for i, pvs in enumerate(results):
        if len(labels) > i:
            label = labels[i]
        else:
            label = NAMES[algos[i]]
        ax.semilogy(dates, pvs, linewidth=1, label=label)
        # ax.plot(dates, pvs, linewidth=1, label=label)
    plt.ylabel("portfolio value $p_t/p_0$", fontsize=12)
    plt.xlabel("time", fontsize=12)
    xfmt = mdates.DateFormatter("%m-%d %H:%M")
    ax.xaxis.set_major_locator(weeks)
    ax.xaxis.set_minor_locator(days)
    datemin = dates[0]
    datemax = dates[-1]
    ax.set_xlim(datemin, datemax)
    ax.xaxis.set_major_formatter(xfmt)
    plt.grid(True)
    plt.tight_layout()
    # plt.axhline(y=1.28293, color='r', linestyle='--', label='y = 1.28293')
    # plt.axhline(y=2.12875, color='r', linestyle='--', label='y = 2.12875')
    # plt.axhline(y=1.15624, color='r', linestyle='--', label='y = 1.15624')
    # plt.axhline(y=1.71, color='r', linestyle='--', label='y = 1.71')
    ax.legend(loc="upper left", prop={"size": 10})
    fig.autofmt_xdate()
    plt.savefig("result.eps", bbox_inches='tight', pad_inches=0)
    plt.show()
    plt.close()

    for i, algo in enumerate(algos):
        weights = _load_from_summary(algo, config, "weight")
        weights = np.cumsum(weights, 1)
        if datess is not None:
            startdate = datetime.datetime.strptime(datess[0], '%Y %m %d %H %M')
            enddate = datetime.datetime.strptime(datess[1], '%Y %m %d %H %M')
            startpoint = dates.index(startdate)
            endpoint = dates.index(enddate)
        else:
            startpoint = 0
            endpoint = len(timestamps) - 2
        fig = plt.figure(labels[i] if i < len(labels) else NAMES[algos[i]])
        """
        startdate_in_sec = int(timestamps[startpoint]) - int(timestamps[startpoint]) % config["input"]["global_period"]
        enddate_in_sec = int(timestamps[endpoint]) - int(timestamps[startpoint]) % config["input"]["global_period"]
        for coin in coinlist:
            panel = get_panel(start=startdate_in_sec, end=enddate_in_sec, period=1800, coinlist=[coin])
            coinchart = panel['SUM(volume)'].tolist()
            fig = plt.figure(coin)
            ax1 = fig.add_subplot(111)
            print(len(dates[startpoint:endpoint + 1]))
            print(len(coinchart[:-32]))
            ax1.plot(dates[startpoint:endpoint + 1], coinchart[:-32], label=coin)
            ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.07), fancybox=True, shadow=True, ncol=1)
            plt.show()
            plt.close()
        """
        ax1 = fig.add_subplot(111)
        ax2 = ax1.twiny()
        if coinlist is not None:
            ax3 = ax1.twiny()
            ax4 = ax1.twiny()
            ax5 = ax1.twiny()
        number_of_coins = config["input"]["coin_number"] + 1
        xy = []
        labelss = get_coin_name_list(config=config, online=False)
        labelss.insert(0, 'BTC')
        colors = ["#7e1e9c", "#15b01a", "#0343df", "#ff81c0", "#06470c", "#e50000",
                  "#95d0fc", "#029386", "#f97306", "#96f97b", "#c20078", "#ffff14"]
        # prepare polygon points; the hard-coded index 11 assumes exactly 12 assets
        for j in range(number_of_coins):
            xy.append([timestamps[startpoint], 0])
            for y in range(startpoint, endpoint + 1):
                xy.append([timestamps[y], weights[y][11 - j]])
            xy.append([timestamps[endpoint + 1], 0])
            xy = np.asarray(xy)
            polygon = Polygon(xy, color=colors[11 - j], closed=True, label=labelss[11 - j])
            ax1.add_patch(polygon)
            xy = []
        ax1.set_xlabel(r"Timestamps")
        plt.ylabel("portfolio weights", fontsize=12)
        datemin = timestamps[startpoint]
        datemax = timestamps[endpoint]
        ax1.set_xlim(datemin, datemax)
        ax2.plot(dates[startpoint:endpoint + 1], np.ones(len(dates[startpoint:endpoint + 1])))
        ax2.cla()
        xfmt = mdates.DateFormatter("%m-%d %H:%M")
        ax2.xaxis.set_major_locator(weeks)
        ax2.xaxis.set_minor_locator(days)
        datemin = dates[startpoint]
        datemax = dates[endpoint]
        ax2.set_xlim(datemin, datemax)
        ax2.set_ylim(0, 1)
        ax1.set_ylim(0, 1)
        ax2.xaxis.set_major_formatter(xfmt)
        ax2.set_xlabel(r"Dates")
        if coinlist is not None:
            startdate_in_sec = int(timestamps[startpoint]) - int(timestamps[startpoint]) % config["input"]["global_period"]
            enddate_in_sec = int(timestamps[endpoint]) - int(timestamps[startpoint]) % config["input"]["global_period"]
            coinchart = []
            for coin in coinlist:
                panel = get_panel(start=startdate_in_sec, end=enddate_in_sec, period=1800, coinlist=[coin])
                coinchart.append(panel['close'].tolist())
            coinchart[2] = [1 / x for x in coinchart[2]]
            # normalize each of the three price series to its maximum
            for maxim in range(3):
                maximum = max(coinchart[maxim])
                coinchart[maxim] = [x / maximum for x in coinchart[maxim]]
            ax3.plot(dates[startpoint:endpoint + 1], coinchart[0][:-3], label=coinlist[0], color='pink', linewidth=3.0)
            ax4.plot(dates[startpoint:endpoint + 1], coinchart[1][:-3], label=coinlist[1], color='black', linewidth=3.0)
            ax5.plot(dates[startpoint:endpoint + 1], coinchart[2][:-3], label=coinlist[2], color='navy', linewidth=3.0)
            ax3.legend(loc='upper center', bbox_to_anchor=(0, 0.05), fancybox=True, shadow=True)
            ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 0.05), fancybox=True, shadow=True)
            ax5.legend(loc='upper center', bbox_to_anchor=(1, 0.05), fancybox=True, shadow=True)
        ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=number_of_coins)
        plt.show()
        plt.close()
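# Hedged usage sketch for the extended plot_backtest() above: datess entries must
# parse with '%Y %m %d %H %M' and land exactly on plotted dates, and coinlist must
# contain exactly three coins (the code indexes coinchart[0..2]); all concrete
# values here are assumptions for illustration.
def _example_plot_weights():
    config = load_config()
    plot_backtest(config, algos=["1"], labels=["nnagent"],
                  datess=["2017 05 01 00 00", "2017 05 07 00 00"],
                  coinlist=["ETH", "XRP", "LTC"])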
def main(logPath, device):
    parser = build_parser()
    options = parser.parse_args()
    '''
    options.folder = oneKey
    options.stockList = stockList
    options.featureList = featureList
    options.start_date = startDate
    options.end_date = endDate
    '''
    if not os.path.exists("./database"):
        os.makedirs("./database")
    # options.repeat = 1
    if options.mode == "train":
        # train the model
        if not options.algo:
            save_path = logPath + str(options.folder) + "/netfile"
            # load the configuration file
            with open(logPath + str(options.folder) + "/net_config.json") as file:
                config_json = json.load(file)
            config = preprocess_config(config_json)
            log_file_dir = logPath + str(options.folder) + "/tensorboard"
            # set the logging levels
            logfile_level = logging.DEBUG
            console_level = logging.INFO
            logging.basicConfig(filename=log_file_dir.replace("tensorboard", "programlog"),
                                level=logfile_level)
            console = logging.StreamHandler()
            console.setLevel(console_level)
            logging.getLogger().addHandler(console)
            # initialize the trainer
            trainer = TraderTrainer(config, options.stockList, options.featureList,
                                    options.start_date, options.end_date,
                                    save_path=save_path, device=device)
            # train the network
            trainer.train_net(log_file_dir=log_file_dir, index=str(options.folder))
        else:
            for folder in options.folder:
                raise NotImplementedError()
    # generate config packages under the given path; to change the configuration,
    # edit PGPortfolio/pgportfolio/net_config.json directly
    elif options.mode == "generate":
        import pgportfolio.autotrain.generate as generate
        logging.basicConfig(level=logging.INFO)
        config_ = load_config()
        train_dir = logPath
        generate.add_packages(train_dir, config_, int(options.repeat))
    elif options.mode == "download_data":
        from pgportfolio.marketdata.datamatrices import DataMatrices
        with open("./pgportfolio/net_config.json") as file:
            config = json.load(file)
        config = preprocess_config(config)
        start = time.mktime(datetime.strptime(options.start_date, "%Y/%m/%d").timetuple())
        end = time.mktime(datetime.strptime(options.end_date, "%Y/%m/%d").timetuple())
        DataMatrices(start=start,
                     end=end,
                     feature_number=len(options.featureList),
                     window_size=config["input"]["window_size"],
                     online=True,
                     period=config["input"]["global_period"],
                     volume_average_days=config["input"]["volume_average_days"],
                     coin_filter=len(options.stockList),
                     is_permed=config["input"]["is_permed"],
                     test_portion=config["input"]["test_portion"],
                     portion_reversed=config["input"]["portion_reversed"])
    elif options.mode == "backtest":
        # load the config for this algorithm
        config = _config_by_algo(options.algo)
        # set the log path
        _set_logging_by_algo(logging.DEBUG, logging.DEBUG, options.algo, "backtestlog")
        # run the backtest; the number of steps equals the length of the training set
        values = execute_backtest(options.algo, config)
    elif options.mode == "save_test_data":
        # This is used to export the test data
        save_test_data(load_config(options.folder))
    elif options.mode == "plot":
        logging.basicConfig(level=logging.INFO)
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.plot_backtest(load_config(), algos, labels)
    elif options.mode == "table":
        algos = options.algos.split(",")
        if options.labels:
            labels = options.labels.replace("_", " ").split(",")
        else:
            labels = algos
        plot.table_backtest(load_config(), algos, labels, format=options.format)
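# Hedged invocation sketch for this variant: main() still reads its mode via
# build_parser(), so sys.argv must carry the flags; the "--folder=1" run index and
# the train_package layout are assumptions for illustration.
def _example_main_train_variant():
    import sys
    sys.argv = ["main.py", "--mode=train", "--folder=1"]
    main(logPath="./train_package/", device="cpu")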
def plot_backtest(config, algos, labels=None):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    """
    results = []
    for i, algo in enumerate(algos):
        logging.debug("algo: %s", algo)
        if algo.isdigit():
            # np.cumprod turns per-period changes into the accumulated asset change vector
            results.append(np.cumprod(_load_from_summary(algo, config)[0]))
            logging.info("load index " + algo + " from csv file")
        else:
            logging.info("start executing " + algo)
            results.append(np.cumprod(execute_backtest(algo, config)[0]))
            logging.info("finish executing " + algo)

    labels = list(labels) if labels else []
    start, end = _extract_test(config)
    timestamps = np.linspace(start, end + 10, len(results[0]) // 20 + 1)
    dates = [datetime.datetime.fromtimestamp(int(ts) - int(ts) % config["input"]["global_period"])
             for ts in timestamps]
    weeks = mdates.WeekdayLocator()
    days = mdates.DayLocator()
    rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 8})
    """
    styles = [("-", None), ("--", None), ("", "+"), (":", None),
              ("", "o"), ("", "v"), ("", "*")]
    """
    # c = ["royalblue", "r", "coral", "g"]  # for the parameter of gamma
    c = ["black", "brown", "g", "coral", "royalblue", "deepskyblue", "darkviolet", "r"]  # for different feature extractors
    # c = ["black", "black", "black", "black", "black", "black", "black", "black"]
    # style = ["-+", ":+", ":x", ":v", "--*", "--s", "-D", "-o"]
    # style = ["-", "-", "-", "-", "-", "-", "-", "-"]
    # style = ["--", "--", "--", "--", "--", "--", "--", "--"]
    style = ["-^", "-d", "-s", "->", "-*", "-<", "-v", "-o"]
    marker = ["+", "+", "x", "v", "*", "s", "D", "o"]
    fig, ax = plt.subplots()
    fig.set_size_inches(12, 7)  # alternatives: (9, 6), (18, 10)
    for i, pvs in enumerate(results):
        if len(labels) > i:
            label = labels[i]
        else:
            label = NAMES[algos[i]]
        # print(np.array(pvs)[:2:])
        # down-sample the curve: keep every 20th point
        length = len(pvs)
        pvs = pvs[range(0, length, 20)]
        # ax.semilogy(dates, pvs, linewidth=3, label=label)  # 3 for gamma, 2 for feature extractors
        ax.semilogy(dates, pvs, style[i], color=c[i], linewidth=2, markersize=6,
                    markevery=slice(4, length, 7), label=label)
        # ax.semilogx(dates, pvs, style[i], color=c[i], linewidth=1.5, label=label)
        # ax.plot(dates, pvs, style[i], color=c[i], linewidth=1.5, label=label)
    plt.ylabel("APV", fontsize=18)
    plt.xlabel("time", fontsize=18)
    xfmt = mdates.DateFormatter("%m-%d %H:%M")
    ax.xaxis.set_major_locator(weeks)
    ax.xaxis.set_minor_locator(days)
    datemin = dates[0]
    datemax = dates[-1]
    ax.set_xlim(datemin, datemax)
    ax.xaxis.set_major_formatter(xfmt)
    plt.grid(True)
    plt.tight_layout()  # adjust exterior edges
    ax.legend(loc="upper left", prop={"size": 16})
    # adaptively adjust the dates on the x axis
    fig.autofmt_xdate()
    plt.tick_params(labelsize=12)
    # plt.savefig("result.eps", bbox_inches='tight', pad_inches=0)
    # name the output file after the backtest date range
    file_name = config["input"]["start_date"] + "_" + config["input"]["end_date"] + ".eps"
    file_name = file_name.replace("/", "")
    plt.savefig(file_name, bbox_inches='tight', pad_inches=0)
    plt.show()
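# Hedged sanity-check sketch (not part of the original code): the date axis above
# is built with len(results[0]) // 20 + 1 points, while the curve keeps every 20th
# point via range(0, length, 20), which yields ceil(length / 20) points. The two
# counts only agree when length % 20 != 0, so a quick assertion can surface the
# mismatch before matplotlib raises a length error:
def _check_downsample_alignment(length, step=20):
    n_dates = length // step + 1          # points on the date axis
    n_points = len(range(0, length, step))  # points kept from the curve
    assert n_dates == n_points, (n_dates, n_points)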