# --- Setup: data retrieval and standard-normal reference density for VaR plots.
import math

import numpy as np
import matplotlib.pyplot as plt

import functions
import Wavelets

# VaR confidence levels to evaluate.
alpha_array = [0.01, 0.05, 0.10]

# Gaussian kernel smoothing bandwidths.
smoothing_array = [0.002, 0.005, 0.008]

# Retrieve historical data (daily AAPL closes).
ohlc = functions.get_data("AAPL", "1d")
signal = ohlc['Close']

# Real returns from prices; simulated returns drawn from N(0, 1) and scaled
# to percent units (std dev 0.01).
real_returns = functions.compute_returns(signal)
simulated_returns = np.random.normal(0, 1, 1750) / 100

# Tabulate the standard-normal density over the simulated-return range.
# The x grid is built in percent units (x100) and rescaled back at the end.
density_normal_x = np.linspace(min(simulated_returns) * 100,
                               max(simulated_returns) * 100,
                               len(simulated_returns))
density_normal_y = np.empty(len(density_normal_x))
for i in range(len(density_normal_x)):
    # BUG FIX: the original assigned density_normal_y[i] twice per iteration —
    # a manual exp(-x^2/2)/sqrt(2*pi) expression immediately overwritten by
    # functions.normal_pdf. Only the second assignment was effective; the
    # dead first one is removed.
    density_normal_y[i] = functions.normal_pdf(density_normal_x[i], 0, 1)
# Normalize so the tabulated density sums to 1, then undo the x100 scaling.
density_normal_y = density_normal_y / sum(density_normal_y)
density_normal_x = density_normal_x / 100

# Initialize a 2x2 grid of axes for the figures that follow.
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(16, 7.6)
fig.set_tight_layout(True)
# Retrieve Historical Data ticker_id = "^GSPC" ohlc = functions.get_data(ticker=ticker_id, interval="1d", start_date="2011-09-02", end_date="2013-09-02") train_signal = np.flip(ohlc['Close'].values) ohlc = functions.get_data(ticker=ticker_id, interval="1d", start_date="2013-09-03", end_date="2015-04-17") test_signal = np.flip(ohlc['Close'].values) test_dates = np.flip(ohlc['Date'].values) # Compute Returns and Lagged Returns train_returns = functions.compute_returns(train_signal) [train_returns, train_returns_lagged] = functions.lag_returns(train_returns) test_returns = functions.compute_returns(test_signal) # Compute Bivariate Gaussian Kernel Density Estimation density_kernel_x, density_kernel_y, density_kernel_z = functions.kde2D( train_returns, train_returns_lagged, 0.01) density_kernel_x, density_kernel_y = functions.update_axis_arrays( density_kernel_x, density_kernel_y) # Backtesting (Gaussian Kernel) cpt = 0 portfolio_value = np.ones(len(test_returns)) for i in range(0, len(test_returns) - 1): predicted_return = functions.compute_VaR_2D(density_kernel_x, density_kernel_y,
# For each episode record the states, actions and rewards per time-step and store them in corresponding lists for episode in range(param.episodes): states, actions, rewards, avg_job_duration_ep, _ = run_episode( env, jobset, pg_network) states_episodes.append(states) actions_episodes.append(actions) rewards_episodes.append(rewards) # Visualization avg_job_duration_ep_list.append(avg_job_duration_ep) total_reward_episodes.append(sum(rewards)) # Compute returns returns = [ compute_returns(rewards, param.gamma) for rewards in rewards_episodes ] # Zero pad returns to have equal length zero_padded_returns = zero_pad(returns) # Compute baselines baselines = compute_baselines(zero_padded_returns) # Compute advantages advantages = compute_advantages(returns, baselines) states_jobsets.append(states_episodes) actions_jobsets.append(actions_episodes) rewards_jobsets.append(rewards_episodes)