Example 1
def plot_weightings():
    """Plots all weighting functions defined in :module: splweighting."""
    from scipy.signal import freqz
    from pylab import plt, np

    sample_rate = 48000
    num_samples = 2 * 4096

    fig, ax = plt.subplots()

    for name, weight_design in sorted(_weighting_coeff_design_funsd.items()):
        b, a = weight_design(sample_rate)
        w, H = freqz(b, a, worN=num_samples)

        freq = w * sample_rate / (2 * np.pi)

        ax.semilogx(freq,
                    20 * np.log10(np.abs(H) + 1e-20),
                    label='{}-Weighting'.format(name))

    plt.legend(loc='lower right')
    plt.xlabel('Frequency / Hz')
    plt.ylabel('Damping / dB')
    plt.grid(True)
    plt.axis([10, 20000, -80, 5])
    return fig, ax
Example 2
def CHART_Running_Annual_Vol_with_Hourly_Samples(frequency,window,trading_hours_per_day):
    Trading_Hours_in_Trading_Year=252*trading_hours_per_day #Calculates Trading Hours in a year    
    Sample=data['Price'][frequency-1::frequency] #Creates New Sampling list based on frequency input
    Returns=np.log(Sample) - np.log(Sample.shift(1)) #Calculates Returns on New Sample    
    
    Running_Variance=Returns.rolling(window).var() #Calculates hourly running variance based on 'window size' input
    Running_Annual_Vol=np.sqrt(Running_Variance)*np.sqrt(Trading_Hours_in_Trading_Year/frequency)
    
    #Place Running Vols and Time Series in DataFrame
    DF=pd.DataFrame(Sample)
    DF['Running_Vol']=Running_Annual_Vol
    
    #Create Plot
    DF.Price.plot()
    plt.legend()
    plt.ylabel('Yield (%)')
    DF.Running_Vol.plot(secondary_y=True, style='g',rot=90)
    plt.xlabel('Date')
    plt.ylabel('Running Vol') 
    plt.title('10 Year Bund Yield vs Annualized Running Vol (Window Size=200)')
    plt.legend(bbox_to_anchor=(0.8, 1))
    plt.text(0.8, 5.4, "Frequency={}. Window Size={}. Trading Hours per Day={}".format(frequency, window,trading_hours_per_day))

    
    return plt
Example 3
    def plot_axis_control(self, axis, 
                          show_mux_cmd=True, show_asctec_cmd=True, show_vicon_meas=True, show_imu_meas=True):
        axismap = {'roll': (self.v.control.roll_cmd, 2, +1, self.v.asctec_ctrl_input.roll_cmd, -1),
                   'pitch': (self.v.control.pitch_cmd, 1, -1, self.v.asctec_ctrl_input.pitch_cmd, -1),
                   'yaw': (self.v.control.yaw_cmd, 0, -1, self.v.asctec_ctrl_input.yaw_rate_cmd, +1) # not sure about the multiplier here
                   }
        control_mode_cmd, state_axis, imu_mult, asctec_cmd, asctec_cmd_mult = axismap[axis]
        newfig("%s Axis Control" % axis.capitalize(), "time [s]", "%s [deg]" % axis.capitalize())
        # np.clip() and the [1:] slicing in the following attempt to deal with bogus initial data points in the IMU data:
        if show_mux_cmd:
            plt.plot(self.v.control.t[self.v.control.istart:self.v.control.iend],
                     control_mode_cmd[self.v.control.istart:self.v.control.iend], label='cmd (from mux)')
        if show_vicon_meas:
            plt.plot(self.v.state.t[self.v.state.istart:self.v.state.iend], 
                     self.v.state.ori_ypr[self.v.state.istart:self.v.state.iend, state_axis], label='meas (Vicon)')
        if show_imu_meas:
            plt.plot(np.clip(self.v.imu.t[self.v.imu.istart:self.v.imu.iend], 0, np.inf), 
                     imu_mult*self.v.imu.ori_ypr[self.v.imu.istart:self.v.imu.iend, state_axis], label='meas (IMU)')
        if show_asctec_cmd and axis != 'yaw':
            plt.plot(self.v.asctec_ctrl_input.t[self.v.asctec_ctrl_input.istart:self.v.asctec_ctrl_input.iend], 
                     asctec_cmd_mult*asctec_cmd[self.v.asctec_ctrl_input.istart:self.v.asctec_ctrl_input.iend],
                    label='cmd (AscTec)')
        # Plot difference between vicon and imu: (broken, comment it out for now..)
#        tout, data_out = uniform_resample((('linear', self.v.imu.t[self.v.asctec_ctrl_input.istart:self.v.asctec_ctrl_input.iend], 
#                                                      self.v.imu.ori_ypr[self.v.asctec_ctrl_input.istart:self.v.asctec_ctrl_input.iend,state_axis]), 
#                                           ('linear', self.v.state.t[self.v.state.istart:self.v.state.iend], 
#                                                      self.v.state.ori_ypr[self.v.state.istart:self.v.state.iend, state_axis])), 
#                                           0.02)
#        plt.plot(tout, imu_mult*data_out[0][0] - data_out[1][0], label='IMU - Vicon')
        plt.legend()
        self._timeseries_postplot()
Example 4
def MakePlot(x, y, styles, labels, axlabels):
    plt.figure(figsize=(10, 6))
    for i in range(len(x)):
        plt.plot(x[i], y[i], styles[i], label=labels[i])
        plt.xlabel(axlabels[0])
        plt.ylabel(axlabels[1])
    plt.legend(loc=0)
Example 5
def plot_weightings():
    """Plots all weighting functions defined in :module: splweighting."""
    from scipy.signal import freqz
    from pylab import plt, np

    sample_rate = 48000
    num_samples = 2*4096

    fig, ax = plt.subplots()

    for name, weight_design in sorted(
            _weighting_coeff_design_funsd.items()):
        b, a = weight_design(sample_rate)
        w, H = freqz(b, a, worN=num_samples)

        freq = w*sample_rate / (2*np.pi)

        ax.semilogx(freq, 20*np.log10(np.abs(H)+1e-20),
                    label='{}-Weighting'.format(name))

    plt.legend(loc='lower right')
    plt.xlabel('Frequency / Hz')
    plt.ylabel('Damping / dB')
    plt.grid(True)
    plt.axis([10, 20000, -80, 5])
    return fig, ax
Example 6
def CHART_Running_Annual_Vol_with_Daily_Samples_on_Specific_Time_of_Day(frequency,sampling_time,window,trading_hours_per_day):
    Original_DAILY_Sample=data['Price'][sampling_time-1::trading_hours_per_day] #Grabs the Hourly Data and Converts it into Daily Data based on Sampling Time of Day AND Trading Hours per Day 
    NEW_Sample=Original_DAILY_Sample[frequency-1::frequency] #Creates New Sampling list based on sampling frequency input
    Returns=np.log(NEW_Sample) - np.log(NEW_Sample.shift(1)) #Calculates Returns on New Sample    
    Running_Variance=Returns.rolling(window).var() #Calculates daily running variance based on 'window size' input
    Running_Annual_Vol=np.sqrt(Running_Variance)*np.sqrt(252/frequency) 
    
    #Place NEW Sampled data (prices) and Running Vols in DataFrame
    DF=pd.DataFrame(NEW_Sample)
    DF['Running_Vol']=Running_Annual_Vol
    
    #Create Plot
    DF.Price.plot()
    plt.legend()
    #data.Price.plot()
    plt.ylabel('Yield (%)')
    DF.Running_Vol.plot(secondary_y=True, style='g',rot=90)
    plt.xlabel('Date')
    plt.ylabel('Running Vol') 
    plt.title('10 Year Bund Yield vs Annualized Running Vol ')
    plt.legend(bbox_to_anchor=(0.8, 1))
    plt.text(0.8, 3.5, "Sampling Time={}. Window Size={}. Trading Hours per Day={}".format(sampling_time, window,trading_hours_per_day))

    
    return plt
Example 7
    def draw(cls, t_max, agents_proportions, eco_idx, parameters):

        color_set = ["green", "blue", "red"]

        for agent_type in range(3):
            plt.plot(np.arange(t_max),
                     agents_proportions[:, agent_type],
                     color=color_set[agent_type],
                     linewidth=2.0,
                     label="Type-{} agents".format(agent_type))

            plt.ylim([-0.1, 1.1])

        plt.xlabel("$t$")
        plt.ylabel("Proportion of indirect exchanges")

        # plt.suptitle('Direct choices proportion per type of agents', fontsize=14, fontweight='bold')
        plt.legend(loc='upper right', fontsize=12)

        print(parameters)

        plt.title(
            "Workforce: {}, {}, {};   displacement area: {};   vision area: {};   alpha: {};   tau: {}\n"
            .format(parameters["x0"], parameters["x1"], parameters["x2"],
                    parameters["movement_area"], parameters["vision_area"],
                    parameters["alpha"], parameters["tau"]),
            fontsize=12)

        if not path.exists("../../figures"):
            mkdir("../../figures")

        plt.savefig("../../figures/figure_{}.pdf".format(eco_idx))
        plt.show()
Example 8
    def plot_post_disp_decomposition(
        self,
        site,
        cmpt,
        loc=2,
        leg_fs=7,
        marker_for_obs='x',
    ):
        y = self.plot_post_obs_linres(site,
                                      cmpt,
                                      label='obs.',
                                      marker=marker_for_obs)
        y += self.plot_post_disp_pred_added(site, cmpt, label='pred.')
        y += self.plot_R_co(site,
                            cmpt,
                            style='-^',
                            label='Rco',
                            color='orange')
        y += self.plot_E_aslip(site, cmpt, color='green')
        y += self.plot_R_aslip(site, cmpt, color='black')

        plt.grid('on')

        plt.legend(loc=loc, prop={'size': leg_fs})
        plt.ylabel(r'meter')
        plt.gcf().autofmt_xdate()
        plt.title('Postseismic Disp. : {site} - {cmpt}'.format(
            site=get_site_true_name(site_id=site), cmpt=cmpt))
Example 9
    def plot_cumu_disp_decomposition(self,
                                     site,
                                     cmpt,
                                     loc=2,
                                     leg_fs=7,
                                     if_ylim=False):
        self.plot_cumu_obs_linres(site, cmpt)
        y = self.plot_cumu_disp_pred_added(site, cmpt, label='pred.')
        y += self.plot_R_co(site,
                            cmpt,
                            style='-^',
                            label='Rco',
                            color='orange')
        y += self.plot_E_cumu_slip(site, cmpt, color='green')
        y += self.plot_R_aslip(site, cmpt, color='black')

        plt.grid('on')
        if if_ylim:
            plt.ylim(calculate_lim(y))

        plt.ylabel(r'meter')
        plt.legend(loc=loc, prop={'size': leg_fs})
        plt.gcf().autofmt_xdate()
        plt.title('Cumulative Disp.: {site} - {cmpt}'.format(
            site=get_site_true_name(site_id=site), cmpt=cmpt))
Example 10
def plot_precision_recall(precisions, recalls, thresholds):
    """
    Plots precision and recall by thresholds.
    
    Requires imports:
    from sklearn.metrics import precision_recall_curve
    from sklearn.model_selection import cross_val_predict
    
    Returns:
    Nothing

    """
    from pylab import mpl, plt
    import matplotlib.pyplot as plt
    import numpy as np
    plt.style.use('seaborn')
    mpl.rcParams['font.family'] = 'arial'

    np.random.seed(1000)
    np.set_printoptions(suppress=True, precision=4)

    plt.plot(thresholds, precisions[:-1], 'b--', label='Precision')
    plt.plot(thresholds, recalls[:-1], 'g-', label='Recall')
    plt.xlabel('Threshold')
    plt.legend(loc='center left')
    plt.ylim([0, 1])
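For context, one way to obtain the three arrays and call the function (synthetic data and classifier, chosen only for illustration; the sklearn calls themselves are standard):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import cross_val_predict

X, y = make_classification(n_samples=1000, random_state=0)
clf = SGDClassifier(random_state=0)
# out-of-fold decision scores, so the curve is not optimistically biased
scores = cross_val_predict(clf, X, y, cv=3, method='decision_function')
precisions, recalls, thresholds = precision_recall_curve(y, scores)
plot_precision_recall(precisions, recalls, thresholds)
plt.show()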
Example 11
def plot_zipf(*freq):
	'''
	basic plotting using matplotlib and pylab
	'''
	ranks, frequencies = [], []
	langs, colors = [], []
	langs = ["English", "German", "Finnish"]
	colors = ['#FF0000', '#00FF00', '#0000FF']
	if bonus_part:
		colors.extend(['#00FFFF', '#FF00FF', '#FFFF00'])
		langs.extend(["English (Stemmed)", "German (Stemmed)", "Finnish (Stemmed)"])

	plt.subplot(111) # 1, 1, 1

	num = 6 if bonus_part else 3
	for i in xrange(num):
		ranks.append(range(1, len(freq[i]) + 1))
		frequencies.append([e[1] for e in freq[i]])

		# log x and y axi, both with base 10
		plt.loglog(ranks[i], frequencies[i], marker='', basex=10, color=colors[i], label=langs[i])

	plt.legend()
	plt.grid(True)
	plt.title("Zipf's law!")

	plt.xlabel('Rank')
	plt.ylabel('Frequency')

	plt.show()
Example 12
    def draw(cls, t_max, agents_proportions, eco_idx, parameters):

        color_set = ["green", "blue", "red"]

        for agent_type in range(3):
            plt.plot(np.arange(t_max), agents_proportions[:, agent_type],
                     color=color_set[agent_type], linewidth=2.0, label="Type-{} agents".format(agent_type))

            plt.ylim([-0.1, 1.1])

        plt.xlabel("$t$")
        plt.ylabel("Proportion of indirect exchanges")

        # plt.suptitle('Direct choices proportion per type of agents', fontsize=14, fontweight='bold')
        plt.legend(loc='upper right', fontsize=12)

        print(parameters)

        plt.title(
            "Workforce: {}, {}, {};   displacement area: {};   vision area: {};   alpha: {};   tau: {}\n"
            .format(
                parameters["x0"],
                parameters["x1"],
                parameters["x2"],
                parameters["movement_area"],
                parameters["vision_area"],
                parameters["alpha"],
                parameters["tau"]
                          ), fontsize=12)

        if not path.exists("../../figures"):
            mkdir("../../figures")

        plt.savefig("../../figures/figure_{}.pdf".format(eco_idx))
        plt.show()
Example 13
    def plot(self, new_plot=False, xlim=None, ylim=None, title=None, figsize=None,
             xlabel=None, ylabel=None, fontsize=None, show_legend=True, grid=True):
        """
        Plot data using matplotlib library. Use show() method for matplotlib to see result or ::

            %pylab inline

        in IPython to see plot as cell output.

        :param bool new_plot: create or not new figure
        :param xlim: x-axis range
        :param ylim: y-axis range
        :type xlim: None or tuple(x_min, x_max)
        :type ylim: None or tuple(y_min, y_max)
        :param title: title
        :type title: None or str
        :param figsize: figure size
        :type figsize: None or tuple(weight, height)
        :param xlabel: x-axis name
        :type xlabel: None or str
        :param ylabel: y-axis name
        :type ylabel: None or str
        :param fontsize: font size
        :type fontsize: None or int
        :param bool show_legend: show or not labels for plots
        :param bool grid: show grid or not

        """
        xlabel = self.xlabel if xlabel is None else xlabel
        ylabel = self.ylabel if ylabel is None else ylabel
        figsize = self.figsize if figsize is None else figsize
        fontsize = self.fontsize if fontsize is None else fontsize
        self.fontsize_ = fontsize
        self.show_legend_ = show_legend
        title = self.title if title is None else title
        xlim = self.xlim if xlim is None else xlim
        ylim = self.ylim if ylim is None else ylim
        new_plot = self.new_plot or new_plot

        if new_plot:
            plt.figure(figsize=figsize)

        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        plt.title(title, fontsize=fontsize)
        plt.tick_params(axis='both', labelsize=fontsize)
        plt.grid(grid)

        if xlim is not None:
            plt.xlim(xlim)

        if ylim is not None:
            plt.ylim(ylim)

        self._plot()

        if show_legend:
            plt.legend(loc='best', scatterpoints=1)
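For illustration only, a hypothetical minimal subclass showing how plot() falls back to instance attributes and delegates the actual drawing to _plot(). BasePlot stands in for whatever class defines the method above, and plt is assumed to be imported as in that class:

class DemoPlot(BasePlot):
    def __init__(self):
        # attributes that plot() falls back to when its arguments are None
        self.xlabel, self.ylabel = 'x', 'y'
        self.title = 'demo'
        self.figsize = (6, 4)
        self.fontsize = 12
        self.xlim = self.ylim = None
        self.new_plot = True

    def _plot(self):
        # drawing hook called by plot()
        plt.plot([0, 1, 2], [0, 1, 4], label='data')

DemoPlot().plot(show_legend=True)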
Example 14
def create_plot(x, y, styles, labels, axlabels):
    plt.figure(figsize=(10, 6))

    plt.scatter(x[0], y[0])
    plt.scatter(x[1], y[1])
    plt.xlabel(axlabels[0])
    plt.ylabel(axlabels[1])
    plt.legend(loc=0)
    plt.show()
Example 15
def plot_loss(checkpoint_dir, loss_list, save_pred_every):
    x = range(0, len(loss_list) * save_pred_every, save_pred_every)
    y = loss_list
    plt.switch_backend('agg')
    plt.plot(x, y, color='blue', marker='o', label='Train loss')
    plt.xticks(range(0, len(loss_list) * save_pred_every + 3, (len(loss_list) * save_pred_every + 10) // 10))
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(checkpoint_dir, 'loss_fig.pdf'))
    plt.close()
Example 16
def plot_post(cfs,ifshow=False,loc=2,
              save_fig_path = None, file_type='png'):
    for cf in cfs:
        plot_cf(cf, color='blue')
        plt.legend(loc=loc)
        if ifshow:
            plt.show()
        if save_fig_path is not None:
            plt.savefig(join(save_fig_path, '%s_%s.%s'%(cf.SITE, cf.CMPT, file_type)))
        plt.close()
Example 17
def plot_iou(checkpoint_dir, iou_list):
    x = range(0, len(iou_list))
    y = iou_list
    plt.switch_backend('agg')
    plt.plot(x, y, color='red', marker='o', label='IOU')
    plt.xticks(range(0, len(iou_list) + 3, (len(iou_list) + 10) // 10))
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(checkpoint_dir, 'iou_fig.pdf'))
    plt.close()
Example 18
def displayRetireWRate(month, rates, terms):
    plt.figure('retireRate')
    plt.clf()
    for rate in rates:
        xvals, yvals = retire(month, rate, terms)
        plt.plot(xvals,
                 yvals,
                 label='monthly: ' + str(month) + ' rate of:     ' +
                 str(int(rate * 100)))
        plt.legend(loc='upper left')
Example 19
 def savefig(self, fname):
     # Graph using the parameters
     plt.xlim(-1000,
              max(self.incomes) *
              1.05)  # make it a little bigger than needed
     plt.ylim(-5, 105)
     plt.legend(loc='lower center', fontsize=9)
     plt.xticks(rotation=20)
     plt.axes().get_xaxis().set_major_formatter(
         mp.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
     plt.savefig(fname)
Example 20
def displayRetireWMonthlies(monthlies, rate, terms):
    plt.figure('retireMonth')
    plt.clf()
    for monthly in monthlies:
        xvals, yvals = retire(
            monthly, rate,
            terms)  # using base and savings list as x and y values
        plt.plot(xvals,
                 yvals,
                 label='retire with monthly inst of ' + str(monthly))
        plt.legend()
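The retire() helper called by the displayRetire* snippets above is not shown here; a compatible sketch of what it presumably does (an assumption for illustration, not the original implementation) is:

def retire(monthly, rate, terms):
    # hypothetical helper: accumulate `monthly` deposits for `terms` months,
    # compounding at annual rate `rate` (as a fraction), credited monthly
    months = list(range(terms + 1))
    savings = [0]
    monthly_rate = rate / 12
    for _ in range(terms):
        savings.append(savings[-1] * (1 + monthly_rate) + monthly)
    return months, savings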
Example 21
def plot_precisonAndjac(checkpoint_dir, pre_list, jac_list):
    x = range(0, len(pre_list))
    y = pre_list
    y2 = jac_list
    plt.switch_backend('agg')
    plt.plot(x, y, color='red', marker='o', label='precision')
    plt.plot(x, y2, color='blue', marker='o', label='jaccard')
    plt.xticks(range(0, len(pre_list) + 3, (len(pre_list) + 10) // 10))
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(checkpoint_dir, 'precisionAndjac_fig1.pdf'))
    plt.close()
Example 22
def plot_treward(agent):
    ''' Function to plot the total reward
        per training episode.
    '''
    plt.figure(figsize=(10, 6))
    x = range(1, len(agent.averages) + 1)
    y = np.polyval(np.polyfit(x, agent.averages, deg=3), x)
    plt.plot(x, agent.averages, label='moving average')
    plt.plot(x, y, 'r--', label='regression')
    plt.xlabel('episodes')
    plt.ylabel('total reward')
    plt.legend()
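plot_treward() only reads agent.averages, so a stand-in agent with synthetic numbers (purely illustrative) is enough to exercise it:

from types import SimpleNamespace
import numpy as np
import matplotlib.pyplot as plt

demo_agent = SimpleNamespace(averages=list(100 + np.cumsum(np.random.randn(50))))
plot_treward(demo_agent)
plt.show()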
Example 23
 def plot_smoothed_alpha_comparison(self,rmsval,suffix=''):
     plt.plot(self.f,self.alpha,'ko',label='data set')
     plt.plot(self.f,self.salpha,'c-',lw=2,label='smoothed angle $\phi$')
     plt.xlabel('frequency in Hz')
     plt.ylabel('angle $\phi$ in coordinates of circle')
     plt.legend()
     ylims=plt.axes().get_ylim()
     plt.yticks((arange(9)-4)*0.5*pi, ['$-2\pi$','$-3\pi/2$','$-\pi$','$-\pi/2$','$0$','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$'])
     plt.ylim(ylims)
     plt.title('RMS offset from smooth curve: {:.4f}'.format(rmsval))
     if self.show: plt.show()
     else: plt.savefig(join(self.sdc.plotpath,'salpha','c{}_salpha_on_{}_circle'.format(self.sdc.case,self.ZorY)+self.sdc.suffix+self.sdc.outsuffix+suffix+'.png'), dpi=240)
     plt.close()
Example 24
def displayRetireWMonthsandRates(monthlies, rates, terms):
    plt.figure('retire both')
    plt.clf()
    plt.xlim(30 * 12,
             40 * 12)  # focusing only on the last 10 years of investment
    for monthly in monthlies:
        for rate in rates:
            xvals, yvals = retire(monthly, rate, terms)
            plt.plot(xvals,
                     yvals,
                     label='retire with ' + str(monthly) + ":" +
                     str(int(rate * 100)))
            plt.legend(loc='upper left')
Example 25
def draw(x, y, x_text, y_text, title):
    plt.figure(figsize=(30, 5))
    plt.plot(x, y, color='red', label='data_check_result')
    for i in range(1, len(x)):
        plt.text(x[i], y[i], str((x[i], round(y[i], 4))))
    #plt.text(x,y,(x,y),color='red')
    plt.xlabel(x_text)
    plt.ylabel(y_text)
    plt.title(title)
    plt.grid(True)
    plt.legend()
    pic = time.strftime("%Y-%m-%d_%H_%S_%M", time.localtime()) + ".pdf"

    plt.savefig(pic)
    plt.show()
Example 26
    def update_img((expected, output)):
        plt.cla()
        plt.ylim((vmin, vmin + vmax))
        plt.xlim((vmin, vmin + vmax))
        ax = fig.add_subplot(111)
        plt.plot([vmin, vmin + vmax], [vmin, vmin + vmax])
        ax.grid(True)
        plt.xlabel("expected output")
        plt.ylabel("network output")
        plt.legend()

        expected = expected * vmax + vmin
        output = output * vmax + vmin
        #scat.set_offsets((expected, output))
        scat = ax.scatter(expected, output)
        return scat
Example 27
 def test_run(self):
     plt.style.use('seaborn')
     mpl.rcParams['font.family'] = 'serif'
     # Create the market environment
     me_gbm = MarketEnvironment('me_gbm', dt.datetime(2020, 1, 1))
     me_gbm.add_constant('initial_value', 36.0)
     me_gbm.add_constant('volatility', 0.2)
     me_gbm.add_constant('final_date', dt.datetime(2020, 12, 31))
     me_gbm.add_constant('currency', 'EUR')
     me_gbm.add_constant('frequency', 'M')
     me_gbm.add_constant('paths', 1000)
     csr = ConstantShortRate('csr', 0.06)
     me_gbm.add_curve('discount_curve', csr)
     # Create the geometric Brownian motion simulation class
     gbm = GeometricBrownianMotion('gbm', me_gbm)
     gbm.generate_time_grid()
     print('Time grid: {0};'.format(gbm.time_grid))
     paths_1 = gbm.get_instrument_values()
     print('paths_1: {0};'.format(paths_1.round(3)))
     gbm.update(volatility=0.5)
     paths_2 = gbm.get_instrument_values()
     # Visualize the results
     plt.figure(figsize=(10, 6))
     p1 = plt.plot(gbm.time_grid, paths_1[:, :10], 'b')
     p2 = plt.plot(gbm.time_grid, paths_2[:, :10], 'r-')
     legend1 = plt.legend([p1[0], p2[0]],
                          ['low volatility', 'high volatility'],
                          loc=2)
     plt.gca().add_artist(legend1)
     plt.xticks(rotation=30)
     plt.show()
Example 28
def plot_performance(agent):
    ''' Function to plot the financial gross
        performance per training episode.
    '''
    plt.figure(figsize=(10, 6))
    x = range(1, len(agent.performances) + 1)
    y = np.polyval(np.polyfit(x, agent.performances, deg=3), x)
    plt.plot(x, agent.performances[:], label='training')
    plt.plot(x, y, 'r--', label='regression (train)')
    if agent.val:
        y_ = np.polyval(np.polyfit(x, agent.vperformances, deg=3), x)
        plt.plot(x, agent.vperformances[:], label='validation')
        plt.plot(x, y_, 'r-.', label='regression (valid)')
    plt.xlabel('episodes')
    plt.ylabel('gross performance')
    plt.legend()
Example 29
 def test_run(self):
     # Create the geometric Brownian motion market environment
     me_gbm = MarketEnvironment('me_gbm', dt.datetime(2020, 1, 1))
     me_gbm.add_constant('initial_value', 36.0)
     me_gbm.add_constant('volatility', 0.2)
     me_gbm.add_constant('final_date', dt.datetime(2020, 12, 31))
     me_gbm.add_constant('currency', 'EUR')
     me_gbm.add_constant('frequency', 'M')
     me_gbm.add_constant('paths', 1000)
     csr = ConstantShortRate('csr', 0.06)
     me_gbm.add_curve('discount_curve', csr)
     # Create the geometric Brownian motion simulation class
     gbm = GeometricBrownianMotion('gbm', me_gbm)
     gbm.generate_time_grid()
     # Create the jump-diffusion market environment
     me_jd = MarketEnvironment('me_jd', dt.datetime(2020, 1, 1))
     me_jd.add_constant('lambda', 0.3)
     me_jd.add_constant('mu', -0.75)
     me_jd.add_constant('delta', 0.1)
     me_jd.add_environment(me_gbm)
     # Create the jump-diffusion simulation class
     jd = JumpDiffusion('jd', me_jd)
     paths_3 = jd.get_instrument_values()
     jd.update(lamb=0.9)
     paths_4 = jd.get_instrument_values()
     # Plot the results
     plt.figure(figsize=(10, 6))
     p1 = plt.plot(gbm.time_grid, paths_3[:, :10], 'b')
     p2 = plt.plot(gbm.time_grid, paths_4[:, :10], 'r-')
     legend1 = plt.legend([p1[0], p2[0]],
                          ['low intensity', 'high intensity'],
                          loc=3)
     plt.gca().add_artist(legend1)
     plt.xticks(rotation=30)
     plt.show()
Example 30
    def update_img((expected, output)):
        plt.cla()
        plt.ylim((vmin, vmin+vmax))
        plt.xlim((vmin, vmin+vmax))
        ax = fig.add_subplot(111)
        plt.plot([vmin, vmin+vmax], [vmin, vmin+vmax])
        ax.grid(True)
        plt.xlabel("expected output")
        plt.ylabel("network output")
        plt.legend()

        expected = expected*vmax + vmin
        output = output*vmax + vmin
        #scat.set_offsets((expected, output))
        scat = ax.scatter(expected, output)
        return scat
Example 31
def profit_firmA_against_profit_firmB(file_name, folder=None):

    if folder is None:
        folder = "data/figures"

    os.makedirs(folder, exist_ok=True)

    bkp = backup.RunBackup.load(file_name=file_name)
    parameters = bkp.parameters

    profit_max = parameters.n_positions * parameters.n_prices * parameters.unit_value

    x = np.arange(parameters.t_max)
    y = np.zeros((2, parameters.t_max))

    for f in range(2):
        for t in range(parameters.t_max):
            y[f, t] = np.mean(bkp.profits[:t + 1, f] / profit_max)

        # y = np.array([np.mean(for_y[i][t]) for t in range(parameters.t_max)])
        # y_err = np.array([np.std(for_y[i][t]) for t in range(parameters.t_max)])

    fig = plt.figure()

    plt.plot(x, y[0], label="Firm A")
    plt.plot(x, y[1], label="Firm B")
    # plt.fill_between(x, y - (y_err / 2), y + (y_err / 2), color="C{}".format(i), alpha=.25)

    plt.legend()
    plt.xlabel("t")
    plt.ylabel("Mean profit")

    plt.text(0.005,
             0.005,
             file_name,
             transform=fig.transFigure,
             fontsize='x-small',
             color='0.5')

    plt.title("Evolution of profits over time ($r={}$)".format(
        bkp.field_of_view / 2))

    plt.tight_layout()

    plt.savefig("{}/{}_mean_profit.pdf".format(folder, file_name))

    plt.show()
Example 32
def plot(site):
    tp = np.loadtxt('../post_offsets/%s.post'%site)

    t = dc.asmjd([ii[0] for ii in tp]) + dc.adjust_mjd_for_plot_date
    e = [ii[1] for ii in tp]
    n = [ii[2] for ii in tp]
    u = [ii[3] for ii in tp]

    plt.plot_date(t,e,'x-', label = 'eastings')
    plt.plot(t,n,'x-', label = 'northings')
    plt.plot(t,u,'x-', label = 'upings')
    plt.gcf().autofmt_xdate()
    plt.legend(loc=0)
    plt.title(site)
    plt.savefig('%s.png'%site)
    #plt.show()
    plt.close()
Example 33
def create_plot(x, y, styles, labels, axlabels):
    '''
    Generate a plot for multiple series x and y.

    Parameters
    ----------
    x = List format. List of time series arrays representing the x-axis
    y = List format. List of time series arrays representing the y-axis
    styles = List format. Matplotlib style strings, e.g. ['b-', 'r--'] for two series
    labels = List format. Legend labels, one per series
    axlabels = List format. x-axis and y-axis names to be displayed.
    '''
    plt.figure(figsize=(10, 6))
    for i in range(len(x)):
        plt.plot(x[i], y[i], styles[i], label=labels[i])
        plt.xlabel(axlabels[0])
        plt.ylabel(axlabels[1])
    plt.legend(loc=0)
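An illustrative call with synthetic series (not from the original source) could be:

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 200)
create_plot(x=[t, t],
            y=[np.sin(t), np.cos(t)],
            styles=['b-', 'r--'],
            labels=['sin', 'cos'],
            axlabels=['time', 'value'])
plt.show()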
Example 34
def plot(site):
    tp = np.loadtxt('../post_offsets/%s.post' % site)

    t = dc.asmjd([ii[0] for ii in tp]) + dc.adjust_mjd_for_plot_date
    e = [ii[1] for ii in tp]
    n = [ii[2] for ii in tp]
    u = [ii[3] for ii in tp]

    plt.plot_date(t, e, 'x-', label='eastings')
    plt.plot(t, n, 'x-', label='northings')
    plt.plot(t, u, 'x-', label='upings')
    plt.gcf().autofmt_xdate()
    plt.legend(loc=0)
    plt.title(site)
    plt.savefig('%s.png' % site)
    #plt.show()
    plt.close()
Example 35
 def draw_predictions(self, data, predictions):
     from pylab import plt
     true, lstm, bp = [], [], []
     for i in range(len(predictions)):
         true.append(predictions[i][0])
         bp.append(predictions[i][1])
         lstm.append(predictions[i][2])
     x = [1, 2, 3, 4, 5]
     plt.plot(x, true, 'cx--', label='true')
     plt.plot(x, bp, 'mo:', label='fusion')
     plt.plot(x, lstm, 'kp-.', label='lstm')
     plt.legend()
     plt.margins(0)
     plt.subplots_adjust(bottom=0.15)
     plt.xlabel(u"days")
     plt.ylabel("price")
     plt.title("tendency predictions of different models")
     plt.savefig('blog/static/blog/bootstrap/img/presult.jpg')
Example 36
def plotter(mode,Bc,Tc,Q):
    col = ['#000080','#0000FF','#4169E1','#6495ED','#00BFFF','#B0E0E6']
    plt.figure()
    ax = plt.subplot(111)
    for p in range(Bc.shape[1]):
        plt.plot(Tc[:,p],Bc[:,p],'-',color=str(col[p]))
    plt.xlabel('Tc [TW]')
    plt.ylabel('Bc normalised to total EU load')
    plt.title(str(mode)+' flow')
    
    # Shrink current axis by 25% to make room for legend
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])

    plt.legend([str(Q[i]*100) for i in range(len(Q))],
               loc='center left', bbox_to_anchor=(1, 0.5), title='Quantiles')
    
    plt.savefig('figures/bctc_'+str(mode)+'.eps')
Example 37
def example_gammatone_filter():
    from pylab import plt, np
    sample_rate = 44100
    order = 4
    b, a = design_filter(sample_rate=sample_rate,
                         order=order,
                         centerfrequency=1000.0,
                         attenuation_half_bandwidth_db=-3,
                         band_width_factor=1.0)

    x = _create_impulse(1000)
    y, states = fosfilter(b, a, order, x)
    y = y[:800]
    plt.plot(np.real(y), label='Re(z)')
    plt.plot(np.imag(y), label='Im(z)')
    plt.plot(np.abs(y), label='|z|')
    plt.legend()
    plt.show()
    return y, b, a
Example 38
    def plot_spectral_profile(self,
                              points,
                              dd=False,
                              scale=1,
                              domain=np.array([1, 2, 3, 4, 5, 7]),
                              labels=None,
                              xlab=None,
                              ylab=None,
                              ylim=(0, None),
                              lloc='upper left',
                              **kwargs):
        '''
        Assumes the domain is the Landsat TM/ETM+ bands minus the thermal IR
        channel (bands 1-5 and 7). If the spectra are in Landsat surface
        reflectance units, they should be scaled by 0.01 to get reflectance
        as a percentage (by 0.0001 to get it as a proportion).
        '''
        assert not self.__raveled__, 'Cannot do this when the input array is raveled'
        spectra = self.__spectra__(points, dd, scale, domain, self.__nodata__)
        xs = range(1, domain.shape[0] + 1)

        # Truncate the spectra if necessary
        if len(xs) != spectra.shape[1]:
            spectra = spectra[:, 0:len(xs)]

        # Plot as lines
        lines = plot(xs, spectra.transpose(), linewidth=2, **kwargs)

        # Set the x-axis tick labels (e.g., skip band 6)
        plt.xticks(xs, domain)
        plt.ylim(ylim)

        if xlab is not None:
            plt.xlabel(xlab)

        if ylab is not None:
            plt.ylabel(ylab)

        if labels is not None:
            plt.legend(lines, labels, loc=lloc, frameon=False)

        plt.show()
Example 39
    def plot_vel_decomposition(self, site, cmpt, loc=0, leg_fs=7,
                       if_ylim=False
                       ):
        y = self.plot_pred_vel_added(site, cmpt, label='total')
        y += self.plot_vel_R_co(site, cmpt,
                            style='-^', label='Rco', color='orange')
        y += self.plot_vel_E_cumu_slip(site, cmpt, color='green')
        y += self.plot_vel_R_aslip(site, cmpt, color='black')
        
        plt.grid('on')
        if if_ylim:
            plt.ylim(calculate_lim(y))

        plt.ylabel(r'mm/yr')
        plt.legend(loc=loc, prop={'size':leg_fs})
        plt.gcf().autofmt_xdate()
        plt.title('Cumulative Disp.: {site} - {cmpt}'.format(
            site = get_site_true_name(site_id=site),
            cmpt = cmpt
            ))
Example 40
def plot_axis_control(v, axis):
    axismap = {'roll': (v.control.roll_cmd, 2, +1, v.asctec_ctrl_input.roll_cmd, -1),
               'pitch': (v.control.pitch_cmd, 1, -1, v.asctec_ctrl_input.pitch_cmd, -1),
               'yaw': (v.control.yaw_cmd, 0, -1, v.asctec_ctrl_input.yaw_rate_cmd, +1) # not sure about the multiplier here
               }
    control_mode_cmd, state_axis, imu_mult, asctec_cmd, asctec_cmd_mult = axismap[axis]
    newfig("%s Axis Control" % axis.capitalize(), "time [s]", "%s [deg]" % axis.capitalize())
    # np.clip() and the [1:] slicing in the following attempt to deal with bogus initial data points in the IMU data:
    plt.plot(v.control.t, control_mode_cmd, label='cmd (from mux)')
    plt.plot(v.state.t[1:], v.state.ori_ypr[1:,state_axis], label='meas (Vicon)')
    plt.plot(np.clip(v.imu.t[1:], 0, np.inf), imu_mult*v.imu.ori_ypr[1:,state_axis], label='meas (IMU)')
    if axis != 'yaw':
        plt.plot(v.asctec_ctrl_input.t, asctec_cmd_mult*asctec_cmd, label='cmd (AscTec)')
    # Plot difference between vicon and imu:
    tout, data_out = uniform_resample((('linear', v.imu.t[1:], v.imu.ori_ypr[1:,state_axis]), 
                                       ('linear', v.state.t[1:], v.state.ori_ypr[1:,state_axis])), 
                                       0.02)
    plt.plot(tout, imu_mult*data_out[0][0] - data_out[1][0], label='IMU - Vicon')

    plt.legend()
Example 41
def example_gammatone_filter():
    from pylab import plt, np
    sample_rate = 44100
    order = 4
    b, a = design_filter(
        sample_rate=sample_rate,
        order=order,
        centerfrequency=1000.0,
        attenuation_half_bandwidth_db=-3,
        band_width_factor=1.0)

    x = _create_impulse(1000)
    y, states = fosfilter(b, a, order, x)
    y = y[:800]
    plt.plot(np.real(y), label='Re(z)')
    plt.plot(np.imag(y), label='Im(z)')
    plt.plot(np.abs(y), label='|z|')
    plt.legend()
    plt.show()
    return y, b, a
Example 42
    def plot_post_disp_decomposition(self, site, cmpt, loc=2, leg_fs=7,
                       added_label = None,
                       marker_for_obs = 'x',
                       ):
        y = self.plot_post_obs_linres(site,cmpt, label='obs.', marker=marker_for_obs)
        y += self.plot_post_disp_pred_from_result_file(site,cmpt, label='pred.')
        y += self.plot_R_co(site, cmpt,
                            style = '-^', label='Rco', color='orange')
        y += self.plot_E_aslip(site, cmpt, color='green')

        plt.grid('on')

        self.plot_post_disp_pred_added(site, cmpt, label=added_label)
        
        plt.legend(loc=loc, prop={'size':leg_fs})
        plt.ylabel(r'm')
        plt.gcf().autofmt_xdate()
        plt.title('Postseismic Disp. : {site} - {cmpt}'.format(
            site = get_site_true_name(site_id = site),
            cmpt = cmpt
            ))
Example 43
    def plot_cumu_disp_decomposition(self, site, cmpt, loc=2, leg_fs=7,
                       if_ylim=False,
                       added_label = None,
                       ):        
        self.plot_cumu_obs_linres(site, cmpt)
        y = self.plot_cumu_disp_pred_from_result_file(site, cmpt, label='pred.')
        y += self.plot_R_co(site, cmpt,
                            style='-^', label='Rco', color='orange')
        y += self.plot_E_cumu_slip(site, cmpt, color='green')

        plt.grid('on')
        if if_ylim:
            plt.ylim(calculate_lim(y))

        self.plot_cumu_disp_pred_added(site, cmpt, label=added_label)
        plt.ylabel(r'm')
        plt.legend(loc=loc, prop={'size':leg_fs})
        plt.gcf().autofmt_xdate()
        plt.title('Cumulative Disp.: {site} - {cmpt}'.format(
            site = get_site_true_name(site_id=site),
            cmpt = cmpt
            ))
Example 44
def show(filename=None, labels=False):

    if not labels:
        # fix everything if in 3D mode
        plt.subplots_adjust(left=0.0, right=1.1, bottom=0.0, top=1.0)

        # also do this if in 2d mode
        if not is_3d:
            frame1 = plt.gca()
            frame1.axes.get_xaxis().set_visible(False)
            frame1.axes.get_yaxis().set_visible(False)

    if legend:
        plt.legend(loc="upper left", fontsize=8, prop={'family': "Monaco", 'weight': "roman", 'size': "x-small"})

    if filename is not None:
        if '.' not in filename:
            if not os.path.isdir(filename):
                os.makedirs(filename)
            filename = os.path.abspath(os.path.join(filename, "%s.png" % util.timestamp()))
        figure.savefig(filename, dpi=150, facecolor=figure.get_facecolor(), edgecolor='none')

    plt.show()
Example 45
import glob

import h5py
from pylab import plt

def collect_results(outs_files, key):
    outs = []
    for file in outs_files:
        with h5py.File(file, 'r') as fid:
            out = fid[key][...]
            outs.append(out)
    return outs

files = sorted(glob.glob('../outs/ano_??.h5'))
nrough1 = collect_results(files, 'regularization/roughening/norm')
nres1 = collect_results(files, 'misfit/norm_weighted')


files = sorted(glob.glob('../../run0/outs/ano_??.h5'))
nrough0 = collect_results(files, 'regularization/roughening/norm')
nres0 = collect_results(files, 'misfit/norm_weighted')

plt.loglog(nres0, nrough0, '.', label='Result0')
plt.loglog(nres1, nrough1, '.', label='Result1')
plt.grid('on')
plt.xlabel('norm of weighted residual')
plt.ylabel('norm of solution roughness')
plt.xlim([.7,5])
plt.legend()

plt.savefig('compare_misfit.png')
plt.show()
Example 46
import glob

import h5py
from pylab import plt

from epochs import epochs

files = sorted(glob.glob('../outs/*'))
for no, file in enumerate(files):
    with h5py.File(file, 'r') as fid:
        rms = fid['misfit/rms_inland_at_epoch'][...]
    #if no==0:
    plt.plot(epochs, rms,'x-', label='Nrough=%d'%no)
    #plt.plot(epochs, rms,'x-')
    
plt.grid('on')
plt.xlabel('days after mainshock')
plt.ylabel('RMS(m)')
plt.legend(prop={'size':7}, bbox_to_anchor=(0.18, 1.01))
plt.savefig('RMS_misfits_at_epochs.pdf')
plt.show()
        
Example 47
    while arr[1] are those of image 2. 
    Each landmark vector contains 68 x-coordinates followed by 68 y-coordinates.
    """
    src = arr[0].reshape(2,-1).T 
    dst = arr[1].reshape(2,-1).T
    
    data.src=np.require(src,requirements=['C'])
    data.dst=np.require(dst,requirements=['C'])
    
    data.dname=os.path.abspath(os.path.dirname(fname))
    data.fname=fname
    
    return data


if __name__ == "__main__":
    
    from pylab import plt
    import of.plt
    name = 'LFW_5_to_6'
    data = get_data('LFW_5_to_6')
    src = data.src
    dst = data.dst
    plt.close('all')
    plt.figure(1)
    plt.plot(src[:,0],src[:,1],'go')
    plt.plot(dst[:,0],dst[:,1],'bo')
    of.plt.axis_ij()
    plt.axis('scaled')
    plt.legend(['src','dst'])
    tic=time.clock()
    tw.calc_T_fwd(tw.x_dense,pts_fwd,level=0,int_quality=1)     
    pts_fwd.gpu2cpu()
    toc=time.clock()
    print 'time',toc-tic
#    1/0

    pts_recovered = CpuGpuArray.zeros_like(tw.x_dense)
    tw.calc_T_inv(pts_fwd,pts_recovered,level=0)
    pts_recovered.gpu2cpu()

    
    plt.plot(tw.x_dense.cpu,pts_fwd.cpu)
    plt.plot(tw.x_dense.cpu,pts_recovered.cpu)   
    
    plt.legend([r'$T(x)$',r'$(T^{-1}\circ T)(x)$',r'$T_{\mathrm{alg}}(x)$'],loc='lower right')

    
    
    dx = x[1]-x[0]
    err = np.abs(pts_fwd_cf-pts_fwd.cpu.ravel())/dx
    print 'err1.mean:',err.mean()
    
    pts_fwd_simple = CpuGpuArray.zeros_like(pts_fwd)
    tic=time.clock()
    cpa_space.calc_T_simple(pts=tw.x_dense,out=pts_fwd_simple,**tw.params_flow_int_fine)
    pts_fwd_simple.gpu2cpu()
    toc=time.clock()
    print 'time',toc-tic
    
    dx = x[1]-x[0]
Example 49
def runAnalysis( caseDirs , resultsDir , noReweight = False):
    
    # Do a reference for each one
    for refDir in caseDirs:

        # ID of reference case
        refID = refDir.split("/")[-1]
        
        # User info
        print "Doing PCA analysis with "+refDir+" as reference"
        
        # Get the PCA limits of component 1-2 plot
        limit = 10
        with open(refDir+"/analysis/data/pca_limits_1", "r") as fi:
            limit = int(float(fi.read()))
            limit += 0.01
        
        # Go through the case dirs to plot
        for caseDir in caseDirs:
            
            print "Using "+caseDir+" as case"
            
            # ID of case
            caseID = caseDir.split("/")[-1]
            
            ## PCA PLOTTING ON REF DIR PCA COMPONENTS
            #########################################
            
            # Create & run cpptraj for plotting all cases on the axes of the first eigenvector
            # Good URLs for PCA in CPPTRAJ:
            # http://archive.ambermd.org/201404/0243.html
                        
            # PCA plotter
            pcaHandler = pcaFuncs.PCA( 
                resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+".pdf"
            )    
            
            # Create new submission file
            TEMPLATE = open( caseDir+"/ccptraj_analysis_pca.ptraj", 'r')
            TEMP = TEMPLATE.read().replace("[PCAREFERENCE]", refDir  )
            TEMPLATE.close()
                                  
            # Write the submission file
            FILE = open(caseDir+"/ccptraj_analysis_pca.ptraj","w");        
            FILE.write( TEMP );
            FILE.close();
            
            # Run the cpptraj utility
            os.system( "$AMBERHOME/bin/cpptraj -p "+caseDir+"/md-files/peptide_nowat.prmtop -i "+caseDir+"/ccptraj_analysis_pca.ptraj" )
        
            # Do the plots of energy landscapes & distributions
            pcaHandler.plotPCA( 
                "Case: "+caseID+". Ref case: "+refID,   # Plot Title
                caseDir+"/analysis/data/" ,        # Data Dir
                "global_pca",                      # Eigenvector file
                eigenVectorCount = 2,              # Only plot two
                plotDistibution = False,           # Do not plot the distribution
                limits = limit
            )
            
            # Save the plot
            pcaHandler.savePlot()
            
            ## REWEIGHTING OF PCA PLOTS ON RED DIR PCA COMPONENTS
            #####################################################

            # Check if we should do a reweighted version
            if noReweight == False:
                if os.path.isfile( caseDir+"/md-logs/weights.dat" ):
                    
                    # User info
                    print "aMD weights found. Now attempting 2D reweighting"   
                    
                    # Prepare input file
                    numLines = 0
                    with open(caseDir+"/analysis/data/global_pca", "r") as fi:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn", "w") as fo:
                            next(fi)
                            for line in fi:
                                numLines += 1
                                fo.write( line.split()[1]+"\t"+line.split()[2]+"\n" )

                    # Set the discretization
                    reqBins = 100         
                    discretization = (2*limit) / reqBins    
                    
                    # Get the max value of normal plot
                    maxValue = math.ceil(pcaHandler.getLatestMax())
                    
                    # Run the reweighting procedure
                    command = "python $PLMDHOME/src/PyReweighting/PyReweighting-2D.py \
                                -input "+caseDir+"/analysis/data/global_pca_singleColumn \
                                -name "+caseDir+"/analysis/data/global_pca_singleColumn_reweighted \
                                -Xdim -"+str(limit)+" "+str(limit)+" \
                                -Ydim -"+str(limit)+" "+str(limit)+" \
                                -discX "+str(discretization)+" \
                                -discY "+str(discretization)+" \
                                -cutoff 10 \
                                -Emax "+str(maxValue)+" \
                                -job amdweight_CE \
                                -weight "+refDir+"/md-logs/weights.dat | tee -a reweight_variable.log"
                    print "Running command:", command
                    os.system( command )
                    
                    # Create long file for PCA module
                    with open(caseDir+"/analysis/data/global_pca_reweightedDone", "w") as fo:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn_reweighted-pmf-c2.dat", "r") as fi:
                            frame = 0
                            for line in fi:
                                temp = line.split()
                                entries = int(float(temp[2])*10)
                                for i in range(0,entries):
                                    fo.write( str(frame) + "\t" + temp[0] + "\t" + temp[1] +"\n" )
                                    frame += 1

                    # Print block analysis
                    fig, ax = plt.subplots(figsize=(8, 8), nrows=1, ncols=1)
                    font = {'family': 'Arial', 'weight': 'normal', 'size': 10}
                    plt.rc('font', **font)
                    
                    # Now plot the 2d histogram
                    hist = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2EnergyHist.npy")   
                    xedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesX.npy")   
                    yedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesY.npy")   
                    
                    # Remove points above limit
                    for jy in range(len(hist[0,:])):
                        for jx in range(len(hist[:,0])):
                            if hist[jx,jy] >= maxValue:
                                hist[jx,jy] = float("inf")
                    
                    # Do plot
                    img = plt.imshow(hist.transpose(),  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
                    
                    # create an axes on the right side of ax. The width of cax will be 5%
                    # of ax and the padding between cax and ax will be fixed at 0.05 inch.
                    divider = make_axes_locatable(ax)
                    cax = divider.append_axes("right", size="5%", pad=0.05)   
                    
                    # Create colorbar
                    colorbar = plt.colorbar(img, ax=ax, cax = cax)
                    colorbar.set_label("Kcal / mol")
                    
                    # Set title, labels etc
                    plt.legend()
                    ax.set_xlabel("PC1", fontsize=12)
                    ax.set_ylabel("PC2", fontsize=12)
                    
                    ax.set_title( "PCA. Case: "+caseID+" Reweighted. Ref case: "+refID )
                    plt.rc('font', **font) 
                    
                    # Save figure
                    fig.savefig(resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+"_reweighted.pdf")
                    

            ## CLUSTER PLOTS ON PCA COMPONENTS
            ##################################

            # Do both hier and dbscan
            for clusterType in ["dbscan","hier"]:            
                
                # Instantiate the class
                if os.path.isfile(caseDir+"/analysis/data/cluster_"+clusterType+"_out"):   
                    
                    print "Doing the "+clusterType+" cluster equivalent of the PCA plot"
                
                    # Start the cluster handler. Load the file declaring cluster for each frame
                    clusterHandler = cluster.clusterBase( caseDir+"/analysis/data/cluster_"+clusterType+"_out" )
                    
                    # Separate the dataset.
                    # global_pca is the projection file for this case on the ref modes
                    numPCAdataSets = clusterHandler.separateDataSet( 
                        caseDir+"/analysis/data/global_pca",            # Input file
                        caseDir+"/analysis/data/cluster_"+clusterType+"_pca_",   # Output files
                        xColumn = 1
                    ) 
                    
                    # Create lists of labels and files for plotting
                    clusterLabels = []
                    clusterFiles = []
                    offset = 1 if clusterType == "hier" else 0
                    for i in range( 0+offset, numPCAdataSets+offset):
                        clusterLabels.append( "Cluster "+str(i) )
                        clusterFiles.append( caseDir+"/analysis/data/cluster_"+clusterType+"_pca_d2_c"+str(i) )
                    
                    # First one is noise
                    if offset == 0:
                        clusterLabels[0] = "Noise"                 
                    
                    myPlot.plotData( 
                        resultsDir+"/plots/pcaComparison/" , 
                        clusterType+"_"+caseID+"_on_"+refID, 
                        clusterLabels, 
                        clusterFiles , 
                        "PC2",
                        xUnit = "PC1",
                        scatter = True,
                        legendLoc = 4,
                        figWidth = 8,
                        figHeight = 8,
                        tightXlimits = False,
                        legendFrame = 1,
                        legendAlpha = 1,
                        xLimits = [-limit,limit],
                        yLimits = [-limit,limit]
                    )
Example 50
def bars(scheme, verbose=None, norm='load'):
    """
    Figure to compare link proportional and usage proportional for a single
    scheme and put them in ./sensitivity/figures/scheme/
    """
    # Load data and results
    F = abs(np.load('./results/' + scheme + '-flows.npy'))
    quantiles = np.load('./results/quantiles_' + scheme + '_' + str(lapse) + '.npy')
    nNodes = 30

    names = node_namer(N)  # array of node labels
    links = range(len(F))
    nodes = np.linspace(0.5, 2 * nNodes - 1.5, nNodes)
    nodes_shift = nodes + .5

    for direction in directions:
        N_usages = np.load('./results/Node_contrib_' + scheme + '_' + direction + '_' + str(lapse) + '.npy')

        # Compare node transmission to mean load
        if verbose:
            print('Plotting node comparison - ' + scheme + ' - ' + direction)
        # sort node names for x-axis
        Total_usage = np.sum(N_usages, 1)
        node_ids = np.array(range(len(N))).reshape((len(N), 1))
        node_mean_load = [n.mean for n in N]

        # Vector for normalisation
        if norm == 'cap':
            normVec = np.ones(nNodes) * sum(quantiles)
        else:
            normVec = node_mean_load

        # Calculate node proportional
        EU_load = np.sum(node_mean_load)
        Total_caps = sum(quantiles)
        Node_proportional = node_mean_load / EU_load * Total_caps / normVec
        Node_proportional = np.reshape(Node_proportional, (len(Node_proportional), 1))

        # Calculate link proportional
        link_proportional = linkProportional(N, link_dic, quantiles)
        link_proportional = [link_proportional[i] / normVec[i] for i in range(nNodes)]

        # Calculate old usage proportional
        if direction == 'combined':
            old_usages = np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_import_all_alpha=same.npy')
            old_usages += np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_export_all_alpha=same.npy')
        else:
            old_usages = np.load('./linkcolouring/old_' + scheme + '_copper_link_mix_' + direction + '_all_alpha=same.npy')
        avg_node_usage = np.sum(np.sum(old_usages, axis=2), axis=0) / 70128.
        avg_EU_usage = np.sum(np.sum(np.sum(old_usages, axis=2), axis=0)) / 70128.
        avg_node_usage /= avg_EU_usage
        avg_node_usage /= normVec
        avg_node_usage *= 500000

        # Calculate usage and sort countries by mean load
        normed_usage = Total_usage / normVec
        normed_usage = np.reshape(normed_usage, (len(normed_usage), 1))
        node_mean_load = np.reshape(node_mean_load, (len(node_mean_load), 1))
        data = np.hstack([normed_usage, node_ids, node_mean_load, link_proportional, Node_proportional])
        data_sort = data[data[:, 2].argsort()]
        names_sort = [names[int(i)] for i in data_sort[:, 1]]
        # flip order so largest is first
        names_sort = names_sort[::-1]
        link_proportional = data_sort[:, 3][::-1]
        Node_proportional = data_sort[:, 4][::-1]
        data_sort = data_sort[:, 0][::-1]

        plt.figure(figsize=(10, 4), facecolor='w', edgecolor='k')
        ax = plt.subplot(111)
        green = '#009900'
        blue = '#000099'

        # Plot node proportional
        plt.rc('lines', lw=2)
        plt.rc('lines', dash_capstyle='round')
        plt.plot(np.linspace(0, len(N) * 2 + 2, len(N)), Node_proportional, '--k')
        # Plot link proportional
        #plt.bar(nodes, link_proportional, width=1, color=green, edgecolor='none')
        # Plot old usage proportional
        plt.bar(nodes, avg_node_usage[loadOrder], width=1, color=green, edgecolor='none')
        # Plot usage proportional
        plt.bar(nodes_shift, data_sort, width=1, color=blue, edgecolor='none')

        # Magic with ticks and labels
        ax.set_xticks(np.linspace(2, len(N) * 2 + 2, len(N) + 1))
        ax.set_xticklabels(names_sort, rotation=60, ha="right", va="top", fontsize=10.5)

        ax.xaxis.grid(False)
        ax.xaxis.set_tick_params(width=0)
        if norm == 'cap':
            ax.set_ylabel(r'$M_n/ \mathcal{K}^T$')
        else:
            # ax.set_ylabel(r'Network usage [MW$_T$/MW$_L$]')
            ax.set_ylabel(r'$M_n/\left\langle L_n \right\rangle$')
        maxes = [max(avg_node_usage), max(data_sort)]
        plt.axis([0, nNodes * 2 + .5, 0, 1.15 * max(maxes)])

        # Legend
        artists = [plt.Line2D([0, 0], [0, 0], ls='dashed', lw=2.0, c='k'), plt.Rectangle((0, 0), 0, 0, ec=green, fc=green), plt.Rectangle((0, 0), 0, 0, ec=blue, fc=blue)]
        LABS = ['$M^1$', '$M^{3}_{old}$', '$M^{3}_{new}$']
        leg = plt.legend(artists, LABS, loc='upper left', ncol=len(artists), columnspacing=0.6, borderpad=0.4, borderaxespad=0.0, handletextpad=0.2, handleheight=1.2)
        leg.get_frame().set_alpha(0)
        leg.get_frame().set_edgecolor('white')
        ltext = leg.get_texts()
        plt.setp(ltext, fontsize=12)    # the legend text fontsize

        plt.savefig(figPath + scheme + '/network-usage-' + direction + '-' + norm + '.png', bbox_inches='tight')
        if verbose:
            print('Saved figures to ./figures/compareUsage/' + scheme + '/network-usage-' + direction + '-' + norm + '.png')
Example 51
from pylab import plt

import viscojapan as vj

def plot_slip(slip, nx, ny,
              label='x-',
              legend = None
              ):
    epochs = slip.get_epochs()
    s = slip.get_cumu_slip_at_subfault(nx, ny)
    plt.plot(epochs, s, label, label=legend)


res_file = '../../outs/nrough_06_naslip_11.h5'
reader = vj.inv.ResultFileReader(res_file)
slip_pred = reader.get_slip()

slip_exp = vj.epoch_3d_array.Slip.load('extra_slip_EXP.h5')
slip_log = vj.epoch_3d_array.Slip.load('extra_slip_LOG.h5')


nx = 1
ny = 1

plot_slip(slip_pred, nx, ny, label='x-', legend='Pred.')
plot_slip(slip_exp, nx, ny, label='^-', legend='EXP')
plot_slip(slip_log, nx, ny, label='o-', legend='LOG')

plt.legend(loc=0)
plt.savefig('extra_%d_%d.png'%(nx, ny))
plt.show()    
Example 52
    
    nPtsDense = 10000
    mr = MonotonicRegression(base=[12],nLevels=4)    
    mr.set_dense(domain_start=-10,domain_end=10)                    
    mr.set_data(x=x,y=y,range_start=range_start,range_end=range_end)
     
    print mr
    
    if 1:
        plt.figure(0)
        plt.subplot(122)
        plt.cla()
        mr.plot_dst('.r')  
        mr.plot_src('.g')  
        plt.legend(['dst','src'], loc='lower right')
        ax = plt.gca()
        ax.tick_params(axis='y', labelsize=50)
        ax.tick_params(axis='x', labelsize=30)        

        
    mr.set_run_lengths([500,500,500,50000,50000][:mr.nLevels])
    
    theta,inference_record = mr.fit(use_prior=1)
    plt.figure(1000)
    of.plt.set_figure_size_and_location(50,50,1000,1000)
    plt.clf()
    mr.plot_inference_summary(inference_record)    
    

Example 53
def get_linear_model_histogramDouble(code, ptype='f', dtype='d', start=None, end=None, vtype='close', filter='n',
                                     df=None):
    # 399001','cyb':'zs399006','zxb':'zs399005
    # code = '999999'
    # code = '601608'
    # code = '000002'
    # asset = ts.get_hist_data(code)['close'].sort_index(ascending=True)
    # df = tdd.get_tdx_Exp_day_to_df(code, 'f').sort_index(ascending=True)
    # vtype='close'
    # if vtype == 'close' or vtype==''
    # ptype=
    if start is not None and filter == 'y':
        if code not in ['999999', '399006', '399001']:
            index_d, dl = tdd.get_duration_Index_date(dt=start)
            log.debug("index_d:%s dl:%s" % (str(index_d), dl))
        else:
            index_d = cct.day8_to_day10(start)
            log.debug("index_d:%s" % (index_d))
        start = tdd.get_duration_price_date(code, ptype='low', dt=index_d)
        log.debug("start:%s" % (start))
    if df is None:
        # df = tdd.get_tdx_append_now_df(code, ptype, start, end).sort_index(ascending=True)
        df = tdd.get_tdx_append_now_df_api(code, ptype, start, end).sort_index(ascending=True)
    if not dtype == 'd':
        df = tdd.get_tdx_stock_period_to_type(df, dtype).sort_index(ascending=True)
    asset = df[vtype]
    log.info("df:%s" % asset[:1])
    asset = asset.dropna()
    dates = asset.index

    # Fetch a benchmark index only when the code is not itself an index
    if not (code.startswith('999') or code.startswith('399')):
        if code[:1] in ['5', '6', '9']:
            code2 = '999999'
        elif code[:1] in ['3']:
            code2 = '399006'
        else:
            code2 = '399001'
        df1 = tdd.get_tdx_append_now_df_api(code2, ptype, start, end).sort_index(ascending=True)
        # df1 = tdd.get_tdx_append_now_df(code2, ptype, start, end).sort_index(ascending=True)
        if not dtype == 'd':
            df1 = tdd.get_tdx_stock_period_to_type(df1, dtype).sort_index(ascending=True)
        asset1 = df1.loc[asset.index, vtype]
        startv = asset1[:1]
        # asset_v=asset[:1]
        # print startv,asset_v
        asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))
        # print asset1[:4]

    # Plot price over time
    # _, ax = plt.subplots()
    # fig = plt.figure()
    fig = plt.figure(figsize=(16, 10))
    # fig = plt.figure(figsize=(16, 10), dpi=72)
    # fig.autofmt_xdate() #(no fact)

    # plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
    plt.subplots_adjust(left=0.05, bottom=0.08, right=0.95, top=0.95, wspace=0.15, hspace=0.25)
    # set (gca,'Position',[0,0,512,512])
    # fig.set_size_inches(18.5, 10.5)
    # fig=plt.fig(figsize=(14,8))
    ax1 = fig.add_subplot(321)
    # asset=asset.apply(lambda x:round( x/asset[:1],2))
    ax1.plot(asset)
    # ax1.plot(asset1,'-r', linewidth=2)
    ticks = ax1.get_xticks()
    # start, end = ax1.get_xlim()
    # print start, end, len(asset)
    # print ticks, ticks[:-1]
    # (ticks[:-1] if len(asset) > end else np.append(ticks[:-1], len(asset) - 1))
    ax1.set_xticklabels([dates[i] for i in (np.append(ticks[:-1], len(asset) - 1))],
                        rotation=15)  # Label x-axis with dates
    # Fit a linear trend to the price series
    X = np.arange(len(asset))
    x = sm.add_constant(X)
    model = regression.linear_model.OLS(asset, x).fit()
    a = model.params[0]
    b = model.params[1]
    # log.info("a:%s b:%s" % (a, b))
    log.info("X:%s a:%s b:%s" % (len(asset), a, b))
    Y_hat = X * b + a

    # Actual minus fitted values: the extreme deviations define the value band
    # Shift the fitted line down
    i = (asset.values.T - Y_hat).argmin()
    c_low = X[i] * b + a - asset.values[i]
    Y_hatlow = X * b + a - c_low

    # Shift the fitted line up
    i = (asset.values.T - Y_hat).argmax()
    c_high = X[i] * b + a - asset.values[i]
    Y_hathigh = X * b + a - c_high
    plt.plot(X, Y_hat, 'k', alpha=0.9)
    plt.plot(X, Y_hatlow, 'r', alpha=0.9)
    plt.plot(X, Y_hathigh, 'r', alpha=0.9)
    # plt.xlabel('Date', fontsize=12)
    plt.ylabel('Price', fontsize=12)
    plt.title(code + " | " + str(dates[-1])[:11], fontsize=14)
    plt.legend([asset.iat[-1]], fontsize=12, loc=4)
    plt.grid(True)

    # plt.legend([code]);
    # plt.legend([code, 'Value center line', 'Value interval line']);
    # fig=plt.fig()
    # fig.figsize = [14,8]
    scale = 1.1
    zp = zoompan.ZoomPan()
    figZoom = zp.zoom_factory(ax1, base_scale=scale)
    figPan = zp.pan_factory(ax1)

    ax2 = fig.add_subplot(323)
    # ax2.plot(asset)
    # ticks = ax2.get_xticks()
    ax2.set_xticklabels([dates[i] for i in (np.append(ticks[:-1], len(asset) - 1))], rotation=15)
    # plt.plot(X, Y_hat, 'k', alpha=0.9)
    n = 5
    d = (-c_high + c_low) / n
    c = c_high
    while c <= c_low:
        Y = X * b + a - c
        plt.plot(X, Y, 'r', alpha=0.9);
        c = c + d
    # asset=asset.apply(lambda x:round(x/asset[:1],2))
    ax2.plot(asset)
    # ax2.plot(asset1,'-r', linewidth=2)
    # plt.xlabel('Date', fontsize=12)
    plt.ylabel('Price', fontsize=12)
    plt.grid(True)

    # plt.title(code, fontsize=14)
    # plt.legend([code])

    # Plot the deviation Y - Y_hat from the value center line in its own panel; split the band between the
    # boundary lines evenly: regions above 0 are overvalued, regions below 0 are undervalued, 0 is the center line.
    ax3 = fig.add_subplot(322)
    # distance = (asset.values.T - Y_hat)
    distance = (asset.values.T - Y_hat)[0]
    if code.startswith('999') or code.startswith('399'):
        ax3.plot(asset)
        plt.plot(distance)
        ticks = ax3.get_xticks()
        ax3.set_xticklabels([dates[i] for i in (np.append(ticks[:-1], len(asset) - 1))], rotation=15)
        n = 5
        d = (-c_high + c_low) / n
        c = c_high
        while c <= c_low:
            Y = X * b + a - c
            plt.plot(X, Y - Y_hat, 'r', alpha=0.9);
            c = c + d
        ax3.plot(asset)
        # plt.xlabel('Date', fontsize=12)
        plt.ylabel('Price-center price', fontsize=14)
        plt.grid(True)
    else:
        as3 = asset.apply(lambda x: round(x / asset[:1], 2))
        ax3.plot(as3)
        ax3.plot(asset1, '-r', linewidth=2)
        plt.grid(True)
        zp3 = zoompan.ZoomPan()
        figZoom = zp3.zoom_factory(ax3, base_scale=scale)
        figPan = zp3.pan_factory(ax3)
    # plt.title(code, fontsize=14)
    # plt.legend([code])



    # Count the price frequencies within each region to build a histogram; to show them in finer detail,
    # the full band is split into 100 bins.

    ax4 = fig.add_subplot(325)
    log.info("assert:len:%s %s" % (len(asset.values.T - Y_hat), (asset.values.T - Y_hat)[0]))
    # distance = map(lambda x:int(x),(asset.values.T - Y_hat)/Y_hat*100)
    # now_distanse=int((asset.iat[-1]-Y_hat[-1])/Y_hat[-1]*100)
    # log.debug("dis:%s now:%s"%(distance[:2],now_distanse))
    # log.debug("now_distanse:%s"%now_distanse)
    distance = (asset.values.T - Y_hat)
    now_distanse = asset.iat[-1] - Y_hat[-1]
    # distance = (asset.values.T-Y_hat)[0]
    pd.Series(distance).plot(kind='hist', stacked=True, bins=100)
    # plt.plot((asset.iat[-1].T-Y_hat),'b',alpha=0.9)
    plt.axvline(now_distanse, label="1", color='red')
    # plt.axhline(now_distanse,hold=None,label="1",color='red')
    # plt.axvline(asset.iat[0],hold=None,label="1",color='red',linestyle="--")
    plt.xlabel('Undervalue ------------------------------------------> Overvalue', fontsize=12)
    plt.ylabel('Frequency', fontsize=14)
    # plt.title('Undervalue & Overvalue Statistical Chart', fontsize=14)
    plt.legend([code, asset.iat[-1], str(dates[-1])[5:11]], fontsize=12)
    plt.grid(True)

    # plt.show()
    # import os
    # print(os.path.abspath(os.path.curdir))


    ax5 = fig.add_subplot(326)
    # fig.figsize=(5, 10)
    log.info("assert:len:%s %s" % (len(asset.values.T - Y_hat), (asset.values.T - Y_hat)[0]))
    # distance = map(lambda x:int(x),(asset.values.T - Y_hat)/Y_hat*100)
    distance = (asset.values.T - Y_hat) / Y_hat * 100
    now_distanse = ((asset.iat[-1] - Y_hat[-1]) / Y_hat[-1] * 100)
    log.debug("dis:%s now:%s" % (distance[:2], now_distanse))
    log.debug("now_distanse:%s" % now_distanse)
    # n, bins = np.histogram(distance, 50)
    # print n, bins[:2]
    pd.Series(distance).plot(kind='hist', stacked=True, bins=100)
    # plt.plot((asset.iat[-1].T-Y_hat),'b',alpha=0.9)
    plt.axvline(now_distanse, label="1", color='red')
    # plt.axhline(now_distanse,hold=None,label="1",color='red')
    # plt.axvline(asset.iat[0],hold=None,label="1",color='red',linestyle="--")
    plt.xlabel('Undervalue ------------------------------------------> Overvalue', fontsize=14)
    plt.ylabel('Frequency', fontsize=12)
    # plt.title('Undervalue & Overvalue Statistical Chart', fontsize=14)
    plt.legend([code, asset.iat[-1]], fontsize=12)
    plt.grid(True)

    ax6 = fig.add_subplot(324)
    h = df.loc[:, ['open', 'close', 'high', 'low']]
    highp = h['high'].values
    lowp = h['low'].values
    openp = h['open'].values
    closep = h['close'].values
    lr = LinearRegression()
    x = np.atleast_2d(np.linspace(0, len(closep), len(closep))).T
    lr.fit(x, closep)
    LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
    xt = np.atleast_2d(np.linspace(0, len(closep) + 200, len(closep) + 200)).T
    yt = lr.predict(xt)
    bV = []
    bP = []
    for i in range(1, len(highp) - 1):
        if highp[i] <= highp[i - 1] and highp[i] < highp[i + 1] and lowp[i] <= lowp[i - 1] and lowp[i] < lowp[i + 1]:
            bV.append(lowp[i])
            bP.append(i)

    d, p = LIS(bV)

    idx = []
    for i in range(len(p)):
        idx.append(bP[p[i]])
    lr = LinearRegression()
    X = np.atleast_2d(np.array(idx)).T
    Y = np.array(d)
    lr.fit(X, Y)
    estV = lr.predict(xt)
    ax6.plot(closep, linewidth=2)
    ax6.plot(idx, d, 'ko')
    ax6.plot(xt, estV, '-r', linewidth=3)
    ax6.plot(xt, yt, '-g', linewidth=3)
    plt.grid(True)

    # plt.tight_layout()
    zp2 = zoompan.ZoomPan()
    figZoom = zp2.zoom_factory(ax6, base_scale=scale)
    figPan = zp2.pan_factory(ax6)
    # plt.ion()
    plt.show(block=False)
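The core of the function above is the OLS "value channel": regress price on time, shift the fitted line by the extreme residuals to get the band boundaries, and histogram the deviations. A minimal sketch of just that step on an invented price series (statsmodels and pandas assumed available; all data synthetic):

# Sketch of the OLS value-channel idea on invented prices; not the full chart above.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt

rng = np.random.RandomState(1)
price = pd.Series(100 + 0.05 * np.arange(300) + rng.randn(300).cumsum())

X = np.arange(len(price))
model = sm.OLS(price.values, sm.add_constant(X)).fit()
a, b = model.params                      # intercept, slope
y_hat = a + b * X

resid = price.values - y_hat             # deviation from the value center line
y_low = y_hat + resid.min()              # lower channel boundary
y_high = y_hat + resid.max()             # upper channel boundary

plt.subplot(211)
plt.plot(X, price.values)
plt.plot(X, y_hat, 'k')
plt.plot(X, y_low, 'r')
plt.plot(X, y_high, 'r')

plt.subplot(212)
pd.Series(resid).plot(kind='hist', bins=100)
plt.axvline(resid[-1], color='red')      # latest deviation, as in the chart above
plt.show()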
Esempio n. 54
0
    def plotPCA( 
        self, 
        plotTitleIdentifier, 
        dataDir, 
        eigenVectorFile , 
        eigenValueFile = False , 
        eigenVectorCount = 4, 
        plotDistibution = True, 
        limits = False 
    ):
    
        # If this is the first plot, create grid
        if self.subPlots == 0:
            self.createPdfPage()
    
        # User info
        print "Doing principal component analysis"
    
        # Do the four principal vectors
        frames = []

        # Principal components
        pcs = []
        for i in range( 0, eigenVectorCount ):
            pcs.append([])
    
        # Go through analysis file and get eigenvalues
        if eigenValueFile != False:
            eigenValues = []
            eigenValueTotal = 0
            with open(dataDir+eigenValueFile,"r") as f:
                for line in f:
                    temp = line.split()
                    eigenValueTotal += float(temp[1])
                    eigenValues.append(float(temp[1]))
            eigenValues = (np.array(eigenValues) / eigenValueTotal) * 100               
    
        # Get the file with all the projection data
        pcaFile = open(dataDir+eigenVectorFile,"r")
        n = 0
        for aline in pcaFile:
            if n > 1 and aline:
                values = aline.split()
                
                # Add frames                
                frames.append( int(values[0]) )
                
                # If requesting more vectors than present
                if eigenVectorCount > len(values):
                    raise Exception("CPPTRAJ has not projected "+str(eigenVectorCount)+" vectors")
                
                # Add to vectors
                for i in range( 0, eigenVectorCount ):
                    pcs[i].append( float(values[i+1]) )
            n = n + 1
    
        # Create numpy arrays
        frames = np.array( frames )
        self.np_arrays = [ np.array( pc ) for pc in pcs ]
        
        # Set the plotting font and default size 'family' : 'Arial',
        font = {
                'weight' : 'normal',
                'size'   : 10}
                
        # Do a plot for each PCA
        for component in range( 1, len(self.np_arrays) ):
            
            # User Info        
            print "Plotting component 1 vs. "+str(component)
        
            # Normalize the data DeltaG = -kb T * [ ln( P(v1,v2) ) - ln Pmax ]
            boltzman = 0.0019872041
            temperature = 300
            
            # Plot both distribution & Energy Landscape
            for plotType in [ "energy", "distribution" ] if plotDistibution else [ "energy" ]:        
            
                # New subplot
                ax = self.getActiveAx()
                
                # Increase subplot counter
                self.subPlots += 1
    
                # Do the plotting
                if plotType == "energy":
                    
                    # Create the histogram without plotting, so we can set the units properly        
                    H, xedges, yedges = np.histogram2d(self.np_arrays[component], self.np_arrays[0], bins=100 )
                    H_normalized = H/len(self.np_arrays[0])
                    H = -1 * boltzman * temperature * (np.log( H_normalized )-np.log(np.max(H_normalized)))
                    
                    # Set max energy
                    for vec in H:
                        for val in vec:
                            if not np.isinf(val) and val > self.latestMax:
                                self.latestMax = val                   
                    
                    # Now plot the 2d histogram
                    img = ax.imshow(H,  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
                    
                    # create an axes on the right side of ax. The width of cax will be 5%
                    # of ax and the padding between cax and ax will be fixed at 0.05 inch.
                    divider = make_axes_locatable(ax)
                    cax = divider.append_axes("right", size="5%", pad=0.05)                    
                    
                    # Create colorbar
                    colorbar = plt.colorbar(img, ax=ax, cax = cax)
                    colorbar.set_label("Kcal / mol")
                    self.colorBars.append(colorbar)
                    
                elif plotType == "distribution":
            
                    # Directly do the 2d histogram of matplotlib        
                    _, _, _, img = ax.hist2d(self.np_arrays[0], self.np_arrays[component], bins=100 , rasterized=True, norm=LogNorm() )
                    colorbar = plt.colorbar(img, ax=ax)
                    colorbar.set_label("Occurances")
                    self.colorBars.append(colorbar)
            
                # Set limits if they are not specified
                if limits is False:
                    print("Calculating plot limits based on data")
                    mini = np.abs(np.min([np.min(self.np_arrays[0]), np.min(self.np_arrays[component])]))
                    maxi = np.abs(np.max([np.max(self.np_arrays[0]), np.max(self.np_arrays[component])]))
                    limits = int(math.ceil(np.max([mini, maxi])))

                print("Setting plot limits to:", limits)
                ax.set_ylim([-limits,limits])
                ax.set_xlim([-limits,limits]) 
                
                # Save the limits for the component
                with open(dataDir+"pca_limits_"+str(component), "w") as fo:
                    fo.write( str(limits) )
            
                # Set title, labels etc
                plt.legend()
                if eigenValueFile != False:
                    ax.set_xlabel("PC1 ({0:.2f}%)".format(eigenValues[0]), fontsize=12)
                    ax.set_ylabel("PC"+str(component+1)+" ({0:.2f}%)".format(eigenValues[component]), fontsize=12)
                else:
                    ax.set_xlabel("PC1", fontsize=12)
                    ax.set_ylabel("PC"+str(component+1), fontsize=12)
                
                ax.set_title( "PCA. "+plotTitleIdentifier )
                plt.rc('font', **font)   
        
                # Save pdf page if it's filled
                if self.subPlots >= (self.rows*self.columns):
                    print "Now saving to PDF. Number of plots: ",self.subPlots
                    self.savePdfPage()
                    self.subPlots = 0
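The "energy" branch above converts a 2D histogram of PC projections into a free-energy surface via DeltaG = -kB*T*(ln P - ln Pmax). A self-contained sketch of just that normalization, with the same constants but purely random stand-in projections:

# Sketch: 2D histogram -> free-energy landscape, mirroring the "energy" branch above.
import numpy as np
import matplotlib.pyplot as plt

boltzman = 0.0019872041        # kcal/(mol*K), as in the snippet above
temperature = 300.0

rng = np.random.RandomState(2)
pc1 = rng.randn(5000)          # stand-ins for projections on PC1/PC2
pc2 = 0.5 * pc1 + rng.randn(5000)

H, xedges, yedges = np.histogram2d(pc2, pc1, bins=100)
H_normalized = H / H.sum()
with np.errstate(divide='ignore'):      # empty bins give -inf before the sign flip
    G = -1 * boltzman * temperature * (np.log(H_normalized) - np.log(np.max(H_normalized)))

img = plt.imshow(G, interpolation='nearest', origin='lower',
                 extent=[yedges[0], yedges[-1], xedges[0], xedges[-1]], rasterized=True)
plt.colorbar(img, label="Kcal / mol")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.show()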
Esempio n. 55
0
    def disp(self,sampler,interp_type_during_visualization):
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
#        interval=self.interval
#        interval_dense=self.interval_dense
        markersize = 5
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        if sampler.lp_func:         
            plt.legend(['ll','wlp','ll+wlp'])
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
#        print theta
        cpa_space.theta2As(theta=theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
#        dst = self.dst
        transformed = self.transformed
        
#        src_dense=self.src_dense
#        transformed_dense=self.transformed_dense
#        tw.calc_T(src_dense, transformed_dense, mysign=1, level=level, 
#        
#        transformed_dense.gpu2cpu()

        tw.calc_T_inv(src, transformed,  level=level, 
                  int_quality=+1)            
        transformed.gpu2cpu()
        
        if interp_type_during_visualization=='gpu_linear':
            my_dtype = np.float64
        else:
            my_dtype = np.float32 # For opencv
        
        img_src = self.signal.src.cpu.reshape(tw.nRows,tw.nCols)
        img_src = CpuGpuArray(img_src.astype(my_dtype))  
        img_wrapped = CpuGpuArray.zeros_like(img_src)

        img_dst = self.signal.dst.cpu.reshape(tw.nRows,tw.nCols)
        img_dst = CpuGpuArray(img_dst)         
        
                
        if interp_type_during_visualization=='gpu_linear':
            tw.remap_fwd(transformed,img_src,img_wrapped)
        else:
            tw.remap_fwd_opencv(transformed,img_src,img_wrapped,interp_type_during_visualization)
        img_wrapped.gpu2cpu()
             
        plt.subplot(233)   
        plt.imshow(img_src.cpu,interpolation="None")
        plt.gray()
        cpa_space.plot_cells('r')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}$')

        
                
        
        plt.subplot(234)   
        plt.imshow(img_wrapped.cpu,interpolation="None")
        plt.gray()
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}\circ T^{\theta}$')
        
        plt.subplot(235)   
        plt.imshow(img_dst.cpu,interpolation="None")
        plt.gray()
        plt.title(r'$I_{\mathrm{dst}}$')
        
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        
        plt.subplot(2,6,11)
        self.tw.imshow_vx()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_x$')
        plt.subplot(2,6,12)
        self.tw.imshow_vy()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_y$')
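The figure layout in disp (log-likelihood traces, then I_src, I_src composed with T^theta, I_dst, and the velocity components) is plain plt.subplot/imshow; a stripped-down sketch of the image panels alone, with synthetic arrays standing in for the CPAB-warped images:

# Layout sketch only; np.roll stands in for the actual CPAB warp T^theta.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(3)
img_src = rng.rand(64, 64)
img_wrapped = np.roll(img_src, 5, axis=1)
img_dst = np.roll(img_src, 7, axis=1)

for i, (img, title) in enumerate([(img_src, r'$I_{\mathrm{src}}$'),
                                  (img_wrapped, r'$I_{\mathrm{src}}\circ T^{\theta}$'),
                                  (img_dst, r'$I_{\mathrm{dst}}$')]):
    plt.subplot(2, 3, i + 3)      # panels 233, 234, 235 as in disp above
    plt.imshow(img, interpolation='none')
    plt.gray()
    plt.title(title)
plt.show()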
Esempio n. 56
0
    def disp(self,sampler,ds_quiver=None):
        
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
        scale_quiver=self.scale_quiver
        if ds_quiver is None:
            ds_quiver=min([tw.nCols,tw.nRows])/32
        
        markersize = 4
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
         
        cpa_space.theta2As(theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
        dst = self.dst
        transformed = self.transformed
        
        src_dense=self.src_dense
        transformed_dense=self.transformed_dense
        tw.calc_T_fwd(src_dense, transformed_dense,level=level,int_quality=0)    
        
        transformed_dense.gpu2cpu()        


    
    
        cpa_space.theta2As(theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
        transformed.gpu2cpu()
        
        
        
        plt.subplot(233)
        
#        class TF:
#            use_hand_data   =False
        if self.kind == 'landmarks' and self.landmarks_are_lin_ordered:
            lin_order=1
        else:
            lin_order=0
        if lin_order==False:
    #        plt.plot(src.cpu[:,0],src.cpu[:,1],'go',ms=markersize)
            plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'ro',ms=markersize)
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'bo',ms=markersize)
            
            tw.config_plt(axis_on_or_off='on')
        
        else:
    #        plt.plot(src.cpu[:,0],src.cpu[:,1],'g-o',ms=markersize)
            plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'r-o',ms=markersize) 
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'b-o',ms=markersize)
               
            tw.config_plt(axis_on_or_off='on')
            
        
        plt.subplot(234)
        
        tw.quiver(scale=scale_quiver,ds=ds_quiver)
#        1/0
#        cpa_space.plot_cells()
        
#        if TF.use_hand_data == False:
#            cpa_space_gt.theta2As(theta_gt)
#            tw.update_pat(level=level_gt)          
#            tw.calc_v(level=level_gt)
#            tw.v_dense.gpu2cpu() 
        
        if lin_order:
            plt.plot(src.cpu[:,0],src.cpu[:,1],'g-o',ms=markersize)
            plt.plot(dst.cpu[:,0],dst.cpu[:,1],'b-o',ms=markersize)
    #        plt.plot(transformed.cpu[:,0],transformed.cpu[:,1],'r-o',ms=markersize) 
        tw.config_plt(axis_on_or_off='on')
    
        
        if lin_order== False:
            plt.subplot(234)
            tw.quiver(scale=scale_quiver)
            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
            plt.title(r'$v^\theta$',
                       fontsize=20)
    
        else:
            plt.subplot(235)
            tw.imshow_vx()
            plt.title(r'$v^\theta_{\mathrm{horizontal}}$',
                      fontsize=20)

            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
            plt.subplot(236)
            tw.imshow_vy()
            plt.title(r'$v^\theta_{\mathrm{vertical}}$',
                       fontsize=20)
            cpa_space.plot_cells()
            tw.config_plt(axis_on_or_off='on')
     
        
        if self.kind == 'landmarks' and self.landmarks_are_lin_ordered:
            plt.subplot(233)
            plt.legend([r'$T^\theta(\mathrm{src})$',r'$\mathrm{dst}$'],loc='lower right',
                        fontsize=20)


            plt.subplot(234)
            plt.legend([r'$\mathrm{src}$',r'$\mathrm{dst}$'],loc='lower right',
                        fontsize=20)
Esempio n. 57
0
def plot_alt_control(v):
    newfig("Altitude Control", "time [s]", "Alt [m]")
    plt.plot(v.control.t, v.control.alt_cmd, label="cmd")
    plt.plot(v.state.t[1:], v.state.up[1:], label="meas (Vicon)")
    plt.legend()
Esempio n. 58
0
import glob

from pylab import plt

import viscojapan as vj

def plot(fn, marker, label):
    result_file = fn
    fid = vj.inv.ResultFileReader(result_file)
    epochs = fid.epochs
    rms = fid.rms_inland_at_epoch

    plt.plot(epochs, rms, marker, label=label)

# coupled model
result_file = '../../outs/nrough_06_naslip_11.h5'
plot(result_file, 'o-', 'SDco = 0.2')

result_file = '../../outs/nrough_06_naslip_10.h5'
plot(result_file, 'o-', 'SDco = 0.5')


plt.xlabel('days after the mainshock')
plt.ylabel('RMS misfit (m)')

plt.legend(loc=0,prop={'size':10})
plt.savefig('mis_fit_comparison.png')
plt.show()