import numpy as np
import pandas as pd  # needed for pd.ExcelFile below
import matplotlib.backends.backend_pdf as bpdf
from sklearn.metrics import mean_squared_error
from math import sqrt
import openpyxl as opxl
#from LinearAndMultiLinearRegression import combineDay1Day2TestDay3
#from LinearAndMultiLinearRegression import combineDay2Day3TestDay1
#from LinearAndMultiLinearRegression import combineDay1Day3TestDay2

dataCompete = pd.ExcelFile(
    'C:/MASTERS/Project/slidingWindow/combinedData50000.xlsx')
wsday1 = dataCompete.parse('Sheet1')[0:50006]
#wsday2 =  dataCompete.parse('Sheet2')[0:43168]
#wsday3 =  dataCompete.parse('Sheet3')[0:43037]
wb = opxl.Workbook()
pdfRegressionParametrs = bpdf.PdfPages(
    "Analysis Plots - RegressionParametersCombinedData50000Day1.pdf")
for x in range(1, 9):
    independentDay1 = np.array(wsday1[[
        "input_heat_energy" + str(x), "input_wall_temp" + str(x),
        "input_mat_temp" + str(x)
    ]])
    dependentDay1 = np.array(wsday1["output_section_temp" + str(x)])

    relation = "output_section_temp " + str(x) + " ~ input_heat_energy" + str(
        x), " input_wall_temp" + str(x), " input_mat_temp" + str(x)

    sheet = wb.create_sheet(title=str("output_section_temp " + str(x)))

    sheet.cell(row=1, column=1).value = "RMS"
    sheet.cell(row=1, column=2).value = "R square"
    sheet.cell(row=1, column=3).value = "Adj R square"
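
    # The fragment is cut off here. A hedged sketch of how the imported
    # mean_squared_error / sqrt were presumably used to fill the RMS and
    # R-square columns, assuming an sklearn LinearRegression fit (the
    # actual model is not shown in this fragment):
    from sklearn.linear_model import LinearRegression
    model = LinearRegression().fit(independentDay1, dependentDay1)
    predicted = model.predict(independentDay1)
    sheet.cell(row=2, column=1).value = sqrt(
        mean_squared_error(dependentDay1, predicted))
    sheet.cell(row=2, column=2).value = model.score(independentDay1,
                                                    dependentDay1)  # R square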
"""
Created on Wed Aug 26 12:21:33 2020
"""

import tools
import matplotlib.backends.backend_pdf as backend_pdf

fd_data = "./data/"
tools.mkdir(fd_data)

# Input
fname_BD_0 = fd_data + "solution_n_6_Q_0.100_10.00_0.100_R_1.000_BD_0_2.5_P0_1.0000.txt"
fname_BD_1 = fd_data + "solution_n_6_Q_0.100_10.00_0.100_R_1.000_BD_1_2.5_P0_1.0000.txt"

# Output
out_pdf_BD = fd_data + "analyze_BD.pdf"
pdf_BD = backend_pdf.PdfPages(out_pdf_BD)

# Plot
FS_PLOT = 22
SHIFT_LEGEND_X = 1.5
"""
Load data
"""
t, bd_0_pe_kf, bd_0_pn_kf, bd_0_err_p_kf, pe_ref, pn_ref, \
bd_0_err_p_kf, bd_0_sqt_cov_p_kf  \
                                        = tools.extract_da_from_sol(fname_BD_0)
t, bd_1_pe_kf, bd_1_pn_kf, bd_1_err_p_kf, pe_ref, pn_ref, \
bd_1_err_p_kf, bd_1_sqt_cov_p_kf  \
                                        = tools.extract_da_from_sol(fname_BD_1)
"""
Plot
import matplotlib.pyplot as plt
from math import sin, pi
import matplotlib.backends.backend_pdf as pdf
import numpy


def plot_sin_map(r: float, x: float, iterations: int):
    iterations_list = []
    results_list = []
    for i in range(iterations):
        x = r / 4 * sin(pi * x)
        results_list.append(x)
        iterations_list.append(i)

    plt.xlabel("Iterations")
    plt.ylabel(f"R = {r}")
    plt.plot(iterations_list, results_list)
    return plt


if __name__ == "__main__":

    pdf = pdf.PdfPages("sin_map_output.pdf")
    for i in numpy.arange(0.1, 5.0, 0.1):
        pdf.savefig(plot_sin_map(i, .02, 30).gcf())
        plt.clf()
    pdf.close()
Example #4
        # minor ticks, too...
        if conLvl == 0:
            conDisp_plots[conLvl, disp].set_xlabel('sf center (cpd)',
                                                   fontsize=20)
        if disp == 0:
            conDisp_plots[conLvl, disp].set_ylabel('Response (ips)',
                                                   fontsize=20)
        # remove axis from top and right, set ticks to be only bottom and left
        conDisp_plots[conLvl, disp].spines['right'].set_visible(False)
        conDisp_plots[conLvl, disp].spines['top'].set_visible(False)
        conDisp_plots[conLvl, disp].xaxis.set_ticks_position('bottom')
        conDisp_plots[conLvl, disp].yaxis.set_ticks_position('left')
conDisp_plots[0, 2].text(0.5,
                         1.2,
                         'Normalization pool responses',
                         fontsize=16,
                         horizontalalignment='center',
                         verticalalignment='center',
                         transform=conDisp_plots[0, 2].transAxes)

### now save all figures (sfMix contrasts, details, normalization stuff)
#pdb.set_trace()
allFigs = [fNorm]
saveName = "/normResp_%d.pdf" % (which_cell)
full_save = os.path.dirname(str(save_loc + 'normSandbox/'))
pdfSv = pltSave.PdfPages(full_save + saveName)
for fig in range(len(allFigs)):
    pdfSv.savefig(allFigs[fig])
    plt.close(allFigs[fig])
pdfSv.close()
Example #5
#model_path and file
mpath = '/group_workspaces/jasmin2/gassp/eeara/model_runs/u-ax424/All_time_steps/'

#filename='PEM-Tropics-A_DC8_so2_Tahiti.stat'
#model_file='All_time_steps_m01s34i072_mass_fraction_of_sulfur_dioxide_in_air.nc'
model_file = 'All_time_steps_m01s34i081_mass_fraction_of_hydroxyl_radical_in_air.nc'

mpath = mpath + model_file

title = [
    'ialt', 'N', 'min', 'max', 'mean', 'stddev', '5%', '25%', 'median', '75%',
    '95%'
]
check = 'mean'
pdf = mb.PdfPages(home_path + 'oh_compare.pdf')
n_plot = 5
grid_size = (n_plot, 1)

temp = 0


#f=open(path+filename,'r')
#data=f.read()
def get_latlon(filename):
    with open(filename) as f:
        content = f.readlines()
    # print(content[3])  # line number 3 has the latitude-longitude data
    a = content[3]

    #a=np.fromstring(content[3], dtype = int)
Example #6
        return 0.0


for i in range(n):
    #theta_prime = theta + np.random.standard_normal()
    # Draw the candidate from a uniform random number generator (the
    # standard-normal proposal commented above could be used instead).
    theta_prime = np.random.uniform(-5, 10)
    r = np.random.rand()

    if (g(theta_prime) / g(theta) > r):
        theta = theta_prime
    else:
        M.append(theta_prime)
    L.append(theta)
pm = pf.PdfPages("Q9.pdf")

plt.figure(figsize=(8, 5))
x = np.arange(1, len(L) + 1, 1)
plt.subplot(311)
plt.plot(x, L, marker='o', markersize=2.5)
plt.xlim(1, 100)
plt.ylabel("$\\theta$")
plt.title("Markov chains for 3 different number of steps")

plt.subplot(312)
plt.plot(x, L, marker='o', markersize=2.5)
plt.xlim(1, 500)
plt.ylabel("$\\theta$")

plt.subplot(313)
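
The loop above uses the standard Metropolis rule: accept the candidate when
the density ratio exceeds a uniform draw, which is equivalent to accepting
with probability min(1, ratio). A self-contained sketch of that step with an
illustrative Gaussian target (not the assignment's g()):

import numpy as np

def metropolis(g, theta0, n_steps, width=1.0, seed=0):
    rng = np.random.default_rng(seed)
    theta = theta0
    chain = [theta]
    for _ in range(n_steps):
        theta_prime = theta + width * rng.standard_normal()
        if g(theta_prime) / g(theta) > rng.random():
            theta = theta_prime       # accept the candidate
        chain.append(theta)           # a rejection repeats the old state
    return np.array(chain)

target = lambda t: np.exp(-0.5 * t ** 2)   # unnormalized standard normal
samples = metropolis(target, theta0=0.0, n_steps=1000)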
Example #7

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as pf

noise = np.loadtxt(
    r'F:\Numerical-Techniques-Course-master\Computational Physics 2020\Assignment 3(Fourier Transform)\noise.txt'
)
Psdt = np.zeros(51)
N = len(noise)

k = 10  # number of segments into which the sample is divided
m = int(N / k)  # number of points in each segment

pm = pf.PdfPages("Q10.pdf")

freq1 = 2 * np.pi * np.fft.fftfreq(N, d=0.01)
FFT1 = np.fft.fft(noise, norm='ortho')
PSD1 = FFT1 * np.conj(FFT1)
plt.plot(noise)
plt.xlabel('time (arb.unit)')
plt.ylabel('Noise')
plt.title('noise signal')
pm.savefig()
plt.show()

ind1 = np.argsort(freq1)
plt.plot(freq1[ind1], FFT1.real[ind1], marker='o', markersize=3.0)
plt.xlabel('freq (arb.unit)')
plt.ylabel('F(w)')
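
The k and m defined above set up Bartlett-style segment averaging, which the
visible fragment never reaches. A minimal sketch of that averaging step,
reusing the same noise array and k equal-length segments:

psd_avg = np.zeros(m)
for s in range(k):
    seg = noise[s * m:(s + 1) * m]
    F = np.fft.fft(seg, norm='ortho')
    psd_avg += (F * np.conj(F)).real
psd_avg /= k  # averaging k periodograms reduces the estimator's variance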
Example #8
def madspikes(dfin,
              flag=None,
              isday=None,
              colhead=None,
              undef=-9999,
              nscan=15 * 48,
              nfill=1 * 48,
              z=7,
              deriv=2,
              swthr=10.,
              plot=False):
    """
    Spike detection for using a moving median absolute difference filter.
    Used with Eddy vovariance data in Papale et al. (Biogeosciences, 2006).

    Parameters
    ----------
    dfin : pandas.Dataframe or numpy.array
        time series of data where spike detection with MAD should be applied.

        `dfin` can be a pandas.Dataframe.

        `dfin` can also be a numpy array. In this case `colhead` must be given.
        MAD will be applied along axis=0, i.e. on each column of axis=1.
    flag : pandas.Dataframe or numpy.array, optional
        flag Dataframe or array has the same shape as dfin. Non-zero values in
        `flag` will be treated as missing values in `dfin`.

        If `flag` is numpy array, `df.columns.values` will be used as column heads.
    isday : array_like of bool, optional
        True when it is day, False when night. Must have the same length as dfin.shape[0].

        If `isday` is not given, `dfin` must have a column with head 'SW_IN' or
        starting with 'SW_IN'. `isday` will then be `dfin['SW_IN'] > swthr`.
    colhead : array_like of str, optional
        column names if `dfin` is numpy array.
    undef : float, optional
        values having `undef` value are treated as missing values in `dfin` (default: -9999)

        np.nan is not allowed.
    nscan : int, optional
        size of moving window to calculate mad in time steps (default: 15*48)
    nfill : int, optional
        step size of moving window to calculate mad in time steps (default: 1*48)

        mad will be calculated in `nscan` time window. Resulting mask will be applied
        only in `nfill` window in the middle of the `nscan` window. Then `nscan` window
        will be moved by `nfill` time steps.
    z : float, optional
        Input is allowed to deviate maximum `z` standard deviations from the median (default: 7)
    deriv : int, optional
        0: Act on raw input.

        1: Use first derivatives.

        2: Use 2nd derivatives (default).
    swthr : float, optional
        Threshold to determine daytime from incoming shortwave radiation if `isday` not given (default: 10).
    plot : bool, optional
        True: data and spikes are plotted into madspikes.pdf (default: False).

    Returns
    -------
    pandas.Dataframe or numpy array
        flags, 0 everywhere except detected spikes set to 2.

    History
    -------
    Written,    Matthias Cuntz & Tino Rau, 2008
    Maintained, Arndt Piayda,   Aug 2014
    Modified,   Matthias Cuntz, Apr 2020 - input can be pandas Dataframe or numpy array(s)
                                         - removed iteration
                Matthias Cuntz, May 2020 - numpy docstring format
    """
    # numpy or panda
    if isinstance(dfin, (np.ndarray, np.ma.MaskedArray)):
        isnumpy = True
        istrans = False
        assert colhead is not None, 'colhead must be given if input is numpy.ndarray.'
        if dfin.shape[0] == len(colhead):
            istrans = True
            df = pd.DataFrame(dfin.T, columns=colhead)
        elif dfin.shape[1] == len(colhead):
            df = pd.DataFrame(dfin, columns=colhead)
        else:
            raise ValueError(
                'Length of colhead must be number of columns in input array. len(colhead)='
                + str(len(colhead)) + ' shape(input)=(' + str(dfin.shape[0]) +
                ',' + str(dfin.shape[1]) + ').')
    else:
        isnumpy = False
        istrans = False
        assert isinstance(
            dfin, pd.core.frame.DataFrame
        ), 'Input must be either numpy.ndarray or pandas.DataFrame.'
        df = dfin.copy(deep=True)

    # Incoming flags
    if flag is not None:
        if isinstance(flag, (np.ndarray, np.ma.MaskedArray)):
            fisnumpy = True
            fistrans = False
            if flag.shape[0] == len(df):
                ff = pd.DataFrame(flag, columns=df.columns.values)
            elif flag.shape[1] == len(df):
                fistrans = True
                ff = pd.DataFrame(flag.T, columns=df.columns.values)
            else:
                raise ValueError(
                    'flag must have same shape as data array. data: ({:d},{:d}); flag: ({:d},{:d})'
                    .format(dfin.shape[0], dfin.shape[1], flag.shape[0],
                            flag.shape[1]))
            ff = ff.set_index(df.index)
        else:
            fisnumpy = False
            fistrans = False
            assert isinstance(
                flag, pd.core.frame.DataFrame
            ), 'Flag must be either numpy.ndarray or pandas.DataFrame.'
            ff = flag.copy(deep=True)
    else:
        fisnumpy = isnumpy
        fistrans = istrans
        # flags: 0: good; 1: input flagged; 2: output flagged
        ff = df.copy(deep=True).astype(int)
        ff[:] = 0
        ff[df == undef] = 1
        ff[df.isna()] = 1

    # day or night
    if isday is None:
        sw_id = ''
        for cc in df.columns:
            if cc.startswith('SW_IN'):
                sw_id = cc
                break
        assert sw_id, "Global radiation with name 'SW_IN' or starting with 'SW_IN' must be in input if isday not given."
        isday = df[
            sw_id] > swthr  # Papale et al. (Biogeosciences, 2006): 20; REddyProc: 10
    if isinstance(isday, (pd.core.series.Series, pd.core.frame.DataFrame)):
        isday = isday.to_numpy()
    isday[isday == undef] = np.nan
    ff[np.isnan(isday)] = 1

    # parameters
    nrow, ncol = df.shape
    half_scan_win = nscan // 2
    half_fill_win = nfill // 2

    # calculate dusk and dawn times and separate in day and night
    isdawn = np.zeros(nrow, dtype=bool)
    isdusk = np.zeros(nrow, dtype=bool)
    dis = isday.astype(int) - np.roll(isday, -1).astype(int)  # .astype(bool)
    isdawn[:-1] = np.where(dis[:-1] == -1, True, False)
    isdusk[:-1] = np.where(dis[:-1] == 1, True, False)
    isddday = isdawn
    tmp = np.roll(isdusk, 1)
    isddday[1:] += tmp[1:]  # start and end of day
    isddnight = isdusk
    tmp = np.roll(isdawn, 1)
    isddnight[1:] += tmp[1:]  # start and end of night

    # iterate over each column of data
    if plot:
        import matplotlib.pyplot as plt
        import matplotlib.backends.backend_pdf as pdf
        pd.plotting.register_matplotlib_converters()
        pp = pdf.PdfPages('madspikes.pdf')

    cols = list(df.columns)
    for hcol in df.columns:

        if hcol.startswith('SW_IN'): continue

        data = df[hcol]
        dflag = ff[hcol]

        # get day and night data
        data_day = data.copy(deep=True)
        data_day[~(isday | isddday) | (dflag != 0) | (data == undef)] = np.nan
        data_night = data.copy(deep=True)
        data_night[~(~isday | isddnight) | (dflag != 0) |
                   (data == undef)] = np.nan

        # iterate over fill window
        for j in range(half_fill_win, nrow - 1, 2 * half_fill_win):
            j1 = max(j - half_scan_win - 1, 0)
            j2 = min(j + half_scan_win + 1, nrow)
            fill_start = max(j - half_fill_win, 1)
            fill_end = min(j + half_fill_win, nrow - 1)

            dd = data_day[j1:j2].to_numpy()
            day_flag = mad(np.ma.masked_array(data=dd, mask=np.isnan(dd)),
                           z=z,
                           deriv=deriv)
            ff.iloc[fill_start:fill_end, cols.index(hcol)] += np.where(
                day_flag[fill_start - j1 - 1:fill_end - j1 - 1], 2, 0)

            nn = data_night[j1:j2]
            night_flag = mad(np.ma.masked_array(data=nn, mask=np.isnan(nn)),
                             z=z,
                             deriv=deriv)
            ff.iloc[fill_start:fill_end, cols.index(hcol)] += np.where(
                night_flag[fill_start - j1 - 1:fill_end - j1 - 1], 2, 0)

        if plot:
            fig = plt.figure(1)
            sub = fig.add_subplot(111)
            valid = ff[hcol] == 0
            l1 = sub.plot(data[valid], 'ob')
            l3 = sub.plot(data[ff[hcol] == 2], 'or')
            plt.title(hcol)
            pp.savefig(fig)
            plt.close(fig)

    # Finish

    if plot:
        pp.close()

    if fisnumpy:
        if fistrans:
            return ff.to_numpy().T
        else:
            return ff.to_numpy()
    else:
        return ff
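
At its core, every madspikes window applies a robust z-score test. A
self-contained sketch of that criterion (not the package's actual mad()
helper, which also supports first and second derivatives):

import numpy as np

def mad_flags(x, z=7):
    # flag points deviating more than z robust standard deviations from
    # the median; 1.4826 rescales the MAD to estimate sigma
    med = np.nanmedian(x)
    sigma = 1.4826 * np.nanmedian(np.abs(x - med))
    return np.abs(x - med) > z * sigma

data = np.random.default_rng(1).normal(size=480)
data[100] = 25.0                         # inject one spike
print(np.flatnonzero(mad_flags(data)))   # -> [100]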
Example #9
mygrey = ListedColormap(mgrey(np.linspace(0.75, 0.95, 25)))
myPurples = ListedColormap(mPurples(np.linspace(0.75, 0.95, 25)))
myBlues = ListedColormap(mBlues(np.linspace(0.75, 0.95, 25)))
myGreens = ListedColormap(mGreens(np.linspace(0.75, 0.95, 25)))
myOranges = ListedColormap(mOranges(np.linspace(0.75, 0.95, 25)))
myReds = ListedColormap(mReds(np.linspace(0.75, 0.95, 25)))
mypink = ListedColormap(mpink(np.linspace(0.75, 0.95, 25)))
myWistia = ListedColormap(mWistia(np.linspace(0.75, 0.95, 25)))

mycon = [
    mygrey, myPurples, myBlues, myGreens, myOranges, myReds, mypink, myWistia
]
mycol = ['grey', 'purple', 'blue', 'green', 'orange', 'red', 'pink', 'orange']
# #--------------------------------------------------------------------
#
pdf = fpdf.PdfPages("Correlations_allgals_2Dcontours_GMC%s.pdf" %
                    namegmc)  # type: PdfPages
#
# # Changing the transparency value of each galaxy.
myalpha = np.arange(0.9, 0.3, -0.08)
xticks5 = [8.3, 8.4, 8.5, 8.6, 8.7]
nlevs = 3

# print "Starting for"
for k in range(5):  # not plotting metallicity
    fig, axs = plt.subplots(4,
                            2,
                            sharex='col',
                            figsize=(8, 10),
                            gridspec_kw={'hspace': 0})
    plt.subplots_adjust(wspace=0.3)
    fig.suptitle('All galaxies - Overlapping HIIregions and GMCs',
Example #10
n = 10000
k = 0

np.random.seed(545)
randm = np.zeros(n)

t = np.zeros(n)
r = np.zeros(n)
for i in range(n):
    r[i] = x
    x = (a * x + c) % (m)
    t[i] = i
    if (r[i] == seed):  #checking if seed repeats or not
        k = k + 1

pm = pf.PdfPages("Q7.pdf")

plt.scatter(t, r, c='k', s=1.5)
plt.axhline(y=seed, linewidth=2.0, label='seed counts -' + str(k))
plt.xlabel("$i$")
plt.ylabel("Random Numbers")
plt.title('Random distribution with non repetitive seed')
plt.legend()
pm.savefig()
plt.show()

a1 = 4452641
c1 = 1013904
m1 = 4295
x1 = m1 / 2
seed1 = x1
Example #11
import tools
import matplotlib.backends.backend_pdf as backend_pdf

fd_data = "./data/"
tools.mkdir(fd_data)

# Input
fname_tau_001 = fd_data + "solution_n_6_Q_0.100_0.10_0.100_R_1.000_BD_1_2.5_P0_1.0000.txt"
fname_tau_01 = fd_data + "solution_n_6_Q_0.100_1.00_0.100_R_1.000_BD_1_2.5_P0_1.0000.txt"
fname_tau_1 = fd_data + "solution_n_6_Q_0.100_10.00_0.100_R_1.000_BD_1_2.5_P0_1.0000.txt"
fname_tau_10 = fd_data + "solution_n_6_Q_0.100_100.00_0.100_R_1.000_BD_1_2.5_P0_1.0000.txt"

# Output
out_pdf_Q_tau = fd_data + "analyze_Q_tau.pdf"
pdf_Q_tau = backend_pdf.PdfPages(out_pdf_Q_tau)

# Plot
FS_PLOT = 22
SHIFT_LEGEND_X = 1.5
"""
Load data
"""
t, tau_01_pe_kf, tau_01_pn_kf, tau_01_err_p_kf, pe_ref, pn_ref, \
tau_01_err_p_kf, tau_01_sqt_cov_p_kf  \
                                        = tools.extract_da_from_sol(fname_tau_001)
t, tau_1_pe_kf, tau_1_pn_kf, tau_1_err_p_kf, pe_ref, pn_ref, \
tau_1_err_p_kf, tau_1_sqt_cov_p_kf  \
                                        = tools.extract_da_from_sol(fname_tau_01)
t, tau_10_pe_kf, tau_10_pn_kf, tau_10_err_p_kf, pe_ref, pn_ref, \
tau_10_err_p_kf, tau_10_sqt_cov_p_kf  \
Example #12
def statistics(args):
    data = common.getdict(args.datafile)
    #Create a new figure. Each page in the pdf file is a figure
    fig = plt.figure(1, figsize=(8, 6), dpi=400)
    #Create a subplot axis
    ax = fig.add_subplot(111)
    #Set the title, x and y labels of this page's plot.
    ax.set_title("Correlation Function, Davis and Peebles Estimator")
    plt.xlabel("Correlation distance, Mpc/h")
    plt.ylabel("Correlation")

    fig2 = plt.figure(2, figsize=(8, 6), dpi=400)
    ax2 = fig2.add_subplot(111)
    ax2.set_title("Correlation Function, Hamilton Estimator")
    plt.xlabel("Correlation distance, Mpc/h")
    plt.ylabel("Correlation")

    fig3 = plt.figure(3, figsize=(8, 6), dpi=400)
    ax3 = fig3.add_subplot(111)
    plt.ylabel("Correlation")
    plt.xlabel("Correlation distance, Mpc/h")
    ax3.set_title("Correlation Function, Landy and Szalay Estimator")

    fig4 = plt.figure(4, figsize=(8, 6), dpi=400)
    ax4 = fig4.add_subplot(111)
    plt.ylabel("Correlation+1")
    plt.xlabel("Correlation distance, Mpc/h")
    ax4.set_title(
        "Correlation Function from random points, modified Davis and Peebles Estimator"
    )

    fig5 = plt.figure(5, figsize=(8, 6), dpi=400)
    ax5 = fig5.add_subplot(111)
    plt.ylabel("Correlation")
    plt.xlabel("Correlation distance, Mpc/h")
    ax5.set_title("Correlation Function, average. Landy and Szalay estimator.")
    ax5.set_xscale('log', nonposx='clip')
    ax5.set_yscale('log', nonposy='clip')

    fig6 = plt.figure(6, figsize=(8, 6), dpi=400)
    ax6 = fig6.add_subplot(111)
    plt.ylabel("Correlation Residuals")
    plt.xlabel("Correlation distance, Mpc/h")
    ax6.set_title("Correlation Function residuals")
    ax6.set_xscale('log', nonposx='clip')
    #ax6.set_yscale('log', nonposx = 'clip')

    #Set all the axes to log scale

    ax.set_xscale("log", nonposx='clip')
    ax2.set_xscale("log", nonposx='clip')
    ax3.set_xscale("log", nonposx='clip')
    ax4.set_xscale("log", nonposx='clip')
    ax.set_yscale("log", nonposy='clip')
    ax2.set_yscale("log", nonposy='clip')
    ax3.set_yscale("log", nonposy='clip')
    ax4.set_yscale("log", nonposy='clip')
    ys = []
    maxY = 10**1
    minY = 10**-2
    maxRandom = 5 * 10**1
    minRandom = 10**-1
    numboxes = 0
    for box in data['raw_runs'][0].items():
        #The .items() function returns a tuple (Key, value)
        #That's why there are all the box[1]'s running around.
        if box[0] != "ALL_BOXES":
            #Each box has its own data associated with it, so first we plot ALL the data
            plt.figure(1)
            plt.plot(box[1]["rs"], box[1]["Davis_Peebles"], '.')
            plt.figure(2)
            plt.plot(box[1]["rs"], box[1]["Hamilton"], '.')
            plt.figure(3)
            plt.plot(box[1]["rs"], box[1]["Landy_Szalay"], '.')
            plt.figure(4)
            plt.plot(box[1]["rs"],
                     [x + 1 for x in box[1]["Random_Correlation"]], '.')
            ys.append(box[1]["Landy_Szalay"])
            #minY, maxY = updateMinMax(minY, maxY, box[1]["Davis_Peebles"])
            #minY, maxY = updateMinMax(minY, maxY, box[1]["Hamilton"])
            #minY, maxY = updateMinMax(minY, maxY, box[1]["Landy_Szalay"])
            #minRandom, maxRandom = updateMinMax(minRandom, maxRandom, box[1]["Random_Correlation"])
            #This was an attempt to give all of the graphs the same scales. I don't know why it didn't work...
            numboxes += 1
            #Here we count the number of boxes so that we know whether we can use the standard deviation
            #for error bars
    power = lambda r, r0, gamma: (r / r0)**(-gamma)
    #power law for estimating correlation and its relation to distance.
    #Used in the curvefit scipy function

    allys = list(zip(*ys))
    #This list contains tuples of y-values for a certain x value for use in calculating the standard
    #deviation easily. Format [(10,9.8,10.25),(7.776,7.90,7.745) etc] except with possibly more values per tuple
    #and definitely way more tuples.

    #Calculate the 95% confidence interval, two times the standard deviation of all the ys for a certain x.

    #yerrs = jackknife(allys)

    ys = data['raw_runs'][0]["ALL_BOXES"]["Landy_Szalay"]
    xs = data['raw_runs'][0]["ALL_BOXES"]["rs"]
    xerrs = [
        data['raw_runs'][0]["ALL_BOXES"]["dr_left"],
        data['raw_runs'][0]["ALL_BOXES"]["dr_right"]
    ]
    #Take the raw xs and ys from the dataset that was averaged over all of the boxes.
    if numboxes == 1:
        popt, pcov = scipy.optimize.curve_fit(
            power, xs, ys, p0=(10, 1.5))  #,sigma=yerrs,absolute_sigma=True)
        #When we only have one box, we need to tell the curve fit that all of the errors are "The Same"
        yerrs = [
            300 * ys[i] *
            math.sqrt(data['raw_runs'][0]["ALL_BOXES"]["DDs"][i]) /
            data['raw_runs'][0]["ALL_BOXES"]["DDs"][i] for i in range(len(ys))
        ]
    else:
        yerrs = [np.std(y) for y in allys]
        popt, pcov = scipy.optimize.curve_fit(power,
                                              xs,
                                              ys,
                                              p0=(10, 1.5),
                                              sigma=yerrs,
                                              absolute_sigma=True)
        #More than one box means that the standard deviation errors are correct.

    print(yerrs)
    # print(pcov)
    # print(popt)
    plt.figure(5)
    dot = plt.errorbar(xs,
                       ys,
                       yerr=yerrs,
                       xerr=xerrs,
                       fmt='.',
                       label="Averaged Correlation Data")
    model = [power(x, *popt) for x in xs]
    line = plt.plot(
        xs,
        model,
        label=
        "Model fit: $(r/r_0)^{{-\gamma}}$\n$r_0 = {:.3f}$\n$\gamma = {:.3f}$".
        format(popt[0], popt[1]))
    #We need {{ and }} to escape the .format thingy and pass { and } to LaTeX

    plt.legend()
    plt.figure(6)
    residuals = [y / mod for y, mod in zip(ys, model)]

    #Since a residual data point is y / model, the relative error in the residual
    #would be sqrt( relative error in model ^2 + relative error in point ^2);
    #the model error term is taken as zero here, so only the point error propagates.
    residuals_errors = [
        res * (dy / y) for y, dy, res in zip(ys, yerrs, residuals)
    ]
    plt.errorbar(xs,
                 residuals,
                 yerr=residuals_errors,
                 fmt='.',
                 label="Residuals")
    plt.plot(xs, [1 for x in xs], label="Model")
    plt.legend()
    ax6.axis([0, max(xs) + 1, 0, 2])  #min(residuals)*.9,max(residuals)*1.1])
    plt.figure(5)
    #Here, we set the scale of each axis. We need a better method of dynamically deciding what the bounds should be
    ax5.axis([min(xs) - 0.15, max(xs) + 3, minY, maxY])
    plt.figure(4)
    ax4.axis([min(xs) - 0.15, max(xs) + 3, minRandom, maxRandom])
    plt.figure(3)
    ax3.axis([min(xs) - 0.15, max(xs) + 3, minY, maxY])
    plt.figure(2)
    ax2.axis([min(xs) - 0.15, max(xs) + 3, minY, maxY])
    plt.figure(1)
    ax.axis([min(xs) - 0.15, max(xs) + 3, minY, maxY])
    #plt.legend([dot,line],["Data","Best fit"])
    basefilename = args.datafile.replace("rawdata.json", "")
    with open(
            "output/statistics.csv", 'a'
    ) as outfile:  #WARNING:: I usually don't like using static file paths!!!!!!!!
        #ANOTHER WARNING: every time you run stats it appends a line to this file. So, be careful and only use the
        #statistics file after cleaning it and doing a very controlled run.
        line = ""
        for datapt in [float(x) for x in popt]:
            line = line + str(datapt) + ","
        for error in np.sqrt(np.diag(pcov)):
            line = line + str(error) + ','
        line = line + str(data["settings"]["Divide"]["x_box_size"] *
                          data["settings"]["Divide"]["y_box_size"] *
                          data["settings"]["Divide"]["z_box_size"])
        outfile.write(line + '\n')
    with pdfback.PdfPages(basefilename + 'graphs.pdf') as pdf:
        pdf.savefig(fig)
        pdf.savefig(fig2)
        pdf.savefig(fig3)
        pdf.savefig(fig4)
        pdf.savefig(fig5)
        pdf.savefig(fig6)
Example #13
    synthcallsvstime(runs, vals)
    plt.subplot(2, 3, 6)
    synthcallsvsdepth(runs, vals)
    # Growth function only needs the -o 1 version
    plt.subplot(2, 3, 4)
    growthvsdepth([runs[0]], [vals[0]])
    plt.suptitle(title)


if __name__ == '__main__':
    titles = ["dt4", "dt16", "dt44", \
              "svm3-1", "svm3-2", "svm3-3", "svm3-4", \
              "svm4-1", "svm4-2", "svm4-3", "svm4-4", "svm4-5", \
              "svm5-1", "svm5-2", "svm5-3", "svm5-4", "svm5-5", "svm5-6"]
    i = 0
    for title in titles:
        print("building plot for", title)
        plt.figure(num=i, figsize=(16, 10))
        prefix = "orig/out/" + title + "_s0_"
        suffix = ".out"
        vals = ["o1", "o.5", "o.375", "o.25", "o.125", "o.0625", "a1..03"]
        plotbench(title, prefix, vals, suffix)
        plt.tight_layout(pad=3)
        i += 1
    #plt.show()
    print("writing to output.pdf")
    pdf = be_pdf.PdfPages("output.pdf")
    for n in range(i):
        pdf.savefig(plt.figure(n))
    pdf.close()
Example #14
        ax[j, jj].set_xlim((-5, maxResp))
        ax[j, jj].set_ylim((-5, 1.1 * maxResp))
        ax[j, jj].set_title('Suppression index: %.2f|%.2f' % (hmm[0], rel_c50))
        ax[j, jj].legend(fontsize='x-small')

fSuper.suptitle(
    'Superposition: %s #%d [%s; f1f0 %.2f; szSupr[dt/md] %.2f/%.2f; oriBW|CV %.2f|%.2f; tfBW %.2f]'
    % (cellType, which_cell, cellName, f1f0_rat, suprDat, suprMod, oriBW,
       oriCV, tfBW))

if fitList is None:
    save_name = 'cell_%03d.pdf' % which_cell
else:
    save_name = 'cell_%03d_mod%s.pdf' % (which_cell,
                                         hf.fitType_suffix(fitType))
pdfSv = pltSave.PdfPages(str(save_locSuper + save_name))
pdfSv.savefig(fSuper)
pdfSv.close()

#########
### Finally, add this "superposition" to the newest
#########

if fitList is None:
    from datetime import datetime
    suffix = datetime.today().strftime('%y%m%d')
    super_name = 'superposition_analysis_%s.npy' % suffix
else:
    super_name = 'superposition_analysis_mod%s.npy' % hf.fitType_suffix(
        fitType)
Example #15
#ax2.hist(rho,numbins,color='g',alpha=0.8)

fig2 = plt.figure(figsize=(15, 5), dpi=400)
hs = fig2.add_subplot(131)
hs2 = fig2.add_subplot(132)
hs3 = fig2.add_subplot(133)
numbins = 50
hs.hist(xs, numbins, color='g', alpha=0.8)
hs2.hist(ys, numbins, color='g', alpha=0.8)
hs3.hist(zs, numbins, color='g', alpha=0.8)
hs.set_title('Distribution of Galaxies by X position')
hs2.set_title('Distribution of Galaxies by Y position')
hs3.set_title('Distribution of Galaxies by Z position')

#ax = fig.add_subplot(131, projection='hammer')
#ax.scatter(thetas, phis, c='r', marker = 'o')

#ax2 = fig.add_subplot(132, projection='3d')
#ax2.scatter(xs, ys, zs, c='r', marker = 'o')

#ax3 = fig.add_subplot(133)
#ax3.scatter(phis, thetas, c = 'r', marker = 'o')

#plt.show()
print("Saving plots...")
with pdfback.PdfPages('out.pdf') as pdf:
    pdf.savefig(fig)
    pdf.savefig(fig2)

print("Done!")
Example #16
    def save_multiple_plots(self, filename_pdf, figures):
        pdf = mpb.PdfPages(filename_pdf)
        for figure in figures:
            pdf.savefig(figure)
        pdf.close()
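
A minimal usage sketch; Plotter is a stand-in class (assumed, not from the
original source) that carries the method above verbatim:

import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as mpb

class Plotter:
    def save_multiple_plots(self, filename_pdf, figures):
        pdf = mpb.PdfPages(filename_pdf)
        for figure in figures:
            pdf.savefig(figure)
        pdf.close()

# build two simple figures and write them as two pages of one PDF
figs = []
for k in (1, 2):
    fig = plt.figure()
    plt.plot(range(10), [x ** k for x in range(10)])
    figs.append(fig)

Plotter().save_multiple_plots("pages.pdf", figs)
plt.close('all')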
Example #17
        simsAx[d + 1][ii].tick_params(labelsize=15,
                                      width=2,
                                      length=16,
                                      direction='out')
        simsAx[d + 1][ii].tick_params(width=2,
                                      length=8,
                                      which='minor',
                                      direction='out')
        # minor ticks, too...
        sns.despine(ax=simsAx[d + 1][ii], offset=10, trim=False)

# fix subplots to not overlap
fDetails.tight_layout()
# fSims must be saved separately...
saveName = "cell_%02d_simulate.pdf" % (cellNum)
pdfSv = pltSave.PdfPages(str(save_loc + saveName))
for ff in fSims:
    pdfSv.savefig(ff)
    plt.close(ff)
pdfSv.close()

# and now save it
#allFigs = [f, fDetails];
allFigs = [f, fDetails, fNorm]
if log_y:
    log_str = '_logy'
else:
    log_str = ''
saveName = "cell_%02d%s.pdf" % (cellNum, log_str)
pdf = pltSave.PdfPages(str(save_loc + saveName))
for fig in range(len(allFigs)):  ## will open an empty extra figure :(
Example #18
    def plot_diagnostics(self, name="full_table"):
        """
        generates hard-coded plots to display useful diagnostic information
        """

        pdf = b_pdf.PdfPages(path + "/diagnostics/" + name + ".pdf")

        # define variables

        out("Computing photometric variables...")
        M_g = [
            g + 5 - 5 * np.log10(1000 / p)
            for g, p in zip(self.full_table.table["phot_g_mean_mag"],
                            self.full_table.table["parallax"])
        ]
        g_k = subtract_cols(self.full_table, "phot_g_mean_mag", "k_m")
        j_k = subtract_cols(self.full_table, "j_m", "k_m")
        w1_w2 = subtract_cols(self.full_table, "w1mpro", "w2mpro")
        j_h = subtract_cols(self.full_table, "j_m", "h_m")
        h_k = subtract_cols(self.full_table, "h_m", "k_m")
        g_h = subtract_cols(self.full_table, "phot_g_mean_mag", "h_m")
        k_w1 = subtract_cols(self.full_table, "k_m", "w1mpro")
        k_w2 = subtract_cols(self.full_table, "k_m", "w2mpro")
        k_w3 = subtract_cols(self.full_table, "k_m", "w3mpro")

        out("Applying photometric cuts...")

        try:

            # Cut expressions used by the plots below (the magic numbers
            # are proper-motion / parallax selection bounds)

            cut_1s = """(self.full_table.table["pmra"] < -1.81 +1.31) & (self.full_table.table["pmra"] > -1.81 -1.31) & (self.full_table.table["pmdec"] < -2.67 +1.44) & (self.full_table.table["pmdec"] > -2.67 -1.44)"""

            cut_2s = """(self.full_table.table["pmra"] < -1.81 +2*1.31) & (self.full_table.table["pmra"] > -1.81 -2*1.31) & (self.full_table.table["pmdec"] < -2.67 +2*1.44) & (self.full_table.table["pmdec"] > -2.67 -2*1.44)"""

            cut_outliers = """(self.full_table.table["pmra"] < 15) & (self.full_table.table["pmra"] > -15) & (self.full_table.table["pmdec"] < 15) & (self.full_table.table["pmdec"] > -15)"""

            cut_plx_1s = """(self.full_table.table["parallax"]< 1.04+0.23) & (self.full_table.table["parallax"]> 1.04-0.23)"""

            # generate and display plots with documentation

            out("2mass RA vs 2mass Dec. J-H > 0.7 shown in black, J-H < 0.7 shown in red."
                )
            self.cut_and_plot("J-H > 0.7", ("2mass_ra", "2mass RA"),
                              ("2mass_dec", "2mass Dec"),
                              squared=True,
                              invert_x=True)

            out("Gaia RA vs Gaia Dec.")
            self.plot(("gaia_ra", "Gaia RA"), ("gaia_dec", "Gaia Dec"),
                      title="Gaia RA vs Gaia Dec.",
                      squared=True,
                      invert_x=True,
                      save=pdf)

            out("Gaia PM RA vs Gaia PM Dec.")
            self.plot(("pmra", "pm RA (Gaia)"), ("pmdec", "pm Dec (Gaia)"),
                      xlim=(-30, 30),
                      ylim=(-30, 30),
                      squared=True,
                      save=pdf)

            out("Gaia PM RA vs Gaia PM Dec (closer detail).")
            self.plot(("pmra", "pm RA (Gaia)"), ("pmdec", "pm Dec (Gaia)"),
                      xlim=(-10, 10),
                      ylim=(-10, 10),
                      squared=True,
                      save=pdf)

            out("Parallax vs Gaia Dec.")
            self.plot(("parallax", "Parallax"), ("gaia_dec", "Dec (Gaia)"),
                      xlim=(0, 5),
                      save=pdf)

            out("BP/RP vs G Mean Magnitude.")
            self.plot("bp_rp", "phot_g_mean_mag", save=pdf)

            out("BP/RP vs M_g = (G Mean Magnitude + 5 - 5 * log10( 1000 / parallax ))"
                )
            self.plot("bp_rp", (M_g, "$M_G [mag]$"), save=pdf)

            out("G-K Magnitude vs G Mean Magnitude.")
            self.plot((g_k, "G-K [mag]"), "phot_g_mean_mag", save=pdf)

            out("G-K Magnitude vs M_g = (G Mean Magnitude + 5 - 5 * log10( 1000 / parallax ))"
                )
            self.plot((g_k, "G-K [mag]"), (M_g, "$M_G [mag]$"), save=pdf)

            out("J-K Magnitude vs J-M Magnitude.")
            self.plot((j_k, "J-K [mag]"), "j_m", save=pdf)

            out("W1-W2 Magnitude vs W1 Magnitude.")
            self.plot((w1_w2, "W1-W2 [mag]"), "w1mpro", save=pdf)

            out("J-H Magnitude vs H-K Magnitude.")
            self.plot((j_h, "J-H [mag]"), (h_k, "H-K [mag]"), save=pdf)

            out("G-H Magnitude vs H-K Magnitude.")
            self.plot((g_h, "G-H [mag]"), (h_k, "H-K [mag]"), save=pdf)

            out("K-W2 Magnitude vs G-K Magnitude.")
            self.plot((k_w2, "K - W2 [mag]"), (g_k, "G - K [mag]"), save=pdf)

            out("BP/RP Magnitude vs G Mean Magnitude.")
            self.plot("bp_rp", "phot_g_mean_mag", invert_y=True, save=pdf)

            out("2mass RA vs 2mass Dec. K-W1 > 0.2 shown in black, K-w1 < 0.2 shown in red."
                )
            self.cut_and_plot("K-W1 > 0.2", ("2mass_ra", "RA"),
                              ("2mass_dec", "Dec"),
                              squared=True,
                              save=pdf)

            out("PMRA vs PMDec. -3.12 < PMRA < -0.5 AND -4.17 < PMDec < -1.23 shown in black, otherwise shown in red. Units in mas/yr."
                )
            self.cut_and_plot(cut_1s, ("pmra", "pm RA [$mas\ yr^{-1}$]"),
                              ("pmdec", "pm Dec [$mas\ yr^{-1}$]"),
                              xlim=(-10, 10),
                              ylim=(-10, 10),
                              squared=True,
                              save=pdf)

            out("PMRA vs PMDec - closer detail. -3.12 < PMRA < -0.5 AND -4.17 < PMDec < -1.23 shown in black, otherwise shown in red. Units in mas/yr."
                )
            self.cut_and_plot(cut_1s, ("pmra", "pm RA [$mas\ yr^{-1}$]"),
                              ("pmdec", "pm Dec [$mas\ yr^{-1}$]"),
                              xlim=(-6, 1),
                              ylim=(-6, 1),
                              squared=True,
                              save=pdf)

            out("PMRA vs PMDec. -4.43 < PMRA < 0.81 AND -5.55 < PMDec < 0.21 shown in black, otherwise shown in red. Units in mas/yr."
                )
            self.cut_and_plot(cut_2s, ("pmra", "pm RA [$mas\ yr^{-1}$]"),
                              ("pmdec", "pm Dec [$mas\ yr^{-1}$]"),
                              xlim=(-10, 10),
                              ylim=(-10, 10),
                              squared=True,
                              save=pdf)

            out("PMRA vs PMDec - closer detail. -4.43 < PMRA < 0.81 AND -5.55 < PMDec < 0.21 shown in black, otherwise shown in red. Units in mas/yr."
                )
            self.cut_and_plot(cut_2s, ("pmra", "pm RA [$mas\ yr^{-1}$]"),
                              ("pmdec", "pm Dec [$mas\ yr^{-1}$]"),
                              xlim=(-6, 1),
                              ylim=(-6, 1),
                              squared=True,
                              save=pdf)

            out("Parallax vs PMRA. 0.81 < Parallax < 1.27 shown in black, otherwise shown in red."
                )
            self.cut_and_plot(cut_plx_1s, ("parallax", "Parallax"),
                              ("pmra", "pm RA"),
                              xlim=(0, 5),
                              squared=True,
                              save=pdf)

            out("PMRA histogram. Data shown satisfy -15 < PMRA < 15 AND -15 < PMDec < 15. Units in mas/yr."
                )
            self.plot_hist("pmra", "pm RA", cut=cut_outliers, save=pdf)

            out("PMDec histogram. Data shown satisfy -4.43 < PMRA < 0.81 AND -5.55 < PMDec < 0.21. Units in mas/yr."
                )
            self.plot_hist("pmdec", "pm Dec", cut=cut_2s, save=pdf)

        except Exception:
            out("An error occurred while plotting diagnostics. This usually occurs because no sources passed a photometric cut."
                )

        try:
            # Lots of magic numbers here.
            cut_string = """(-1.81 -2*1.31 < self.full_table["pmra"]) & (self.full_table["pmra"] < -1.81 +2*1.31) & (-2.67 -2*1.44 < self.full_table["pmdec"]) & (self.full_table["pmdec"] < -2.67 +2*1.44)"""

            out(cut_string)
            cut_true = self.full_table.table[pd.eval(cut_string)]
            cut_false = self.full_table.table[~pd.eval(cut_string)]

            cut_mg_true = [
                g + 5 - 5 * np.log10(1000 / p) for g, p in zip(
                    cut_true["phot_g_mean_mag"], cut_true["parallax"])
            ]
            cut_mg_false = [
                g + 5 - 5 * np.log10(1000 / p) for g, p in zip(
                    cut_false["phot_g_mean_mag"], cut_false["parallax"])
            ]

            cut_bprp_true = cut_true["bp_rp"]
            cut_bprp_false = cut_false["bp_rp"]

            out("BP-RP Magnitude vs M_g = (G Mean Magnitude + 5 - 5 * log10( 1000 / parallax ))"
                )
            self.plot_removed([(cut_bprp_true, cut_mg_true),
                               (cut_bprp_false, cut_mg_false)],
                              "BP - RP",
                              "$M_G$",
                              invert_y=True,
                              save=pdf)

            out("BP-RP Magnitude vs M_g = (G Mean Magnitude + 5 - 5 * log10( 1000 / parallax )). More detail."
                )
            self.plot_removed([(cut_bprp_true, cut_mg_true),
                               (cut_bprp_false, cut_mg_false)],
                              "BP - RP",
                              "$M_G$",
                              xlim=(0.1, 3.5),
                              ylim=(-1, 15),
                              invert_y=True,
                              save=pdf)
        except Exception:
            out("Encountered an error applying this cut. This usually occurs when no sources pass the photometric cut."
                )

        pdf.close()
Example #19
        except:
            print('estimation failed for ' + kind + ' in group ' + func_type)
            pass
        p_ind = p_ind + 1.
        progress = str(100 * p_ind / (len(kinds) * len(func_type_list)))

        print(
            str(progress) + '% done in computing metrics (' + kind + ' ' +
            func_type + ')')

    individual_connectivity_matrices[func_type] = subjects_connectivity
    mean_connectivity_matrix[func_type] = mean_connectivity

comp_list = cp_tools.partperm(func_type_list)

with backend_pdf.PdfPages(save_report) as pdf:
    for g_index in range(len(func_type_list) + 1):
        pdf.savefig(at_check[g_index])
        plt.close()
    #t-stats for comparison of metrics across groups
    for kind in kinds:
        print('saving report: ' + kind)
        for func_type in func_type_list:
            #average across all subjects
            Mean_mat = mean_connectivity_matrix[func_type][kind]
            Mean_tot = np.mean(Mean_mat)
            #plot connectomes
            plotting.plot_connectome(Mean_mat,
                                     coords_ref,
                                     node_color=label_colors,
                                     title=func_type + ' ' + kind +
Example #20
    nargs='+',
    default=[],
    help='R|Plot specific samples',
)
parser.add_argument(
    '-a',
    '--all-plot',
    dest='all_plot',
    action='store_true',
    default=False,
    help='R|Also include PU profiles not exceeding the threshold',
)
args = parser.parse_args()

eras = args.era
output = os.path.abspath(args.output)
threshold = args.threshold
sample = args.sample
plot_all = args.all_plot

if not output.lower().endswith('.pdf'):
    raise ValueError("Output file can be in PDF format only: %s" % output)

if not os.path.isdir(os.path.dirname(output)):
    raise ValueError(
        "Cannot create file %s because its directory does not exist" % output)

with backend_pdf.PdfPages(output) as pdf:
    for era in eras:
        plot(era, pdf, threshold, sample, plot_all)
Example #21
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 10:49:03 2020
"""
import numpy as np

import tools
import matplotlib.backends.backend_pdf as backend_pdf

fd_data = "./data/"
tools.mkdir(fd_data)

# Output
out_pdf = fd_data + "kf_data.pdf"
pdf = backend_pdf.PdfPages(out_pdf)

fname_out = fd_data + "kf_data.txt"
format_out = ['%.3f', '%.3f', '%.3f', '%.3f', '%.3f', '%.3f', '%.3f']
# time, pe_ref, pn_ref, ve_ref, vn_ref, pe_meas, pn_meas
FS_PLOT = 18
D2R = np.pi / 180.0
"""
Generate reference motion data
"""
T_TOTAL = 100.0  # Total navigation time
T_TURN = 10.0  # Time length for each turn
D_T = 1.0  # Time interval
VEL = 1.0  # Speed

# Real-time states
Example #22
MassesCO = [1e5 * i for i in MasscoGMCover]

# Limits in the properties of HIIR and GMCs
xlim, ylim, xx, yy = pickle.load(open('limits_properties.pickle', "rb"))

#===============================================================
# Plots of correlations with dots for each pair
print "Plots of all galaxies together"

df = sns.load_dataset('iris')

marker_style = dict(markersize=4)
#xticks1 = np.arange(8.4,9,0.2)
xticks5 = [8.3, 8.4, 8.5, 8.6, 8.7]

pdf3 = fpdf.PdfPages("Correlations_allgals_GMC%s.pdf" %
                     namegmc)  # type: PdfPages
print "Starting loop to create figures of all galaxies together - points"
for k in range(len(arrayxax)):
    sns.set(style='white', color_codes=True)
    fig, axs = plt.subplots(4,
                            2,
                            sharex='col',
                            figsize=(9, 10),
                            dpi=80,
                            gridspec_kw={'hspace': 0})
    plt.subplots_adjust(wspace=0.3)
    fig.suptitle('All galaxies - Overlapping HIIregions and GMCs',
                 fontsize=18,
                 va='top')
    axs = axs.ravel()
    # Galactic distance vs: Mco, avir, sigmav,Sigmamol
Example #23
    yMaxP = first.bounds[3]

    xMax = xMaxP if xMaxP > xMax else xMax
    yMax = yMaxP if yMaxP > yMax else yMax
    xMin = xMinP if xMinP < xMin else xMin
    yMin = yMinP if yMinP < yMin else yMin

xMin = -76
yMin = -45
xMax = -70
yMax = -34

xSpan = xMax - xMin
ySpan = yMax - yMin

outputGraphs = pdf.PdfPages("test.pdf")

fig = plt.figure(figsize=(10, 10 * (yMax - yMin) / (xMax - xMin)))
ax = fig.add_subplot(111)
ax.set_xlim(xMin, xMax)
ax.set_ylim(yMin, yMax)
ax.set_aspect('equal')

patches = []

cmap = clrmp.get_cmap('Oranges')

for provinceID in selectedProvinces:
    shape.loc[[provinceID - 1],
              'geometry'].plot(ax=ax,
                               edgecolor='black',
Example #24
def singlerun(filename, outputFile, binsize, chop, modelOverride=None):
    fig = plt.figure()
    galaxies = common.loadData(filename, dataType="CF2")
    distances = [galaxy.d for galaxy in galaxies]
    #get a list of all the distances to galaxies. This will let us send it directly to the histogram

    bins_orig = genBins(binsize, chop)

    #Make a histogram using pylab histogram function.
    n, bins, patches = plt.hist(
        distances,
        bins_orig,
        histtype="stepfilled",
        label="Galaxy Distribution,\n binsize={:.2f}Mpc".format(binsize))

    #Change visual properties of the histogram
    plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    robot = chi_sq_solver(bins, n, selection_function)
    if modelOverride is None:
        #If we don't have an existing model to use, we find a best fit and plot it
        #Solve the chi squared optimization for the histogram and selection function
        params = robot.result.x
        #Plot the best fit
        domain = np.arange(0, chop, 1)
        model = [selection_function(r, *(robot.result.x)) for r in domain]
        plt.plot(
            domain,
            model,
            'k--',
            linewidth=1.5,
            label=
            "Model fit: $A = {:.3f}$\n$r_0 = {:.3f}$\n$n_1 = {:.3f}$\n$n_2={:.3f}$\n$\chi^2={chisq:.3f}$"
            .format(*(robot.result.x), chisq=robot.result.fun))
        chisq = robot.result.fun
    else:
        #Plot the model given in the settings function instead of calculating a new one
        mo = modelOverride["constants"]
        params = [mo['A'], mo['r_0'], mo['n_1'], mo['n_2']]
        chisq = robot.chi_sq(params)
        domain = np.arange(0, chop, 1)
        model = [selection_function(r, *params) for r in domain]
        plt.plot(
            domain,
            model,
            'k--',
            linewidth=1.5,
            label=
            "Model fit: $A = {:.3f}$\n$r_0 = {:.3f}$\n$n_1 = {:.3f}$\n$n_2={:.3f}$\n$\chi^2={chisq:.3f}$"
            .format(*params, chisq=chisq))

    #Add axis labels
    plt.ylabel("Galaxy count")
    plt.xlabel("Distance, Mpc/h")
    plt.title("Distribution of Galaxy Distance")
    plt.legend()
    plt.axis([0, chop, 0, 1300])
    fig2 = plt.figure()
    shellVolume = [
        common.shellVolCenter(robot.centerbins[i], binsize)
        for i in range(len(n))
    ]
    plt.title("Galaxies per Cubic Mpc")
    plt.xlabel("Distance, Mpc/h")
    plt.ylabel("Density, galaxies/(Mpc/h)^3")
    density = [n[i] / shellVolume[i] for i in range(len(n))]
    plt.plot(robot.centerbins, density, 'o')
    #Save figure
    with pdfback.PdfPages(outputFile + str(binsize) + '.pdf') as pdf:
        pdf.savefig(fig)
        pdf.savefig(fig2)
    if modelOverride is None:
        #Write parameters to a file for later use.
        common.writedict(
            outputFile + str(binsize) + '_params.json', {
                'constants': {
                    'A': params[0],
                    'r_0': params[1],
                    'n_1': params[2],
                    'n_2': params[3]
                },
                'info': {
                    'shell_thickness': binsize,
                    'max_radius': chop,
                    'chisq': chisq
                }
            })
    plt.close('all')
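
genBins is referenced above but not shown in this snippet; a plausible
minimal reconstruction, assuming uniform bin edges of width binsize
spanning [0, chop] (the real project may define it differently):

import numpy as np

def genBins(binsize, chop):
    # histogram bin edges: 0, binsize, 2*binsize, ..., up to chop
    return np.arange(0, chop + binsize, binsize)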
Example #25

    print('Stylizing each interval of the target tier')

    #computing at which iterations to give progress
    LEN = float(len(tg[targetTier]))
    totalN += LEN
    POSdisplay = set([int(float(i) / 100.0 * LEN) for i in range(0, 100, 10)])
    smooth_total = []
    time_total = []
    pl.rcParams["figure.figsize"] = [13, 7]
    fig = pl.figure()
    support = None
    haveImgInbuf = False
    if exportFigures:
        pdf = pdfLib.PdfPages(outputFigureFile)

    prog = progLib.Progress(len(tg[targetTier]))
    for pos, targetIntv in enumerate(tg[targetTier]):
        if pos in POSdisplay:
            print('Stylizing: {} contours'.format(prog.progressstring(pos)))

        supportIntvs = stylize.getSupportIntvs(targetIntv,
                                               supportTier=tg[speakerTier])
        try:
            tag = stylize.getTags(targetIntv, tg[tagTier])
        except:
            tag = None
        #compute style of current interval
        out = \
            stylize.stylizeObject(\
Example #26
import numpy as np
np.set_printoptions(threshold=np.inf)
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import emcee
import corner
import matplotlib.backends.backend_pdf as pf

x, y, sigma_y = np.loadtxt("data.txt",
                           delimiter='&',
                           usecols=(1, 2, 3),
                           unpack=True)
pp = pf.PdfPages("q10_plots.pdf")  #All plots will be stored in this file
"Check Ques10_params.txt for output parameters and there uncertainities"


def log_likelihood(theta, x, y, yerr):
    a, b, c = theta
    model = a * x**2 + b * x + c
    sigma2 = yerr**2
    #negative ln(L)
    return 0.5 * np.sum((y - model)**2 / sigma2 + np.log(2 * np.pi * sigma2))


def log_prior(theta):
    a, b, c = theta
    if -500 < a < 500 and -500 < b < 500 and -500 < c < 500:
        return 0.0
    return -np.inf
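
The snippet is truncated before the posterior and the sampler are set up. A
sketch of the usual next step; note that log_likelihood above returns the
negative log-likelihood, so it is subtracted (the starting ball and step
count are illustrative, not from the original):

def log_probability(theta, x, y, yerr):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp - log_likelihood(theta, x, y, yerr)

ndim, nwalkers = 3, 32
p0 = np.random.randn(nwalkers, ndim)  # assumed initial walker positions
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability,
                                args=(x, y, sigma_y))
sampler.run_mcmc(p0, 2000, progress=True)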

Example #27
def stats_relative(clim,
                   event,
                   ofile,
                   time=None,
                   ci=0.05,
                   verbose=False):  ##{{{
    """
	NSSEA.plot.stats_relative
	=========================
	
	Plot probabilities PR/PR[time_event] and di - di[time_event] along time
	
	Arguments
	---------
	clim    : NSSEA.Climatology
		A clim variable
	event     : NSSEA.Event
		Event variable
	ofile     : str
		output file
	time: time
		time to plot
	ci        : float
		Size of confidence interval, default is 0.05 (95% confidence)
	verbose   : bool
		Print (or not) state of execution
	"""

    if verbose: print("Plot stats_relative", end="\r")

    statsIn = clim.stats
    ## Compute stats events
    if time is None:
        time = event.time
    stats = stats_relative_event(statsIn, time)
    statsu = stats[:, 1:, :, :].quantile(ci / 2., dim="sample")
    statsl = stats[:, 1:, :, :].quantile(1. - ci / 2., dim="sample")

    ymindI = min(stats.loc[:, :, "dI", :].min(), statsu.loc[:, "dI", :].min(),
                 statsl.loc[:, "dI", :].min())
    ymaxdI = max(stats.loc[:, :, "dI", :].max(), statsu.loc[:, "dI", :].max(),
                 statsl.loc[:, "dI", :].max())
    ylabel = "\mathrm{(" + event.unit_variable + ")}"

    lp = LinkParams()

    pdf = mpdf.PdfPages(ofile)

    for m in stats.models:
        nrow, ncol = 2, 1
        fs = 10
        fig = plt.figure(figsize=(fs * ncol, 0.6 * fs * nrow))

        ## Probabilities
        ax = fig.add_subplot(nrow, ncol, 1)
        ax.plot(stats.time,
                lp.frr(stats.loc[:, "be", "PR", m]),
                color="red",
                linestyle="-",
                marker="")
        ax.fill_between(stats.time,
                        lp.frr(statsl.loc[:, "PR", m]),
                        lp.frr(statsu.loc[:, "PR", m]),
                        color="red",
                        alpha=0.5)
        ax.set_ylim((lp.rr.values.min(), lp.rr.values.max()))
        ax.set_yticks(lp.rr.values)
        ax.set_yticklabels(lp.rr.names)
        ax.set_xlabel(r"$\mathrm{Time}$")
        ax.set_ylabel(r"$\mathrm{RR}(t)$")
        ax2 = fig.add_subplot(nrow, ncol, 1, sharex=ax, frameon=False)
        ax2.yaxis.tick_right()
        ax2.set_yticks(lp.rr.values)
        ax2.set_yticklabels(lp.far.names)
        ax2.yaxis.set_label_position("right")
        ax2.set_ylabel(r"$\mathrm{FAR}(t)$")

        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        ax.plot(xlim, lp.frr([1, 1]), linestyle="-", marker="", color="black")
        ax.plot([time, time], ylim, linestyle="--", marker="", color="black")
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)

        ## Intensities
        ax = fig.add_subplot(nrow, ncol, 2)
        ax.plot(stats.time,
                stats.loc[:, "be", "dI", m],
                color="red",
                linestyle="-",
                marker="")
        ax.fill_between(stats.time,
                        statsl.loc[:, "dI", m],
                        statsu.loc[:, "dI", m],
                        color="red",
                        alpha=0.5)
        ax.set_ylim((ymindI, ymaxdI))
        ax.set_xlabel("Time")
        ax.set_ylabel(r"${}$".format("\delta\mathbf{i}(t)\ " + ylabel))
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        ax.plot([time, time], ylim, linestyle="--", marker="", color="black")
        ax.plot(xlim, [0, 0], linestyle="-", marker="", color="black")
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)

        fig.set_tight_layout(True)
        pdf.savefig(fig)
        plt.close(fig)

    pdf.close()

    if verbose: print("Plot stats_relative (Done)")
Example #28
mlp_y, mlp_x = sklearn.calibration.calibration_curve(y_test, mlp_pred[:,1], normalize=False, n_bins=25, strategy='uniform')

%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms

import matplotlib.backends.backend_pdf as pdf
fig, ax = plt.subplots(figsize=(10,7))
plt.plot(lgb_x,lgb_y, marker='o', linewidth=1, label='Lightgbm')
plt.plot(mlp_x,mlp_y, marker='o', linewidth=1, label='ANN')
plt.plot(ens_x, ens_y, marker='o', linewidth=1, label='Ensemble')
plt.plot(xgb_x,xgb_y, marker='o', linewidth=1, label='XGBoost')
plt.plot(cb_x,cb_y, marker='o', linewidth=1, label='CatBoost')

pdf_construct = pdf.PdfPages("calplot.pdf")
# reference line, legends, and axis labels
line = mlines.Line2D([0, 1], [0, 1], color='black')
transform = ax.transAxes
line.set_transform(transform)
ax.add_line(line)
fig.suptitle('Calibration plot')
ax.set_xlabel('Predicted probability')
ax.set_ylabel('True probability in each bin')
plt.legend()
plt.show()
pdf_construct.savefig(fig)
pdf_construct.close()

#plotting lift and gain charts
import scikitplot as skplt
Example #29
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as pdf
import numpy


def plot_logistic_map(r: float, x: float, iterations: int):
    iterations_list = []
    results_list = []
    for i in range(iterations):
        x = r * (x - x ** 2)
        results_list.append(x)
        iterations_list.append(i)

    plt.xlabel("Iterations")
    plt.ylabel(f"R = {r}")
    plt.plot(iterations_list, results_list)

    return plt


if __name__ == "__main__":
    pdf = pdf.PdfPages("logistic_map_output.pdf")
    for i in numpy.arange(0.1, 5.0, 0.1):
        pdf.savefig(plot_logistic_map(i, .02, 30).gcf())
        plt.clf()
    pdf.close()
Example #30
def spikeflag(date, data, inflag, isday, outdir, window=13, iter=1,
              fill_days=1, t_int=48, z=7, deriv=0, udef=-9999, spike_v=2,
              plot=False):
    '''
    Spike detection for Eddy Covariance data (and basically all other data)
    using a moving median absolute difference filter. Multiple iterations
    possible. Originally coded by Tino Rau.
    
    
    Definition
    ----------
    spikeflag(date, data, inflag, isday, window=13, iter=1,
              fill_days=1, t_int=48, z=7, deriv=0, udef=-9999, spike_v=2,
              plot=False):
    
    
    Input
    ----- 
    date        np.array(N), julian date (used only for plotting)
    data        np.array(N,M), data array where spike detection is applied on
                each column (M)
    inflag      np.array(N,M), dtype=int, quality flag of data, spike detection
                is only applied where inflag=0, all other data is ignored
    isday       np.array(N), dtype=bool, True where it is day and False where
                it is night
    outdir      path where plots are saved
                  
                        
    Optional Input
    --------------
    window      int, size of the moving window where mad is calculated in days
                (default: 13)
    iter        int, how often the running window mad shall be applied
                (default: 1)
    fill_days   int, number of days where mad is applied within moving window
                (default: 1)
    t_int       int, number of data points within one day (default: 48)
    z           int/float, data is allowed to deviate maximum z standard
                deviations from the median (default: 7)
    deriv       int, 0: Act on raw data; 1: use first derivatives;
                2: use 2nd derivatives (default: 0)
    udef        int/float, missing value of data (default: -9999) NaN values are
                excluded from computations anyhow.
    spike_v     int, spike value which shall be returned when a spike is
                detected (default: 2)
    plot        bool, if True data and spikes are plotted (default: False)
    
    
    Output
    ------
    flag        np.array(N), flag array where everything is 0 except where
                spikes were detected, there it is spike_v.
    
    
    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2014 Arndt Piayda

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.


    History
    -------
    Written,  AP, Aug 2014
    '''       
    rows, cols = np.shape(data)
    flag       = np.zeros_like(inflag).astype(int)
    # mad window half-length and flag window half-length
    period   = int(window*t_int) // 2
    fill_win = int(fill_days*t_int) // 2
    
    # calculate dusk and dawn times and separate in day and night
    isdawn      = np.zeros(rows,dtype=bool)
    isdusk      = np.zeros(rows,dtype=bool)
    dis         = (isday.astype(int) - np.roll(isday,-1).astype(int)).astype(bool)
    isdawn[:-1] = np.where(dis[:-1] == -1, True, False)
    isdusk[:-1] = np.where(dis[:-1] == 1, True, False)
    isddday     = isdawn
    tmp         = np.roll(isdusk,1)
    isddday[1:] += tmp[1:]
    isddnight   = isdusk
    tmp         = np.roll(isdawn,1)
    isddnight[1:] += tmp[1:]
    
    # iterate over each column of data
    for col in range(cols):
        # iterate as much as iter
        for i in range(iter):
            # get day and night data#
            day_data   = np.where((isday | isddday) & (inflag[:,col]==0) &
                                  ((data[:,col]!=udef) | (~np.isnan(data[:,col]))),
                                  data[:,col], np.nan)
            night_data = np.where((~isday | isddnight) & (inflag[:,col]==0) &
                                  ((data[:,col]!=udef) | (~np.isnan(data[:,col]))),
                                  data[:,col], np.nan)       

            # iterate over flag window
            fill_points = range(fill_win, isday.size-1, 2*fill_win)
            for j in fill_points:
                j1 = np.max([ j - period - 1,0])
                j2 = np.min([ j + period + 1,isday.size])
                fill_start = np.max([ j - fill_win,1])
                fill_end   = np.min([ j + fill_win,isday.size-1])
                
                day_flag = mad(np.ma.masked_array(data=day_data[j1:j2],
                                                  mask=(np.isnan(day_data[j1:j2]))),
                               z=z, deriv=deriv)

                flag[fill_start:fill_end,col] += np.where(day_flag[fill_start-j1-1:fill_end-j1-1],
                                                          spike_v, 0)
                                
                night_flag = mad(np.ma.masked_array(data=night_data[j1:j2],
                                                    mask=(np.isnan(night_data[j1:j2]))),
                                 z=z, deriv=deriv)

                flag[fill_start:fill_end,col] += np.where(night_flag[fill_start-j1-1:fill_end-j1-1],
                                                          spike_v, 0)
                
            if plot:
                import matplotlib as mpl
                import matplotlib.pyplot as plt
                import matplotlib.backends.backend_pdf as pdf
                majticks = mpl.dates.MonthLocator(bymonthday=1)
                format_str='%d %m %Y %H:%M'
                date01 = date2dec(yr=1, mo=1, dy=2, hr=0, mi=0, sc=0)
                                
                fig1 = plt.figure(1)
                sub1 = fig1.add_subplot(111)
                valid = (inflag[:,col]==0) & ((data[:,col]!=udef) |
                                              (~np.isnan(data[:,col])))
                l1 = sub1.plot(date[valid]-date01, data[valid,col], '-b')
                l2 = sub1.plot(date[flag[:,col]!=0]-date01, data[flag[:,col]!=0,col], 'or')
                sub1.xaxis.set_major_locator(majticks)
                sub1.xaxis.set_major_formatter(mpl.dates.DateFormatter(format_str))
                fig1.autofmt_xdate()
                plt.show()
                
                pp1 = pdf.PdfPages(outdir+'/spike_%i.pdf'%col)
                fig1.savefig(pp1, format='pdf')
                pp1.close()
    
    return flag
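
Both madspikes (Example #8) and spikeflag above mark dawn and dusk with the
same np.roll trick on the boolean isday series; isolated, it works like this:

import numpy as np

# a -1 transition in isday[i] - isday[i+1] marks dawn (last night point
# before day), a +1 transition marks dusk (last day point before night)
isday = np.array([False, False, True, True, True, False, False])
dis = isday.astype(int) - np.roll(isday, -1).astype(int)
isdawn = np.zeros(isday.size, dtype=bool)
isdusk = np.zeros(isday.size, dtype=bool)
isdawn[:-1] = dis[:-1] == -1
isdusk[:-1] = dis[:-1] == 1
print(np.flatnonzero(isdawn), np.flatnonzero(isdusk))  # -> [1] [4]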