Example #1
def update_pos_rk4(pos, psi, x, y, dt, dx):
    # Runge-Kutta 4th order to update the position.
    # Find the grid indices closest to the continuous position (x, y).
    xcoord = find_nearest(x, pos[0])
    ycoord = find_nearest(y, pos[1])
    # np.nan_to_num cleans up any NaN/inf produced by division by zero.
    # This part updates the x coordinate.
    k1 = np.nan_to_num(
        (x_deriv(psi, xcoord, ycoord, dx) / psi[xcoord, ycoord]).imag)
    k2 = np.nan_to_num((x_deriv(psi + 0.5 * dt * k1, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + 0.5 * dt * k1)).imag)
    k3 = np.nan_to_num((x_deriv(psi + 0.5 * dt * k2, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + 0.5 * dt * k2)).imag)
    k4 = np.nan_to_num((x_deriv(psi + dt * k3, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + dt * k3)).imag)
    xnew = pos[0] + dt * 1. / 6. * (k1 + 2. * k2 + 2. * k3 + k4)
    # Updating the y coordinate with RK4.
    k1 = np.nan_to_num(
        (y_deriv(psi, xcoord, ycoord, dx) / psi[xcoord, ycoord]).imag)
    k2 = np.nan_to_num((y_deriv(psi + 0.5 * dt * k1, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + 0.5 * dt * k1)).imag)
    k3 = np.nan_to_num((y_deriv(psi + 0.5 * dt * k2, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + 0.5 * dt * k2)).imag)
    k4 = np.nan_to_num((y_deriv(psi + dt * k3, xcoord, ycoord, dx) /
                        (psi[xcoord, ycoord] + dt * k3)).imag)
    ynew = pos[1] + dt * 1. / 6. * (k1 + 2. * k2 + 2. * k3 + k4)
    return [xnew, ynew]
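Every snippet in this collection assumes some find_nearest helper, but the return convention varies from example to example (index only, (value, index), or (index, value)). A minimal NumPy sketch of the common numeric variant, offered only as an assumption about what these codebases implement:

import numpy as np

def find_nearest(array, value):
    # Return (nearest_value, index) for the entry of a 1-D array closest to `value`.
    array = np.asarray(array)
    idx = int(np.abs(array - value).argmin())
    return array[idx], idx  # adapt the return order/content to match each snippet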
Example #2
    def create_mom0(self, vel1, vel2, mask=None):
        """Sum the cube channels between vel1 and vel2 into a moment-0 map."""
        idx1, idx2 = ut.find_nearest(self.vels, vel1)[1], ut.find_nearest(self.vels, vel2)[1]
        if mask is not None:
            self.mom0 = np.sum(self.cubedat[idx1:idx2, mask], axis=0)
        else:
            self.mom0 = np.sum(self.cubedat[idx1:idx2, :, :], axis=0)
        self.mom_props = [vel1, vel2, mask]
Example #3
def lnprior(params, vsin_obs, sig_vsin_obs, dist_pc, sig_dist_pc, ranges,
            model, stellar_prior, npy_star, pdf_mas, pdf_obl, pdf_age, pdf_dis,
            pdf_ebv, grid_mas, grid_obl, grid_age, grid_dis, grid_ebv):

    if model == 'befavor':
        Mstar, oblat, Hfrac, cosi, dist, ebv = params[0], params[1],\
            params[2], params[3], params[4], params[5]
    if model == 'aara' or model == 'acol' or model == 'bcmi':
        Mstar, oblat, Hfrac, cosi, dist, ebv = params[0], params[1],\
            params[2], params[6], params[7], params[8]
    if model == 'beatlas':
        Mstar, oblat, Hfrac, cosi, dist, ebv = params[0], params[1],\
            0.3, params[4], params[5], params[6]

    # Reading Stellar Priors
    if stellar_prior is True:
        temp, idx_mas = find_nearest(grid_mas, value=Mstar)
        temp, idx_obl = find_nearest(grid_obl, value=oblat)
        temp, idx_age = find_nearest(grid_age, value=Hfrac)
        temp, idx_dis = find_nearest(grid_dis, value=dist)
        temp, idx_ebv = find_nearest(grid_ebv, value=ebv)
        chi2_stellar_prior = Mstar * pdf_mas[idx_mas] +\
            oblat * pdf_obl[idx_obl] + \
            Hfrac * pdf_age[idx_age] + \
            dist * pdf_dis[idx_dis] + \
            ebv * pdf_ebv[idx_ebv]
    else:
        chi2_stellar_prior = 0.0

    # Rpole, Lstar, Teff = vkg.geneve_par(Mstar, oblat, Hfrac, folder_tables)
    t = np.max(np.array([hfrac2tms(Hfrac), 0.]))

    Rpole, logL = geneva_interp_fast(Mstar,
                                     oblat,
                                     t,
                                     neighbours_only=True,
                                     isRpole=False)

    wcrit = np.sqrt(8. / 27. * G * Mstar * Msun / (Rpole * Rsun)**3)

    vsini = oblat2w(oblat) * wcrit * (Rpole * Rsun * oblat) *\
        np.sin(np.arccos(cosi)) * 1e-5

    chi2_vsi = (vsin_obs - vsini)**2 / sig_vsin_obs**2.

    chi2_dis = (dist_pc - dist)**2 / sig_dist_pc**2.

    chi2_prior = chi2_vsi + chi2_dis + chi2_stellar_prior

    return -0.5 * chi2_prior
Example #4
    def jump(self, event):
        """Handle frequency change from the marker TxtCtrl."""
        evt_obj = event.GetEventObject()
        temp_freq = evt_obj.GetValue()
        try:
            # MHz to Hz. Will raise ValueError if not a number
            temp_freq = float(temp_freq) * 1e6
        except ValueError:
            if temp_freq == "":
                # Let the user remove the marker
                self.unplot()
                return
            else:
                temp_freq = self.freq  # reset to last known good value
                if temp_freq is None:
                    return

        bin_freqs = self.frame.tb.cfg.bin_freqs
        idx = utils.find_nearest(bin_freqs, temp_freq)
        freq = bin_freqs[idx]

        if freq != self.freq:
            self.bin_idx = idx
            self.freq = freq
            self.plot()

        evt_obj.SetValue(self.get_freq_str())
Example #5
    def update_bin_indices(self):
        """Update common indices used in cropping and overlaying DFTs"""
        self.bin_start = int(self.fft_size * (self.overlap / 2))
        self.bin_stop = int(self.fft_size - self.bin_start)
        self.max_plotted_bin = utils.find_nearest(self.bin_freqs,
                                                  self.max_freq) + 1
        self.bin_offset = (self.bin_stop - self.bin_start) / 2
Example #6
def lnprior(params, vsin_obs, sig_vsin_obs, plx_obs, sig_plx_obs, ranges,
            model, stellar_prior, npy_star, pdf_mas, pdf_obl, pdf_age, pdf_plx,
            pdf_ebv, grid_mas, grid_obl, grid_age, grid_plx, grid_ebv):

    if model == 'befavor':
        Mstar, oblat, Hfrac, cosi, plx, ebv = params[0], params[1],\
            params[2], params[3], params[4], params[5]
    if model == 'aara' or model == 'acol' or model == 'bcmi':
        Mstar, oblat, Hfrac, cosi, plx, ebv = params[0], params[1],\
            params[2], params[6], params[7], params[8]
    if model == 'beatlas':
        Mstar, oblat, Hfrac, cosi, plx, ebv = params[0], params[1],\
            0.3, params[4], params[5], params[6]

    # Reading Stellar Priors
    if stellar_prior is True:
        temp, idx_mas = find_nearest(grid_mas, value=Mstar)
        temp, idx_obl = find_nearest(grid_obl, value=oblat)
        temp, idx_age = find_nearest(grid_age, value=Hfrac)
        temp, idx_plx = find_nearest(grid_plx, value=plx)
        temp, idx_ebv = find_nearest(grid_ebv, value=ebv)
        chi2_stellar_prior = Mstar * pdf_mas[idx_mas] +\
            oblat * pdf_obl[idx_obl] + \
            Hfrac * pdf_age[idx_age] + \
            plx * pdf_plx[idx_plx] + \
            ebv * pdf_ebv[idx_ebv]
    else:
        chi2_stellar_prior = 0.0

    t = hfrac2tms(Hfrac)

    Rpole, logL, _ = geneva_interp_fast(Mstar, oblat, t)

    wcrit = np.sqrt(8. / 27. * G * Mstar * Msun / (Rpole * Rsun)**3)

    vsini = oblat2w(oblat) * wcrit * (Rpole * Rsun * oblat) *\
        np.sin(np.arccos(cosi)) * 1e-5

    chi2_vsi = (vsin_obs - vsini)**2 / sig_vsin_obs**2.

    chi2_plx = (plx_obs - plx)**2 / sig_plx_obs**2.

    chi2_prior = chi2_vsi + chi2_plx + chi2_stellar_prior

    return -0.5 * chi2_prior
Example #7
    def animate(i):
        t_detail = 1.0 * i / fps
        __, detail_idx = find_nearest(t_ods, t_detail)
        tf_line.set_xdata([t_detail, t_detail])
        t_line.set_xdata([t_detail, t_detail])
        detail.set_xdata(np.abs(TF[:, detail_idx]))

        print("  {0:.2f}/{1:.2f}".format(t_detail, max(t_ods)), end="\r")
        sys.stdout.flush()

        return tf_line, t_line, detail
Example #8
    def peak_search(self, event, txtctrl):
        """Find the point of max power in the whole plot or within a span."""
        bin_freqs = self.frame.tb.cfg.bin_freqs
        if self.frame.span_left and self.frame.span_right:
            left_idx = utils.find_nearest(bin_freqs, self.frame.span_left)
            right_idx = utils.find_nearest(bin_freqs, self.frame.span_right)
            power_data = self.frame.line.get_ydata()[left_idx:right_idx]
        else:
            left_idx = 0
            right_idx = utils.find_nearest(bin_freqs,
                                           self.frame.tb.cfg.max_freq)
            power_data = self.frame.line.get_ydata()[:right_idx]
        try:
            relative_idx = np.where(power_data == np.amax(power_data))[0][0]
        except ValueError:
            # User selected an area with no data in it; do nothing
            return
        # add the left index offset to get the absolute index
        self.bin_idx = relative_idx + left_idx
        self.freq = self.frame.tb.cfg.bin_freqs[self.bin_idx]
        txtctrl.SetValue(self.get_freq_str())
        self.plot()
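The np.where(power_data == np.amax(power_data))[0][0] lookup above can also be written with np.argmax, which returns the first index of the maximum directly and likewise raises ValueError on an empty array, so the except clause still applies. A small equivalent sketch with hypothetical data:

import numpy as np

power_data = np.array([-80.0, -62.5, -71.0, -62.5])  # hypothetical power samples
relative_idx = int(np.argmax(power_data))            # first index of the maximum -> 1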
Example #9
    def __init__(self, initial_frame_time, final_frame_time, dt, scan_times):
        self.initial_frame_time = initial_frame_time
        self.final_frame_time = final_frame_time
        self.dt = dt  # Temporal resolution
        self.frame_times = np.arange(self.initial_frame_time,
                                     self.final_frame_time, self.dt)  # for now
        self.scan_times = scan_times
        self.n_scans = len(scan_times)
        idx_scans_within_frames = np.zeros(self.n_scans)
        for k in range(self.n_scans):
            idx_scans_within_frames[k] = utils.find_nearest(
                self.frame_times, self.scan_times[k])
        self.idx_scans_within_frames = idx_scans_within_frames.astype(int)
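The per-scan loop above could also be vectorized with np.searchsorted, which maps all scan times to nearest-frame indices at once (assuming frame_times is sorted, which np.arange guarantees). A hedged sketch with made-up numbers:

import numpy as np

frame_times = np.arange(0.0, 60.0, 0.5)   # hypothetical frame grid
scan_times = np.array([0.2, 12.7, 43.1])

# searchsorted gives the insertion point; comparing the two neighbours
# turns it into a nearest-index lookup for the whole array at once.
j = np.searchsorted(frame_times, scan_times).clip(1, len(frame_times) - 1)
left, right = frame_times[j - 1], frame_times[j]
idx_scans_within_frames = j - (scan_times - left < right - scan_times)  # -> [0, 25, 86]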
Example #10
def update_georef(vgeo, v, vref_level):
    """ 
    Return vgeo after updating geostrophic reference depth by constraining
    velocities in vgeo to match those in v at the specified depth.
    
    """
    vgeodat = vgeo.data.filled(0)
    vdat = v.data.filled(0)
    zind = utils.find_nearest(v.z, vref_level)
    vadj = np.ones_like(vgeodat) * (vdat[:, zind, :] - vgeodat[:, zind, :])
    vgeo.data = np.ma.MaskedArray(vgeo.data + vadj, mask=vgeo.mask)

    return vgeo
Example #11
def animate(i):
    global psi
    global pos
    for q in range(rk4_steps_per_frame):
        psinew = update_psi_rk4(psi, timestep)
        posnew = update_pos_rk4(pos, psi, x, y, timestep, dx)
        psi = psinew
        pos = posnew

    ax.patches = []
    #Our particle!
    electron = plt.Circle((find_nearest(x, pos[1]), find_nearest(x, pos[0])),
                          2,
                          color="black")
    ax.add_patch(electron)
    currentnorm = ts.get_norm(psi, Npoints, dx)
    #If the norm changes from 1 significantly, the simulation is probably in trouble.
    print(i, currentnorm)
    plotted = abs(psi)**2 + V.real
    line.set_data(plotted)  # update the data
    line.set_clim(vmax=np.amax(np.abs(psi)**2))
    line.set_clim(vmin=0)
    return line
Example #12
def process_nearest(update):
    user_id = update["message"]["from"]["id"]
    if user_id in users_locations:
        stations = scraper.scrape_bikes()
        nearest_station = utils.find_nearest(
            list(stations.values()), users_locations[user_id]["latitude"],
            users_locations[user_id]["longitude"])
        tc.send_simple_message(user_id, str(nearest_station))
        tc.send_location(user_id, {
            'lat': nearest_station.lat,
            'lng': nearest_station.lng
        })
    else:
        log.info(users_locations)
        process_start(update)
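In this snippet find_nearest is a geographic lookup rather than a numeric one: it takes a station list plus a latitude/longitude. A hedged sketch of that variant, assuming station objects expose .lat and .lng attributes (as the send_location call suggests):

import math

def find_nearest(stations, lat, lng):
    # Nearest station by great-circle (haversine) distance in km.
    def haversine_km(lat1, lng1, lat2, lng2):
        p1, p2 = math.radians(lat1), math.radians(lat2)
        dp = math.radians(lat2 - lat1)
        dl = math.radians(lng2 - lng1)
        a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
        return 2.0 * 6371.0 * math.asin(math.sqrt(a))
    return min(stations, key=lambda s: haversine_km(lat, lng, s.lat, s.lng))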
Example #13
def resample(x, y, delta_x=0.01, max_delta_y=400):
    #Create x array with desired spacing
    x_desired = np.arange(0, 14, delta_x)

    x_resampled = [0]
    y_resampled = [0]

    #Find value in x closest to each value in x_desired, then resample the y accordingly
    for val in x_desired:
        nearest_val, nearest_index = find_nearest(x, val)
        if nearest_val not in x_resampled:
            x_resampled.append(nearest_val)
            y_resampled.append(y[nearest_index])

    custom_filter(x_resampled, y_resampled)

    return x_resampled, y_resampled
Example #14
def polesFunc(S, pole_posns, Smin, Smax, coeffs):
    """Return the coefficient of the pole nearest to flux S (zero outside [Smin, Smax])."""

    if S <= Smin or S >= Smax:
        return 0.0

    idx, Snearest = find_nearest(pole_posns[:-1], S)
    return coeffs[idx]  #/(pole_posns[idx+1]-pole_posns[idx])
Example #15
    def find_value_for_state(self, state, values, possible_states):
        effective_resource = round(
            utils.find_nearest(self.get_possible_resources(), state.resource),
            4)

        if not state.is_valid():
            result = 0
        else:
            result = None
            for value, iter_state in zip(values, possible_states):
                if (iter_state.price == state.price
                        and iter_state.resource == effective_resource):
                    result = value
                    break

            if result is None:
                utils.print_state(state)
                raise Exception("State was not found!")

        return result
Example #16
def custom_filter(x, y, delta_y=50):
    x_avg = np.average(x)
    to_delete = []

    _, temp_i = find_nearest(x, x_avg / 1.5)
    y_prev = np.average(y[temp_i - 2:temp_i + 2])  #Initializing y_prev

    #Simple filter based on difference with previous value
    for i, y_val in enumerate(y):
        # Remove points where x > x_avg/1.5 and the difference between sequential y values exceeds the chosen delta_y
        if abs(y_val - y_prev) > delta_y and x[i] > x_avg / 1.5:
            to_delete.append(i)
        else:
            y_prev = y_val

    to_delete.reverse()
    for i in to_delete:
        del x[i]
        del y[i]

    return
Example #17
def find_bc(profile, mc_input):
    s    = read_mesa(profile)
    r    = np.flipud(s.radius)
    m    = np.flipud(s.mass)
    rho  = np.flipud(s.rho)
    p    = np.flipud(s.pressure)

    drhodr = np.gradient(rho,r)
    drhodr_smooth = smooth(drhodr)

    dpdr = np.gradient(p,r)
    dpdr_smooth = smooth(dpdr)

    ic = find_nearest(m, mc_input)
    bc = {'r'      : r[ic],
          'm'      : m[ic],
          'rho'    : rho[ic],
          'p'      : p[ic],
          'drhodr' : drhodr[ic],
          'dpdr'   : dpdr[ic]}

    return bc
Example #18
def calc_vgeo(v, dh, georef=4750.):
    """ 
    Return ZonalSections containing geostrophic velocities 
    relative to specified reference level. 
    
    """
    vgeo = copy.deepcopy(v)  # Copy velocity data structure

    for nprof in range(len(vgeo.x)):  # Loop through profiles
        if not v.mask[:, :, nprof].all():

            # Extract depth and dynamic height profiles at bounds
            z = dh.z
            z1 = dh.z_as_bounds_data[:, :, nprof]
            z2 = dh.z_as_bounds_data[:, :, nprof + 1]
            dh1 = dh.bounds_data[:, :, nprof]
            dh2 = dh.bounds_data[:, :, nprof + 1]

            # Coriolis parameter at profile location
            corf = 2 * ROT * np.sin(np.pi * (vgeo.y[nprof] / 180.))

            # cell width along section
            dx = vgeo.cell_widths[nprof]

            # Clip reference depth using ocean floor.
            maxz = np.min([z1.max(), z2.max()])
            zref = min(georef, maxz)

            # Adjust dh to new reference level
            zind = utils.find_nearest(z, zref)
            dh1 -= dh1[:, zind]
            dh2 -= dh2[:, zind]

            # Calculate geostrophic velocity
            vgeo_profile = (-1. * (G / corf) * ((dh2 - dh1) / dx))
            vgeo.data[:, :, nprof] = vgeo_profile

    return vgeo
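The loop body in calc_vgeo is the standard geostrophic relation v = -(g/f) * d(dh)/dx. A quick scalar sanity check with illustrative numbers only (G_ and ROT_ are stand-ins for the module constants G and ROT):

import numpy as np

G_ = 9.81        # m/s^2
ROT_ = 7.292e-5  # Earth's rotation rate, rad/s
corf = 2 * ROT_ * np.sin(np.pi * (26.0 / 180.0))    # Coriolis parameter at 26N
# 0.05 m of dynamic height difference across a 100 km cell:
vgeo_profile = -1.0 * (G_ / corf) * (0.05 / 100e3)  # ~ -0.08 m/s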
Example #19
    def __init__(self, octave, overlap, fft_len, delta_f, sample_rate):
        """Calculate and cache frequencies used in stitching FFT segments"""
        self.start, self.stop = octave

        # Check invariants
        assert self.start < self.stop        # low freq is lower than high freq
        assert 0 <= overlap < 1              # overlap is percentage
        assert fft_len & (fft_len - 1) == 0  # fft_len is power of 2

        self.span = self.stop - self.start
        self.nvalid_bins = int((fft_len - (fft_len * overlap))) // 2 * 2

        usable_bw = sample_rate * (1 - overlap)
        self.step = int(round(usable_bw / delta_f) * delta_f)

        self.center_freqs = self.cache_center_freqs()
        self.nsegments = len(self.center_freqs)

        self.bin_freqs = self.cache_bin_freqs(delta_f)

        self.bin_start = int(fft_len * (overlap / 2))
        self.bin_stop = int(fft_len - self.bin_start)
        self.max_plotted_bin = utils.find_nearest(self.bin_freqs, self.stop) + 1
        self.bin_offset = (self.bin_stop - self.bin_start) / 2
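To make the cached quantities concrete, here is a worked example with hypothetical numbers (none of these values come from the original code):

fft_len, overlap, sample_rate = 1024, 0.25, 10e6
delta_f = sample_rate / fft_len                          # 9765.625 Hz per bin
nvalid_bins = int(fft_len - fft_len * overlap) // 2 * 2  # 768 bins kept per segment
usable_bw = sample_rate * (1 - overlap)                  # 7.5 MHz advanced per retune
step = int(round(usable_bw / delta_f) * delta_f)         # 7500000 Hz, snapped to the bin grid
bin_start = int(fft_len * (overlap / 2))                 # 128 bins cropped from each edge
bin_stop = int(fft_len - bin_start)                      # 896; bin_stop - bin_start == nvalid_bins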
Example #20
    def NCA(self, t, C):
        AUClist = []
        AUMClist = []
        Cminlist = []
        Cavglist = []
        Cmaxlist = []
        Tmaxlist = []
        Tminlist = []
        Ndoses=len(self.drugSource.parsedDoseList)
        for ndose in range(0,max(Ndoses-1,1)):
            tperiod0 = self.drugSource.parsedDoseList[ndose].t0
            if ndose+1<Ndoses:
                tperiodF = self.drugSource.parsedDoseList[ndose+1].t0-self.model.deltaT
            else:
                tperiodF =  np.max(t)-1
            idx0 = find_nearest(t,tperiod0)
            idxF = find_nearest(t,tperiodF)

            AUC0t = 0
            AUMC0t = 0
            t0 = t[idx0+1]
            for idx in range(idx0,idxF+1):
                dt = (t[idx+1]-t[idx])
                if C[idx+1]>=C[idx]: # Trapezoidal in the rise
                    AUC0t  += 0.5*dt*(C[idx]+C[idx+1])
                    AUMC0t += 0.5*dt*(C[idx]*t[idx]+C[idx+1]*t[idx+1])
                else: # Log-trapezoidal in the decay
                    decrement = C[idx]/C[idx+1]
                    K = math.log(decrement)
                    B = K/dt
                    AUC0t  += dt*(C[idx]-C[idx+1])/K
                    AUMC0t += (C[idx]*(t[idx]-tperiod0)-C[idx+1]*(t[idx+1]-tperiod0))/B-(C[idx+1]-C[idx])/(B*B)

                if idx==idx0:
                    Cmax=C[idx]
                    Tmax=t[idx]-t0
                    Cmin=C[idx]
                    Tmin=t[idx]-t0
                else:
                    if C[idx]<Cmin:
                        Cmin=C[idx]
                        Tmin=t[idx]-t0
                    elif C[idx]>Cmax:
                        Cmax=C[idx]
                        Tmax=t[idx]-t0
                        if ndose==0:
                            Cmin=C[idx]
                            Tmin=t[idx]-t0
            AUClist.append(AUC0t)
            AUMClist.append(AUMC0t)
            Cminlist.append(Cmin)
            Cmaxlist.append(Cmax)
            Tmaxlist.append(Tmax)
            Tminlist.append(Tmin)
            Cavglist.append(AUC0t/(t[idxF]-t[idx0]))

        print("Fluctuation = Cmax/Cmin")
        print("Accumulation(1) = Cavg(n)/Cavg(1) %")
        print("Accumulation(n) = Cavg(n)/Cavg(n-1) %")
        print("Steady state fraction(n) = Cavg(n)/Cavg(last) %")
        for ndose in range(0,len(AUClist)):
            fluctuation = Cmaxlist[ndose]/Cminlist[ndose]
            if ndose>0:
                accumn = Cavglist[ndose]/Cavglist[ndose-1]
            else:
                accumn = 0
            print("Dose #%d: Cavg= %f [%s] Cmin= %f [%s] Tmin= %d [min] Cmax= %f [%s] Tmax= %d [min] Fluct= %f %% Accum(1)= %f %% Accum(n)= %f %% SSFrac(n)= %f %% AUC= %f [%s] AUMC= %f [%s]"%\
                  (ndose+1,Cavglist[ndose], strUnit(self.Cunits.unit), Cminlist[ndose],strUnit(self.Cunits.unit), int(Tminlist[ndose]), Cmaxlist[ndose], strUnit(self.Cunits.unit),
                   int(Tmaxlist[ndose]), fluctuation*100, Cavglist[ndose]/Cavglist[0]*100, accumn*100, Cavglist[ndose]/Cavglist[-1]*100, AUClist[ndose],strUnit(self.AUCunits),
                   AUMClist[ndose],strUnit(self.AUMCunits)))

        self.AUC0t = AUClist[-1]
        self.AUMC0t = AUMClist[-1]
        self.MRT = self.AUMC0t/self.AUC0t
        self.Cmin = Cminlist[-1]
        self.Cmax = Cmaxlist[-1]
        self.Cavg = Cavglist[-1]
        self.fluctuation = self.Cmax/self.Cmin
        self.percentageAccumulation = Cavglist[-1]/Cavglist[0]

        print("   AUC0t=%f [%s]"%(self.AUC0t,strUnit(self.AUCunits)))
        print("   AUMC0t=%f [%s]"%(self.AUMC0t,strUnit(self.AUMCunits)))
        print("   MRT=%f [min]"%self.MRT)
Example #21
def t_tms_from_Xc(M, savefig=None, plot_fig=None, ttms_true=None, Xc=None):
    '''
    Calculates the t(tms) for a given Xc and mass
    Xc: float
    M: float
    '''
# ------------------------------------------------------------------------------
    # Parameters from the models
    mass = np.array([14.6, 12.5, 10.8, 9.6, 8.6, 7.7, 6.4, 5.5, 4.8,
                    4.2, 3.8, 3.4])

    nm = len(mass)
    str_mass = ['M14p60', 'M12p50', 'M10p80', 'M9p600', 'M8p600', 'M7p700',
                'M6p400', 'M5p500', 'M4p800', 'M4p200', 'M3p800', 'M3p400']
    st = ['B0.5', 'B1', 'B1.5', 'B2', 'B2.5', 'B3', 'B4', 'B5', 'B6', 'B7',
          'B8', 'B9']
    zsun = 'Z01400'
    str_vel = ['V60000', 'V70000', 'V80000', 'V90000', 'V95000']
    Hfracf = 0.  # end of main sequence

    # ****
    folder_data = 'tables/models/models_bes/'

    if plot_fig is True:
        plt.xlabel(r'$t/t_{MS}$')
        plt.ylabel(r'$X_c$')
        plt.ylim([0.0, 0.8])
        plt.xlim([0.0, 1.0])

# ------------------------------------------------------------------------------
    # Loop (reading the models)
    typ = (1, 3, 16, 21)  # Age, Lum versus Teff versus Hfrac
    arr_age = []
    arr_Hfr = []
    arr_t_tc = []
    cor = phc.gradColor(np.arange(len(st)), cmapn='inferno')
    iv = 2  # What is this?
    arr_Xc = []
    for i in range(nm):
        file_data = folder_data + str_mass[i] + zsun + str_vel[iv] + '.dat'
        age, lum, Teff, Hfrac = np.loadtxt(file_data, usecols=typ,
                                           unpack=True, skiprows=2)
        arr_age.append(age)
        arr_Hfr.append(Hfrac)

        iMS = np.where(abs(Hfrac - Hfracf) == min(abs(Hfrac - Hfracf)))
        X_c = Hfrac[0:iMS[0][0]]
        arr_Xc.append(X_c)

        t_tc = age[0:iMS[0][0]] / max(age[0:iMS[0][0]])
        arr_t_tc.append(t_tc)
        if plot_fig is True:
            plt.plot(t_tc, X_c, color=cor[i], label=('%s' % st[i]))

# ------------------------------------------------------------------------------
# Interpolation
    k = find_nearest(mass, M)[1]

    if plot_fig is True:
        plt.plot(ttms_true, Xc, 'o')
        plt.autoscale()
        plt.minorticks_on()
        plt.legend(fontsize=10, ncol=2, fancybox=False, frameon=False)

# ------------------------------------------------------------------------------

    if savefig is True:
        pdfname = 'Xc_vs_Tsp.png'
        plt.savefig(pdfname)

    return k, arr_t_tc, arr_Xc
Example #22
def read_befavor_xdr_complete():

    folder_models = 'models/'

    dims = ['M', 'ob', 'Hfrac', 'sig0', 'Rd', 'mr', 'cosi']
    dims = dict(zip(dims, range(len(dims))))
    isig = dims["sig0"]

    ctrlarr = [np.nan] * 7

    # Count the NaN entries in ctrlarr
    cont = sum(math.isnan(v) for v in ctrlarr)

    # Read the grid models, with the interval of parameters.
    xdrPL = folder_models + 'aara_sed.xdr'  # 'PL.xdr'
    # xdrPL = folder_models + 'aara_final.xdr'  # 'PL.xdr'
    # xdrPL = folder_models + 'aara_acs.xdr'  # 'PL.xdr'
    # xdrPL = folder_models + 'disk_flx.xdr'  # 'PL.xdr'

    listpar, lbdarr, minfo, models = bat.readBAsed(xdrPL, quiet=False)

    # F(lbd) = 10^-4 erg/s/cm2/Ang

    for i in range(np.shape(minfo)[0]):
        for j in range(np.shape(minfo)[1]):
            if minfo[i][j] < 0:
                minfo[i][j] = 0.

    for i in range(np.shape(models)[0]):
        for j in range(np.shape(models)[1]):
            if models[i][j] < 0. or models[i][j] == 0.:
                models[i][j] = (models[i][j + 1] + models[i][j - 1]) / 2.

    # n0 to logn0
    listpar[4] = np.log10(listpar[4])
    listpar[4].sort()
    minfo[:, 4] = np.log10(minfo[:, 4])

    if True:
        mask = []
        tmp, idx = find_nearest(lbdarr, 1000)
        for i in range(len(models)):
            if models[i][idx] > 2.21834e-10:
                mask.append(i)
                # print(i)
                # plt.plot(lbdarr, models[i], alpha=0.1)
        tmp, idx = find_nearest(lbdarr, 80)
        for i in range(len(models)):
            if models[i][idx] > 2e-8:
                mask.append(i)
                # print(i)
                # # plt.plot(lbdarr, models[i], alpha=0.1)
        tmp, idx = find_nearest(lbdarr, 850)
        for i in range(len(models)):
            if models[i][idx] > 7e-11:
                mask.append(i)
        #         print(i)
        #         plt.plot(lbdarr, models[i], alpha=0.1)
        # plt.yscale('log')
        # plt.xscale('log')
        # plt.show()

        new_models = np.delete(models, mask, axis=0)
        new_minfo = np.delete(minfo, mask, axis=0)

        models = np.copy(new_models)
        minfo = np.copy(new_minfo)

    # delete columns of fixed par
    cols2keep = [0, 1, 3, 4, 5, 7, 8]
    cols2delete = [2, 6]
    listpar = [listpar[i] for i in cols2keep]
    minfo = np.delete(minfo, cols2delete, axis=1)
    listpar[3].sort()

    # for i in range(len(models)):
    #     plt.plot(lbdarr, models[i], alpha=0.1)
    # plt.yscale('log')
    # plt.xscale('log')
    # plt.show()

    return ctrlarr, minfo, models, lbdarr, listpar, dims, isig
Example #23
def train_bdt(config):
    # Trains BDT with given hyperparams and returns max Z_A (as calculated on bkg MC), requiring at least 4 signal events
    if config["invert_test_and_train"]:
        config["input_file"] = config["input_file_2"]
    else:
        config["input_file"] = config["input_file_1"]
    f = h5py.File(config["input_file"], "r")

    feature_names = utils.load_array(f, 'feature_names')
    training_feature_names = utils.load_array(f, 'training_feature_names')

    print(("Training with the following features: ", training_feature_names))

    #if config["invert_test_and_train"]:
    #print "Inverting test and train splits"
    #if config["sideband"]:
    #  print "Not yet implemented how to handle inverting the test/train set when training on data sidebands, exiting"
    #  return -1

    #global_features = utils.load_array(f, 'global_validation')
    #label = utils.load_array(f, 'label_validation')
    #multi_label = utils.load_array(f, 'multi_label_validation')
    #weights = utils.load_array(f, 'weights_validation')
    #mass = utils.load_array(f, 'mass_validation')

    #global_features_validation = utils.load_array(f, 'global')
    #label_validation = utils.load_array(f, 'label')
    #multi_label_validation = utils.load_array(f, 'multi_label')
    #weights_validation = utils.load_array(f, 'weights')
    #mass_validation = utils.load_array(f, 'mass')

    #else:
    global_features = utils.load_array(f, 'global')
    label = utils.load_array(f, 'label')
    multi_label = utils.load_array(f, 'multi_label')
    weights = utils.load_array(f, 'weights')
    mass = utils.load_array(f, 'mass')

    global_features_validation = utils.load_array(f, 'global_validation')
    label_validation = utils.load_array(f, 'label_validation')
    multi_label_validation = utils.load_array(f, 'multi_label_validation')
    weights_validation = utils.load_array(f, 'weights_validation')
    mass_validation = utils.load_array(f, 'mass_validation')

    if config["sideband"]:
        global_features = utils.load_array(f, 'global_data_sideband')
        label = utils.load_array(f, 'label_data_sideband')
        multi_label = utils.load_array(f, 'multi_label_data_sideband')
        weights = utils.load_array(f, 'weights_data_sideband')
        mass = utils.load_array(f, 'mass_data_sideband')

    global_features_data = utils.load_array(f, 'global_data')
    label_data = utils.load_array(f, 'label_data')
    multi_label_data = utils.load_array(f, 'multi_label_data')
    weights_data = utils.load_array(f, 'weights_data')
    mass_data = utils.load_array(f, 'mass_data')

    print(global_features.shape)
    print(label.shape)
    print(weights.shape)

    print(global_features_validation.shape)
    print(label_validation.shape)
    print(weights_validation.shape)

    print(global_features_data.shape)
    print(label_data.shape)
    print(weights_data.shape)

    x_train, y_train, y_train_multi, weights_train = global_features, label, multi_label, weights
    x_test, y_test, y_test_multi, weights_test = global_features_validation, label_validation, multi_label_validation, weights_validation

    X_train = pandas.DataFrame(data=x_train, columns=training_feature_names)
    X_test = pandas.DataFrame(data=x_test, columns=training_feature_names)
    X_data = pandas.DataFrame(data=global_features_data,
                              columns=training_feature_names)

    if config["multiclassifier"]:
        Y_train = y_train_multi
        Y_test = y_test_multi
    else:
        Y_train = y_train
        Y_test = y_test

    sum_neg_weights = utils.sum_of_weights_v2(weights_train, label, 0)
    sum_pos_weights = utils.sum_of_weights_v2(weights_train, label, 1)

    print(sum_pos_weights, sum_neg_weights)

    d_train = xgboost.DMatrix(X_train, label=Y_train, weight=weights_train)
    d_test = xgboost.DMatrix(X_test, label=Y_test)
    d_data = xgboost.DMatrix(X_data)

    param = {
        'max_depth': config["max_depth"],
        'eta': config["eta"],
        'subsample': config["subsample"],
        'colsample_bytree': config["colsample_bytree"],
        'min_child_weight': config["min_child_weight"],
        'gamma': config["gamma"],
        'reg_alpha': config["reg_alpha"],
        'reg_lambda': config["reg_lambda"],
        'scale_pos_weight': sum_neg_weights / sum_pos_weights,
        'objective': 'binary:logistic',
        'nthread': 16,
    }

    if config["multiclassifier"]:
        param["num_class"] = config["n_class"]
        param["objective"] = "multi:softprob"
        param["scale_pos_weight"] = 1

    evallist = [(d_train, 'train'), (d_test, 'test')]
    progress = {}

    n_round = config["n_round"]
    print(param, n_round)

    # train
    bdt = xgboost.train(param,
                        d_train,
                        n_round,
                        evallist,
                        evals_result=progress)

    bdt.save_model(config["tag"] + "_bdt.xgb")
    model = bdt.get_dump()

    input_variables = []
    for name in feature_names:
        input_variables.append((name, 'F'))
    #tmva_utils.convert_model(model, input_variables = input_variables, output_xml = config["tag"] + '_bdt.xml')

    # predict
    pred_train = bdt.predict(d_train, output_margin=config["multiclassifier"])
    pred_test = bdt.predict(d_test, output_margin=config["multiclassifier"])
    pred_data = bdt.predict(d_data, output_margin=config["multiclassifier"])

    fpr_train, tpr_train, thresh_train = metrics.roc_curve(
        y_train, pred_train, pos_label=1, sample_weight=weights_train)
    fpr_test, tpr_test, thresh_test = metrics.roc_curve(
        y_test, pred_test, pos_label=1, sample_weight=weights_test)

    auc_train, auc_train_unc = utils.auc_and_unc(y_train, pred_train,
                                                 weights_train, 100)
    auc_test, auc_test_unc = utils.auc_and_unc(y_test, pred_test, weights_test,
                                               100)

    #auc_train = metrics.auc(fpr_train, tpr_train, reorder = True)
    #auc_test  = metrics.auc(fpr_test , tpr_test , reorder = True)

    print(("Training AUC: %.3f" % auc_train))
    print(("Testing  AUC: %.3f" % auc_test))

    # estimate z_a w/at least 4 signal events
    n_quantiles = 25
    signal_mva_scores = {
        "bdt_score": ks_test.logical_vector(pred_test, y_test, 1)
    }
    bkg_mva_scores = {
        "bdt_score": ks_test.logical_vector(pred_test, y_test, 0)
    }
    data_mva_scores = {"bdt_score": pred_data}

    signal_mass = ks_test.logical_vector(mass_validation, y_test, 1)
    bkg_mass = ks_test.logical_vector(mass_validation, y_test, 0)

    signal_weights = ks_test.logical_vector(weights_validation, y_test, 1)
    bkg_weights = ks_test.logical_vector(weights_validation, y_test, 0)

    optimization_vars = config["optimization_vars"].split(
        ",") if config["optimization_vars"] else []
    for var in optimization_vars:
        signal_mva_scores[var] = ks_test.logical_vector(
            utils.load_array(f, var + '_validation'), y_test, 1)
        bkg_mva_scores[var] = ks_test.logical_vector(
            utils.load_array(f, var + '_validation'), y_test, 0)
        data_mva_scores[var] = utils.load_array(f, var + '_data')

    signal_events = {
        "mass": signal_mass,
        "weights": signal_weights,
        "mva_score": signal_mva_scores
    }
    bkg_events = {
        "mass": bkg_mass,
        "weights": bkg_weights,
        "mva_score": bkg_mva_scores
    }
    data_events = {
        "mass": mass_data,
        "weights": weights_data,
        "mva_score": data_mva_scores
    }

    za, za_unc, s, b, sigma_eff = significance_utils.za_scores(
        n_quantiles, signal_events, bkg_events, False)
    za_data, za_unc_data, s_data, b_data, sigma_eff_data = significance_utils.za_scores(
        n_quantiles, signal_events, data_events, True)
    za = numpy.asarray(za)
    za_data = numpy.asarray(za_data)

    if numpy.all(za == 0) or numpy.all(za_data == 0):
        # keep the same return arity as the normal exit path below
        return 0.0, 0.0, 0.0, 0.0, auc_train, auc_train_unc, auc_test, auc_test_unc

    max_za_mc = numpy.max(za[numpy.where(numpy.asarray(s) >= 4.)])
    max_za_data = numpy.max(za_data[numpy.where(numpy.asarray(s_data) >= 4.)])

    max_za_mc, max_za_mc_idx = utils.find_nearest(za, max_za_mc)
    max_za_data, max_za_data_idx = utils.find_nearest(za_data, max_za_data)

    max_za_mc_unc = za_unc[max_za_mc_idx]
    max_za_data_unc = za_unc_data[max_za_data_idx]

    print(("Max Z_A on MC:   %.4f +/- %.4f" % (max_za_mc, max_za_mc_unc)))
    print(("Max Z_A on data: %.4f +/- %.4f" % (max_za_data, max_za_data_unc)))

    return max_za_mc, max_za_mc_unc, max_za_data, max_za_data_unc, auc_train, auc_train_unc, auc_test, auc_test_unc
Example #24
alpha *= cm_Rsun
r *= cm_Rsun
m *= g_Msun
m_cut *= g_Msun
input_mc *= g_Msun

drhodr = np.gradient(rho, r)
drhodr_smooth = smooth(drhodr)

dpdr = np.gradient(p, r)
dpdr_smooth = smooth(dpdr)

kernel.set_hsoft(input_h * cm_Rsun)

ic = find_nearest(m, m_cut)
rc = r[ic]
mc = m[ic]  # mass m(r) at the transition (cutoff) radius, as distinct from the mass of the central point particle
rhoc = rho[ic]
pc = p[ic]

X_mle = np.flipud(s.x_mass_fraction_H)[ic]
Y_mle = np.flipud(s.y_mass_fraction_He)[ic]
Z_mle = np.flipud(s.z_mass_fraction_metals)[ic]

r_MLE = alpha * xi
len_MLE = len(r_MLE)
rprofile = np.concatenate((r_MLE[:-1], r[ic:]))
print(alpha, rho0)
Example #25
def read_data(dirName, varName, latName, lonName, userVariables, filelist=None):
    '''
        Purpose::
            Read gridded data into (t, lat, lon) arrays for processing

        Inputs::
            dirName: a string representing the directory to the MERG files in NETCDF format
            varName: a string representing the variable name to use from the file
            latName: a string representing the latitude from the file's metadata
            lonName: a string representing the longitude from the file's metadata
            filelist (optional): a list of strings representing the filenames between the start and end dates provided

        Outputs::
            A 3D masked array (t,lat,lon) with only the variables which meet the minimum temperature
            criteria for each frame

        Assumptions::
            (1) All the files requested to extract data are from the same instrument/model, and thus have the same
            metadata properties (varName, latName, lonName) as entered
            (2) Assumes rectilinear grids for input datasets i.e. lat, lon will be 1D arrays
    '''

    global LAT
    global LON

    timeName = 'time'

    filelistInstructions = dirName+'/*'
    if filelist is None:
        filelist = glob.glob(filelistInstructions)


    inputData = []
    timelist = []
    time2store = None
    tempMaskedValueNp = []


    filelist.sort()
    nfiles = len(filelist)

    # Crash nicely if there are no netcdf files
    if nfiles == 0:
        print('Error: no files in this directory! Exiting elegantly')
        sys.exit()
    else:
        # Open the first file in the list to read in lats, lons and generate the  grid for comparison
        tmp = Dataset(filelist[0], 'r+', format='NETCDF4')

        alllatsraw = tmp.variables[latName][:]
        alllonsraw = tmp.variables[lonName][:]
        alllonsraw[alllonsraw > 180] = alllonsraw[alllonsraw > 180] - 360.  # convert to -180,180 if necessary

        #get the lat/lon info data (different resolution)
        latminNETCDF = utils.find_nearest(alllatsraw, float(userVariables.LATMIN))
        latmaxNETCDF = utils.find_nearest(alllatsraw, float(userVariables.LATMAX))
        lonminNETCDF = utils.find_nearest(alllonsraw, float(userVariables.LONMIN))
        lonmaxNETCDF = utils.find_nearest(alllonsraw, float(userVariables.LONMAX))
        latminIndex = (np.where(alllatsraw == latminNETCDF))[0][0]
        latmaxIndex = (np.where(alllatsraw == latmaxNETCDF))[0][0]
        lonminIndex = (np.where(alllonsraw == lonminNETCDF))[0][0]
        lonmaxIndex = (np.where(alllonsraw == lonmaxNETCDF))[0][0]

        #subsetting the data
        latsraw = alllatsraw[latminIndex: latmaxIndex]
        lonsraw = alllonsraw[lonminIndex:lonmaxIndex]

        LON, LAT = np.meshgrid(lonsraw, latsraw)

        latsraw = []
        lonsraw = []
        tmp.close()

    for files in filelist:

        try:
            thisFile = Dataset(files, 'r', format='NETCDF4')
            #clip the dataset according to user lat,lon coordinates
            #mask the data and fill with zeros for later
            tempRaw = thisFile.variables[varName][:, latminIndex:latmaxIndex, lonminIndex:lonmaxIndex].astype('int16')
            tempMask = ma.masked_array(tempRaw, mask=(tempRaw > userVariables.T_BB_MAX), fill_value=0)
            #get the actual values that the mask returned
            tempMaskedValue = ma.zeros((tempRaw.shape)).astype('int16')

            for index, value in utils.maenumerate(tempMask):
                timeIndex, latIndex, lonIndex = index
                tempMaskedValue[timeIndex, latIndex, lonIndex] = value

            xtimes = thisFile.variables[timeName]

            #convert this time to a python datastring
            time2store, _ = get_model_times(xtimes, timeName)

            #extend instead of append because get_model_times returns a list already and we don't
            #want a list of list
            timelist.extend(time2store)
            inputData.extend(tempMaskedValue)
            thisFile.close()
            thisFile = None

        except Exception:
            print('bad file!', files)

    inputData = ma.array(inputData)

    return inputData, timelist, LAT, LON
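A hypothetical call, assuming a user-variables object that exposes the attributes the function actually reads (LATMIN, LATMAX, LONMIN, LONMAX, T_BB_MAX); the directory, variable and metadata names below are placeholders:

class _UserVars(object):
    LATMIN, LATMAX = '-5.0', '5.0'
    LONMIN, LONMAX = '-10.0', '10.0'
    T_BB_MAX = 250

data, times, lats, lons = read_data('/path/to/merg_files', 'ch4',
                                    'latitude', 'longitude', _UserVars())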
Example #26
ax.yaxis.set_ticks_position('both')
ax.grid(True)
plt.plot(fpr_re, tpr_re, color='darkred', lw=2, label='RelIso')
#plt.plot(fpr_bdt, tpr_bdt, color='blue', lw=2, label='BDT')
plt.plot(fpr_nn, tpr_nn, color='darkorange', lw=2, label='DNN')
plt.xscale('log')
plt.grid()

plt.xlim([0.005, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate (bkg. eff.)')
plt.ylabel('True Positive Rate (sig. eff.)')
plt.legend(loc='lower right')
plt.savefig('plot.pdf')

value1, idx1 = utils.find_nearest(tpr_nn, 0.90)
value2, idx2 = utils.find_nearest(tpr_nn, 0.99)
value3, idx3 = utils.find_nearest(fpr_nn, 0.01)
value4, idx4 = utils.find_nearest(fpr_nn, 0.1)

#value1BDT, idx1BDT = utils.find_nearest(tpr_bdt, 0.90)
#value2BDT, idx2BDT = utils.find_nearest(tpr_bdt, 0.99)
#value3BDT, idx3BDT = utils.find_nearest(fpr_bdt, 0.01)
#value4BDT, idx4BDT = utils.find_nearest(fpr_bdt, 0.1)

value1RE, idx1RE = utils.find_nearest(tpr_re, 0.90)
value2RE, idx2RE = utils.find_nearest(tpr_re, 0.99)
value3RE, idx3RE = utils.find_nearest(fpr_re, 0.01)
value4RE, idx4RE = utils.find_nearest(fpr_re, 0.1)

print('Neural net FPR, TPR: (%.3f, %.3f)' % (fpr_nn[idx1], tpr_nn[idx1]))
Example #27
def tf_detail(TF, t, f, title=None, t_detail=None, x=None, display_op=np.abs,
              figsize=None, cmap='binary', vmin=None, vmax=None):
    """
    Detailed time-frequency representation (TFR).

    Show the TFR in the top plot. Also show the frequency representation at
    specific time instants (the last time by default) in the plot on the right.
    If specified, the original time signal ``x`` is shown in the bottom plot.

    Args:
        TF (:class:`numpy.ndarray`): time-frequency representation
        t (:class:`numpy.ndarray`): time vector
        f (:class:`numpy.ndarray`): frequency vector
        title (``string``): title of the plot
        t_detail (``float`` or ``list``): time instant(s) to be detailed
        x (:class:`numpy.ndarray`): original time domain signal. If *None*, no
            time domain plot is shown
        display_op (``function``): operator to apply to the TF representation
            (e.g. :func:`numpy.angle`)
        figsize (``tuple``): matplotlib's figure size (optional)
        cmap (``string``): colormap to use in the TF representation
        vmin (``float``): lower limit of the colormap
        vmax (``float``): upper limit of the colormap

    Returns:
        ``(handles, ...)``: tuple of handles to plotted elements. They can be
            used to create animations

    Note:
        ``vmin`` and ``vmax`` are useful when comparing different time-frequency
        representations, so they all share the same color scale.


    Note:
        It is the caller's responsibility to issue :func:`matplotlib.pyplot.show()`
        if necessary.

    """
    if figsize is not None:
        fig = plt.figure(figsize=figsize)
    else:
        fig = plt.figure()
    opTF = display_op(TF)

    if t_detail is None:
        wr = [1, 2, 20]
        detail = None
    else:
        wr = [1, 2, 20, 6]

    if x is None:
        hr = [1]
        axOnset = None
    else:
        hr = [3, 1]

    gs = gridspec.GridSpec(len(hr), len(wr),
                           width_ratios=wr,
                           height_ratios=hr
                           )
    gs.update(wspace=0.0, hspace=0.0)  # set the spacing between axes.


    axCB = fig.add_subplot(gs[0])
    axTF = fig.add_subplot(gs[2])

    if x is not None:
        axOnset = fig.add_subplot(gs[len(wr)+2], sharex=axTF)

    if t_detail is not None:
        axF = fig.add_subplot(gs[3], sharey=axTF)

    nice_freqs = nice_log_values(f)

    # TF image
    # im = axTF.pcolormesh(t, f, opTF, cmap=cmap)
    im = axTF.imshow(opTF,
                     extent=[min(t), max(t), min(f), max(f)],
                     cmap=cmap,
                     vmin=vmin,
                     vmax=vmax,
                     origin='lower'
                     )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        axTF.set_yscale('log')
        axTF.set_yticks(nice_freqs)
        axTF.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
        axTF.invert_yaxis()

    if title is not None:
        axTF.set_title(title)

    # Add colorbar
    cb = plt.colorbar(im, ax=axTF, cax=axCB)
    cb.ax.yaxis.set_ticks_position('left')

    # TF detail
    # find detail index
    tf_line = None
    tf_x_min, tf_x_max = 0, np.max(opTF)
    if vmin is not None:
        tf_x_min = vmin
    if vmax is not None:
        tf_x_max = vmax

    if t_detail is not None:
        if isinstance(t_detail, np.ndarray):
            t_detail = t_detail.tolist()
        elif not isinstance(t_detail, list):
            t_detail = [t_detail]
        t_detail, idx = find_nearest(t, t_detail)
        # axF.invert_xaxis()
        detail = axF.semilogy(opTF[:, idx], f)
        axF.set_yticks(nice_freqs)
        axF.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
        axF.xaxis.set_ticks_position('top')
        axF.axis('tight')
        axF.set_xlim(tf_x_min, tf_x_max)
        axF.yaxis.set_ticks_position('right')
        plt.setp(axF.get_xaxis().get_ticklabels(), rotation=-90 )
        tf_line = axTF.plot([t_detail, t_detail], [np.min(f), np.max(f)])
        # tf_line = [axTF.axvline(td) for td in t_detail]
    axTF.axis('tight')


    # onset signal
    t_line = None
    if axOnset is not None:
        plt.setp(axTF.get_xticklabels(), visible=False)
        axOnset.plot(t, x, color='k')
        if t_detail is not None:
            t_line = axOnset.plot([t_detail, t_detail], [np.min(x), np.max(x)])
            # t_line = [axOnset.axvline(td) for td in t_detail]
        axOnset.yaxis.set_ticks_position('right')
        axOnset.axis('tight')

    # plt.show()

    return (fig, im, tf_line, t_line, detail)
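A hypothetical call that exercises the detail and onset panels with synthetic data (shapes only need to satisfy TF.shape == (len(f), len(t))):

import numpy as np

t = np.linspace(0.0, 2.0, 200)
f = np.logspace(np.log10(55.0), np.log10(880.0), 64)
TF = np.random.rand(len(f), len(t)) * np.exp(2j * np.pi * np.random.rand(len(f), len(t)))
x = np.sin(2 * np.pi * 3.0 * t)
fig, im, tf_line, t_line, detail = tf_detail(TF, t, f, title='demo',
                                             t_detail=1.0, x=x)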
Example #28
def plot_connections(connection, title=None, f_detail=None, display_op=np.abs,
                     detail_type='polar', cmap='binary', vmin=None, vmax=None):
    """plot_connections(connection, t_detail=None, display_op=np.abs,
                        detail_type='polar')

    Args:
        connection (:class:`.Connection`): connection object
        title (``string``): Title to be displayed
        f_detail (``float``): frequency of the detail plot
        display_op (``function``): operator to apply to the connection
            matrix (e.g. :func:`numpy.abs`)
        detail_type (``string``): detail complex display type (``'cartesian',
            'polar', 'magnitude'`` or ``'phase'``)
        cmap (``string``): colormap to use in the TF representation
        vmin (``float``): lower limit of the colormap
        vmax (``float``): upper limit of the colormap

    Note:
        It is the caller's responsibility to issue :func:`matplotlib.pyplot.show()`
        if necessary.

    """

    fig = plt.figure()

    if f_detail is not None:
        gs = gridspec.GridSpec(2, 1,
                               width_ratios=[1],
                               height_ratios=[3, 1]
                               )
        gs.update(wspace=0.0, hspace=0.0)  # set the spacing between axes.
        axConn = fig.add_subplot(gs[0])
        axDetail = fig.add_subplot(gs[1])
    else:
        axConn = fig.add_subplot(1, 1, 1)

    f_source = connection.source.f
    f_dest = connection.destination.f
    matrix = connection.matrix
    opMat = display_op(matrix)

    # axConn.pcolormesh(f_source, f_dest, opMat, cmap=cmap)
    axConn.imshow(opMat,
                     extent=[min(f_source), max(f_source),
                             min(f_dest), max(f_dest)],
                     cmap=cmap,
                     vmin=vmin,
                     vmax=vmax,
                     origin='lower'
                     )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        # axConn.invert_yaxis()
        axConn.set_xscale('log')
        axConn.set_xticks(nice_log_values(f_source))
        axConn.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
        axConn.set_yscale('log')
        axConn.set_yticks(nice_log_values(f_dest))
        axConn.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
        axConn.set_ylabel(r'$f_{\mathrm{dest}}$')

    if title is not None:
        axConn.set_title(title)

    if f_detail is None:
        axConn.set_xlabel(r'$f_{\mathrm{source}}$')
    else:
        (f_detail, idx) = find_nearest(f_dest, f_detail)
        conn = matrix[idx, :]

        axConn.plot([np.min(f_source), np.max(f_source)],
                    [f_detail, f_detail],
                    color='r')

        scalar_formatter = mpl.ticker.ScalarFormatter()

        if detail_type == 'polar':
            axDetail.semilogx(f_source, np.abs(conn))
            axDetailb = axDetail.twinx()
            axDetailb.semilogx(f_source, np.angle(conn), color='r')
            axDetailb.set_xticks(nice_log_values(f_source))
            axDetailb.get_xaxis().set_major_formatter(scalar_formatter)
            axDetailb.set_ylim([-np.pi, np.pi])
            axDetail.axis('tight')
        elif detail_type == 'magnitude':
            y_min, y_max = 0, np.max(np.abs(conn))
            if vmin is not None:
                y_min = vmin
            if vmax is not None:
                y_max = vmax
            axDetail.semilogx(f_source, np.abs(conn))
            axDetail.set_xticks(nice_log_values(f_source))
            axDetail.get_xaxis().set_major_formatter(scalar_formatter)
            # axDetail.axis('tight')
            axDetail.set_ylim([y_min, y_max])
        elif detail_type == 'phase':
            axDetail.semilogx(f_source, np.angle(conn), color='r')
            axDetail.set_xticks(nice_log_values(f_source))
            axDetail.get_xaxis().set_major_formatter(scalar_formatter)
            axDetail.set_ylim([-np.pi, np.pi])
        else:
            axDetail.semilogx(f_source, np.real(conn))
            axDetailb = axDetail.twinx()
            axDetailb.semilogx(f_source, np.imag(conn), color='r')
            axDetailb.set_xticks(nice_log_values(f_source))
            axDetailb.get_xaxis().set_major_formatter(scalar_formatter)
            axDetail.axis('tight')
        axDetail.set_xlabel(r'$f_{\mathrm{source}}$')

        axConn.set(aspect=1, adjustable='box')
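A hypothetical call with a stand-in connection object; the real Connection class is not shown here, so SimpleNamespace only mimics the three attributes the function reads (source.f, destination.f, matrix):

import numpy as np
from types import SimpleNamespace

f_src = np.logspace(np.log10(55.0), np.log10(880.0), 32)
f_dst = np.logspace(np.log10(55.0), np.log10(880.0), 48)
mag = np.random.rand(len(f_dst), len(f_src))
phase = np.exp(2j * np.pi * np.random.rand(len(f_dst), len(f_src)))
conn = SimpleNamespace(source=SimpleNamespace(f=f_src),
                       destination=SimpleNamespace(f=f_dst),
                       matrix=mag * phase)
plot_connections(conn, title='demo connection', f_detail=220.0)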
Example #29
def simulate(family,params,paramsList,bins,\
             seed=None,N=None,noise=None,output=None,\
             dump=None,version=2,verbose=False,area=None,\
             skadsf=None,pole_posns=None,simarrayf=None,\
             simdocatnoise=True):
    """
    Based on lumfunc.simtable()
    Specify family + parameters
    Specify number of sources
    Build CDF (or set up function)
    Draw deviates
    Sample CDF given deviates
    Add noise (None or some value)
    Bin
    Write out
    Return

    Look at simulate.ipynb for an example run

    Need to add normalization capability
    
    Families:
    ========

    skads:
    -----

    r=countUtils.simulate('skads',[0.01,85.0],['S0','S1'],numpy.linspace(-60.0,100.0,26),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    ppl:
    ---

    r=countUtils.simulate('ppl',[1000.0,5.0,75.0,-1.6],['C','S0','S1','a0'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,75.0,-1.6,-2.5],['C','S0','S1','S2','a0','a1'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,40.0,75.0,-1.6,-2.5,-1.0],['C','S0','S1','S2','S3','a0','a1','a2'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,40.0,75.0,90.0,-1.6,-2.5,-1.0,2.0],['C','S0','S1','S2','S3','S4','a0','a1','a2','a3'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    poly:
    ----

    r=countUtils.simulate('poly',[5.0,75.0,1.0],['S0','S1','p0'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('poly',[5.0,75.0,1.0,-1.0],['S0','S1','p0','p1'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    r=countUtils.simulate('poly',[5.0,75.0,1.0,-1.0,5.0],['S0','S1','p0','p1','p2'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    bins:
    ----

    

    test:
    ----

    array:
    -----


    """

    # Initialize seed for variates AND any noise
    if seed is not None:
        numpy.random.seed(seed=seed)

    if family == 'ppl':
        C = alpha = Smin = Smax = beta = S0 = gamma = S1 = delta = S2 = -99.0
        nlaws = int(0.5 * len(paramsList) - 1)
        C = params[paramsList.index('C')]
        Smin = params[paramsList.index('S0')]
        alpha = params[paramsList.index('a0')]
        if nlaws > 1:
            beta = params[paramsList.index('a1')]
            S0 = params[paramsList.index('S1')]
        if nlaws > 2:
            gamma = params[paramsList.index('a2')]
            S1 = params[paramsList.index('S2')]
        if nlaws > 3:
            delta = params[paramsList.index('a3')]
            S2 = params[paramsList.index('S3')]
        iSmax = int([i for i in paramsList if i.startswith('S')][-1][-1])
        Smax = params[paramsList.index('S%i' % iSmax)]

        function = lambda S:powerLawFuncWrap(nlaws,S,C,alpha,-99.0,beta,\
                                      Smin/1e6,Smax/1e6,S0/1e6,gamma,S1/1e6,delta,S2/1e6,1.0)

    elif family == 'test':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        function = lambda S: S**2

    elif family == 'poly':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        coeffs = [
            params[paramsList.index(p)] for p in paramsList
            if p.startswith('p')
        ]
        S_1 = 1.0
        function = lambda S: polyFunc(S, S_1, Smin, Smax, coeffs)

    elif family == 'bins':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        coeffs = [
            params[paramsList.index(p)] for p in paramsList
            if p.startswith('b')
        ]
        if pole_posns is None:
            pole_posns = numpy.logspace(numpy.log10(Smin), numpy.log10(Smax),
                                        len(coeffs) + 1)
        assert (len(coeffs) == len(pole_posns) -
                1), '***Mismatch in number of poles!!'
        Smin = pole_posns[0]
        Smax = pole_posns[-1]
        function = lambda S: polesFunc(S, pole_posns, Smin, Smax, coeffs)

    elif family == 'array':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        assert (simarrayf
                is not None), '***Need to specify an input simulation!'
        print('Reading %s...' % simarrayf)
        dataMatrix = numpy.genfromtxt(simarrayf)
        dndsInArr = dataMatrix[:, 4]
        binsDogleg = numpy.concatenate((dataMatrix[:, 0], [dataMatrix[-1, 1]]))
        binsMedian = dataMatrix[:, 2]
        assert ((
            medianArray(binsDogleg) == binsMedian).all()), '***bin mismatch!'
        Smin = binsDogleg[0]
        Smax = binsDogleg[-1]
        if not simdocatnoise:
            Smin = -5.01  #-2.01 # binsMedian[0]
        print(dndsInArr)
        function = lambda S: arrayFunc(S, binsMedian, dndsInArr, Smin, Smax)

        #function2=lambda S:arrayFunc(S,binsMedian,dndsInArr,Smin,Smax)
        #for x in numpy.linspace(-10.0,100.0,500):
        #    print x,function(x),function2(x)
        #sys.exit(0)

    elif family == 'skads':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        function = None
        assert (skadsf is not None), '***Need to specify input SKADS file!'
        print('Reading %s...' % skadsf)
        R = Jy2muJy * 10**numpy.genfromtxt(skadsf)
        numpy.ndarray.sort(R)
        iRmin, Rmin = find_nearest(R, Smin)
        iRmax, Rmax = find_nearest(R, Smax)
        F = R[iRmin:iRmax]
        print('%i/%i sources ingested after Smin/Smax cuts' % (len(F), len(R)))
        if N is not None:
            F = numpy.random.choice(F, size=N, replace=False)
        N = len(F)
        print('NSKADS = %i' % N)

    elif family == 'Lrad':
        Smin = params[paramsList.index('LoptMIN')]
        Smax = params[paramsList.index('LoptMAX')]
        A = params[paramsList.index('A')]
        B = params[paramsList.index('B')]
        sigma_Lrad = params[paramsList.index('sigma_Lrad')]
        #print Loptmin,Loptmax
        print('Doing LF simulation')
        inta = None
        #intg = integrate.quad(lambda Lopt:Lopt2Lrad(Lopt,A=A,B=B,flux=False),Loptmin,Loptmax,epsabs=0.)[0]

        function = lambda Lopt: Lopt2Lrad(Lopt, A=A, B=B, flux=False)
    elif family in ['LFsch', 'LFdpl']:
        redshift = 0.325
        z_min = 0.2
        z_max = 0.45
        Lmin = params[paramsList.index('LMIN')]
        Lmax = params[paramsList.index('LMAX')]
        [Smin, Smax] = SMIN_SIM, SMAX_SIM
        print(Smin, Smax)
        [Smin, Smax] = get_sbins([10**Lmin, 10**Lmax], redshift, dl) * 1e6
        print(Smin, Smax, Lmin, Lmax)
        print('Doing LF simulation')
        Vmax = get_Vmax(z_min, z_max)
        dsdl = get_dsdl(redshift, dl)
        inta = None
        intg = integrate.quad(lambda S:LF(S,redshift,dsdl,Vmax,dl,params=params,paramsList=paramsList,\
                inta=inta,area=area,family=family),Smin*1e-6,Smax*1e-6,epsabs=0.)[0]
        print(intg * Vmax)
        print(Vmax)
        area = N / (Vmax * intg)
        area1 = area
        print(N, area)

        function = lambda S:dNdS_LF(S,z_min,redshift,z_max,dl,params=params,paramsList=paramsList,\
                area=area,family=family)

    if family != 'skads':
        # Set up the 'rough' array
        gridlength = 10000  # Good enough to prevent bleeding at the edges
        Ss = numpy.linspace(Smin, Smax, gridlength)
        print(Smin, Smax)
        # Sanity check: evaluate the function at one sample flux
        kl = function(20 / 1e6)
        print(kl)
        values = numpy.array([function(ix / 1e6) for ix in Ss])
        print(values[:10])
        # Build the CDF
        CDF = buildCDF(values)
        plt.plot(CDF, Ss)
        #plt.xscale('log')
        #plt.yscale('log')
        plt.ylabel('Flux')
        plt.xlabel('CDF')
        plt.show()
        print(CDF.max())
        # Create the interpolant object
        sampler = interp1d(CDF, Ss)

        plt.plot(Ss, values, '.')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlabel('Flux')
        plt.ylabel('LF')
        plt.show()

        x = numpy.linspace(0., 1., 10000)
        z = numpy.logspace(0, 1, 1000) / 10.
        f = sampler(z)
        y = sampler(x)
        plt.yscale('log')
        #plt.xscale('log')
        plt.axhline(Smin)
        plt.axhline(Smax)
        plt.xlabel('R')
        plt.ylabel('Sampler(R) [flux]')
        #plt.plot(x,y)
        plt.plot(z, f)
        plt.show()

        # Test that the sampler extrema match
        print(Smin, sampler(0.0))
        print(Smax, sampler(0.99999))
        #        assert(numpy.isclose(sampler(0.0),Smin)[0])
        #        assert(numpy.isclose(sampler(0.99999),Smax,atol=1.0e-3)[0])

        # Draw the random deviates
        R = numpy.random.rand(N)
        print(len(R))
        F = sampler(R)
        Nt = 0.
        for f in F:
            if f < 1.:
                Nt += 1.
        F = F[F > 1.]
        print(N, Nt)
        print(len(F))
        Nt = len(F)

        # Normalize here - this is N2C
        # EITHER N is specified explicitly
        # BOTH N2C and C2N are useful
        # Integrate the original function
        A = integrate.quad(function, Smin, Smax)[0]
        #        print A,N
        # Bin the random samples
        bbins = numpy.linspace(Smin, Smax, 100)
        E = numpy.histogram(F, bins=bbins)[0]
        # And calculate their area
        G = integrate.trapz(E, x=medianArray(bbins))
        #        print G
        #        print G/A
        # Optional diagnostic: histogram of the draws against the rescaled input counts
        if False:
            plt.xlim(0.0, 100.0)
            plt.xlabel('S / $\mu$Jy')
            plt.hist(F, bins=bbins)
            plt.plot(Ss, values * G / A, 'r')
            plt.savefig('N2C.pdf')
            plt.close()

    # Want: C given N, to compare to original C
    numbins = 1000
    if family == 'ppl':
        C_calc = N / N2C(function, F, Smin, Smax, numbins)
        #print N2C(function,F,Smin,Smax,numbins),C
        print('For %i sources, C is %e (should be %e)' % (N, C_calc, C))
    elif family == 'poly':
        C_calc = log10(N / N2C(function, F, Smin, Smax, numbins))
        print('For %i sources, C is %e (should be %e)' % (N, C_calc, coeffs[0]))

    # Dump noiseless fluxes to file
    puredumpf = dump
    idl_style = False
    numpy.savetxt(puredumpf, F)
    print('Draws (noiseless) are in %s' % puredumpf)
    writeCountsFile(output[1],
                    bins,
                    F,
                    area,
                    idl_style=idl_style,
                    verbose=verbose)
    print(output[1])
    # Now add noise if requested
    if simdocatnoise:
        numpy.random.seed(seed=SEED_SIM)
        poln = False
        if poln:
            F += rice.rvs(F / noise, size=len(F))
        else:
            F += numpy.random.normal(0.0, noise, len(F))

    # Dump noisy fluxes to file
    if dump is not None:
        noisydumpf = '%s_noisy.txt' % puredumpf.split('.')[0]
        numpy.savetxt(noisydumpf, F)
        print('Draws (noisy) are in %s' % noisydumpf)
        print('Minimum flux in catalogue = %f' % F.min())
        print('Maximum flux in catalogue = %f' % F.max())

    # Write counts file
    print(output[0])
    writeCountsFile(output[0],
                    bins,
                    F,
                    area,
                    idl_style=idl_style,
                    verbose=verbose)
    print(N, area)  #,area1

    return F
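
The sampling machinery above is inverse-transform sampling: tabulate the counts function on a flux grid, build and normalise its CDF, invert it with an interpolant, and push uniform deviates through it. A minimal self-contained sketch of that idea, assuming a toy power-law dN/dS; the grid length and normalisation mimic the routine, while `buildCDF` itself is not shown in the source:

import numpy
from scipy.interpolate import interp1d

def draw_fluxes(dnds, Smin, Smax, N, gridlength=10000):
    # Tabulate the counts function on a regular flux grid
    Ss = numpy.linspace(Smin, Smax, gridlength)
    values = dnds(Ss)
    # Build a normalised CDF (a stand-in for buildCDF)
    CDF = numpy.cumsum(values)
    CDF = (CDF - CDF[0]) / (CDF[-1] - CDF[0])
    # Invert it: a uniform deviate in [0, 1] maps to a flux
    sampler = interp1d(CDF, Ss)
    return sampler(numpy.random.rand(N))

# Toy usage: Euclidean-ish counts dN/dS ~ S^-2.5 between 1 and 100 uJy
F = draw_fluxes(lambda S: S**-2.5, 1.0, 100.0, N=1000)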
Example #30
  def train_with_early_stopping(self):
    best_auc = 0.5
    keep_training = True

    max_batch_size = 10000
    epochs = 1
    bad_epochs = 0
    while keep_training:
      auc_train, auc, rocs = self.train(epochs, self.batch_size_train)
      improvement = ((1-best_auc)-(1-auc))/(1-best_auc)
      overfit = (auc_train - auc) / auc_train
      if improvement > 0.01:
          print(("Improvement in (1-AUC) of %.3f percent! Keeping batch size the same" % (improvement*100.)))
          best_auc = auc
          bad_epochs = 0
      elif self.batch_size_train * 4 < max_batch_size:
          print(("Improvement in (1-AUC) of %.3f percent. Increasing batch size" % (improvement*100.)))
          self.batch_size_train *= 4
          bad_epochs = 0
          if auc > best_auc:
              best_auc = auc
      elif self.batch_size_train < max_batch_size:
          print(("Improvement in (1-AUC) of %.3f percent. Increasing batch size" % (improvement*100.)))
          self.batch_size_train = max_batch_size
          bad_epochs = 0
          if auc > best_auc:
              best_auc = auc 
      elif improvement > 0:
          print(("Improvement in (1-AUC) of %.3f percent. Can't increase batch size anymore" % (improvement*100.))) 
          bad_epochs = 0
          best_auc = auc
      #elif improvement < 0 and overfit < 0.01 and bad_epochs < 3:
      #    print (("Overfitting by less than 1%, continue training"))
      #    bad_epochs += 1
      else:
          print("AUC did not improve and we can't increase batch size anymore. Stopping training.")
          keep_training = False
      if self.n_epochs >= self.max_epochs:
          print("Have already trained for 25 epochs. Stopping training.")
          keep_training = False
      if self.curriculum_learn:
          value, idx = utils.find_nearest(rocs["tpr_train"], 0.90)
          cut = rocs["thresh_train"][idx]
          good_indices = numpy.where(self.predictions["train"] > cut)
          self.features_train.features[0] = self.features_train.features[0][good_indices]
          self.features_train.features[1] = self.features_train.features[1][good_indices]
          self.features_train.global_features = self.features_train.global_features[good_indices]
          self.features_train.objects = self.features_train.objects[good_indices]
          self.features_train.label = self.features_train.label[good_indices]
          self.features_train.weights = self.features_train.weights[good_indices]
          self.prepped = False


    auc, auc_unc, fpr, tpr, thresh = utils.auc_and_unc(self.features_validation.label, self.predictions["validation"], self.features_validation.weights, 50)
    auc_train, auc_unc_train, fpr_train, tpr_train, threshd_train = utils.auc_and_unc(self.features_train.label, self.predictions["train"], self.features_train.weights, 50)
    self.auc_unc["validation"] = auc_unc
    self.auc_unc["train"] = auc_unc_train

    self.model.save_weights("dnn_weights/" + self.tag + "_weights.hdf5")
    with open("dnn_weights/" + self.tag + "_model_architecture.json", "w") as f_out:
      f_out.write(self.model.to_json())

    return
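
The loop above keys its decisions off the relative improvement in (1 - AUC), so gains near AUC = 1 still register: moving from 0.98 to 0.99 halves the error and counts as a 50% improvement. A sketch of just that criterion, with the 1% threshold taken from the method; everything else is illustrative:

def relative_auc_improvement(best_auc, auc):
    # Fractional reduction in (1 - AUC); 0.98 -> 0.99 gives 0.5
    return ((1.0 - best_auc) - (1.0 - auc)) / (1.0 - best_auc)

print(relative_auc_improvement(0.98, 0.99))    # 0.5: clear improvement, keep batch size
print(relative_auc_improvement(0.98, 0.9805))  # 0.025: still above the 0.01 threshold
print(relative_auc_improvement(0.98, 0.979))   # negative: no improvement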
Example #31
signal_effs = [0.95, 0.97, 0.98, 0.99]  # signal-efficiency working points used below

effs = {}

for i in range(len(files)):
    if not "dnn_roc" in inputs[i]:
        fpr = files[i]["fpr_test"]
        tpr = files[i]["tpr_test"]
    else:
        fpr = files[i]["fpr_validation"]
        tpr = files[i]["tpr_validation"]

    tprs = []
    fprs = []
    for eff in signal_effs:
        sig_eff, idx = utils.find_nearest(tpr, eff)
        tprs.append(tpr[idx])
        fprs.append(fpr[idx])

    auc = metrics.auc(fpr, tpr, reorder=True)
    effs[labels[i]] = {"fpr": fprs, "tpr": tprs, "auc": auc}

    ax1.plot(fpr,
             tpr,
             label=labels[i] + " [AUC = %.3f]" % (auc),
             color=colors[i])
    if "tpr_unc_test" in files[i].files:
        tpr_unc = files[i]["tpr_unc_test"]
        ax1.fill_between(fpr,
                         tpr - 1 * (tpr_unc / 2.),
                         tpr + 1 * (tpr_unc / 2.),
                         alpha=0.25, color=colors[i])  # band styling assumed
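
The working-point lookup relies on `utils.find_nearest`, which is not defined in the snippet; its call sites (`sig_eff, idx = utils.find_nearest(tpr, eff)`) suggest it returns a (value, index) pair. A plausible sketch consistent with that usage:

import numpy

def find_nearest(array, value):
    # Return (nearest value, index) for the entry of array closest to value
    array = numpy.asarray(array)
    idx = numpy.abs(array - value).argmin()
    return array[idx], idx

# Toy usage: background rate at ~95% signal efficiency on a fake ROC curve
tpr = numpy.linspace(0.0, 1.0, 101)
fpr = tpr ** 3
sig_eff, idx = find_nearest(tpr, 0.95)
print(sig_eff, fpr[idx])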
Example #32
def household_ss_olg(params, w, tau, d):
    """Solves the household's problem for a given real wage, interest rate, and social security policy."""
    # Construct labor income
    iret = 1 * (params['jvec'] >= params['Tr'])  # Retirement indicator
    y_js = (1 - tau) * w * params['y_eps'][
        np.newaxis, :] * params['h'][:, np.newaxis] + d * iret[:, np.newaxis]

    # allocate arrays to store results
    uc, c, a, D = (np.empty((params['T'] + 1, params['N_eps'], params['N_a']))
                   for _ in range(4))
    # Policies and distribution are zero before working age Tw
    a[:params['Tw']] = 0.0
    c[:params['Tw']] = 0.0
    D[:params['Tw']] = 0.0

    # Backward iteration to obtain policy function
    for j in reversed(range(params['Tw'], params['T'] + 1)):
        if j == params['T']:
            # Compute cash-on-hand and bequest factor
            coh_T = get_coh(y_js[j, ], params['r'], params['phi'][j],
                            params['a'])
            # Call backward iteration function
            a[j, ], c[j, ] = constrained(coh_T, params['a'])
            uc[j, ] = c[j, ]**(-params['sigma'])

        # Compute cash-on-hand and bequest factor
        coh = get_coh(y_js[j - 1, ], params['r'], params['phi'][j - 1],
                      params['a'])
        # Call backward iteration function
        a[j-1,], c[j-1,], uc[j-1,] = \
            backward_iterate_olg(uc[j,], params['a'], params['Pi_eps'], coh, params['r'], params['beta'], params['sigma'])

    # initialize age-Tw distribution: point mass at 0
    Dst_start = np.zeros((params['N_eps'], params['N_a']))
    Dst_start[:, find_nearest(params['a'], 0.0)] = 1.0

    # to make matrix multiplication more efficient, make separate copy of Pi transpose
    Pi_T = params['Pi_eps'].T.copy()

    # forward iteration to obtain distributions at each future age
    for j in range(params['Tw'], params['T']):
        if j == params['Tw']:
            D_temp = Dst_start
            D[j, ] = D_temp
        else:
            D_temp = D[j, ]

        # get interpolated rule corresponding to a and iterate forward
        a_pol_i, a_pol_pi = interpolate_coord(params['a'], a[j, ])
        D[j + 1, ] = forward_step(D_temp, Pi_T, a_pol_i, a_pol_pi)

    # Assets
    A_j = np.einsum('jsa,jsa->j', D, a)  # by age j
    A = np.einsum('j,j', params['pi'], A_j)  # Aggregate assets

    # Consumption
    C_j = np.einsum('jsa,jsa->j', D, c)  # by age j
    C = np.einsum('j,j', params['pi'], C_j)  # Aggregate consumption

    return {
        'D': D,  # Distribution of agents
        'a': a,
        'A_j': A_j,
        'A': A,  # Asset policy, by age, aggregate
        'c': c,
        'C_j': C_j,
        'C': C,  # Consumption policy, by age, aggregate
    }
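
The aggregation step contracts the age x shock x asset distribution D against the policy arrays with einsum: 'jsa,jsa->j' averages within each age, and 'j,j' then weights ages by the population shares pi. A small self-contained sketch under assumed shapes (3 ages, 2 shocks, 4 asset points; all values are toy data):

import numpy as np

J, S, A = 3, 2, 4
rng = np.random.default_rng(0)
D = rng.random((J, S, A))
D /= D.sum(axis=(1, 2), keepdims=True)   # each age's distribution sums to one
a = rng.random((J, S, A))                # asset policy by age/shock/asset state
pi = np.full(J, 1.0 / J)                 # population weights over ages

A_j = np.einsum('jsa,jsa->j', D, a)      # average assets by age
A_agg = np.einsum('j,j', pi, A_j)        # aggregate assets
print(A_j, A_agg)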
Example #33
def create_netcdf(config, rapid_trans, model_trans, fc_trans, wbw_trans,
                  int_trans, ek_trans):
    """ 
    Return diagnostics for comparison with RAPID-MOCHA observations
    in netcdf object for plotting/output.
    
    """

    # Configuration options
    zind = utils.find_nearest(rapid_trans.z, 1000)  # index of depth level nearest 1000 m
    fc_minlon = config.getfloat('options', 'fc_minlon')
    fc_maxlon = config.getfloat('options', 'fc_maxlon')
    wbw_maxlon = config.getfloat('options', 'wbw_maxlon')
    int_maxlon = config.getfloat('options', 'int_maxlon')
    georef = config.getfloat('options', 'georef_level')
    ek_level = config.getfloat('options', 'ekman_depth')

    try:
        vref_level = config.getfloat('options', 'vref_level')
    except Exception:
        vref_level = 'None'

    # Create netcdf file and add dimensions
    dataset = open_ncfile(config, rapid_trans.dates)
    zdim = dataset.createDimension('depth', rapid_trans.z.size)
    tdim = dataset.createDimension('time', None)

    # Create time coordinate
    time = dataset.createVariable('time', np.float64, (tdim.name, ))
    time.units = 'hours since 0001-01-01 00:00:00.0'
    time.calendar = 'gregorian'
    time[:] = date2num(rapid_trans.dates, time.units, calendar=time.calendar)

    # Create depth coordinate
    z = dataset.createVariable('depth', np.float64, (zdim.name, ))
    z.units = 'm'
    z[:] = rapid_trans.z

    # Create level thickness variable
    dz = dataset.createVariable('level_thickness', np.float64, (zdim.name, ))
    dz.units = 'm'
    dz[:] = rapid_trans.dz

    # Global attributes
    dataset.geostrophic_reference_level = georef
    dataset.rhocp = rapid_trans.rhocp
    dataset.ekman_level = ek_level
    dataset.reference_to_model_velocity = vref_level
    dataset.contact = '*****@*****.**'
    dataset.code_reference = 'Roberts, C. D., et al. (2013), Atmosphere drives recent interannual variability of the Atlantic meridional overturning circulation at 26.5N, Geophys. Res. Lett., 40, 5164-5170 doi:10.1002/grl.50930.'
    dataset.method_references = '(1) McCarthy, G. D., and Coauthors, 2015: Measuring the Atlantic Meridional Overturning Circulation at 26 degrees N. Progress in Oceanography, 130, 91-111. (2) Johns, W.E., M.O. Baringer, L.M. Beal, S.A. Cunningham, T. Kanzow, H.L. Bryden, J.J. Hirschi, J. Marotzke, C.S. Meinen, B. Shaw, and R. Curry, 2011: Continuous, Array-Based Estimates of Atlantic Ocean Heat Transport at 26.5N. J. Climate, 24, 2429-2449, doi: 10.1175/2010JCLI3997.1.'

    # Basinwide potential temperature profile
    t_basin = dataset.createVariable('t_basin', np.float64,
                                     (tdim.name, zdim.name))
    t_basin.units = 'degC'
    t_basin.minimum_longitude = fc_minlon
    t_basin.maximum_longitude = int_maxlon
    t_basin.comment = 'Basinwide zonal mean potential temperature profile'
    t_basin[:] = rapid_trans.zonal_avg_t

    # Florida current flow-weighted potential temperature
    t_fc_fwt = dataset.createVariable('t_fc_fwt', np.float64, (tdim.name))
    t_fc_fwt.units = 'degC'
    t_fc_fwt.minimum_longitude = fc_minlon
    t_fc_fwt.maximum_longitude = fc_maxlon
    t_fc_fwt.comment = 'Florida current flow-weighted potential temperature'
    t_fc_fwt[:] = fc_trans.oht_total / (fc_trans.rhocp *
                                        fc_trans.net_transport)

    # Basinwide transport profile - RAPID approx
    v_basin_rapid = dataset.createVariable('v_basin_rapid', np.float64,
                                           (tdim.name, zdim.name))
    v_basin_rapid.units = 'Sv/m'
    v_basin_rapid.minimum_longitude = fc_minlon
    v_basin_rapid.maximum_longitude = int_maxlon
    v_basin_rapid.comment = 'Basinwide transport profile using RAPID approximations'
    v_basin_rapid[:] = rapid_trans.zonal_sum_v / 1e6

    # Basinwide transport profile - model v
    v_basin_model = dataset.createVariable('v_basin_model', np.float64,
                                           (tdim.name, zdim.name))
    v_basin_model.units = 'Sv/m'
    v_basin_model.minimum_longitude = fc_minlon
    v_basin_model.maximum_longitude = int_maxlon
    v_basin_model.comment = 'Basinwide transport profile using model velocities'
    v_basin_model[:] = model_trans.zonal_sum_v / 1e6

    # Florida current transport profile
    v_fc = dataset.createVariable('v_fc', np.float64, (tdim.name, zdim.name))
    v_fc.units = 'Sv/m'
    v_fc.minimum_longitude = fc_minlon
    v_fc.maximum_longitude = fc_maxlon
    v_fc.comment = 'Florida current transport profile'
    v_fc[:] = fc_trans.zonal_sum_v / 1e6

    # Ekman transport time series
    ekman = dataset.createVariable('ekman', np.float64, (tdim.name))
    ekman.units = 'Sv'
    ekman.minimum_longitude = wbw_maxlon
    ekman.maximum_longitude = int_maxlon
    ekman.comment = 'Ekman transport time series (streamfunction at 1000m)'
    ekman[:] = ek_trans.streamfunction[:, zind] / 1e6

    # Gyre interior transport time series
    geoint = dataset.createVariable('geoint', np.float64, (tdim.name))
    geoint.units = 'Sv'
    geoint.minimum_longitude = wbw_maxlon
    geoint.maximum_longitude = int_maxlon
    geoint.comment = 'Geostrophic interior transport time series (streamfunction at 1000m).'
    geoint[:] = int_trans.streamfunction[:, zind] / 1e6

    # Western-boundary wedge transport time series
    wbw = dataset.createVariable('wbw', np.float64, (tdim.name))
    wbw.units = 'Sv'
    wbw.minimum_longitude = fc_maxlon
    wbw.maximum_longitude = wbw_maxlon
    wbw.comment = 'Western boundary wedge transport time series (streamfunction at 1000m).'
    wbw[:] = wbw_trans.streamfunction[:, zind] / 1e6

    # Florida current transport time series
    fc = dataset.createVariable('fc', np.float64, (tdim.name))
    fc.units = 'Sv'
    fc.minimum_longitude = fc_minlon
    fc.maximum_longitude = fc_maxlon
    fc.comment = 'Florida current transport time series (streamfunction at 1000m).'
    fc[:] = fc_trans.streamfunction[:, zind] / 1e6

    # Upper mid ocean transport time series
    umo = dataset.createVariable('umo', np.float64, (tdim.name))
    umo.units = 'Sv'
    umo.minimum_longitude = fc_maxlon
    umo.maximum_longitude = int_maxlon
    umo.comment = 'Upper mid-ocean transport time series (streamfunction at 1000m). umo = wbw + geoint'
    umo[:] = wbw[:] + geoint[:]

    # Meridional overturning transport time series - RAPID approx
    moc_rapid = dataset.createVariable('moc_rapid', np.float64, (tdim.name))
    moc_rapid.units = 'Sv'
    moc_rapid.minimum_longitude = fc_minlon
    moc_rapid.maximum_longitude = int_maxlon
    moc_rapid.comment = 'Time series of meridional overturning transport using RAPID approximation (streamfunction at 1000m)'
    moc_rapid[:] = rapid_trans.streamfunction[:, zind] / 1e6

    # Meridional overturning transport time series - model v
    moc_model = dataset.createVariable('moc_model', np.float64, (tdim.name))
    moc_model.units = 'Sv'
    moc_model.minimum_longitude = fc_minlon
    moc_model.maximum_longitude = int_maxlon
    moc_model.comment = 'Time series of meridional overturning transport using model velocities (streamfunction at 1000m)'
    moc_model[:] = model_trans.streamfunction[:, zind] / 1e6

    # Meridional overturning transport maxima time series - RAPID approx
    mocmax_rapid = dataset.createVariable('mocmax_rapid', np.float64,
                                          (tdim.name))
    mocmax_rapid.units = 'Sv'
    mocmax_rapid.minimum_longitude = fc_minlon
    mocmax_rapid.maximum_longitude = int_maxlon
    mocmax_rapid.comment = 'Time series of meridional overturning transport using RAPID approximation (streamfunction maxima)'
    mocmax_rapid[:] = rapid_trans.streamfunction.max(axis=1) / 1e6

    # Meridional overturning transport maxima time series - model v
    mocmax_model = dataset.createVariable('mocmax_model', np.float64,
                                          (tdim.name))
    mocmax_model.units = 'Sv'
    mocmax_model.minimum_longitude = fc_minlon
    mocmax_model.maximum_longitude = int_maxlon
    mocmax_model.comment = 'Time series of meridional overturning transport using model velocities (streamfunction maxima)'
    mocmax_model[:] = model_trans.streamfunction.max(axis=1) / 1e6

    # Overturning streamfunctions - RAPID approx
    sf_rapid = dataset.createVariable('sf_rapid', np.float64,
                                      (tdim.name, zdim.name))
    sf_rapid.units = 'Sv'
    sf_rapid.minimum_longitude = fc_minlon
    sf_rapid.maximum_longitude = int_maxlon
    sf_rapid.comment = 'Overturning streamfunctions using RAPID approximation.'
    sf_rapid[:] = rapid_trans.streamfunction / 1e6

    # Meridional overturning transport time series - model v
    sf_model = dataset.createVariable('sf_model', np.float64,
                                      (tdim.name, zdim.name))
    sf_model.units = 'Sv'
    sf_model.minimum_longitude = fc_minlon
    sf_model.maximum_longitude = int_maxlon
    sf_model.comment = 'Overturning streamfunctions using model velocities.'
    sf_model[:] = model_trans.streamfunction / 1e6

    # Florida current stream function
    sf_fc = dataset.createVariable('sf_fc', np.float64, (tdim.name, zdim.name))
    sf_fc.units = 'Sv'
    sf_fc.minimum_longitude = fc_minlon
    sf_fc.maximum_longitude = fc_maxlon
    sf_fc.comment = 'Florida current overturning streamfunction.'
    sf_fc[:] = fc_trans.streamfunction / 1e6

    # Ekman stream function
    sf_ek = dataset.createVariable('sf_ek', np.float64, (tdim.name, zdim.name))
    sf_ek.units = 'Sv'
    sf_ek.minimum_longitude = wbw_maxlon
    sf_ek.maximum_longitude = int_maxlon
    sf_ek.comment = 'Ekman overturning streamfunction.'
    sf_ek[:] = ek_trans.streamfunction / 1e6

    # Wbw stream function
    sf_wbw = dataset.createVariable('sf_wbw', np.float64,
                                    (tdim.name, zdim.name))
    sf_wbw.units = 'Sv'
    sf_wbw.minimum_longitude = fc_minlon
    sf_wbw.maximum_longitude = wbw_maxlon
    sf_wbw.comment = 'Western boundary wedge overturning streamfunction.'
    sf_wbw[:] = wbw_trans.streamfunction / 1e6

    # Geostrophic interior stream function
    sf_geoint = dataset.createVariable('sf_geoint', np.float64,
                                       (tdim.name, zdim.name))
    sf_geoint.units = 'Sv'
    sf_geoint.minimum_longitude = wbw_maxlon
    sf_geoint.maximum_longitude = int_maxlon
    sf_geoint.comment = 'Geostrophic interior overturning streamfunction.'
    sf_geoint[:] = int_trans.streamfunction / 1e6

    # mid ocean stream function
    sf_mo = dataset.createVariable('sf_mo', np.float64, (tdim.name, zdim.name))
    sf_mo.units = 'Sv'
    sf_mo.minimum_longitude = fc_maxlon
    sf_mo.maximum_longitude = int_maxlon
    sf_mo.comment = 'Mid ocean overturning streamfunction (sf_mo = sf_wbw + sf_geoint).'
    sf_mo[:] = sf_geoint[:] + sf_wbw[:]

    # Total heat transport - RAPID approx
    q_sum_rapid = dataset.createVariable('q_sum_rapid', np.float64,
                                         (tdim.name))
    q_sum_rapid.units = 'PW'
    q_sum_rapid.minimum_longitude = fc_minlon
    q_sum_rapid.maximum_longitude = int_maxlon
    q_sum_rapid.comment = 'Total heat transport across section calculated using RAPID approximations (q_sum_rapid = q_fc + q_ek + q_mo = q_ot_rapid + q_gyre_rapid + q_net_rapid)'
    q_sum_rapid[:] = rapid_trans.oht_total / 1e15

    # Gyre heat transport - RAPID approx
    q_gyre_rapid = dataset.createVariable('q_gyre_rapid', np.float64,
                                          (tdim.name))
    q_gyre_rapid.units = 'PW'
    q_gyre_rapid.minimum_longitude = fc_minlon
    q_gyre_rapid.maximum_longitude = int_maxlon
    q_gyre_rapid.comment = 'Heat transport by the horizontal circulation calculated using RAPID approximations '
    q_gyre_rapid[:] = rapid_trans.oht_by_horizontal / 1e15

    # Overturning heat transport - RAPID approx
    q_ot_rapid = dataset.createVariable('q_ot_rapid', np.float64, (tdim.name))
    q_ot_rapid.units = 'PW'
    q_ot_rapid.minimum_longitude = fc_minlon
    q_ot_rapid.maximum_longitude = int_maxlon
    q_ot_rapid.comment = 'Heat transport by the overturning circulation calculated using RAPID approximations'
    q_ot_rapid[:] = rapid_trans.oht_by_overturning / 1e15

    # Heat transport by net throughflow - RAPID approx
    q_net_rapid = dataset.createVariable('q_net_rapid', np.float64,
                                         (tdim.name))
    q_net_rapid.units = 'PW'
    q_net_rapid.minimum_longitude = fc_minlon
    q_net_rapid.maximum_longitude = int_maxlon
    q_net_rapid.comment = 'Heat transport referenced to 0C by the net flow through the section using RAPID approximations'
    q_net_rapid[:] = rapid_trans.oht_by_net / 1e15

    # Total heat transport - model v
    q_sum_model = dataset.createVariable('q_sum_model', np.float64,
                                         (tdim.name))
    q_sum_model.units = 'PW'
    q_sum_model.minimum_longitude = fc_minlon
    q_sum_model.maximum_longitude = int_maxlon
    q_sum_model.comment = 'Total heat transport across section calculated using model velocities (q_sum_model = q_gyre_model + q_ot_model + q_net_model)'
    q_sum_model[:] = model_trans.oht_total / 1e15

    # Gyre heat transport -model v
    q_gyre_model = dataset.createVariable('q_gyre_model', np.float64,
                                          (tdim.name))
    q_gyre_model.units = 'PW'
    q_gyre_model.minimum_longitude = fc_minlon
    q_gyre_model.maximum_longitude = int_maxlon
    q_gyre_model.comment = 'Heat transport by the horizontal circulation calculated using model velocities'
    q_gyre_model[:] = model_trans.oht_by_horizontal / 1e15

    # Overturning heat transport - model v
    q_ot_model = dataset.createVariable('q_ot_model', np.float64, (tdim.name))
    q_ot_model.units = 'PW'
    q_ot_model.minimum_longitude = fc_minlon
    q_ot_model.maximum_longitude = int_maxlon
    q_ot_model.comment = 'Heat transport by the overturning circulation calculated using model velocities'
    q_ot_model[:] = model_trans.oht_by_overturning / 1e15

    # Heat transport by net throughflow - model v
    q_net_model = dataset.createVariable('q_net_model', np.float64,
                                         (tdim.name))
    q_net_model.units = 'PW'
    q_net_model.minimum_longitude = fc_minlon
    q_net_model.maximum_longitude = int_maxlon
    q_net_model.comment = 'Heat transport referenced to 0C by the net flow through the section using model velocities'
    q_net_model[:] = model_trans.oht_by_net / 1e15

    # Heat transport by florida current
    q_fc = dataset.createVariable('q_fc', np.float64, (tdim.name))
    q_fc.units = 'PW'
    q_fc.minimum_longitude = fc_minlon
    q_fc.maximum_longitude = fc_maxlon
    q_fc.comment = 'Heat transport referenced to 0C by the Florida current'
    q_fc[:] = fc_trans.oht_total / 1e15

    # Heat transport by ekman
    q_ek = dataset.createVariable('q_ek', np.float64, (tdim.name))
    q_ek.units = 'PW'
    q_ek.minimum_longitude = wbw_maxlon
    q_ek.maximum_longitude = int_maxlon
    q_ek.comment = 'Heat transport referenced to 0C by Ekman transport'
    q_ek[:] = ek_trans.oht_total / 1e15

    # Heat transport by wbw
    q_wbw = dataset.createVariable('q_wbw', np.float64, (tdim.name))
    q_wbw.units = 'PW'
    q_wbw.minimum_longitude = fc_maxlon
    q_wbw.maximum_longitude = wbw_maxlon
    q_wbw.comment = 'Heat transport referenced to 0C by western boundary wedge transport'
    q_wbw[:] = wbw_trans.oht_total / 1e15

    # Heat transport by zonal mean geostrophic interior
    q_geoint = dataset.createVariable('q_geoint', np.float64, (tdim.name))
    q_geoint.units = 'PW'
    q_geoint.minimum_longitude = wbw_maxlon
    q_geoint.maximum_longitude = int_maxlon
    q_geoint.comment = 'Heat transport referenced to 0C by zonal mean of geostrophic interior transport'
    q_geoint[:] = (int_trans.oht_total - int_trans.oht_by_horizontal) / 1e15

    # Heat transport by standing "eddy" component of geostrophic interior
    q_eddy = dataset.createVariable('q_eddy', np.float64, (tdim.name))
    q_eddy.units = 'PW'
    q_eddy.minimum_longitude = wbw_maxlon
    q_eddy.maximum_longitude = int_maxlon
    q_eddy.comment = 'Heat transport referenced to 0C by standing eddy component of geostrophic interior transport'
    q_eddy[:] = (int_trans.oht_by_horizontal) / 1e15

    # Heat transport by mid ocean
    q_mo = dataset.createVariable('q_mo', np.float64, (tdim.name))
    q_mo.units = 'PW'
    q_mo.minimum_longitude = wbw_maxlon
    q_mo.maximum_longitude = int_maxlon
    q_mo.comment = 'Heat transport referenced to 0C by mid-ocean transport (q_mo = q_geoint + q_wbw + q_eddy)'
    q_mo[:] = q_geoint[:] + q_wbw[:] + q_eddy[:]

    return dataset
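
Every diagnostic above repeats one netCDF4 idiom: create a variable against named dimensions, attach attribute metadata, then assign the data by slicing. A minimal standalone version of that pattern (file name and values are placeholders):

import numpy as np
from netCDF4 import Dataset

dataset = Dataset('example.nc', 'w')
dataset.createDimension('time', None)   # unlimited record dimension
dataset.createDimension('depth', 10)

moc = dataset.createVariable('moc', np.float64, ('time',))
moc.units = 'Sv'
moc.comment = 'Toy overturning transport time series'
moc[:] = np.arange(5, dtype=np.float64)  # writing extends the unlimited dimension

dataset.close()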
Example #34
    def NCA(self, t, C):
        AUClist = []
        AUMClist = []
        Cminlist = []
        Cavglist = []
        Cmaxlist = []
        Tmaxlist = []
        Tminlist = []
        Ndoses = len(self.drugSource.parsedDoseList)
        for ndose in range(0, max(Ndoses - 1, 1)):
            tperiod0 = self.drugSource.parsedDoseList[ndose].t0
            if ndose + 1 < Ndoses:
                tperiodF = self.drugSource.parsedDoseList[
                    ndose + 1].t0 - self.model.deltaT
            else:
                tperiodF = np.max(t) - 1
            idx0 = find_nearest(t, tperiod0)
            idxF = find_nearest(t, tperiodF)

            AUC0t = 0
            AUMC0t = 0
            t0 = t[idx0 + 1]
            for idx in range(idx0, idxF + 1):
                dt = (t[idx + 1] - t[idx])
                if C[idx + 1] >= C[idx]:  # Trapezoidal rule on the rise
                    AUC0t += 0.5 * dt * (C[idx] + C[idx + 1])
                    AUMC0t += 0.5 * dt * (C[idx] * t[idx] +
                                          C[idx + 1] * t[idx + 1])
                else:  # Log-trapezoidal in the decay
                    decrement = C[idx] / C[idx + 1]
                    K = math.log(decrement)
                    B = K / dt
                    AUC0t += dt * (C[idx] - C[idx + 1]) / K
                    AUMC0t += (C[idx] * (t[idx] - tperiod0) - C[idx + 1] *
                               (t[idx + 1] - tperiod0)) / B - (
                                   C[idx + 1] - C[idx]) / (B * B)

                if idx == idx0:
                    Cmax = C[idx]
                    Tmax = t[idx] - t0
                    Cmin = C[idx]
                    Tmin = t[idx] - t0
                else:
                    if C[idx] < Cmin:
                        Cmin = C[idx]
                        Tmin = t[idx] - t0
                    elif C[idx] > Cmax:
                        Cmax = C[idx]
                        Tmax = t[idx] - t0
                        if ndose == 0:
                            Cmin = C[idx]
                            Tmin = t[idx] - t0
            AUClist.append(AUC0t)
            AUMClist.append(AUMC0t)
            Cminlist.append(Cmin)
            Cmaxlist.append(Cmax)
            Tmaxlist.append(Tmax)
            Tminlist.append(Tmin)
            Cavglist.append(AUC0t / (t[idxF] - t[idx0]))

        print("Fluctuation = Cmax/Cmin")
        print("Accumulation(1) = Cavg(n)/Cavg(1) %")
        print("Accumulation(n) = Cavg(n)/Cavg(n-1) %")
        print("Steady state fraction(n) = Cavg(n)/Cavg(last) %")
        for ndose in range(0, len(AUClist)):
            fluctuation = Cmaxlist[ndose] / Cminlist[ndose]
            if ndose > 0:
                accumn = Cavglist[ndose] / Cavglist[ndose - 1]
            else:
                accumn = 0
            print("Dose #%d: Cavg= %f [%s] Cmin= %f [%s] Tmin= %d [min] Cmax= %f [%s] Tmax= %d [min] Fluct= %f %% Accum(1)= %f %% Accum(n)= %f %% SSFrac(n)= %f %% AUC= %f [%s] AUMC= %f [%s]"%\
                  (ndose+1,Cavglist[ndose], strUnit(self.Cunits.unit), Cminlist[ndose],strUnit(self.Cunits.unit), int(Tminlist[ndose]), Cmaxlist[ndose], strUnit(self.Cunits.unit),
                   int(Tmaxlist[ndose]), fluctuation*100, Cavglist[ndose]/Cavglist[0]*100, accumn*100, Cavglist[ndose]/Cavglist[-1]*100, AUClist[ndose],strUnit(self.AUCunits),
                   AUMClist[ndose],strUnit(self.AUMCunits)))

        self.AUC0t = AUClist[-1]
        self.AUMC0t = AUMClist[-1]
        self.MRT = self.AUMC0t / self.AUC0t
        self.Cmin = Cminlist[-1]
        self.Cmax = Cmaxlist[-1]
        self.Cavg = Cavglist[-1]
        self.fluctuation = self.Cmax / self.Cmin
        self.percentageAccumulation = Cavglist[-1] / Cavglist[0]

        print("   AUC0t=%f [%s]" % (self.AUC0t, strUnit(self.AUCunits)))
        print("   AUMC0t=%f [%s]" % (self.AUMC0t, strUnit(self.AUMCunits)))
        print("   MRT=%f [min]" % self.MRT)
Example #35
def emcee_inference(star, Ndim, ranges, lbdarr, wave, logF, dlogF, minfo,
                    listpar, logF_grid, vsin_obs, sig_vsin_obs, incl, dist_pc,
                    sig_dist_pc, isig, dims, include_rv, a_parameter,
                    af_filter, tag, plot_fits, long_process, log_scale, model,
                    acrux, pool, Nproc, stellar_prior, npy_star, pdf_mas,
                    pdf_obl, pdf_age, grid_mas, grid_obl, grid_age, band,
                    lbd_range):


    if long_process is True:
        #Nwalk = 2500 #500  # 200  # 500
        #nint_burnin = 500 #100  # 50
        #nint_mcmc = 5000  # 500  # 1000
        Nwalk = 500  # 200  # 500
        nint_burnin = 100  # 50
        nint_mcmc = 1000  # 500  # 1000
    else:
        Nwalk = 50
        nint_burnin = 20
        nint_mcmc = 100

    p0 = [
        np.random.rand(Ndim) * (ranges[:, 1] - ranges[:, 0]) + ranges[:, 0]
        for i in range(Nwalk)
    ]
    start_time = time.time()

    with Pool() as pool:  # note: shadows the 'pool' argument passed to this function
        if acrux is True:
            sampler = emcee.EnsembleSampler(
                Nwalk,
                Ndim,
                lnprob,
                args=[
                    incl, wave, logF, dlogF, minfo, listpar, logF_grid,
                    vsin_obs, sig_vsin_obs, dist_pc, sig_dist_pc, isig, ranges,
                    dims, include_rv, model, stellar_prior, npy_star, pdf_mas,
                    pdf_obl, pdf_age, grid_mas, grid_obl, grid_age
                ],
                a=a_parameter,
                pool=pool)
        else:
            sampler = emcee.EnsembleSampler(
                Nwalk,
                Ndim,
                lnprob,
                args=[
                    incl, wave, logF, dlogF, minfo, listpar, logF_grid,
                    vsin_obs, sig_vsin_obs, dist_pc, sig_dist_pc, isig, ranges,
                    dims, include_rv, model, stellar_prior, npy_star, pdf_mas,
                    pdf_obl, pdf_age, grid_mas, grid_obl, grid_age
                ],
                a=a_parameter)

        sampler_tmp = run_emcee(p0,
                                sampler,
                                nint_burnin,
                                nint_mcmc,
                                Ndim,
                                file_name=star)
    print("--- %s minutes ---" % ((time.time() - start_time) / 60))

    sampler, params_fit, errors_fit, maxprob_index,\
        minprob_index, af = sampler_tmp

    if include_rv is True:
        mass_true, obt_true, xc_true,\
            cos_true, ebv_true, dist_true, rv_true = params_fit
    else:
        if model == 'befavor' or model == 'befavor_new':
            mass_true, obt_true, xc_true,\
                cos_true, ebv_true, dist_true = params_fit
        if model == 'aara' or model == 'acol' or\
           model == 'bcmi':
            mass_true, obt_true, xc_true, n0, Rd, n_true,\
                cos_true, ebv_true, dist_true = params_fit
        if model == 'bcmi_pol' or model == 'aara_pol':
            mass_true, obt_true, xc_true, n0, Rd, n_true,\
                cos_true = params_fit
        if model == 'beatlas':
            mass_true, obt_true, rh0_true, nix_true,\
                inc_true, dis_true, ebv_true = params_fit
    # if model is False:
    #     angle_in_rad = np.arccos(params_fit[6])
    #     params_fit[6] = (np.arccos(params_fit[6])) * (180. / np.pi)
    #     errors_fit[6] = (errors_fit[6] / (np.sqrt(1. -
    #                      (np.cos(angle_in_rad)**2.)))) * (180. / np.pi)

    chain = sampler.chain

    if af_filter is True:
        acceptance_fractions = sampler.acceptance_fraction
        chain = chain[(acceptance_fractions >= 0.20)
                      & (acceptance_fractions <= 0.50)]
        af = acceptance_fractions[(acceptance_fractions >= 0.20)
                                  & (acceptance_fractions <= 0.50)]
        af = np.mean(af)

    af = '{0:.2f}'.format(af)

    # Saving first sample
    # file_npy = 'figures/' + str(star) + '/' + 'Walkers_' +\
    #     str(Nwalk) + '_Nmcmc_' + str(nint_mcmc) +\
    #     '_af_' + str(af) + '_a_' + str(a_parameter) +\
    #     tag + ".npy"
    # np.save(file_npy, chain)
    current_folder = 'figures/' + str(star) + '/' + str(lbd_range) + '/'
    file_npy = current_folder + str(star) + ".npy"
    np.save(file_npy, chain)

    # plot results
    mpl.rcParams['mathtext.fontset'] = 'stix'
    mpl.rcParams['font.family'] = 'STIXGeneral'
    mpl.rcParams['font.size'] = 16

    # Loading first dataframe
    chain_1 = np.load(file_npy)
    Ndim_1 = np.shape(chain_1)[-1]
    flatchain_1 = chain_1.reshape((-1, Ndim_1))

    if model == 'befavor' or model == 'befavor_new':
        mas_1 = flatchain_1[:, 0]
        obl_1 = flatchain_1[:, 1]
        age_1 = flatchain_1[:, 2]
        inc_1 = flatchain_1[:, 3]
        dis_1 = flatchain_1[:, 4]
        ebv_1 = flatchain_1[:, 5]
    if model == 'aara' or model == 'acol' or\
       model == 'bcmi':
        mas_1 = flatchain_1[:, 0]
        obl_1 = flatchain_1[:, 1]
        age_1 = flatchain_1[:, 2]
        rh0_1 = flatchain_1[:, 3]
        rdk_1 = flatchain_1[:, 4]
        nix_1 = flatchain_1[:, 5]
        inc_1 = flatchain_1[:, 6]
        dis_1 = flatchain_1[:, 7]
        ebv_1 = flatchain_1[:, 8]
    if model == 'bcmi_pol' or model == 'aara_pol':
        mas_1 = flatchain_1[:, 0]
        obl_1 = flatchain_1[:, 1]
        age_1 = flatchain_1[:, 2]
        rh0_1 = flatchain_1[:, 3]
        rdk_1 = flatchain_1[:, 4]
        nix_1 = flatchain_1[:, 5]
        inc_1 = flatchain_1[:, 6]
    if model == 'beatlas':
        mas_1 = flatchain_1[:, 0]
        obl_1 = flatchain_1[:, 1]
        rh0_1 = flatchain_1[:, 2]
        nix_1 = flatchain_1[:, 3]
        inc_1 = flatchain_1[:, 4]
        dis_1 = flatchain_1[:, 5]
        ebv_1 = flatchain_1[:, 6]

    if include_rv is True:
        rv_1 = flatchain_1[:, 6]
        par_list = np.zeros([len(mas_1), 7])
    else:
        if model == 'befavor' or model == 'befavor_new':
            par_list = np.zeros([len(mas_1), 6])
        if model == 'aara' or model == 'acol' or\
           model == 'bcmi':
            par_list = np.zeros([len(mas_1), 9])
        if model == 'bcmi_pol' or model == 'aara_pol':
            par_list = np.zeros([len(mas_1), 7])
        if model == 'beatlas':
            par_list = np.zeros([len(mas_1), 7])

    for i in range(len(mas_1)):
        if include_rv is True:
            par_list[i] = [
                mas_1[i], obl_1[i], age_1[i], inc_1[i], dis_1[i], ebv_1[i],
                rv_1[i]
            ]
        else:
            if model == 'befavor' or model == 'befavor_new':
                par_list[i] = [
                    mas_1[i], obl_1[i], age_1[i], inc_1[i], dis_1[i], ebv_1[i]
                ]
            if model == 'aara' or model == 'acol' or\
               model == 'bcmi':
                par_list[i] = [
                    mas_1[i], obl_1[i], age_1[i], rh0_1[i], rdk_1[i], nix_1[i],
                    inc_1[i], dis_1[i], ebv_1[i]
                ]
            if model == 'bcmi_pol' or model == 'aara_pol':
                par_list[i] = [
                    mas_1[i], obl_1[i], age_1[i], rh0_1[i], rdk_1[i], nix_1[i],
                    inc_1[i]
                ]
            if model == 'beatlas':
                par_list[i] = [
                    mas_1[i], obl_1[i], rh0_1[i], nix_1[i], inc_1[i], dis_1[i],
                    ebv_1[i]
                ]

    # plot corner
    if include_rv is True:
        samples = np.vstack((mas_1, obl_1, age_1, inc_1, dis_1, ebv_1, rv_1)).T
    else:
        if model == 'befavor' or model == 'befavor_new':
            samples = np.vstack((mas_1, obl_1, age_1, inc_1, dis_1, ebv_1)).T
        if model == 'aara' or model == 'acol' or\
           model == 'bcmi':
            samples = np.vstack((mas_1, obl_1, age_1, rh0_1, rdk_1, nix_1,
                                 inc_1, dis_1, ebv_1)).T
        if model == 'bcmi_pol' or model == 'aara_pol':
            samples = np.vstack(
                (mas_1, obl_1, age_1, rh0_1, rdk_1, nix_1, inc_1)).T
        if model == 'beatlas':
            samples = np.vstack(
                (mas_1, obl_1, rh0_1, nix_1, inc_1, dis_1, ebv_1)).T

    k, arr_t_tc, arr_Xc = t_tms_from_Xc(mass_true,
                                        savefig=False,
                                        plot_fig=False)
    ttms_ip = np.arange(0.001, 1., 0.001)
    Xc_ip = np.interp(ttms_ip, arr_t_tc[k], arr_Xc[k])

    for i in range(len(samples)):
        if model == 'befavor' or model == 'aara' or\
           model == 'acol' or model == 'bcmi' or\
           model == 'befavor_new' or model == 'bcmi_pol' or\
           model == 'aara_pol':
            # Calculating logg for the non_rotating case
            Mstar, oblat, Hfrac = samples[i][0], samples[i][1],\
                samples[i][2]
        else:
            Mstar, oblat, Hfrac = samples[i][0], samples[i][1], 0.3

        if model != 'befavor_new':
            t = np.max(np.array([hfrac2tms(Hfrac), 0.]))
        else:
            t = np.copy(Hfrac)

        Rpole, logL = geneva_interp_fast(Mstar,
                                         oblat,
                                         t,
                                         neighbours_only=True,
                                         isRpole=False)

        # Converting oblat to W
        samples[i][1] = obl2W(samples[i][1])

        if model == 'befavor':
            # Converting angles to degrees
            samples[i][3] = (np.arccos(samples[i][3])) * (180. / np.pi)
            # Converting Xc to t(tms)
            samples[i][2] = ttms_ip[find_nearest(Xc_ip, samples[i][2])[1]]
        if model == 'befavor_new':
            # Converting angles to degrees
            samples[i][3] = (np.arccos(samples[i][3])) * (180. / np.pi)
        if model == 'aara':
            # Converting Xc to t(tms)
            samples[i][2] = ttms_ip[find_nearest(Xc_ip, samples[i][2])[1]]
            samples[i][5] = samples[i][5] + 1.5
            samples[i][6] = (np.arccos(samples[i][6])) * (180. / np.pi)
        if model == 'acol' or model == 'bcmi' or model == 'bcmi_pol' or\
           model == 'aara_pol':
            # Converting Xc to t(tms)
            samples[i][2] = ttms_ip[find_nearest(Xc_ip, samples[i][2])[1]]
            samples[i][6] = (np.arccos(samples[i][6])) * (180. / np.pi)
        if model == 'beatlas':
            samples[i][4] = (np.arccos(samples[i][4])) * (180. / np.pi)
        if model == 'aara_pol':
            samples[i][5] = samples[i][5] + 1.5
    # plot corner
    quantiles = [0.16, 0.5, 0.84]
    if include_rv is True:
        labels = [
            r'$M\,[\mathrm{M_\odot}]$', r'$W$', r"$t/t_\mathrm{ms}$",
            r'$i[\mathrm{^o}]$', r'$d\,[pc]$', r'E(B-V)', r'$R_\mathrm{V}$'
        ]
    else:
        if model == 'befavor' or model == 'befavor_new':
            labels = [
                r'$M\,[\mathrm{M_\odot}]$', r'$W$', r"$t/t_\mathrm{ms}$",
                r'$i[\mathrm{^o}]$', r'$d\,[\mathrm{pc}]$', r'E(B-V)'
            ]
        if model == 'aara' or model == 'acol' or\
           model == 'bcmi':
            labels = [
                r'$M\,[\mathrm{M_\odot}]$', r'$W$', r"$t/t_\mathrm{ms}$",
                r'$\log \, n_0 \, [\mathrm{cm^{-3}}]$',
                r'$R_\mathrm{D}\, [R_\star]$', r'$n$', r'$i[\mathrm{^o}]$',
                r'$d\,[\mathrm{pc}]$', r'E(B-V)'
            ]
        if model == 'bcmi_pol' or model == 'aara_pol':
            labels = [
                r'$M\,[\mathrm{M_\odot}]$', r'$W$', r"$t/t_\mathrm{ms}$",
                r'$\log \, n_0$', r'$R_\mathrm{D}\, [R_\star]$', r'$n$',
                r'$i[\mathrm{^o}]$'
            ]
        if model == 'beatlas':
            labels = [
                r'$M\,[\mathrm{M_\odot}]$', r'$W$', r'$\Sigma_0$', r'$n$',
                r'$i[\mathrm{^o}]$', r'$d\,[pc]$', r'E(B-V)'
            ]
    if model == 'befavor':
        ranges[1] = obl2W(ranges[1])
        ranges[2][0] = ttms_ip[find_nearest(Xc_ip, ranges[2][1])[1]]
        ranges[2][1] = ttms_ip[find_nearest(Xc_ip, ranges[2][0])[1]]
        ranges[3] = (np.arccos(ranges[3])) * (180. / np.pi)
        ranges[3] = np.array([ranges[3][1], ranges[3][0]])
    if model == 'befavor_new':
        ranges[1] = obl2W(ranges[1])
        ranges[3] = (np.arccos(ranges[3])) * (180. / np.pi)
        ranges[3] = np.array([ranges[3][1], ranges[3][0]])
    if model == 'aara':
        ranges[1] = obl2W(ranges[1])
        ranges[2][0] = ttms_ip[find_nearest(Xc_ip, ranges[2][1])[1]]
        ranges[2][1] = ttms_ip[find_nearest(Xc_ip, ranges[2][0])[1]]
        ranges[3] = np.array([ranges[3][1], ranges[3][0]])
        ranges[5] = ranges[5] + 1.5
        ranges[6] = (np.arccos(ranges[6])) * (180. / np.pi)
        ranges[6] = np.array([ranges[6][1], ranges[6][0]])
    if model == 'acol' or model == 'bcmi' or model == 'bcmi_pol' or\
       model == 'aara_pol':
        ranges[1] = obl2W(ranges[1])
        ranges[2][0] = ttms_ip[find_nearest(Xc_ip, ranges[2][1])[1]]
        ranges[2][1] = ttms_ip[find_nearest(Xc_ip, ranges[2][0])[1]]
        ranges[3] = np.array([ranges[3][1], ranges[3][0]])
        ranges[6] = (np.arccos(ranges[6])) * (180. / np.pi)
        ranges[6] = np.array([ranges[6][1], ranges[6][0]])
    if model == 'beatlas':
        ranges[1] = obl2W(ranges[1])
        ranges[3] = np.array([ranges[3][-1], ranges[3][0]])
        ranges[4] = (np.arccos(ranges[4])) * (180. / np.pi)
        ranges[4] = np.array([ranges[4][1], ranges[4][0]])

    corner(samples,
           labels=labels,
           range=ranges,
           quantiles=quantiles,
           plot_contours=True,
           smooth=2.,
           smooth1d=False,
           plot_datapoints=True,
           label_kwargs={'fontsize': 17},
           title_kwargs={'fontsize': 17},
           truths=None,
           show_titles=True,
           color_hist='black',
           plot_density=True,
           fill_contours=False,
           no_fill_contours=False,
           normed=True)

    if plot_fits is True:
        plot_fit_last(params_fit, wave, logF, dlogF, minfo, listpar, lbdarr,
                      logF_grid, isig, dims, Nwalk, nint_mcmc, ranges,
                      include_rv, file_npy, log_scale, model)


    fig_name = 'Walkers_' + str(Nwalk) + '_Nmcmc_' +\
        str(nint_mcmc) + '_af_' + str(af) + '_a_' +\
        str(a_parameter) + tag

    plot_residuals(params_fit, wave, logF, dlogF, minfo, listpar, lbdarr,
                   logF_grid, isig, dims, Nwalk, nint_mcmc, ranges, include_rv,
                   file_npy, log_scale, model)
    plt.savefig(current_folder + fig_name + '_' + lbd_range + '.png', dpi=100)

    plot_convergence(file_npy, file_npy[:-4] + '_convergence', model)

    # Saving the chord diagram
    if model == 'aara' or model == 'bcmi' or model == 'acol':
        chord_plot_complete(folder='figures/', file=str(star))
    elif model == 'bcmi_pol' or model == 'aara_pol':
        chord_plot_pol(folder='figures/', file=str(star))
    else:
        chord_plot(folder='figures/', file=str(star))

    return
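
Downstream of the fit, the chain is flattened and summarised at the 16/50/84 percentiles, which is what the `quantiles` argument to `corner` displays. A short sketch of pulling those numbers directly from a chain (shapes and values are illustrative stand-ins for `sampler.chain`):

import numpy as np

Nwalk, nsteps, Ndim = 50, 100, 6
chain = np.random.normal(size=(Nwalk, nsteps, Ndim))   # stand-in for sampler.chain
flatchain = chain.reshape((-1, Ndim))

lo, med, hi = np.percentile(flatchain, [16, 50, 84], axis=0)
for i in range(Ndim):
    print('param %d: %.3f (+%.3f / -%.3f)' % (i, med[i], hi[i] - med[i], med[i] - lo[i]))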