Example #1
def cost_dSIR(self, pars, dataset, initial, t, w):
    """
    The function to compute the error to guide the learning
    algorithm. It computes the quadratic error.

    :param tuple p: Tuple with Beta and r parameters, respectivelly.
    :param array S: The suceptible data values.
    :param array I: The infected data values.
    :param array initial: The initial values of suceptible and infected, respectivelly.
    :param array t: The time respective to each sample.
    :param array w: The weight respective to the suceptible and infected errors.

    :return: The sum of the quadratic error, between simulated and real data.
    :rtype: float
  """
    model_pars = [p for p in pars]
    model_init = [item for item in initial]

    S = pyasl.smooth(dataset[0], 13, "hamming")
    I = pyasl.smooth(dataset[1], 13, "hamming")
    R = pyasl.smooth(dataset[2], 13, "hamming")

    erro = dict(S=1.0, I=1.0, R=1.0)

    try:
        # Simulate the differential equation system
        result = [[], [], []]
        for s, i, r in zip(S, I, R):
            rsim = self.differential_model((s, i, r), t, model_pars[0],
                                           model_pars[1])
            for k in range(3):
                result[k].append(rsim[k])
        for k in range(3):
            result[k] = np.array(result[k])
        # Compute the error for all samples
        #erro["S"] = w[0] * ( result[0] - np.gradient(S) )**2
        erro["I"] = w[1] * (result[1] - np.gradient(I))**2
        erro["R"] = w[2] * (result[2] - np.gradient(R))**2
        # Merging the error
        erro_acc = 0.0
        for item in self.focus:
            erro_acc += np.sqrt(np.mean(erro[item]))
        self._iter_error.append(erro_acc)
    except Exception:
        # If the simulation fails, reuse the last recorded error value
        print("Cost evaluation failed; reusing the previous error value")
        erro_acc = self._iter_error[-1]
    return erro_acc
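# --- Usage sketch (not from the original repository) ---
# A cost like the one above is minimized elsewhere in this class with
# SciPy's differential evolution (see Examples #6 and #7 below). A
# minimal, self-contained sketch of that pattern, using a stand-in
# quadratic cost in place of the bound method:
import numpy as np
from scipy.optimize import differential_evolution

def toy_cost(pars, dataset, initial, t, w):
    # Stand-in cost with the same argument layout; a quadratic bowl
    # whose minimum sits at beta = 0.5, r = 0.1.
    beta, r = pars
    return w[0] * (beta - 0.5) ** 2 + w[1] * (r - 0.1) ** 2

bounds = [(1e-3, 2.0), (1e-3, 1.0)]  # (beta, r) search box
res = differential_evolution(
    toy_cost, bounds,
    args=((None, None, None), (1000, 1), np.arange(10), [1.0, 1.0]),
    maxiter=1000, tol=1e-6, seed=0)
print(res.x)  # approaches [0.5, 0.1]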
Example #2
def findEpidemyBreaks(cases, 
    threshold_prop=1.0, 
    cases_before=10):
  """
    The function responsible for determining the initial 
    and final points of the epidemies windows.

    :param array cases: The array with the cases values along time.
    :param float threshold_prop: The standard deviation proportion used as threshold for windowing. Default is `1.0`.
    :param int cases_before: The number of back samples to check for the initial window point. Default is `10`.
    
    :return: With the list of window's starting points and window's final points, respectively.
    :rtype: tuple
  """
  # Filtering the data
  filt_cases = pyasl.smooth(cases, 11, 'hamming')
  # Compute the derivative and standard deviation
  cases_variation = np.diff(filt_cases).tolist()
  threshold = threshold_prop * np.std(cases_variation)
  # Initializing the variables
  start_points, end_points = [], []
  in_epidemy = False
  for k, value in enumerate(cases_variation):
    if not in_epidemy:
      # Check value
      if value > threshold:
        in_epidemy = True
        # Find the start point
        start_index = 0 if k-cases_before < 0 else k-cases_before
        window = [abs(v) for v in cases_variation[start_index:k]]
        ref_index = window.index(min(window))
        start_index = k - (cases_before - ref_index)
        if cases[start_index] == 0:
          while cases[start_index] == 0:
            start_index += 1
        start_points.append(start_index)
    else:
      check_1 = (cases_variation[k-1] < 0)
      check_2 = (value >= 0)
      if check_1 and check_2:
        in_epidemy = False
        end_points.append(k)
  return start_points, end_points
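# --- Usage sketch (not part of the original snippet) ---
# Synthetic weekly case counts with two outbreak-like bumps, just to
# illustrate how the windowing function above is called; the numbers
# are made up. Assumes findEpidemyBreaks (defined above) and its
# pyasl import are available.
import numpy as np

rng = np.random.default_rng(0)
weeks = np.arange(300)
cases = (rng.poisson(2, 300).astype(float)
         + 200.0 * np.exp(-0.5 * ((weeks - 80) / 8.0) ** 2)
         + 150.0 * np.exp(-0.5 * ((weeks - 200) / 10.0) ** 2))

starts, ends = findEpidemyBreaks(cases, threshold_prop=1.0, cases_before=10)
print("Epidemic windows:", list(zip(starts, ends)))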
Example #3
import numpy as np
import scipy
from PyAstronomy import pyasl
import os, fnmatch
import GeneralSpecUtils as GSU
import PlotUtils as PU

Jupiter_1996UT = scipy.fromfile(
    file="f:/Astronomy/Projects/Planets/Saturn/PROJECT/JUPS.DAT",
    dtype=float,
    count=-1,
    sep='\t')
Jupiter_1996UT = scipy.reshape(Jupiter_1996UT, [Jupiter_1996UT.size / 2, 2])
Jupiter_1996UT[:, 1] = Jupiter_1996UT[:, 1] / Jupiter_1996UT[:, 1].max()  #Normalize the data
JupiterSlopeCorrection = np.linspace(1.02, 0.93, Jupiter_1996UT.size / 2)
Jupiter_1996UT[:, 1] = Jupiter_1996UT[:, 1] * JupiterSlopeCorrection
Jupiter_1996Smooth = pyasl.smooth(Jupiter_1996UT[:, 1], 9, 'flat')

NIRFiles = []
listOfFiles = os.listdir('../1D Spectra/')
pattern = '*100lpm-742NIR_Albedo.txt'
for entry in listOfFiles:
    if fnmatch.fnmatch(entry, pattern):
        NIRFiles.append("../1D Spectra/" + entry)
NIRspecarray = GSU.SpectrumAggregation('f:', NIRFiles, FileList=True)
NIRspecarray.ComputeAverageandStats()

CLRFiles = []
listOfFiles = os.listdir('../1D Spectra/')
pattern = '*100lpm-550CLR_Albedo.txt'
for entry in listOfFiles:
    if fnmatch.fnmatch(entry, pattern):
        CLRFiles.append("../1D Spectra/" + entry)

    # --- fragment of a separate cross-correlation routine (its enclosing function is not included in this snippet) ---
    len_order = 13
#    output_cc=str(ccd)+"-wasp33"+str(whichdata)+"_cc_"+str(opt)+".h5"
#    h5f_cc = h5py.File(specdir+"cc_result/"+str(output_cc), 'w')
    for va in tqdm(range(varimin, varimax), ascii=True, desc=str(ccd)+" CCD"):
        cc_map_full=[]
        for var in tqdm(range(len_sv), ascii=True, desc=str(vari)+"="+str(va)+" R"+str(varname)):
            cc_svd_collect=[] #Different SV are saved in this matrix
            for order in tqdm(range(1, len_order), ascii=True, desc="Order"):
#                y_temp_order=h5f_temp[str(ccd)+"-vmr-"+str(va)+"-order-"+str(order)][:]
                yobs=h5f_reduced[str(ccd)+"-flux"+str(vari)+"-"+str(va)+"-order-"+str(order)+"sysrem"][:][var]
#                yobs=h5f_reduced[str(ccd)+"-flux-"+str(vari)+"-"+str(va)+"-order-"+str(order)+"sysrem"][:][var]
                y_temp_order=h5f_temp[str(ccd)+"-vmr-9-order-"+str(order)][:]
#                yobs= h5f_reduced[str(ccd)+"-flux-"+str(vari)+"-7-order-"+str(order)+str(varname)+"-"+str(var+1)][:]
                for filenum in range (len(yobs)):
                    sm = pyasl.smooth(yobs[filenum], 25, 'flat')
                    sm1 = pyasl.smooth(sm, 51, 'flat')
                    yobs[filenum]=(yobs[filenum]+1.)/(sm1+1.)-1.
                std_frames=[]
                std_pix=[]
                for wvbin in range (len(yobs[1])):
                    std_pix.append(np.std(yobs[:,wvbin]))
                for filenum in range (len(yobs)):
                    std_frames.append(np.std(yobs[filenum,:]))
                std_frames=np.array(std_frames,dtype="float")
                yobs=divstd_prime(yobs,std_pix,std_frames)
                cc_order=np.zeros((len(yobs),len(drvs)),dtype="float")
                for numspec in range(len(yobs)):
                    for rv in range(len(drvs)):
                        cc_order[numspec][rv]=crossco(yobs[numspec],y_temp_order[rv])
#                        cc_order[numspec][rv]=cc_sp(yobs[numspec],y_temp_order[rv])
#Retrieve Target Parameters and create data paths
J = CF.Target_Parameters(
    "f:/Astronomy/Python Play/SpectroPhotometry/Spectroscopy/Target_Parameters.txt"
)
J.loadtargetparams(Target)
JupPath = CF.built_path(J)
JupPath.spectra(DateUT)

#Load response calibration and solar reference spectrum
Response = scipy.fromfile(file="../PolluxResponse20150123UT.txt",
                          dtype=float,
                          count=-1,
                          sep=" ")
Response = scipy.reshape(Response, [Response.size / 2, 2])
Response[:, 0] = Response[:, 0] / 10.
Response[:, 1] = pyasl.smooth(Response[:, 1], 3, 'flat')
MasterDispersion = (Response[(Response.size / 2 - 1), 0] -
                    Response[0, 0]) / (Response.size / 2 - 1)

Ref_g2v = scipy.loadtxt(JupPath.reference_path + J.SpecType,
                        dtype=float,
                        skiprows=3,
                        usecols=(0, 1))
Ref_g2v[:, 0] = Ref_g2v[:, 0] / 10.
Ref_g2v[:, 1] = pyasl.smooth(Ref_g2v[:, 1], 3, 'flat')

#Load comparison albedo spectrum from Karkoschka, 1994 (1993 observations)
Jupiter_Karkoschka1993 = scipy.fromfile(
    file=
    "f:/Astronomy/Projects/Planets/Saturn/Spectral Data/Karkoschka/1993.tab.txt",
    dtype=float,
    count=-1,
    sep=" ")
Example #6
 def monteCarlo_multiple(self, Sd, Id, Bd, td, 
     threshold_prop=1,
     cases_before=10,
     minimum_days=60,
     steps_indays=10,
     filt_estimate=False,
     filt_window=55,
     beta_sens=[1000,10],
     r_sens=[1000,10],
     out_type=0,
     **kwargs):
   """
     The method responsible for estimating a set of beta and r 
     parameters for each epidemy period existent in the provided
     dataset. It assumes that in the data there are several epidemic
     periods.
     
     :param array Sd: Array with the suceptible data.
     :param array Id: Array with the infected data.
     :param array Bd: Array with the births data.
     :param array td: The time respective to each set of samples.
     :param float threshold_prop: The standard deviation proportion used as threshold for windowing. Default is :code:`1.0`.
     :param int cases_before: The number of back samples to check for the initial window point. Default is :code:`10`.
     :param bool filt_estimate: Flag to use filtered data to estimate the model parameters. Default is :code:`False`.
     :param int filt_window: The window size used on the filtering technique, only if :code:`filt_estimate=True`. Default is :code:`55`.
     :param list beta_sens: The beta parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,100]`.
     :param list r_sens: The r parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,1000]`.
     :param int out_type: The output type, it can be :code:`1` or :code:`0`. Default is :code:`0`. 
     
     :return: If the :code:`out_type=0`, it returns a tuple with the estimated beta and r, estimated, with the year of each respective window. If `out_type=1` it returns the self.data of the model, a summary with all model information.
     :rtype: tuple
   """
   self.data["full"] = {
     "I": Id, "S": Sd, 
     "B": Bd, "t": td }
   # Find the epidemy start and end points
   start, end = findEpidemyBreaks(Id, threshold_prop, cases_before)
   # Check the window sizes
   if len(start) < 2:
     print("The windows are too small!")
   if len(start) != len(end):
     end = end[:-1]
   # Check the window sizes - 2
   if self.verbose:
     print("├─ Windows starting at: ", start)
     print("├─ Windows ending at:   ", end)
     print("├─ Window start cases:  ", [Id[s] for s in start])
     print("│")
   # Computing the approximate values 
   # of the parameters to build the 
   # parameter boundaries
   beta_approx = 1 
   r_approx = 1 / 7 
   # For each epidemy window
   for s, e in zip(start, end):
     if self.verbose:
       print("├──┬ ✣✣✣ New window ➙ ", self.iter_counter, " ✣✣✣")
       self.iter_counter += 1
     # Reading the SIR window variables
     B, S = Bd[s:e], Sd[s:e]
     I, t = Id[s:e], td[s:e]
     # Computing variables references 
     year_ref = t[0] # Year reference
     t = (t - year_ref) * 365 # Time in days
     # Initial conditions
     y0 = int(S[0]), int(I[0])
     # Parameter weights
     w = [max(I)/max(S), 1]
     # Resampling the data
     Sd_res, t_res = scs.resample(S, int(t[-1]), t=t)
     Id_res, t_res = scs.resample(I, int(t[-1]), t=t)
     # Filtering the values
     if filt_estimate:
       Sd_res = pyasl.smooth(Sd_res, filt_window, 'hamming')
       Id_res = pyasl.smooth(Id_res, filt_window, 'hamming')
     # Computing the parameter bounds   
     x0 = [beta_approx, r_approx]
     lower = [x0[0]/beta_sens[0], x0[1]/r_sens[0]]
     upper = [beta_sens[1]*x0[0], r_sens[1]*x0[1]]
     if self.verbose:
       print("│  ├─ S(0) ─  ", y0[0], "  I(0) ─  ", y0[1])
       print("│  ├─ beta ─  ", x0[0], "  r ─  ", x0[1])
       print("│  ├─ beta bound ─  ", lower[0], " ─ ", upper[0])
       print("│  ├─ r bound ─  ", lower[1], " ─ ", upper[1])
       print("│  │")
       print("│  ├─┬─ ⨭ Initializing Monte Carlo ⨮")
       self.__prop_ind = 0
     # Create the simulation steps
     initial_indexes = range(minimum_days, int(t[-1]), steps_indays)
     # Estimate for each sample set
     mc_window_data = dict(pars=list(), time=list(), bounds=[s,e])
     for bound in initial_indexes:
       # Get only a fraction of the data
       S_, I_, t_ = Sd_res[:bound], Id_res[:bound], t_res[:bound]
        # Minimize the cost function for
        # the selected window
       c = differential_evolution(
         self.cost_function, 
         list(zip(lower, upper)),
         maxiter=60000, 
         popsize=15,
         mutation=(0.5, 1.5),
         strategy="best1exp",
         workers=-1,
         updating='deferred',
         tol=0.00001,
         args=((S_, I_), y0, t_, w)
       ).x # <- Get only the parameters
       # Print some information
       sim_prop = initial_indexes.index(bound) / len(initial_indexes)
       if self.verbose and (sim_prop > self.__mc_props[self.__prop_ind]):
         print("│  │ ├─ Progress at : {}%".format(int(100*sim_prop)))
         self.__prop_ind += 1
       # Save monte carlo estimated data
       mc_window_data["pars"].append(c)
       mc_window_data["time"].append(t_[-1]*365 + year_ref)
     if self.verbose and (sim_prop > 0.5):
       print("│  │ └─ Finished! ✓")
     # Save the window data
     self.mc["results"][str(self.iter_counter-1)] = mc_window_data
      # Simulating the data
     [Sa, Ia] = self.simulate(y0, t_res, c)
     # Save the year data
     self.data["data"]["original"].append(
       { "I": I, "B": B, "S": S, "t": t/365 + year_ref })
     self.data["data"]["resampled"].append(
       {"I": Id_res, "S": Sd_res, "t": t_res/365 + year_ref})
     self.data["data"]["simulated"].append(
       {"I": Ia, "S": Sa, "t": t_res/365 + year_ref})
     self.data["pars"]["beta"].append( c[0] )
     self.data["pars"]["r"].append( c[1] )
     self.data["time"].append( year_ref )
     # Printing summary
     if self.verbose:
       print("│  └─ ✣✣✣ ➙ ", self.iter_counter-1, " ✣✣✣\n│")
   if self.verbose:
     print("│")
     print("└─ Done! ✓")
   return self.mc
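# --- Call sketch (hypothetical, not from the original repository) ---
# `model` stands for an instance of the (not shown) class this method
# belongs to, and Sd, Id, Bd, td are the arrays described in the
# docstring. The returned dictionary is keyed by window index and holds
# one [beta, r] pair per Monte Carlo step, as built above.
import numpy as np

mc = model.monteCarlo_multiple(Sd, Id, Bd, td,
                               threshold_prop=1.0,
                               cases_before=10,
                               minimum_days=60,
                               steps_indays=10)
for window_id, window in mc["results"].items():
    pars = np.array(window["pars"])
    print("window", window_id,
          "beta: %.4f +/- %.4f" % (pars[:, 0].mean(), pars[:, 0].std()),
          "r: %.4f +/- %.4f" % (pars[:, 1].mean(), pars[:, 1].std()))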
Example #7
 def fit_multiple(self, Sd, Id, Bd, td, 
     threshold_prop=1,
     cases_before=10,
     filt_estimate=False,
     filt_window=55,
     beta_sens=[100,10],
     r_sens=[100,10],
     out_type=0,
     **kwargs):
   """
     The method responsible for estimating a set of beta and r 
     parameters for each epidemy period existent in the provided
     dataset. It assumes that in the data there are several epidemic
     periods.
     
     :param array Sd: Array with the suceptible data.
     :param array Id: Array with the infected data.
     :param array Bd: Array with the births data.
     :param array td: The time respective to each set of samples.
     :param float threshold_prop: The standard deviation proportion used as threshold for windowing. Default is :code:`1.0`.
     :param int cases_before: The number of back samples to check for the initial window point. Default is :code:`10`.
     :param bool filt_estimate: Flag to use filtered data to estimate the model parameters. Default is :code:`False`.
     :param int filt_window: The window size used on the filtering technique, only if :code:`filt_estimate=True`. Default is :code:`55`.
     :param list beta_sens: The beta parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,100]`.
     :param list r_sens: The r parameter sensibility minimun and maximun boundaries, respectivelly. Default is :code:`[100,1000]`.
     :param int out_type: The output type, it can be :code:`1` or :code:`0`. Default is :code:`0`.
     
     :return: If the :code:`out_type=0`, it returns a tuple with the estimated beta and r, estimated, with the year of each respective window. If `out_type=1` it returns the self.data of the model, a summary with all model information.
     :rtype: tuple
   """
   self.data["full"] = {
     "I": Id, "S": Sd, 
     "B": Bd, "t": td }
   # Find the epidemy start and end points
   start, end = findEpidemyBreaks(Id, threshold_prop, cases_before)
   # Check the window sizes
   if len(start) < 2:
     print("The windows are too small!")
   if len(start) != len(end):
     end = end[:-1]
   # Check the window sizes - 2
   if self.verbose:
     print("Windows starting at: ", start)
     print("Windows ending at:   ", end)
     print("Window start cases:  ", [Id[s] for s in start])
   # Computing the approximate values 
   # of the parameters to build the 
   # parameter boundaries
   beta_approx = 1 
   r_approx = 1 / 10
   # For each epidemy window
   for s, e in zip(start, end):
     if self.verbose:
       print("New iter::: ", self.iter_counter)
       self.iter_counter += 1
     # Reading the SIR window variables
     B, S = Bd[s:e], Sd[s:e]
     I, t = Id[s:e], td[s:e]
     # Computing variables references 
     year_ref = t[0] # Year reference
     t = (t - year_ref) * 365 # Time in days
     # Initial conditions
     y0 = int(S[0]), int(I[0])
     # Parameter weights
     w = [max(I)/max(S), 1]
     # Resampling the data
     Sd_res, t_res = scs.resample(S, int(t[-1]), t=t)
     Id_res, t_res = scs.resample(I, int(t[-1]), t=t)
     # Filtering the values
     if filt_estimate:
       Sd_res = pyasl.smooth(Sd_res, filt_window, 'hamming')
       Id_res = pyasl.smooth(Id_res, filt_window, 'hamming')
     # Computing the parameter bounds   
     x0 = [beta_approx, r_approx]
     lower = [x0[0]/beta_sens[0], x0[1]/r_sens[0]]
     upper = [beta_sens[1]*x0[0], r_sens[1]*x0[1]]
     if self.verbose:
       print("\t ├─ S(0) ─  ", y0[0], "  I(0) ─  ", y0[1])
       print("\t ├─ beta ─  ", x0[0], "  r ─  ", x0[1])
       print("\t ├─ beta bound ─  ", lower[0], " ─ ", upper[0])
       print("\t ├─ r bound ─  ", lower[1], " ─ ", upper[1])
     #(c, kvg) = leastsq(obj, theta0, args=(Sd_res, Id_res, y0, t_res, w))
     c = differential_evolution(
         self.cost_function, 
         list(zip(lower, upper)),
         maxiter=60000,
         popsize=35,
         mutation=(1.5, 1.99),
         strategy="best1exp",
         workers=-1,
         updating='deferred',
         tol=0.00001,
         args=((Sd_res, Id_res), y0, t_res, w)
       ).x
      # Simulating the data
     [Sa, Ia] = self.simulate(y0, t_res, c)
     # Save the year data
     self.data["data"]["original"].append(
       { "I": I, "B": B, "S": S, "t": t/365 + year_ref })
     self.data["data"]["resampled"].append(
       {"I": Id_res, "S": Sd_res, "t": t_res/365 + year_ref})
     self.data["data"]["simulated"].append(
       {"I": Ia, "S": Sa, "t": t_res/365 + year_ref})
     self.data["pars"]["beta"].append( c[0] )
     self.data["pars"]["r"].append( c[1] )
     self.data["time"].append( year_ref )
     # Printing summary
     if self.verbose:
       print("\t └─ Defined at: ", c[0], " ─ ", c[1], "\n")
   if out_type == 0:
     return (
       self.data["pars"]["beta"], 
       self.data["pars"]["r"],  
       self.data["time"] 
     )
   return self.data
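# --- Call sketch (hypothetical, not from the original repository) ---
# As above, `model` stands for an instance of the containing class and
# Sd, Id, Bd, td for the susceptible, infected, birth and time arrays.
# With out_type=0 the method returns the beta list, the r list and the
# reference year of each epidemic window.
beta_list, r_list, years = model.fit_multiple(
    Sd, Id, Bd, td,
    threshold_prop=1.0,
    cases_before=10,
    filt_estimate=True,
    filt_window=55,
    out_type=0)
for year, beta, r in zip(years, beta_list, r_list):
    print("%s  beta = %.4f  r = %.4f" % (year, beta, r))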
Example #8
def JupiterSpectrumProcessor(Target, DateUT, Grating):
    #Example Call: JupiterSpectrumProcessor("Jupiter","20150123UT","100lpm-742NIR")
    #  Add Response Ref.,
    #  e.g., PolluxResponse20150123UT.txt. Do we ever want to address
    #  CLR + NIR spectra?
    import sys
    sys.path.append('f:\\Astronomy\Python Play')
    sys.path.append('f:\\Astronomy\Python Play\Utils')
    sys.path.append('f:\\Astronomy\Python Play\Spectrophotometry\Spectroscopy')
    import matplotlib.pyplot as pl
    import numpy as np
    import scipy
    from copy import deepcopy
    import EquivWidthUtils as EWU
    import PlotUtils as PU
    import ConfigFiles as CF
    import GeneralSpecUtils as GSU
    from PyAstronomy import pyasl  #This is where the best smoothing algorithm is!
    import datetime

    #Retrieve Target Parameters and create data paths
    J = CF.Target_Parameters(
        "f:/Astronomy/Python Play/SpectroPhotometry/Spectroscopy/Target_Parameters.txt"
    )
    J.loadtargetparams(Target)
    JupPath = CF.built_path(J)
    JupPath.spectra(DateUT)

    #Load response calibration and solar reference spectrum
    ResponseFile = {
        '20150123UT': 'PolluxResponse20150123UT.txt',
        '20150209UT': 'PolluxResponse20150210UT.txt',
        '20150210UT': 'PolluxResponse20150210UT.txt',
        '20150318UT': 'PolluxResponse20150318UT.txt',
        '20150322UT': 'PolluxResponse20150322UT.txt',
        '20150331UT': 'PolluxResponse20150322UT.txt'
    }
    Response = scipy.fromfile(
        file="f:/Astronomy/Projects/Planets/Jupiter/Spectral Data/" +
        ResponseFile[DateUT],
        dtype=float,
        count=-1,
        sep=" ")
    Response = scipy.reshape(Response, [Response.size / 2, 2])
    Response[:, 0] = Response[:, 0] / 10.
    Response[:, 1] = pyasl.smooth(Response[:, 1], 5, 'flat')
    #MasterDispersion=(Response[(Response.size/2-1),0]-Response[0,0])/(Response.size/2-1)

    Ref_g2v = scipy.loadtxt(JupPath.reference_path + J.SpecType,
                            dtype=float,
                            skiprows=3,
                            usecols=(0, 1))
    Ref_g2v[:, 0] = Ref_g2v[:, 0] / 10.
    Ref_g2v[:, 1] = pyasl.smooth(Ref_g2v[:, 1], 5, 'flat')

    #Load comparison albedo spectrum from Karkoschka, 1994 (1993 observations)
    Jupiter_Karkoschka1993 = scipy.fromfile(
        file=
        "f:/Astronomy/Projects/Planets/Saturn/Spectral Data/Karkoschka/1993.tab.txt",
        dtype=float,
        count=-1,
        sep=" ")
    Jupiter_Karkoschka1993 = scipy.reshape(
        Jupiter_Karkoschka1993, [Jupiter_Karkoschka1993.size / 8, 8])
    Jupiter_KarkRef1993 = np.zeros((Jupiter_Karkoschka1993.size / 8, 2))
    Jupiter_KarkRef1993[:, 0] = Jupiter_Karkoschka1993[:, 0]
    Jupiter_KarkRef1993[:, 1] = Jupiter_Karkoschka1993[:, 3]

    #Load plot parameters
    Jupiter = PU.PlotSetup("JupiterSpecPlotConfig.txt")
    Jupiter.loadplotparams("f:", Target, "Spectra")
    #Load plot parameters
    JupiterAlb = PU.PlotSetup("JupiterSpecPlotConfig.txt")
    JupiterAlb.loadplotparams("f:", "JupiterAlbedo", "Spectra")

    #Load observations files and create Target+DateTime keys
    #O=CF.measurement_list(Jupiter.DataFile)
    O = CF.meas_extend_test(
        Jupiter.DataFile)  #Experiment in multiple level inheritance

    O.load_records(MeasTgt=Target, DateUTSelect=DateUT, Grating=Grating)
    print "O=", O.FileList
    O.load_extra_field(MeasTgt=Target, DateUTSelect=DateUT, Grating=Grating)
    F = CF.ObsFileNames(O.FileList[0])
    F.GetFileNames()

    #Load spectral bands to measure
    Bands = EWU.LinesBands_to_Measure("Jupiter_ObsBands_135mm100lpm.txt")
    AlbedoBands = EWU.LinesBands_to_Measure("Jupiter_ObsBands_135mm100lpm.txt")
    print "Grating", O.Grating[0]
    if O.Grating[0] == "100lpm-550CLR":
        WVR = [400., 750.]
    elif O.Grating[0] == "100lpm-685NIR":
        WVR = [700., 1000.]
    elif O.Grating[0] == "100lpm-742NIR":
        WVR = [750., 1000.]
    Bands.load_records(WVRange=WVR)
    AlbedoBands.load_records(Type="Planetary", WVRange=WVR)
    print Bands.ID, AlbedoBands.ID
    flagA = False
    flagB = False

    for time in range(0, len(F.FNArray)):
        print "************************Extra Field: ", O.extra_field[0]

        #Make key, read raw spectrum
        print F.FNArray[time]
        Key, DateTime = CF.MakeKeyDate(F.FNArray[time])
        print Key
        CLR = scipy.fromfile(file=JupPath.input_path + F.FNArray[time],
                             dtype=float,
                             count=-1,
                             sep='\t')
        CLR = scipy.reshape(CLR, [CLR.size / 2, 2])
        CLR[:, 0] = CLR[:, 0] / 10.
        NativeDispersion = (CLR[(CLR.size / 2 - 1), 0] -
                            CLR[0, 0]) / (CLR.size / 2 - 1)
        wave, sig = GSU.uniform_wave_grid(CLR[:, 0], CLR[:, 1], Extend=False)
        CLRonRef = np.transpose(np.array([wave, sig]))
        #CLRonRef[:,1]=pyasl.smooth(CLRonRef[:,1],3,'flat')

        #Compute solar, telluric, and planetary equivalent widths from raw spectrum
        EWFN = JupPath.EW_path + DateUT + "-" + Grating + "-RawFlux-EW.txt"
        for B in range(0, len(Bands.ID)):
            Temp = EWU.ComputeEW1(CLRonRef, Target, DateTime, Bands.Type[B],
                                  Bands.ID[B], Bands.WV0[B], Bands.WV1[B],
                                  Bands.WVCont[B], EWFN, flagA)
            flagA = True

        #Compute top of atmosphere spectrum and albedo
        ToA = GSU.SpectrumMath(CLRonRef, Response, "Divide")
        Albedo = GSU.SpectrumMath(ToA, Ref_g2v, "Divide")
        mAlbedo = np.ma.masked_invalid(Albedo)
        if Grating == '100lpm-550CLR':
            AlbedoNormRangeIndices=np.where((Ref_g2v[:,0] >600.) & \
                 (Ref_g2v[:,0] < 610.))
        elif Grating == '100lpm-742NIR':
            AlbedoNormRangeIndices=np.where((Ref_g2v[:,0] >740.) & \
                 (Ref_g2v[:,0] < 800.))
        NormAlbedo = deepcopy(Albedo)
        NormAlbedo[:, 1] = Albedo[:, 1] / mAlbedo[AlbedoNormRangeIndices].max()
        np.savetxt(JupPath.One_D_path + Key + "Albedo.txt",
                   NormAlbedo,
                   delimiter=" ",
                   fmt="%10.3F %10.7F")

        #Compute planetary equivalent widths from albedo spectrum
        EWFN = JupPath.EW_path + DateUT + "-" + Grating + "-Albedo-EW.txt"
        for B in range(0, len(AlbedoBands.ID)):
            #print "B=",B
            Temp = EWU.ComputeEW1(NormAlbedo, Target, DateTime,
                                  AlbedoBands.Type[B], AlbedoBands.ID[B],
                                  AlbedoBands.WV0[B], AlbedoBands.WV1[B],
                                  AlbedoBands.WVCont[B], EWFN, flagB)
            flagB = True

        #Begin plotting
        ExposureCLR = 300.  #seconds
        Aperture = (0.135 / 22.)**2.  #meters^2
        Jupiter.Setup_Plot()
        pl.title(Target + " " +
                 datetime.datetime.strftime(DateTime, '%Y-%m-%d %H:%M:%S') +
                 " Spectrum",
                 fontsize=9)

        pl.step(CLRonRef[:, 0],
                CLRonRef[:, 1] / (ExposureCLR * Aperture * NativeDispersion),
                label='CLR',
                linewidth=0.5)
        pl.step(CLRonRef[:, 0],
                ToA[:, 1] / (ExposureCLR * Aperture * NativeDispersion),
                label='Top of Atm.')
        #pl.plot(Ref_g2v[:,0],Ref_g2v[:,1]*1e7,label='Solar Ref. x 1e7')
        pl.legend(loc=0, ncol=4, borderaxespad=0., prop={'size': 6})
        pl.subplots_adjust(left=0.08, right=0.98, top=0.90, bottom=0.15)
        pl.savefig(JupPath.One_D_path + Key + "_" + Grating + "_Spectrum.png",
                   dpi=300)

        JupiterAlb.Setup_Plot()
        pl.title(Target + " Albedo " +
                 datetime.datetime.strftime(DateTime, '%Y-%m-%d %H:%M:%S') +
                 " Spectrum",
                 fontsize=9)
        pl.step(NormAlbedo[:, 0], NormAlbedo[:, 1] * .56, label='Norm. Albedo')
        pl.step(Jupiter_KarkRef1993[:, 0],
                Jupiter_KarkRef1993[:, 1],
                label='Karkoschka, 1993',
                linewidth=1,
                color='0.5')
        pl.legend(loc=0, ncol=4, borderaxespad=0., prop={'size': 6})
        pl.subplots_adjust(left=0.08, right=0.98, top=0.90, bottom=0.15)
        pl.savefig(JupPath.One_D_path + Key + "_" + Grating + "_Albedo.png",
                   dpi=300)

        #Save spectrum plot, raw spectra text file, and albedo text file
        np.savetxt(JupPath.One_D_path + Key + "_" + Grating + "_" +
                   "Spectrum.txt",
                   CLRonRef,
                   delimiter=" ",
                   fmt="%10.3F %10.7F")
        np.savetxt(JupPath.One_D_path + Key + "_" + Grating + "_" +
                   "Albedo.txt",
                   NormAlbedo,
                   delimiter=" ",
                   fmt="%10.3F %10.7F")
    # --- fragment of a separate snippet; the opening of this call (and the
    # original file path) was not captured, so a placeholder name is used ---
    from scipy import interpolate
    NIR = scipy.fromfile(file=NIRFileName,  # NIRFileName: placeholder for the lost path
                         dtype=float,
                         count=-1,
                         sep=" ")
    NIR = scipy.reshape(NIR, [NIR.size / 2, 2])
    NativeDispersion = (NIR[(NIR.size / 2. - 1), 0] -
                        NIR[0, 0]) / (NIR.size / 2. - 1.)
    #    NIR[:,0]=NIR[:,0]+16.
    NRespWV = scipy.reshape(NormResponsewithWV,
                            [NormResponsewithWV.size / 2, 2])
    MasterDispersion = (NRespWV[(NRespWV.size / 2. - 1), 0] -
                        NRespWV[0, 0]) / (NRespWV.size / 2. - 1.)

    #Load Reference Spectrum: Average G2v for albedo calculations
    Ref = scipy.loadtxt("g2v.dat", dtype=float, skiprows=3, usecols=(0, 1))
    temp = Ref[:, 1]
    temp = pyasl.smooth(temp, 11, 'flat')
    Ref[:, 1] = temp

    #Interpolate NIR, Response and Reference spectra onto NIR Wavelengths

    NIRInterp = interpolate.interp1d(NIR[:, 0],
                                     NIR[:, 1],
                                     kind='linear',
                                     copy=True,
                                     bounds_error=False,
                                     fill_value=0.0)
    NIRonRef = NIRInterp(Ref[:, 0])

    NRespInterp = interpolate.interp1d(NRespWV[:, 0],
                                       NRespWV[:, 1],
                                       kind='linear',
Example #10
def fit_background(ffi, percentile=10, plots_on=False):
    """
    Estimate the background of a Full Frame Image (FFI) by treating each line of
    data as a spectroscopic spectrum and fitting to the background accordingly.
    This is done in both the x and y directions, and averaged across to create a
    final background.

    Parameters:
        ffi (ndarray): A single TESS Full Frame Image in the form of a 2D array.

        percentile (int): Default 10. The percentile value taken as the continuum value.

        plots_on (bool): Default False. When True, it will plot an example of
            the method fitting to the first line of data on the first iteration
            of the fitting loop.

    Returns:
        ndarray: Estimated background with the same size as the input image.

    .. codeauthor:: Carolina Von Essen <*****@*****.**>
    .. codeauthor:: Oliver James Hall <*****@*****.**>
    """

    # Setting up image dimensions
    ndim = ffi.shape[0]
    nbin = int(ndim / 60)  #Binning into ndim/60 bins
    ndim_vec = np.linspace(0, ndim, ndim)  #Creating a vector of length ndim
    nbin_vec = np.linspace(0, ndim, nbin)  #Creating a vector of length nbin
    int_nbin_vec = nbin_vec.astype(int)  #Converting nbin_vec to integer

    # Setting up filter
    background_map = np.zeros([ndim, ndim])
    hamming_filter = ndim // 2 + 1  #pyasl.smooth expects an odd integer window length

    # Calculating data metadata
    data_min = np.min(ffi)
    data_max = np.max(ffi)
    data_mean_img = np.mean(ffi)

    # Preparing arrays for smoothed data
    ffi_smooth_y = np.zeros_like(background_map)
    ffi_smooth_x = np.zeros_like(background_map)

    # Cutting line by line in both x and y
    for i in range(ndim):
        min_vecx = np.zeros(nbin - 1)
        min_vecy = np.zeros(nbin - 1)
        y_msky = ffi[i, ::]  #All X values for Y = i
        x_msky = ffi[::, i]  #All Y values for X = i

        for h in range(nbin - 1):  #Get the value of the chosen percentile in each bin
            min_vecx[h] = np.nanpercentile(x_msky[int_nbin_vec[h]:int_nbin_vec[h + 1]], [percentile])
            min_vecy[h] = np.nanpercentile(y_msky[int_nbin_vec[h]:int_nbin_vec[h + 1]], [percentile])

        nbin_vec = nbin_vec[0:nbin - 1]  #Adjusting nbin_vec to the same size as min_vec for interpolation

        #Interpolating the minimum values to the scale of ndim
        x_min_vec_int = np.interp(ndim_vec, nbin_vec, min_vecx)
        y_min_vec_int = np.interp(ndim_vec, nbin_vec, min_vecy)

        #Smoothing the interpolation
        x_min_vec_int_smooth = pyasl.smooth(x_min_vec_int, hamming_filter,
                                            'hamming')
        y_min_vec_int_smooth = pyasl.smooth(y_min_vec_int, hamming_filter,
                                            'hamming')

        #Saving the smoothed background line on this level
        ffi_smooth_x[::, i] = x_min_vec_int_smooth
        ffi_smooth_y[i, ::] = y_min_vec_int_smooth

        if plots_on:
            if (i == 0):
                #Showing data with continuum estimation from method
                fig, ax = plt.subplots()
                ax.set_ylim(130000., 90000.)
                ax.set_title(
                    'Low end of FFI data sliced in y with continuum estimation shown'
                )
                ax.plot(ndim_vec, y_msky, 'r-', label='FFI data')
                ax.plot(ndim_vec, y_min_vec_int, 'b-', label='Binned estimate')
                ax.plot(ndim_vec,
                        y_min_vec_int_smooth,
                        'k-',
                        label='Smoothed estimate (final)')
                ax.legend(fancybox=True, loc='best')
                plt.show()

    #Smooth across the background estimation using a Gaussian filter
    kern_px = 100
    ffi_smooth_yy = scipy.ndimage.gaussian_filter(
        ffi_smooth_y, kern_px / (2 * np.sqrt(2 * np.log(2))))
    ffi_smooth_xx = scipy.ndimage.gaussian_filter(
        ffi_smooth_x, kern_px / (2 * np.sqrt(2 * np.log(2))))

    # Taking the average between the two images
    bkg_est = (ffi_smooth_yy + ffi_smooth_xx) / 2.

    return bkg_est
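# --- Usage sketch (not part of the original snippet) ---
# Synthetic 240x240 "FFI": a smooth sky gradient plus a few bright
# pixels standing in for stars. Real TESS frames are much larger; the
# small size is only to keep the example fast. Assumes fit_background
# (defined above) and its module imports are available.
import numpy as np

rng = np.random.default_rng(1)
ndim = 240
yy, xx = np.mgrid[0:ndim, 0:ndim]
ffi = 1000.0 + 0.5 * xx + 0.2 * yy + rng.normal(0.0, 5.0, (ndim, ndim))
stars = rng.integers(0, ndim, size=(200, 2))
ffi[stars[:, 0], stars[:, 1]] += rng.uniform(500.0, 5000.0, 200)

bkg = fit_background(ffi, percentile=10, plots_on=False)
print("median residual:", np.median(ffi - bkg))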
Example #11
             lng_brite[1:len(lat_brite) - skip],
             '-b',
             label='estimated position',
             linewidth=2)
    plt.xlabel('Latitude', fontsize=14)
    plt.ylabel('Longitude', fontsize=14)
    plt.legend(loc="upper left")
    plt.show()

    error_vector = []
    print(len(lat_brite))
    print(len(lat_gps))
    for i in range(1, len(lat_brite) - 1000):
        errori = get_distance_to_another_anchor(lat_brite[i], lng_brite[i],
                                                lat_gps[i], lng_gps[i]) / 1000
        error_vector.append(errori)
    plt.figure(1)
    plt.plot(error_vector, '-r', label='Localization error', linewidth=2)
    error_vector = np.array(error_vector)
    sm1 = pyasl.smooth(error_vector, 99, 'hamming')
    plt.plot(sm1, '-b', label='Average Localization error', linewidth=2)
    plt.xlabel('Frames', fontsize=14)
    plt.ylabel('Location estimation error (m)', fontsize=14)
    plt.legend(loc="upper left")
    plt.show()

    avg = sum(error_vector) / len(error_vector)
    print(avg)
    print(min(error_vector))
    print(max(error_vector))
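# --- Standalone smoothing sketch (not part of the original snippet) ---
# Every example on this page ultimately calls pyasl.smooth(x, windowLen,
# window). windowLen must be an odd integer; 'flat' gives a moving
# average, 'hamming' a Hamming-weighted window.
import numpy as np
import matplotlib.pyplot as plt
from PyAstronomy import pyasl

x = np.linspace(0, 4 * np.pi, 500)
y = np.sin(x) + np.random.default_rng(2).normal(0.0, 0.3, x.size)

y_flat = pyasl.smooth(y, 25, 'flat')      # moving average
y_hamm = pyasl.smooth(y, 25, 'hamming')   # Hamming-weighted window

plt.plot(x, y, alpha=0.4, label='noisy input')
plt.plot(x, y_flat, label="smooth(y, 25, 'flat')")
plt.plot(x, y_hamm, label="smooth(y, 25, 'hamming')")
plt.legend(loc='best')
plt.show()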
Example #12
def train_country_pipe(country=None):
    """
  """

    # Setting some control variables
    START_SIZE = SETUP_COUNTRY[country]["start_size"]
    PEAK_EXISTS = SETUP_COUNTRY[country]["peak_exist"]

    print("Running update on : {} ...".format(country))

    # Get the model data
    covid_api = 'https://corona-api.com/countries/'
    rest_countries = 'https://restcountries.eu/rest/v2/alpha/'
    data_json = requests.get(covid_api + country).json()
    # country = requests.get(covid_api + country).json()
    N = data_json['data']['population']

    print("\t(1) Organizing the data...")

    # Creating the dataframe with the data
    df = pd.DataFrame(data_json['data']['timeline'])
    df = df.sort_values('date').reset_index()
    df['date'] = [datetime.fromisoformat(f) for f in df['date']]
    df = df.drop_duplicates(subset='date', keep='last')
    # Creating the time vector
    first_date = df['date'].iloc[0]
    size_days = (df['date'].iloc[-1] - df['date'].iloc[0]).days
    date_vec = [first_date + timedelta(days=k) for k in range(size_days)]
    new_df = pd.DataFrame(date_vec, columns=['date'])
    new_df = pd.merge(new_df, df, how='left', on='date')
    new_df = new_df.drop(columns=['index', 'updated_at', 'is_in_progress'])
    for col in new_df.columns[1:]:
        new_df[col] = new_df[col].interpolate(method='polynomial', order=1)
    df = new_df.dropna()

    # Solve particular problems
    #
    # For Brazil, the measurement on day 135 has an incorrect value,
    # so we interpolate that point in order not to lose the end of
    # the time series.
    if country == "BR":
        df.iloc[135, :] = [df.iloc[135, 0]] + [None] * 7
        df = df.interpolate(method='linear', limit_direction='forward')
        df = df.where(df.active != 0.0).dropna()

    # Creating the time vector --- for plotly
    datetime_64 = df["date"].values
    ts = (datetime_64 -
          np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
    time = [datetime.utcfromtimestamp(t) for t in ts]

    print("\t(2) Reading the model log...")
    # If the model log does not exist, we create a model log
    # with a particular structure -> dictionary

    try:
        # Reading the log table...
        log_df = pandas_gbq.read_gbq(LOG_QUERY, project_id=PROJECT_ID)
        country_list = log_df["country"].to_list()
        start_p_list = log_df["start_point"].to_list()
        log_data = dict(zip(country_list, start_p_list))
    except Exception:
        log_data = dict()
        print("\t\tCountry log table does not exist yet...")

    print("\t(3) Creating SIR data...")
    # Create the SIR model structure for the model training
    start_moment = np.argmax(df["active"].to_numpy() >= START_SIZE)
    time_ref = time[start_moment:]
    I = df['active'].to_numpy()[start_moment:]
    R = df['recovered'].to_numpy()[start_moment:]
    M = df['deaths'].to_numpy()[start_moment:]
    S = N - R - I
    # Creating the time vector
    t = np.linspace(0, len(I) - 1, len(I))
    # Create the training variables
    Sd, Id, Md, Rd, td = S, I, M, R, t

    print("\t(4) Running the time shift learning...")
    # Create the structures to save the time shift results
    saved_param = {'Ro': [], 'D': [], 'pop': [], "date": []}
    saved_prediction = {"S": [], "I": [], "R": [], "date": [], "at_date": []}

    # Check whether the country already exists in the log;
    # if it does not, start from the default window size,
    # otherwise resume the time shift learning from the
    # logged starting point
    start_day = 8  # Starting with 8 day points
    if country in log_data.keys():
        start_day = log_data[country]

    # If start_day on the logging is less than
    # the size of the data, there is room for
    # running windowed shifting learning
    if start_day < len(I):
        # If the peak does not exist yet, predict
        # 120 days ahead to find it
        if PEAK_EXISTS:
            pred_t = range(int(td[-1]))
        else:
            pred_t = range(int(td[-1]) + 120)
        # Compute the time vector
        time_vector = [time_ref[0] + timedelta(days=i) for i in pred_t]
        pred_t = np.array(pred_t)
        for i in range(start_day, len(I)):
            # Compute this day data...
            current_date = time_ref[0] + timedelta(days=i)
            current_date_vector = [current_date] * len(pred_t)
            # Get a partial window of the dataset
            dataset = dict(S=Sd[:i], I=Id[:i], R=Rd[:i])
            # Create the model
            sir_model = ss.SIR(pop=N, focus=["S", "I", "R"], verbose=False)
            # Adjust the parameters
            sir_model.fit(dataset,
                          td[:i],
                          search_pop=True,
                          pop_sens=[0.001, 0.05],
                          Ro_sens=[0.8, 15.0],
                          D_sens=[5.0, 40.0])
            # Save the estimated parameters
            saved_param['Ro'].append(sir_model.parameters[0])
            saved_param['D'].append(sir_model.parameters[1])
            saved_param['pop'].append(sir_model.parameters[2])
            saved_param['date'].append(current_date)
            # Save the model prediction
            result = sir_model.predict((Sd[0], Id[0], Rd[0]), pred_t)
            saved_prediction["S"].append(result[0])
            saved_prediction["I"].append(result[1])
            saved_prediction["R"].append(result[2])
            saved_prediction["date"] += time_vector
            saved_prediction["at_date"] += current_date_vector
            # Print the progress...
            print("\t\t߷ Run {} of {}".format(i - start_day + 1,
                                              len(I) - start_day))

        print("\t(5) Determining the peak...")
        if PEAK_EXISTS:
            # Compute the derivative of the smoothed
            # active infected time series
            dI = np.gradient(pyasl.smooth(I, 13, "hamming"))
            t = np.linspace(0, len(dI), len(dI))
            # Find where the derivative is non-negative (scanning backwards)
            signal = np.array([di >= 0 for di in dI[::-1]])
            # Find the first point where the derivative
            # changes sign
            peak_pos = int(len(Id) - np.argmax(signal))
            peak_date = [time[0] + timedelta(days=peak_pos)] * len(
                saved_param["D"])
        else:
            estimated_peaks = []
            for data in saved_prediction["I"]:
                # Computing the prediction derivative
                dI = np.gradient(data)
                # Computing the derivative signal
                signal_pred = np.array([di >= 0 for di in dI[::-1]])
                # Computing the peak estimate point
                # print("Signal shape: ", len(data) - np.argmax(signal_pred))
                estimated_peaks.append(len(data) - np.argmax(signal_pred))
            # print(estimated_peaks)
            peak_date = [
                time_ref[0] + timedelta(days=int(p)) for p in estimated_peaks
            ]

        print("\t(6) Transforming data to lists...")
        # Transform all in lists
        for item in ["S", "I", "R"]:
            auxiliar_list = []
            for data in saved_prediction[item]:
                auxiliar_list += data.tolist()
            saved_prediction[item] = auxiliar_list

        print("\t(7) Uploading data to cloud...")
        try:
            # Build the data tables to upload
            print("\t\tUploading the parameters...")
            par_df = pd.DataFrame(data=saved_param)
            par_df["country"] = country  # Create the country column
            par_df["peak_est"] = peak_date  # Creating the column of peak dates
            pandas_gbq.to_gbq(par_df,
                              PAR_TABLE_ID,
                              project_id=PROJECT_ID,
                              credentials=CREDENTIALS,
                              if_exists='append')

            print("\t\tUploading the predictions...")
            pred_df = pd.DataFrame(data=saved_prediction)
            pred_df["country"] = country  # Create the country column
            pandas_gbq.to_gbq(pred_df,
                              PRED_TABLE_ID,
                              project_id=PROJECT_ID,
                              credentials=CREDENTIALS,
                              if_exists='append')

            # Update the logging values
            print("\t\tUpdating the log...")
            log_data[country] = len(I)
        except Exception:
            print("\t\tCloud uploading error!")

        # Saving the log data into the cloud
        print("\t(8) Saving log data...")
        try:
            log_upload = {"country": [], "start_point": []}
            for c in log_data.keys():
                log_upload["country"].append(c)
                log_upload["start_point"].append(log_data[c])
            df_log = pd.DataFrame(log_upload)
            pandas_gbq.to_gbq(df_log,
                              TABLE_LOG_ID,
                              project_id=PROJECT_ID,
                              credentials=CREDENTIALS,
                              if_exists="replace")
        except Exception as e:
            print("\t\tUnable to upload log due to {}".format(e))
    else:
        print("\t߷ Nothing to update...")

    print("DONE! -> Model Update - Process from: {}".format(datetime.now()))
Example #13
def BeltProfileProcessor(filename, colatrange, N):
    import sys
    sys.path.append('f:\\Astronomy\Python Play')
    #import matplotlib.pyplot as pl
    import pylab as pl
    import numpy as np
    import scipy
    from copy import deepcopy
    from PyAstronomy import pyasl  #This is where the best smoothing algorithm is!

    #Loadfile and reshape
    Temp = scipy.fromfile(file=filename, dtype=float, count=-1, sep=" ")
    Temp1 = scipy.reshape(Temp, [Temp.size / 4, 4])
    lat = 90. - Temp1[colatrange[0]:colatrange[1], 0]
    profile = Temp1[colatrange[0]:colatrange[1], 1]

    #Smooth profile and normalize
    latsmooth = lat
    if np.max(profile) < 256:
        profilesmooth = pyasl.smooth(profile, N, 'flat') / 255.
    elif np.max(profile) > 256:
        profilesmooth = pyasl.smooth(profile, N, 'flat') / 65535.

    #Compute first differences and latitude grid
    dlat = latsmooth[1:] + 0.5
    dprofile = (profilesmooth[1:colatrange[1]] -
                profilesmooth[0:(colatrange[1] - 1)]) * 10. + 0.2
    dprofilesmooth = pyasl.smooth(dprofile, N, 'flat')

    #Compute second difference and latitude grid.
    ddlat = dlat[1:] + 0.5
    ddprofile = (dprofilesmooth[0:(colatrange[1] - 2)] -
                 dprofilesmooth[1:(colatrange[1] - 1)]) * 10.

    belts = ((ddprofile >= 0.0).astype(float) - 0.5) * 0.1
    latbelts = ddlat

    dbeltslat = ddlat[1:] + 0.5
    dbelts = (belts[0:(colatrange[1] - 3)] -
              belts[1:(colatrange[1] - 1)]) * 10.

    #From north to south
    ToBelt = dbeltslat[np.where(dbelts == 1)]
    ToZone = dbeltslat[np.where(dbelts == -1)]

    Belts = np.zeros((1, 3))
    Zones = np.zeros((1, 3))

    if ToBelt[0] > ToZone[0]:
        Zones[0, :] = [90., ToBelt[0], np.mean([90., ToBelt[0]])]
        Belts[0, :] = [ToBelt[0], ToZone[0], np.mean([ToBelt[0], ToZone[0]])]
        for i in range(0, np.min([len(ToZone), len(ToBelt)]) - 1):
            Zones = np.append(Zones, [[
                ToZone[i], ToBelt[i + 1],
                np.mean([ToZone[i], ToBelt[i + 1]])
            ]], 0)
            Belts = np.append(Belts, [[
                ToBelt[i + 1], ToZone[i + 1],
                np.mean([ToZone[i + 1], ToBelt[i + 1]])
            ]], 0)
    else:
        Belts[0, :] = [90., ToZone[0], np.mean([90., ToZone[0]])]
        Zones[0, :] = [ToZone[0], ToBelt[0], np.mean([ToBelt[0], ToZone[0]])]
        for i in range(0, np.min([len(ToZone), len(ToBelt)]) - 1):
            Zones = np.append(Zones, [[
                ToZone[i + 1], ToBelt[i + 1],
                np.mean([ToZone[i + 1], ToBelt[i + 1]])
            ]], 0)
            Belts = np.append(Belts, [[
                ToBelt[i], ToZone[i + 1],
                np.mean([ToZone[i], ToBelt[i + 1]])
            ]], 0)

    print "Belts=", Belts
    print "Zones=", Zones

    #print "To Belt",ToBelt
    #print "To Zone",ToZone
    #print ' '

    return Belts, Zones, lat, profilesmooth, dlat, dprofilesmooth, ddlat, ddprofile, latbelts, belts