Example #1
 def log10ccfRutten(self, bv, lc="ms"):
   """
     Ccf conversion factor from Rutten 1984 (Eqs. 10a and 10b).
     
     Parameters
     ----------
     bv : float
         B - V color [mag].
     lc : string, {ms, g}, optional
         Specifies whether the relation for
         main-sequence (ms) or giant (g) stars
         shall be evaluated.
     
     Returns
     -------
     log10(Ccf) : float
         The logarithm of the conversion factor.
   """
   if lc == "ms":
     if (bv < 0.3) or (bv > 1.6):
       PE.warn(PE.PyAValError("B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.6 " +
                              "for main-sequence stars. You specified: " + str(bv) + "."))
     logccf = 0.25*bv**3 - 1.33*bv**2 + 0.43*bv + 0.24
   elif lc == "g":
     if (bv < 0.3) or (bv > 1.7):
       PE.warn(PE.PyAValError("B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.7 " +
                              "for giant stars. You specified: " + str(bv) + "."))
     logccf = -0.066*bv**3 - 0.25*bv**2 - 0.49*bv + 0.45
   else:
     raise(PE.PyAValError("No such luminosity class: " + str(lc), \
                          solution="Specify either 'ms' or 'g'."))
   return logccf
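A minimal standalone sketch (not part of the example above) of what the main-sequence branch evaluates to; bv = 0.65 mag is an assumed illustrative input.

# Illustrative evaluation of Eq. 10a as coded above; bv = 0.65 is an assumed value.
bv = 0.65
logccf = 0.25*bv**3 - 1.33*bv**2 + 0.43*bv + 0.24
print("log10(Ccf) = %.3f" % logccf)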
Example #2
  def plotHist(self, parsList=None):
    """
      Plots distributions for a number of traces.

      Parameters
      ----------
      parsList : string or list of strings, optional,
          Refers to a parameter name or a list of parameter names.
          If None, all available parameters are plotted.
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    if isinstance(parsList, basestring):
      parsList = [parsList]
    tracesDic = {}
    if parsList is not None:
      for parm in parsList:
        self._parmCheck(parm)
        tracesDic[parm] = self[parm]
    else:
      # Use all available traces
      for parm in self.availableParameters():
        tracesDic[parm] = self[parm]

    cols, rows = self.__plotsizeHelper(len(tracesDic))

    for i,[pars,trace] in enumerate(tracesDic.items()):
      # Use the number of traces; parsList may be None if all parameters are plotted
      if len(tracesDic) > 1:
        plt.subplot(rows, cols, i+1)
      plt.hist(trace, label=pars + " hist")
      plt.legend()
Example #3
 def logRphotNoyes(self, bv, lc="ms"):
   """
     Photospheric contribution to surface flux in the H and K pass-bands.
     
     Relation given by Noyes et al. 1984.
     
     Parameters
     ----------
     bv : float
         B-V color [mag]
     lc : string, {ms, g}, optional
         Luminosity class.
     
     Returns
     -------
     log10(Rphot) : float
         Logarithm of the photospheric contribution.
   """
   if (bv < 0.44) or (bv > 0.82):
     PE.warn(PE.PyAValError("Noyes et al. 1984 give a validity range of 0.44 < B-V < 0.82 for the " + \
                            "photospheric correction. However, the authors use it for B-V > 0.82, " + \
                            "where it quickly decreases."))
   if lc != "ms":
     PE.warn(PE.PyAValError("Noyes et al. 1984 specify the photospheric correction only for main-sequence stars."))
   rp = -4.898 + 1.918*bv**2 - 2.893*bv**3
   return rp
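A minimal standalone sketch (not part of the example above) evaluating the Noyes et al. 1984 photospheric correction for an assumed color inside the stated validity range.

# Illustrative evaluation of the relation coded above; bv = 0.65 is an assumed value.
bv = 0.65
rphot = -4.898 + 1.918*bv**2 - 2.893*bv**3
print("log10(Rphot) = %.3f" % rphot)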
Example #4
def ffmodelExplorer(odf, plotter, version="list"):
  """
    Instantiate the model explorer.
    
    Parameters
    ----------
    odf : Instance of OneDFit
        The model to be adapted.
    plotter : Instance of FFModelPlotFit or custom
        Class instance managing the actual plotting.
    version : string, {list}
        The version of model explorer. Currently, only
        'list' is supported.
    
    Returns
    -------
    ffmod : Model explorer
        An instance of the model explorer.
  """
  if version == "list":
    ffmod = FFModelExplorerList(odf, plotter)
    return ffmod
  elif version == "dropdown":
    PE.warn(PE.PyADeprecationError("Please note that the dropdown version is no longer supported. " + \
                                   "The list version will be called instead."))
    ffmod = FFModelExplorerList(odf, plotter)
    return ffmod
  else:
    raise(PE.PyAValError("Unknown version: '" + str(version) + "'", \
                         where="ffmodelExplorer", \
                         solution="Choose between 'list' and 'dropdown'."))
Example #5
    def plotHist(self, parsList=None):
        """
      Plots distributions for a number of traces.

      Parameters
      ----------
      parsList : string or list of strings, optional,
          Refers to a parameter name or a list of parameter names.
          If None, all available parameters are plotted.
    """
        if not ic.check["matplotlib"]:
            PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                         solution="Install matplotlib."))
            return
        if isinstance(parsList, six.string_types):
            parsList = [parsList]
        tracesDic = {}
        if parsList is not None:
            for parm in parsList:
                self._parmCheck(parm)
                tracesDic[parm] = self[parm]
        else:
            # Use all available traces
            for parm in self.availableParameters():
                tracesDic[parm] = self[parm]

        cols, rows = self.__plotsizeHelper(len(tracesDic))

        for i, [pars, trace] in enumerate(tracesDic.items()):
            if len(tracesDic) > 1:
                plt.subplot(rows, cols, i + 1)
            plt.hist(trace, label=pars + " hist")
            plt.legend()
Example #6
def ffmodelExplorer(odf, plotter, version="list"):
  """
    Instantiate the model explorer.
    
    Parameters
    ----------
    odf : Instance of OneDFit
        The model to be adapted.
    plotter : Instance of FFModelPlotFit or custom
        Class instance managing the actual plotting.
    version : string, {list}
        The version of model explorer. Currently, only
        'list' is supported.
    
    Returns
    -------
    ffmod : Model explorer
        An instance of the model explorer.
  """
  if version == "list":
    ffmod = FFModelExplorerList(odf, plotter)
    return ffmod
  elif version == "dropdown":
    PE.warn(PE.PyADeprecationError("Please note that the dropdown version is no longer supported. " + \
                                   "The list version will be called instead."))
    ffmod = FFModelExplorerList(odf, plotter)
    return ffmod
  else:
    raise(PE.PyAValError("Unknown version: '" + str(version) + "'", \
                         where="ffmodelExplorer", \
                         solution="Choose between 'list' and 'dropdown'."))
Example #7
 def logRphotNoyes(self, bv, lc="ms"):
     """
   Photospheric contribution to surface flux in the H and K pass-bands.
   
   Relation given by Noyes et al. 1984.
   
   Parameters
   ----------
   bv : float
       B-V color [mag]
   lc : string, {ms, g}, optional
       Luminosity class.
   
   Returns
   -------
   log10(Rphot) : float
       Logarithm of the photospheric contribution.
 """
     if (bv < 0.44) or (bv > 0.82):
         PE.warn(PE.PyAValError("Noyes et al. 1984 give a validity range of 0.44 < B-V < 0.82 for the " + \
                                "photospheric correction. However, the authors use it for B-V > 0.82, " + \
                                "where it quickly decreases."))
     if lc != "ms":
         PE.warn(
             PE.PyAValError(
                 "Noyes et al. 1984 specify the photospheric correction only for main-sequence stars."
             ))
     rp = -4.898 + 1.918 * bv**2 - 2.893 * bv**3
     return rp
Example #8
  def plotHist(self, parsList=None, **histArgs):
    """
      Plots distributions for a number of traces.

      Parameters
      ----------
      parsList : string or list of strings, optional,
          Refers to a parameter name or a list of parameter names.
          If None, all available parameters are plotted.
      histArgs : dict, optional
          Keyword arguments (e.g., `nbins`) passed to the
          histogram plotter (`pymc.Matplot.histogram`).
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    if isinstance(parsList, basestring):
      parsList = [parsList]
    tracesDic = {}
    if parsList is not None:
      for parm in parsList:
        self._parmCheck(parm)
        tracesDic[parm] = self[parm]
    else:
      # Use all available traces
      for parm in self.availableParameters():
        tracesDic[parm] = self[parm]

    ps = self.__plotsizeHelper(len(tracesDic))

    for i,[pars,trace] in enumerate(tracesDic.items()):
      pymc.Matplot.histogram(trace,pars,columns=ps[0],rows=ps[1],num=i+1,**histArgs)
Example #9
def decomposeFilename(fn):
  """
    Decompose PHOENIX filename.
    
    Parameters
    ----------
    fn : string
        The filename.
    
    Returns
    -------
    Parameters : dictionary
        A dictionary with the following keys:
         - teff: The effective temperature in K
         - logg: Log(g [cm/s**2])
         - met: Metallicity (M/H)
         - fn: Complete filename
         - notParsed: Part of the filename not parsed for teff, logg, and metallicity.
        Note that `None` is returned if the filename could not be
        parsed.
  """
  r = re.match("lte(\d+)([+-])(\d+\.\d+)([-+]\d+\.\d+)(.*)", fn)
  if r is None:
    PE.warn(PE.PyAValError("Cannot decompose PHOENIX filename: " + str(fn)))
    return None
  result = {}
  result["fn"] = fn
  result["teff"] = int(r.group(1)) * 100
  result["logg"] = float(r.group(3))
  if r.group(2) == "+":
    result["logg"] *= -1.0
  result["met"] = float(r.group(4))
  result["notParsed"] = r.group(5)
  return result
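A small standalone sketch of the pattern match used above; the filename is made up purely to satisfy the regular expression and is not a real PHOENIX file.

import re

# Made-up filename matching the pattern used in decomposeFilename above
fn = "lte03200-4.50-0.0.Example.fits"
r = re.match(r"lte(\d+)([+-])(\d+\.\d+)([-+]\d+\.\d+)(.*)", fn)
print(r.groups())  # ('03200', '-', '4.50', '-0.0', '.Example.fits')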
Example #10
    def plotDeviance(self, parsList=None):
        """
      Plots value of deviance over parameter values encountered during sampling.

      Parameters
      ----------
      parsList : string or list of strings, optional,
          Refers to a parameter name or a list of parameter names.
          If None, all available parameters are plotted.
    """
        if not ic.check["matplotlib"]:
            PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                         solution="Install matplotlib."))
            return
        if isinstance(parsList, six.string_types):
            parsList = [parsList]
        tracesDic = {}
        if parsList is not None:
            for parm in parsList:
                self._parmCheck(parm)
                tracesDic[parm] = self[parm]
        else:
            # Use all available traces
            for parm in self.availableParameters():
                tracesDic[parm] = self[parm]

        ps = self.__plotsizeHelper(len(tracesDic))

        for i, [pars, trace] in enumerate(six.iteritems(tracesDic), 1):
            plt.subplot(ps[0], ps[1], i)
            plt.xlabel(pars)
            plt.ylabel("Deviance")
            plt.plot(self[pars], self["deviance"], '.')
Example #11
  def plotDeviance(self, parsList=None):
    """
      Plots value of deviance over parameter values encountered during sampling.

      Parameters
      ----------
      parsList : string or list of strings, optional,
          Refers to a parameter name or a list of parameter names.
          If None, all available parameters are plotted.
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    if isinstance(parsList, basestring):
      parsList = [parsList]
    tracesDic = {}
    if parsList is not None:
      for parm in parsList:
        self._parmCheck(parm)
        tracesDic[parm] = self[parm]
    else:
      # Use all available traces
      for parm in self.availableParameters():
        tracesDic[parm] = self[parm]

    ps = self.__plotsizeHelper(len(tracesDic))

    for i,[pars,trace] in enumerate(tracesDic.items()):
      # matplotlib subplot indices start at 1
      plt.subplot(ps[0],ps[1],i+1)
      plt.xlabel(pars)
      plt.ylabel("Deviance")
      plt.plot(self[pars],self["deviance"],'.')
Example #12
    def _output(self):
        """
      Some statistical output.
    """
        # Index with maximum power
        bbin = argmax(self.power)
        # Maximum power
        pmax = self._upow[bbin]

        rms = sqrt(self._YY * (1.0 - pmax))

        # Number of input points
        nt = float(self.N)

        # Get the curvature in the power peak by fitting a parabola y=aa*x^2
        if (bbin > 1) and (bbin < len(self.freq) - 2):
            # Shift the parabola origin to power peak
            xh = (self.freq[bbin - 1 : bbin + 2] - self.freq[bbin]) ** 2
            yh = self._upow[bbin - 1 : bbin + 2] - self._upow[bbin]
            # Calculate the curvature (final equation from least square)
            aa = sum(yh * xh) / sum(xh * xh)
            f_err = sqrt(-2.0 / nt * pmax / aa * (1.0 - pmax) / pmax)
            Psin_err = sqrt(-2.0 / nt * pmax / aa * (1.0 - pmax) / pmax) / self.freq[bbin] ** 2
        else:
            f_err = None
            Psin_err = None
            PE.warn(
                PE.PyAValError(
                    "WARNING: Highest peak is at the edge of the frequency range.\nNo output of frequency error.\nIncrease frequency range to sample the peak maximum."
                )
            )

        fbest = self.freq[bbin]
        amp = sqrt(self._a[bbin] ** 2 + self._b[bbin] ** 2)
        ph = arctan2(self._a[bbin], self._b[bbin]) / (2.0 * pi)
        T0 = min(self.th) - ph / fbest
        # Re-add the mean
        offset = self._off[bbin] + self._Y

        # Statistics
        print("Generalized LS - statistical output")
        print("-----------------------------------")
        print("Number of input points:     %6d" % (nt))
        print("Weighted mean of dataset:   % e" % (self._Y))
        print("Weighted rms of dataset:    % e" % (sqrt(self._YY)))
        print("Time base:                  % e" % (max(self.th) - min(self.th)))
        print("Number of frequency points: %6d" % (len(self.freq)))
        print()
        print("Maximum power, p :    % e " % (self.power[bbin]))
        print("Maximum power (without normalization):   %e" % (pmax))
        print("Normalization    : ", self.norm)
        print("RMS of residuals :    % e " % (rms))
        if self.error is not None:
            print("  Mean weighted internal error:  % e" % (sqrt(nt / sum(1.0 / self.error ** 2))))
        print("Best sine frequency : % e +/- % e" % (fbest, f_err))
        print("Best sine period    : % e +/- % e" % (1.0 / fbest, Psin_err))
        print("Amplitude:          : % e +/- % e" % (amp, sqrt(2.0 / nt) * rms))
        print("Phase (ph) : % e +/- % e" % (ph, sqrt(2.0 / nt) * rms / amp / (2.0 * pi)))
        print("Phase (T0) : % e +/- % e" % (T0, sqrt(2.0 / nt) * rms / amp / (2.0 * pi) / fbest))
        print("Offset     : % e +/- % e" % (offset, sqrt(1.0 / nt) * rms))
        print("-----------------------------------")
Example #13
 def show(self):
     """
   Call *show()* from matplotlib to bring graphs to screen.
 """
     try:
         plt.show()
     except Exception as e:
         PE.warn(PE.PyAUnclassifiedError("Plot could not be shown. The following exception occurred:\n" \
                                         + str(e)))
Example #14
 def show(self):
   """
     Call *show()* from matplotlib to bring graphs to screen.
   """
   try:
     plt.show()
   except Exception as e:
     PE.warn(PE.PyAUnclassifiedError("Plot could not be shown. The following exception occurred:\n" \
                                     + str(e)))
Example #15
    def plotCorrEnh(self, parsList=None, **plotArgs):
        """
      Produces enhanced correlation plots.
      
      Parameters
      ----------
      parsList : list of string,  optional
          If not given, all available traces are used.
          Otherwise a list of at least two parameters
          has to be specified.
      plotArgs : dict, optional
          Keyword arguments handed to plot procedures of
          pylab. The following keywords are available:
          contour,bins,cmap,origin,interpolation,colors
    """
        if not ic.check["matplotlib"]:
            PE.warn(PE.PyARequiredImport("To use 'plotCorr', matplotlib has to be installed.", \
                                         solution="Install matplotlib."))
            return
        tracesDic = {}
        if parsList is not None:
            for parm in parsList:
                self._parmCheck(parm)
                tracesDic[parm] = self[parm]
            if len(tracesDic) < 2:
                raise(PE.PyAValError("For plotting correlations, at least two valid parameters are needed.", \
                                     where="TraceAnalysis::plotCorr"))
        else:
            # Use all available traces
            for parm in self.availableParameters():
                tracesDic[parm] = self[parm]

        pars = list(tracesDic.keys())
        traces = list(tracesDic.values())

        fontmap = {1: 10, 2: 9, 3: 8, 4: 8, 5: 8}
        if not len(tracesDic) - 1 in fontmap:
            fontmap[len(tracesDic) - 1] = 8

        k = 1
        for j in range(len(tracesDic)):
            for i in range(len(tracesDic)):
                if i > j:
                    plt.subplot(len(tracesDic) - 1, len(tracesDic) - 1, k)
                    #          plt.title("Pearson's R: %1.5f" % self.pearsonr(pars[j],pars[i])[0], fontsize='x-small')
                    plt.xlabel(pars[j], fontsize=fontmap[len(tracesDic) - 1])
                    plt.ylabel(pars[i], fontsize=fontmap[len(tracesDic) - 1])
                    tlabels = plt.gca().get_xticklabels()
                    plt.setp(tlabels, 'fontsize', fontmap[len(tracesDic) - 1])
                    tlabels = plt.gca().get_yticklabels()
                    plt.setp(tlabels, 'fontsize', fontmap[len(tracesDic) - 1])
                    #          plt.plot(traces[j],traces[i],'.',**plotArgs)
                    self.__hist2d(traces[j], traces[i], **plotArgs)
                if i != j:
                    k += 1
Example #16
  def plotCorrEnh(self, parsList=None, **plotArgs):
    """
      Produces enhanced correlation plots.
      
      Parameters
      ----------
      parsList : list of string,  optional
          If not given, all available traces are used.
          Otherwise a list of at least two parameters
          has to be specified.
      plotArgs : dict, optional
          Keyword arguments handed to plot procedures of
          pylab. The following keywords are available:
          contour,bins,cmap,origin,interpolation,colors
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotCorr', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    tracesDic = {}
    if parsList is not None:
      for parm in parsList:
        self._parmCheck(parm)
        tracesDic[parm] = self[parm]
      if len(tracesDic) < 2:
        raise(PE.PyAValError("For plotting correlations, at least two valid parameters are needed.", \
                             where="TraceAnalysis::plotCorr"))
    else:
      # Use all available traces
      for parm in self.availableParameters():
        tracesDic[parm] = self[parm]

    pars = tracesDic.keys()
    traces = tracesDic.values()

    fontmap = {1:10, 2:9, 3:8, 4:8, 5:8}
    if not len(tracesDic)-1 in fontmap:
      fontmap[len(tracesDic)-1] = 8

    k = 1
    for j in range(len(tracesDic)):
      for i in range(len(tracesDic)):
        if i>j:
          plt.subplot(len(tracesDic)-1,len(tracesDic)-1,k)
#          plt.title("Pearson's R: %1.5f" % self.pearsonr(pars[j],pars[i])[0], fontsize='x-small')
          plt.xlabel(pars[j], fontsize=fontmap[len(tracesDic)-1])
          plt.ylabel(pars[i], fontsize=fontmap[len(tracesDic)-1])
          tlabels = plt.gca().get_xticklabels()
          plt.setp(tlabels, 'fontsize', fontmap[len(tracesDic)-1])
          tlabels = plt.gca().get_yticklabels()
          plt.setp(tlabels, 'fontsize', fontmap[len(tracesDic)-1])
#          plt.plot(traces[j],traces[i],'.',**plotArgs)
          self.__hist2d(traces[j],traces[i],**plotArgs)
        if i!=j:
          k+=1
Example #17
 def _readData(self):
     """
     """
     # Determine number of planets in the csv file
     r = csv.DictReader(self._fs.requestFile(self.dataFileName, 'rt',
                                             gzip.open),
                        delimiter=',')
     for nplanets, x in enumerate(r):
         pass
     # Reinitialize csv file
     r = csv.DictReader(self._fs.requestFile(self.dataFileName, 'rt',
                                             gzip.open),
                        delimiter=',')
     # Determine data types for numpy recarray from columns
     # and initialize
     dtype = [(self._columns[x][0], self._columns[x][3])
              for x in range(len(self._columns))]
     self.data = np.recarray((nplanets + 1, ), dtype=dtype)
     colnotfilled = [
         self._columns[x][0] for x in six.iterkeys(self._columns)
     ]
     for i, x in enumerate(r):
         for k, v in six.iteritems(x):
             # Remove hash and white spaces from column names
             k = k.strip('#')
             k = k.strip()
             # Translate csv column name into internal column name
             if k in self._ident:
                 key = self._ident[k]
             else:
                 key = k
             if len(v) == 0:
                 v = None
             # Accept only expected fields
             if not key in self.data.dtype.names:
                 continue
             try:
                 colnotfilled.remove(key)
             except ValueError:
                 # Ignoring already removed value
                 pass
             self.data[key][i] = v
     if len(colnotfilled) > 0:
         PE.warn(
             PE.PyAAlgorithmFailure(
                 "Not all columns could be filled with data. The following columns must not be used: "
                 + ", ".join(colnotfilled),
                 where="ExoplanetEU",
                 solution=
                 "The format of the data base must be checked. Please consider issuing a bug report via github."
             ))
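A stripped-down sketch of the two-pass pattern used above (count rows first, then fill a pre-sized recarray); the CSV text and column names are made up.

import csv
import io
import numpy as np

text = "# name,mass\nplanet b,1.0\nplanet c,2.0\n"

# First pass: count the data rows
for nrows, _ in enumerate(csv.DictReader(io.StringIO(text), delimiter=',')):
    pass
# Second pass: fill a pre-sized recarray, cleaning column names as above
data = np.recarray((nrows + 1,), dtype=[("name", "U20"), ("mass", float)])
for i, row in enumerate(csv.DictReader(io.StringIO(text), delimiter=',')):
    for k, v in row.items():
        data[k.strip('#').strip()][i] = v
print(data.name, data.mass)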
Example #18
 def plotTrace(self, parm):
   """
     Plots the trace.
     
     Parameters
     ----------
     parm : string
         The variable name.
   """
   if not ic.check["matplotlib"]:
     PE.warn(PE.PyARequiredImport("To use 'plotTrace', matplotlib has to be installed.", \
                                  solution="Install matplotlib."))
     return
   self._parmCheck(parm)
   plt.plot(self[parm], 'b-', label=parm + " trace")
   plt.legend()
Example #19
 def plotTrace(self, parm, **traceArgs):
   """
     Plots the trace.
     
     Parameters
     ----------
     parm : string
         The variable name.
     traceArgs : dict
         Keyword arguments handed to `pymc.Matplot.trace`.
   """
   if not ic.check["matplotlib"]:
     PE.warn(PE.PyARequiredImport("To use 'plotTrace', matplotlib has to be installed.", \
                                  solution="Install matplotlib."))
     return
   self._parmCheck(parm)
   pymc.Matplot.trace(self[parm],parm,fontmap={0.5: 10, 1:10, 2:8, 3:6, 4:5, 5:4},**traceArgs)
Example #20
  def plotTraceHist(self, parm, **plotArgs):
    """
      Plots trace and histogram (distribution).

      Parameters
      ----------
      parm : string
          The variable name.
      plotArgs : dict
          Keyword arguments handed to `pymc.Matplot.plot`.
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotTraceHist', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    self._parmCheck(parm)
    pymc.Matplot.plot(self[parm], parm, **plotArgs)
Example #21
def gyroAgeBarnes(p, bv):
    """
    Calculate gyrochronological age according to Barnes 2007.
    
    The gyrochronological age is calculated according to Eq. 3 in
    Barnes 2007 (ApJ 669, 1167). The derivation of the error follows Eq. 16. 
    
    Parameters
    ----------
    p : float
        Stellar rotation period [d].
    bv : float
        B-V color [mag]. Supported range is 0.4-1.6 mag.
    
    Returns
    -------
    Stellar age : float
        The gyrochronological age [Ga].
    Age error : float
        The error on the age [Ga].
  """
    # See Eq. 3
    n = 0.5189
    a = 0.7725
    b = 0.601
    x = bv - 0.4

    if bv < 0.4:
        raise(PE.PyAValError("Relation not defined for B-V < 0.4. Value of " + str(bv) + " was given.", \
                             where="gyroAgeBarnes", \
                             solution="Use a value between 0.4 and 1.6 for B-V."))
    if bv > 1.6:
        PE.warn(PE.PyAValError("No calibration for B-V > 1.6. Value given is " + str(bv) + "." +\
                               "The result is an extrapolation.", \
                               where="gyroAgeBarnes"))

    # Logarithm of the age [Myr]
    lnt = 1. / n * (np.log(p) - np.log(a) - b * np.log(x))
    # Age in Myr
    t = np.exp(lnt)
    # Relative error (Eq. 16)
    dtt = 0.02 * np.sqrt(3. + 0.5 * lnt**2 + 2.0 * p**0.6 + (0.6 / x)**2 +
                         (2.4 * np.log(x))**2)
    return t / 1000., t * dtt / 1000.
Example #22
def gyroAgeBarnes(p, bv):
  """
    Calculate gyrochronological age according to Barnes 2007.
    
    The gyrochronological age is calculated according to Eq. 3 in
    Barnes 2007 (ApJ 669, 1167). The derivation of the error follows Eq. 16. 
    
    Parameters
    ----------
    p : float
        Stellar rotation period [d].
    bv : float
        B-V color [mag]. Supported range is 0.4-1.6 mag.
    
    Returns
    -------
    Stellar age : float
        The gyrochronological age [Ga].
    Age error : float
        The error on the age [Ga].
  """
  # See Eq. 3
  n = 0.5189
  a = 0.7725
  b = 0.601
  x = bv-0.4
  
  if bv < 0.4:
    raise(PE.PyAValError("Relation not defined for B-V < 0.4. Value of " + str(bv) + " was given.", \
                         where="gyroAgeBarnes", \
                         solution="Use a value between 0.4 and 1.6 for B-V."))
  if bv > 1.6:
    PE.warn(PE.PyAValError("No calibration for B-V > 1.6. Value given is " + str(bv) + "." +\
                           "The result is an extrapolation.", \
                           where="gyroAgeBarnes"))
  
  # Logarithm of the age [Myr]
  lnt = 1./n * (np.log(p) - np.log(a) - b*np.log(x))
  # Age in Myr
  t = np.exp(lnt)
  # Relative error (Eq. 16)
  dtt = 0.02*np.sqrt(3. + 0.5*lnt**2 + 2.0*p**0.6 + (0.6/x)**2 + (2.4*np.log(x))**2)
  return t/1000., t*dtt/1000.
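A worked number for the relation above: with roughly solar inputs (assumed P = 25.4 d, B-V = 0.65) the formula gives an age close to the solar value.

import numpy as np

# Assumed, roughly solar inputs; constants as in Eq. 3 above
p, bv = 25.4, 0.65
n, a, b = 0.5189, 0.7725, 0.601
x = bv - 0.4
lnt = 1./n * (np.log(p) - np.log(a) - b*np.log(x))
print("Age [Ga] = %.2f" % (np.exp(lnt)/1000.))   # roughly 4 Ga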
Example #23
    def plotTrace(self, parm, fmt='b-'):
        """
        Plots the trace.

        Parameters
        ----------
        parm : string
            The variable name.
        fmt : string, optional
            The matplotlib format string used to plot the trace.
            Default is 'b-'.
        """
        if not ic.check["matplotlib"]:
            PE.warn(PE.PyARequiredImport("To use 'plotTrace', matplotlib has to be installed.",
                                         solution="Install matplotlib."))
            return
        self._parmCheck(parm)
        plt.plot(self[parm], fmt, label=parm + " trace")
        plt.legend()
Example #24
 def _createSet(self, bn, lines):
   """
     Create data set from band name and set of lines.
     
     Parameters
     ----------
     bn : string
         Name of the band
     lines : list of string
         The data (table) specifying transmission as
         a function of wavelength.
   """
   if bn in self.bands:
     PE.warn(PE.PyAValError("Band named '" + str(bn) + "' already exists.", \
                            where="TransmissionCurves (reading data from file)", \
                            solution="Use unique band names."))
   self.bands[bn] = np.zeros( (len(lines), 2) )
   for i in smo.range(len(lines)):
     self.bands[bn][i,::] = np.array(lines[i].split(), dtype=float)
Example #25
 def _createSet(self, bn, lines):
   """
     Create data set from band name and set of lines.
     
     Parameters
     ----------
     bn : string
         Name of the band
     lines : list of string
         The data (table) specifying transmission as
         a function of wavelength.
   """
   if bn in self.bands:
     PE.warn(PE.PyAValError("Band named '" + str(bn) + "' already exists.", \
                            where="TransmissionCurves (reading data from file)", \
                            solution="Use unique band names."))
   self.bands[bn] = np.zeros( (len(lines), 2) )
   for i in smo.range(len(lines)):
     self.bands[bn][i,::] = np.array(lines[i].split(), dtype=float)
Example #26
    def plotTraceHist(self, parm):
        """
      Plots trace and histogram (distribution).

      Parameters
      ----------
      parm : string
          The variable name.
    """
        if not ic.check["matplotlib"]:
            PE.warn(PE.PyARequiredImport("To use 'plotTraceHist', matplotlib has to be installed.", \
                                         solution="Install matplotlib."))
            return
        self._parmCheck(parm)

        plt.subplot(1, 2, 1)
        self.plotTrace(parm)
        plt.subplot(1, 2, 2)
        self.plotHist(parm)
Example #27
 def plotTrace(self, parm, fmt='b-'):
     """
   Plots the trace.
   
   Parameters
   ----------
   parm : string
       The variable name.
   fmt : string, optional
       The matplotlib format string used to plot the trace.
       Default is 'b-'.
 """
     if not ic.check["matplotlib"]:
         PE.warn(PE.PyARequiredImport("To use 'plotTrace', matplotlib has to be installed.", \
                                      solution="Install matplotlib."))
         return
     self._parmCheck(parm)
     plt.plot(self[parm], fmt, label=parm + " trace")
     plt.legend()
Example #28
  def plotTraceHist(self, parm):
    """
      Plots trace and histogram (distribution).

      Parameters
      ----------
      parm : string
          The variable name.
    """
    if not ic.check["matplotlib"]:
      PE.warn(PE.PyARequiredImport("To use 'plotTraceHist', matplotlib has to be installed.", \
                                   solution="Install matplotlib."))
      return
    self._parmCheck(parm)
    
    plt.subplot(1,2,1)
    self.plotTrace(parm)
    plt.subplot(1,2,2)
    self.plotHist(parm)
Example #29
 def _readData(self):
     """
     """
     # Determine number of planets in the csv file
     r = csv.DictReader(self._fs.requestFile(
         self.dataFileName, 'rt', gzip.open), delimiter=',')
     for nplanets, x in enumerate(r):
         pass
     # Reinitialize csv file
     r = csv.DictReader(self._fs.requestFile(
         self.dataFileName, 'rt', gzip.open), delimiter=',')
     # Determine data types for numpy recarray from columns
     # and initialize
     dtype = [(self._columns[x][0], self._columns[x][3])
              for x in range(len(self._columns))]
     self.data = np.recarray((nplanets + 1,), dtype=dtype)
     colnotfilled = [self._columns[x][0]
                     for x in six.iterkeys(self._columns)]
     for i, x in enumerate(r):
         for k, v in six.iteritems(x):
             # Remove hash and white spaces from column names
             k = k.strip('#')
             k = k.strip()
             # Translate csv column name into internal column name
             if k in self._ident:
                 key = self._ident[k]
             else:
                 key = k
             if len(v) == 0:
                 v = None
             # Accept only expected fields
             if not key in self.data.dtype.names:
                 continue
             try:
                 colnotfilled.remove(key)
             except ValueError:
                 # Ignoring already removed value
                 pass
             self.data[key][i] = v
     if len(colnotfilled) > 0:
         PE.warn(PE.PyAAlgorithmFailure("Not all columns could be filled with data. The following columns must not be used: " + ", ".join(colnotfilled),
                                        where="ExoplanetEU",
                                        solution="The format of the data base must be checked. Please consider issuing a bug report via github."))
Example #30
def decomposeFilename(fn):
    """
    Decompose PHOENIX filename.
    
    Parameters
    ----------
    fn : string
        The filename.
    
    Returns
    -------
    Parameters : dictionary
        A dictionary with the following keys:
         - teff: The effective temperature in K
         - logg: Log(g [cm/s**2])
         - met: Metallicity (M/H)
         - fn: Complete filename
         - notParsed: Part of the filename not parsed for teff, logg, and metallicity.
        Note that `None` is returned if the filename could not be
        parsed.
  """
    r = re.match("lte(\d+)([+-])(\d+\.\d+)([-+]\d+\.\d+)(.*)", fn)
    if r is None:
        PE.warn(PE.PyAValError("Cannot decompose PHOENIX filename: " +
                               str(fn)))
        return None
    result = {}
    result["fn"] = fn
    teff = int(r.group(1))
    if teff < 1000:
        result["teff"] = teff * 100
    else:
        result["teff"] = teff
    result["logg"] = float(r.group(3))
    if r.group(2) == "+":
        result["logg"] *= -1.0
    result["met"] = float(r.group(4))
    result["notParsed"] = r.group(5)
    return result
Example #31
 def log10ccfNoyes(self, bv, **kwargs):
   """
     Ccf conversion factor according to Noyes et al. 1984.
     
     Parameters
     ----------
     bv : float
         The B-V color [mag].
     
     Returns
     -------
     log10(Ccf) : float
         The logarithm of the conversion factor.
   """
   if ("lc" in kwargs) and (kwargs["lc"] != "ms"):
     PE.warn(PE.PyAValError("The Ccf conversion factor by Noyes et al. 1984 is only valid for main-sequence stars.", \
             solution="Use the conversion factor by Rutten 1984 for giants."))
   logccf = 1.13*bv**3 - 3.91*bv**2 + 2.84*bv - 0.47
   if bv <= 0.63:
     x = 0.63 - bv
     dlogccf = 0.135*x - 0.814*x**2 + 6.03*x**3
     logccf += dlogccf
   return logccf
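A minimal standalone sketch (not part of the example above) evaluating the Noyes et al. 1984 conversion factor, including the correction applied below B-V = 0.63; bv = 0.55 is an assumed input.

# Illustrative evaluation of the relation coded above; bv = 0.55 is an assumed value.
bv = 0.55
logccf = 1.13*bv**3 - 3.91*bv**2 + 2.84*bv - 0.47
x = 0.63 - bv
logccf += 0.135*x - 0.814*x**2 + 6.03*x**3
print("log10(Ccf) = %.3f" % logccf)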
Example #32
 def log10ccfNoyes(self, bv, **kwargs):
     """
   Ccf conversion factor according to Noyes et al. 1984.
   
   Parameters
   ----------
   bv : float
       The B-V color [mag].
   
   Returns
   -------
   log10(Ccf) : float
       The logarithm of the conversion factor.
 """
     if ("lc" in kwargs) and (kwargs["lc"] != "ms"):
         PE.warn(PE.PyAValError("The Ccf conversion factor by Noyes et al. 1984 is only valid for main-sequence stars.", \
                 solution="Use the conversion factor by Rutten 1984 for giants."))
     logccf = 1.13 * bv**3 - 3.91 * bv**2 + 2.84 * bv - 0.47
     if bv <= 0.63:
         x = 0.63 - bv
         dlogccf = 0.135 * x - 0.814 * x**2 + 6.03 * x**3
         logccf += dlogccf
     return logccf
Example #33
 def log10ccfRutten(self, bv, lc="ms"):
     """
   Ccf conversion factor from Rutten 1984 (Eqs. 10a and 10b).
   
   Parameters
   ----------
   bv : float
       B - V color [mag].
   lc : string, {ms, g}, optional
       Specifies whether the relation for
       main-sequence (ms) or giant (g) stars
       shall be evaluated.
   
   Returns
   -------
   log10(Ccf) : float
       The logarithm of the conversion factor.
 """
     if lc == "ms":
         if (bv < 0.3) or (bv > 1.6):
             PE.warn(
                 PE.PyAValError(
                     "B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.6 "
                     + "for main-sequence stars. You specified: " +
                     str(bv) + "."))
         logccf = 0.25 * bv**3 - 1.33 * bv**2 + 0.43 * bv + 0.24
     elif lc == "g":
         if (bv < 0.3) or (bv > 1.7):
             PE.warn(
                 PE.PyAValError(
                     "B-V color out of range. Rutten 1984 states a validity range of 0.3 <= b-v <= 1.7 "
                     + "for giant stars. You specified: " + str(bv) + "."))
         logccf = -0.066 * bv**3 - 0.25 * bv**2 - 0.49 * bv + 0.45
     else:
         raise(PE.PyAValError("No such luminosity class: " + str(lc), \
                              solution="Specify either 'ms' or 'g'."))
     return logccf
Example #34
    def _checkOnline(self, url="http://www.google.com", raiseNOC=True):
        """
        Check whether network can be reached.

        Parameters
        ----------
        url : string, optional
            The reference URL tried to be reached to check
            network availability.
        raiseNOC : boolean, optional
            If True (default), raise an exception if network
            cannot be reached.

        Returns
        -------
        Online flag : boolean
            True if network could be reached.
        """
        if not pyaRC.supposedOnline():
            PE.warn(PE.PyANetworkError("Internet connection disallowed by pyaRC.", \
                                       solution="Use 'goOnline' method in pyaRC."))
            return False

        online = True
        try:
            _ = urllib.request.urlopen(url)
        except Exception as e:
            # No connection seems to be available
            online = False
            if raiseNOC:
                raise (PE.PyANetworkError(
                    "You seem to be offline. Could not reach URL: '" +
                    str(url) + "'.",
                    solution="Get online",
                    tbfe=e))
        return online
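A standalone sketch of the same reachability test outside the class, using only urllib; the default URL is the one used above and the helper name is made up.

import urllib.request

def is_online(url="http://www.google.com"):
    # Same idea as _checkOnline above, without the pyaRC and exception machinery
    try:
        urllib.request.urlopen(url, timeout=5)
        return True
    except Exception:
        return False

print(is_online())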
Example #35
 def load(self, fn, constClobber=False):
     """
   Load constants file.
   
   Parameters
   ----------
   fn : string
       The file name.
   constClobber : boolean, optional
       If True, existing constants will be overwritten.
 """
     config = _ConfigParser.RawConfigParser()
     config.read(fn)
     for s in config.sections():
         nc = None
         try:
             nc = {}
             nc["symbol"] = config.get(s, "symbol")
             nc["descr"] = config.get(s, "descr")
             nc["valueSI"] = config.get(s, "valueSI")
             nc["errSI"] = config.get(s, "errSI")
             nc["units"] = {}
             for u in _systems:
                 nc["units"][u] = config.get(s, "unit" + u)
             nc["source"] = config.get(s, "source")
         except _ConfigParser.NoOptionError as e:
             _PE.warn(_PE.PyAValError("The definition of the constant defined in section '" + s + "' of file '" + \
                                      fn + "' is incomplete. Ignoring this constant. Caught the error:\n" + \
                                      str(e)))
             nc = None
         if nc is not None:
             if (nc["symbol"] in self.inventory) and (not constClobber):
                 raise(_PE.PyANameClash("A constant with the symbol '" + nc["symbol"] + "' is already defined.", \
                                        solution=["Choose another symbol.", "Set the `constClobber` flag to True to overwrite existing definitions of constants."]))
             self.inventory[nc["symbol"]] = nc
         self._inventoryToScope()
Example #36
 def load(self, fn, constClobber=False):
   """
     Load constants file.
     
     Parameters
     ----------
     fn : string
         The file name.
     constClobber : boolean, optional
         If True, existing constants will be overwritten.
   """
   config = _ConfigParser.RawConfigParser()
   config.read(fn)
   for s in config.sections():
     nc = None
     try:
       nc = {}
       nc["symbol"]  = config.get(s, "symbol")
       nc["descr"]   = config.get(s, "descr")
       nc["valueSI"] = config.get(s, "valueSI")
       nc["errSI"]   = config.get(s, "errSI")
       nc["units"] = {}
       for u in _systems:
         nc["units"][u] = config.get(s, "unit"+u)
       nc["source"] = config.get(s, "source")
     except _ConfigParser.NoOptionError as e:
       _PE.warn(_PE.PyAValError("The definition of the constant defined in section '" + s + "' of file '" + \
                                fn + "' is incomplete. Ignoring this constant. Caught the error:\n" + \
                                str(e)))
       nc = None
     if nc is not None:
       if (nc["symbol"] in self.inventory) and (not constClobber):
         raise(_PE.PyANameClash("A constant with the symbol '" + nc["symbol"] + "' is already defined.", \
                                solution=["Choose another symbol.", "Set the `constClobber` flag to True to overwrite existing definitions of constants."])) 
       self.inventory[nc["symbol"]] = nc
     self._inventoryToScope()
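A sketch of a constants file that the loader above could parse. The section name and numbers are made up, but the keys mirror those read by `load`; one "unit<system>" key is assumed per entry in `_systems` (taken here to be SI and cgs).

import configparser

text = """
[Constant_c]
symbol = c
descr = Speed of light in vacuum
valueSI = 299792458.0
errSI = 0.0
unitSI = m/s
unitcgs = cm/s
source = SI definition
"""

config = configparser.RawConfigParser()
config.read_string(text)
for s in config.sections():
    print(s, config.get(s, "symbol"), config.get(s, "valueSI"), config.get(s, "unitSI"))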
Example #37
    def fit(self,
            m,
            ds,
            objf="chisqr",
            initDelta=None,
            maxIter=1e4,
            callback=None,
            nmCritLim=None):
        """
        Carry out the model fit.
        
        After the iteration, the `iterCount` attribute contains the
        number of iterations. The `maxIterReached` attribute flag is
        False, if the maximum number of iterations has not been reached
        and True otherwise. 
        
        Parameters
        ----------
        m : Instance of OneDFit
            The model to be fitted.
        ds : Instance of FufDS
            The data.
        objf : string
            The objective function to be used. Possible
            choices are "chisqr" (default), "sqrdiff", and
            "cash79".
        initDelta : dictionary, optional
            A dictionary mapping parameter names to the
            initial step width. This can be very useful if
            the starting values are zero or very small. The
            step defined here will be added to the starting
            value to construct the simplex.
        maxIter : int, optional
            The maximum number of iterations. The default is
            10000.
        nmCritLim : float, optional
            Critical value for stopping criterion. The default is
            1e-8.
        callback : callable, optional
            If not None, "callback" will be called with the
            three parameters: number of iteration (int), current
            best parameter set (array), and current simplex (array).
        
        Returns
        -------
        Best-fit values : dictionary
            Maps parameter name to the best-fit value.
        """
        # Stopping criterion
        if not nmCritLim is None:
            self.nmCritLim = nmCritLim
        # Number of free parameters
        self._n = m.numberOfFreeParams()
        # Set objective function
        m.setObjectiveFunction(objf)
        # Assign data object
        m._fufDS = ds
        # Names of free parameters (order guaranteed)
        self._fpns = m.freeParamNames()
        # Initial simplex
        self._initSimplex(m, initDelta)
        # MaxIter flag
        self.maxIterReached = False

        self.iterCount = 0
        while (not self._stopCrit()) and (self.iterCount < maxIter):
            self.iterCount += 1
            self._step(m)
            if callback is not None:
                l = np.argmin(self._yi)
                callback(self.iterCount, self._simplex[l, ::], self._simplex)

        # Find the optimum parameter set
        l = np.argmin(self._yi)
        m.pars.setFreeParams(self._simplex[l, ::])
        # Evaluate model so that model attribute holds the best match
        m.evaluate(ds.x)

        if self.iterCount == maxIter:
            self.maxIterReached = True
            PE.warn(
                PE.PyAAlgorithmFailure(
                    "The maximum number of iterations has been reached.\n" +
                    "The fit may be inappropriate.",
                    where="NelderMead",
                    solution=[
                        "Increase number of iterations.",
                        "Change starting values.",
                        "Change algorithm parameters (e.g., alpha, beta, gamma)."
                    ]))
        # Return a dictionary with the best-fit parameters
        return dict(zip(self._fpns, self._simplex[l, ::]))
Example #38
 def steppar(self, pars, ranges, extractFctVal=None, quiet=False):
     """
   Allows one to step a parameter through a specified range.
   
   This function steps the specified parameters through the given
   ranges. During each step, all free parameters, except for those
   which are stepped, are fitted. The resulting contours allow
   one to estimate confidence intervals.
   
   This command uses the fitting parameters specified on a call
   to the `fit` method. In particular, the same values for `x`,
   `y`, `yerr`, `minAlgo`, `miniFunc`, `fminPars`, and `fminArgs`
   are used.
   
   .. note:: You need to have carried out a fit before you can
             use `steppar`.
   
   Parameters
   ----------
   pars : string or list of strings
       The parameter(s) which are to be stepped.
   ranges : dictionary
       A dictionary mapping parameter name to range specifier.
       The latter is a list containing [lower limit, upper limit,
       no. of steps, 'lin'/'log']. The fourth entry, which
       is optional, is a string specifying whether a constant
       linear step size ('lin') or a constant logarithmic
       step size ('log') shall be used.
   quiet : boolean, optional
       If True, output will be suppressed.
   extractFctVal : callable, optional
       A function specifying how the function value is extracted
       from the fit result. If standard settings are used, the
       default of None is adequate.
   
   Returns
   -------
   Parameter steps : list
       The return value is a list of lists. Each individual list
       contains the values of the stepped parameters as the first
       entries (same order as the input `pars` list), the
       following entry is the value of the objective function
       (e.g., chi square), and the last entry is a tuple
       containing the indices of the steps of the parameter values.
       This last entry can be useful to convert the result into
       an array to plot, e.g., contours.
 """
     if not self._stepparEnabled:
         raise(PE.PyAOrderError("Before you can use steppar, you must call a function, which enables its use (e.g., `fit`).", \
               solution="Call the `fit` method first and then try again."))
     if isinstance(pars, six.string_types):
         # Make it a list
         pars = [pars]
     # Check parameter consistency
     for p in pars:
         # Check existence
         tmp = self[p]
         if not p in ranges:
             raise(PE.PyAValError("There is no range for parameter: " + p, \
                                  solution="Specify a range; e.g., {'xyz':[0.5,1.9,20,'lin']}"))
     # Function to extract function value from the fit result
     if extractFctVal is None:
         self._extractFctVal = self.__extractFunctionValue
     else:
         if not hasattr(extractFctVal, "__call__"):
             raise(PE.PyAValError("`extractFctVal` needs to be callable!", \
                                  solution="Specify a function here or try to use None."))
         self._extractFctVal = extractFctVal
     # Set up ranges
     rs = []
     for par in pars:
         r = ranges[par]
         if len(r) > 4:
             # Use the axis as given
             rs.append(r)
             continue
         if len(r) < 4:
             # By default, use linear spacing
             mode = 'lin'
         else:
             if not isinstance(r[3], six.string_types):
                 raise(PE.PyAValError("If the range has 4 entries, the fourth must be a string specifying the mode.", \
                                      solution="Use either 'lin' or 'log' as the fourth entry."))
             mode = r[3]
         if mode == 'lin':
             rs.append(numpy.linspace(r[0], r[1], r[2]))
         elif mode == 'log':
             # Calculate factor
             s = numpy.power((r[1] / r[0]), 1.0 / r[2])
             rs.append(r[0] * numpy.power(s, numpy.arange(r[2])))
         else:
             raise(PE.PyAValError("Unknown mode: " + str(mode), \
                                  solution="Use either 'lin' or 'log'."))
     # Save state of object
     saveObj = self.saveState()
     saveFitResult = self.fitResult
     saveModels = {}
     for k in six.iterkeys(self._compos):
         saveModels[k] = self.models[k].copy()
     # Freeze parameters, which are affected
     self.freeze(pars)
     # Store result
     result = []
     # Loop over the axes
     nli = pyaC.NestedLoop(list(map(len, rs)))
     for index in nli:
         for i, p in enumerate(pars):
             self[p] = rs[i][index[i]]
         # Fit using previous setting
         # Note that mAA is dispensable, because self.minAlgo will be a callable.
         self.fit(None, None, minAlgo=self.minAlgo, miniFunc=self.miniFunc, \
                  *self.fminPars, **self.fminArgs)
         # Build up result
         ppr = []
         for par in pars:
             ppr.append(self[par])
         try:
             ppr.append(self._extractFctVal(self.fitResult))
         except Exception as e:
             PE.warn(PE.PyAValError("The call to the `extractFctVal` function failed. Using full output." + \
                                    "\n  Original message: " + str(e)))
             ppr.append(self.fitResult)
         if not quiet:
             print("Result from last iteration:")
             print("  ", ppr)
         ppr.append(index)
         result.append(ppr)
     # Restore old state of object
     self.restoreState(saveObj)
     self.fitResult = saveFitResult
     for k in six.iterkeys(self._compos):
         self.models[k] = saveModels[k]
     return result
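A small sketch of how the 'lin' and 'log' range specifiers above turn into parameter grids; the range [0.5, 1.9, 20] is the one used in the docstring example.

import numpy

r = [0.5, 1.9, 20]
lin_grid = numpy.linspace(r[0], r[1], r[2])
# Logarithmic spacing as built above: a constant factor between consecutive steps
s = numpy.power(r[1] / r[0], 1.0 / r[2])
log_grid = r[0] * numpy.power(s, numpy.arange(r[2]))
print(lin_grid[:3], log_grid[:3])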
Example #39
def turnIntoRebin(CO):
    """
    Turn a "normal" fitting object into rebinning fitting object.

    This function accepts a class object representing a model and
    returns another class object extended by the rebinning functionality.

    Parameters
    ----------
    CO : A class object
        The class object describing the model to use
        rebinning.

    Returns
    -------
    Rebinned model : Fitting object
        Another class object extended by the rebinning functionality.
    """

    # Check for evaluate
    if not hasattr(CO, "evaluate"):
        raise(PE.PyANotImplemented("The class object has no evaluate method. Is it really a model?",
                                   where="turnIntoRebin"))
    # Check for other properties and methods
    for p in ["setRebinArray_Ndt", "rebinTimes", "rebinIdent"]:
        if hasattr(CO, p):
            PE.warn(PE.PyANameClash("Class object already has attribute: `" + p + "`. Potentially harmful..",
                                    solution="Check class object, maybe rename attribute", where="turnIntoRebin"))

    class _ModelRebin(CO):
        """
        Base class providing rebinning functionality.

        The model is evaluated at more points than actually needed.
        Several points are then averaged to obtain a "binned" model,
        which can, for example, account for finite integration times
        in observations.

        Attributes
        ----------
        rebinTimes : array
            Defines the abscissa values at which
            to evaluate the model to be rebinned afterwards.
        rebinIdent : dict
            A dictionary associating bin number (in the unbinned
            model) with a list holding the bins in `rebinTimes`,
            which are to be averaged to obtain the binned model.
        """

        def __init__(self, *args, **kwargs):
            CO.__init__(self, *args, **kwargs)
            self.rebinTimes = None
            self.rebinIdent = None

        def setRebinArray_Ndt(self, time, N, dt):
            """  
            Defines the overbinning parameters (`rebinTimes`, `rebinIdent`).

            It is assumed that the time points given in the `time`
            array refer to the center of the time bins and every bin has
            length `dt`. The bins are then subdivided into `N` subintervals;
            the center of each such subinterval becomes
            a point in the overbinned time axis (`rebinTimes`).

            Parameters
            ----------
            time : array
                The time axis of the "observed" (not overbinned)
                transit light-curve.
            N : int
                The number of points into which to subdivide each time
                bin of length `dt`.
            dt : float
                The length of each time bin (on the original not
                oversampled time axis).
            """

            self.rebinTimes = numpy.zeros(time.size * N)
            self.rebinIdent = {}
            for i in smo.range(time.size):
                self.rebinTimes[i * N:(i + 1) * N] = \
                    (time[i] - dt / 2.0) + (numpy.arange(N) * dt) / \
                    float(N) + dt / float(N) / 2.0
                self.rebinIdent[i] = list(range(i * N, (i + 1) * N))

        def evaluate(self, x):
            """
            Calculate the model.

            Parameters
            ----------
            x : array
                The abscissa values.

            Returns
            -------
            model : array
                The binned model.

            Notes
            -----
            This function calculates the model at those time points
            specified by the `rebinTimes` property and saves the result in the
            class property `unbinnedModel`. Then it bins
            according to the definitions in `rebinIdent` and saves the resulting model
            in the `binnedModel` property.
            """
            if (self.rebinTimes is None) or (self.rebinIdent is None):
                raise(PE.PyAValError("Rebinning parameters (properties rebinTimes and/or rebinIdent) not appropriately defined.",
                                     solution=["Use setRebinArray_Ndt method.", "Define the properties by accessing them directly."]))
            # Calculate the model using current parameters at the time points
            # defined in the `rebinTimes` array
            self.unbinnedModel = CO.evaluate(self, self.rebinTimes)
            # Build up rebinned model
            self.binnedModel = numpy.zeros(x.size)
            for i in smo.range(x.size):
                self.binnedModel[i] = numpy.mean(
                    self.unbinnedModel[self.rebinIdent[i]])
            # Return the resulting model
            return self.binnedModel

    return _ModelRebin
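A standalone sketch of the overbinned time axis built by setRebinArray_Ndt above, for a made-up time series of three bins of length dt = 1.0, each oversampled by N = 4.

import numpy

time, N, dt = numpy.array([0.5, 1.5, 2.5]), 4, 1.0

rebinTimes = numpy.zeros(time.size * N)
rebinIdent = {}
for i in range(time.size):
    # N subinterval centers inside the bin of width dt centered on time[i]
    rebinTimes[i*N:(i+1)*N] = (time[i] - dt/2.0) + (numpy.arange(N)*dt)/float(N) + dt/float(N)/2.0
    rebinIdent[i] = list(range(i*N, (i+1)*N))

print(rebinTimes)   # 0.125, 0.375, 0.625, 0.875, 1.125, ..., 2.875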
Example #40
  def __init__(self):
    self.dpath = None
    # Try to locate home directory via environment variable
    self.homeDir = os.getenv("HOME")
    if self.homeDir is None:
      # Try alternative solution
      self.homeDir = os.path.expanduser("~")
    if self.homeDir is None:
      PE.warn(PE.PyAValError("Could not find a home directory. Data directory cannot be set up.", \
                             solution="Set 'HOME' environment variable."))
      return
    # Hard-coded name of file, which will only contain path to "real" configuration.
    # This is done, because at one point, information must be stored, which can be
    # found without any extra information.
    self.configWhere = os.path.join(self.homeDir, ".pyaConfigWhere")
    if not os.path.isfile(self.configWhere):
      # Ask user whether she/he wants to configure data path
      print(" -------------- Configure PyA's data path ------------------------")
      print("  Why do you get this message?")
      print("    Most probably PyA tries to save permanent data on your")
      print("    system for the first time. This may be the case when, e.g.,")
      print("    a table is downloaded and saved. These data are stored")
      print("    under PyA's 'data path', i.e., a regular directory on")
      print("    your disk, which PyA can access.")
      print("  Is anything else saved to the disk?") 
      print("    The location of the data path")
      print("    directory will be written to the file '.pyaConfigWhere' in")
      print("    your home directory, so that you need not provide it")
      print("    again.")
      print("  Can I delete it?")
      print("    Yes, you are free to delete anything of this at any time.")
      print("  What is a valid data path?")
      print("    You may provide any existing or non-existing path, although")
      print("    it is strongly encouraged to use a fresh directory to avoid any")
      print("    confusion. The given path needs to be absolute, to uniquely")
      print("    identify it.")
      print(" -----------------------------------------------------------------")
      print("")
      if self._ynQuestion("Configure PyA's data path now (y/n)? "):
        print()
        print("  Configure data path now.")
        print()
      else:
        print()
        print("  Configure data path later.")
        print()
        return
      
      try:
        while True:
          print("Please provide a directory where PyA can store data (may already exist):")
          print("Press enter to accept default; use 'exit' to abort.")
          suggestion = os.path.join(self.homeDir, "PyAData")
          dpath = smo.input("  Path (default = "+suggestion+"): ")
          if dpath.lower().strip() == "exit":
            print("  Process aborted. No data path configured.")
            dpath = None
            break
          if dpath == "":
            dpath = suggestion
          # Check whether data path is an absolute path
          if not os.path.isabs(dpath):
            print("The path you specified (" + dpath + ") is not absolute, but it needs to be.")
            dpatha = os.path.abspath(dpath)
            print("Did you intend to use this path:")
            print("  ", dpatha)
            if self._ynQuestion("(y/n) ?"):
              dpath = dpatha
              break
            else:
              # Retry to ask for path
              continue
          if not os.path.isdir(dpath):
            # Try to create the directory
            os.makedirs(dpath)
            break
          else:
            print("Directory '"+dpath+"' exists.")
            if self._ynQuestion("Use as PyA data directory (you may want to use, e.g., a subdirectory instead) (y/n) "):
              if not os.access(dpath, os.W_OK):
                # Exists and shall be used. Is it writable?
                print("")
                print("The directory '" + dpath + "' is not writable! Therefore, it cannot be used.")
                print("")
                continue
            break

        if dpath is None:
          # Process has been aborted
          print("No valid data path configured.")
          return
        
        self.dpath = dpath
        
        # There is not yet a file '.pyaConfigWhere', which stores the place to look
        # for the real configure-file and data files.
        with open(self.configWhere, 'wt') as f:
          f.write(self.dpath)
        
        # All could be created appropriately
        print("PyA data path configured successfully. Using path: ")
        print("  " + self.dpath)
        
      except Exception as e:
        PE.warn(PE.PyAValError("The directory: '" + str(dpath) + "' " + \
                               " could not be created. Data directory cannot be set up.\n" + \
                               "Error message: " + str(e)))
        os.remove(self.configWhere)
        return
    
    else:
      # There is a .pyaConfigWhere file.
      try:
        self.dpath = open(self.configWhere).readline()
      except Exception as e:
        PE.warn(PE.PyAValError("The file "+self.configWhere+ \
                               " exists, but could not be opened for reading.", \
                               solution="Check permissions of the file.", \
                               addInfo="Error message: " + str(e)))
        self.dpath = None
      try:
        self.dpath = os.path.realpath(self.dpath)
      except Exception as e:
        PE.warn(PE.PyAValError("Obtained the path '" + self.dpath + "' from .pyaConfigWhere, but" + \
                               "could not expand soft-links etc. (using os.path.realpath).", \
                               solution="Check the path written to .pyaConfigWhere in home directory.", \
                               addInfo="Error message: " + str(e)))
        self.dpath = None
      if not os.path.isdir(self.dpath):
        PE.warn(PE.PyAValError("The directory " + self.dpath + "' " + \
                               "was specified as data path in file '" + self.configWhere + "', " + \
                               "but it does no longer exist! No data can be stored.", \
                               solution=["Delete file '" + self.configWhere + "' to allow reconfiguration.",
                                         "Modify the file content by giving the (modified) location " + \
                                         "of the data directory."]))
        self.dpath = None
        return
    self.__createConfigStub()
    # Open the "root" configuration file for later access
    if PyAConfig._rootConfig is None:
      PyAConfig._rootConfig = ConfigParser.RawConfigParser()
      PyAConfig._rootConfig.read(os.path.join(self.dpath, "pyaConfig.cfg"))
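A minimal sketch of the pointer-file lookup used above, for illustration only: '.pyaConfigWhere' in the home directory stores the location of the actual data directory.

import os

cfgWhere = os.path.join(os.path.expanduser("~"), ".pyaConfigWhere")
if os.path.isfile(cfgWhere):
    with open(cfgWhere) as f:
        # Resolve soft links, as done above via os.path.realpath
        dpath = os.path.realpath(f.readline().strip())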
Example #41
# -*- coding: utf-8 -*-
from __future__ import print_function
from PyAstronomy.pyaC import pyaErrors as PE
from PyAstronomy.pyaC import ImportCheck
import sys

# Check Python version
_majV, _minV = sys.version_info[0:2]
if (_minV < 7) and (_majV == 2):
  PE.warn("funcFit needs 2.7.x or greater. See documentation (Prerequisites) for explanation.")
  
ic = ImportCheck(["numpy", "scipy", "pymc", "matplotlib", "matplotlib.pylab", "pyfits", "emcee", "progressbar"])

# Get out if numpy not present
if not ic.check["numpy"]:
  raise(PE.PyARequiredImport("Numpy cannot be imported.", solution="Install numpy (see http://numpy.scipy.org/, you probably should also install SciPy).", \
                             addInfo="The numpy package provides array support for Python and is indispensable in many scientific applications."))

# Check whether fitting modules can be imported
_scoImport = ic.check["scipy"]
_pymcImport = ic.check["pymc"]
_mplImport = ic.check["matplotlib"]

from PyAstronomy.funcFit.utils import *
from .modelRebin import turnIntoRebin, _ModelRebinDocu
from .onedfit import *
from .gauss1d import *
from .gauss2d import *
from .params import *
from .sinexp1d import *
from .polyModel import *
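A minimal sketch of the guarded-import pattern used in this module; only the list-of-modules call and the `check` dictionary shown above are assumed.

from PyAstronomy.pyaC import ImportCheck

ic = ImportCheck(["numpy", "scipy"])
if ic.check["scipy"]:
    import scipy  # imported only if available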
Example #42
 def setitem(specifier, value):
     if specifier == "T0pa":
         PE.warn(T0paE)
     PalLC.__setitem__(self, specifier, value)
Example #43
def helio_jd(date, ra, dec, B1950=False, TIME_DIFF=False):
    """
    Convert geocentric (reduced) Julian date to heliocentric Julian date
  
    Parameters
    ----------
    date : float
        (Reduced) Julian date (2.4e6 subtracted)
    ra, dec : float
        Right ascension and declination in degrees
    B1950 : boolean
        If True, input coordinates are assumed to be given in equinox
        1950 coordinates.
    TIME_DIFF : boolean
        If True, this function returns the time difference
        (heliocentric JD - geocentric JD ) in seconds
    
    Returns
    -------
    HJD : float
        The heliocentric Julian date.
      
    Notes
    -----
    
    .. note:: This function was ported from the IDL Astronomy User's Library.
    
    :IDL - Documentation:
    
    NAME:
         HELIO_JD
    PURPOSE:
         Convert geocentric (reduced) Julian date to heliocentric Julian date
    EXPLANATION:
         This procedure correct for the extra light travel time between the Earth 
         and the Sun.
    
          An online calculator for this quantity is available at 
          http://www.physics.sfasu.edu/astro/javascript/hjd.html
    CALLING SEQUENCE:
          jdhelio = HELIO_JD( date, ra, dec, /B1950, /TIME_DIFF)
    
    INPUTS
          date - reduced Julian date (= JD - 2400000), scalar or vector, MUST
                  be double precision
          ra,dec - scalars giving right ascension and declination in DEGREES
                  Equinox is J2000 unless the /B1950 keyword is set
    
    OUTPUTS:
          jdhelio - heliocentric reduced Julian date.  If /TIME_DIFF is set, then
                    HELIO_JD() instead returns the time difference in seconds
                    between the geocentric and heliocentric Julian date.
    
    OPTIONAL INPUT KEYWORDS 
          /B1950 - if set, then input coordinates are assumed to be in equinox 
                   B1950 coordinates.
          /TIME_DIFF - if set, then HELIO_JD() returns the time difference
                   (heliocentric JD - geocentric JD ) in seconds 
    
    EXAMPLE:
          What is the heliocentric Julian date of an observation of V402 Cygni
          (J2000: RA = 20 9 7.8, Dec = 37 09 07) taken June 15, 1973 at 11:40 UT?
    
          IDL> juldate, [1973,6,15,11,40], jd      ;Get geocentric Julian date
          IDL> hjd = helio_jd( jd, ten(20,9,7.8)*15., ten(37,9,7) )  
    
          ==> hjd = 41848.9881
    
    Wayne Warren (Raytheon ITSS) has compared the results of HELIO_JD with the
    FORTRAN subroutines in the STARLINK SLALIB library (see 
    http://star-www.rl.ac.uk/).    
                                                     Time Diff (sec)
         Date               RA(2000)   Dec(2000)  STARLINK      IDL
    
    1999-10-29T00:00:00.0  21 08 25.  -67 22 00.  -59.0        -59.0
    1999-10-29T00:00:00.0  02 56 33.4 +00 26 55.  474.1        474.1
    1940-12-11T06:55:00.0  07 34 41.9 -00 30 42.  366.3        370.2
    1992-02-29T03:15:56.2  12 56 27.4 +42 10 17.  350.8        350.9
    2000-03-01T10:26:31.8  14 28 36.7 -20 42 11.  243.7        243.7
    2100-02-26T09:18:24.2  08 26 51.7 +85 47 28.  104.0        108.8
    PROCEDURES CALLED:
          bprecess, xyz, zparcheck
    
    REVISION HISTORY:
          Algorithm from the book Astronomical Photometry by Henden, p. 114
          Written,   W. Landsman       STX     June, 1989 
          Make J2000 default equinox, add B1950, /TIME_DIFF keywords, compute
          variation of the obliquity      W. Landsman   November 1999
  """

    # Because XYZ uses default B1950 coordinates, we'll convert everything to B1950

    if date > 2.4e6:
        PE.warn(
            PE.PyAValError("The given Julian Date ( " + str(date) +
                           ") is exceedingly large far a reduced JD.",
                           solution="Did you forget to subtract 2.4e6?",
                           where="helio_jd"))

    if not B1950:
        bpresult = bprecess(ra, dec)
        ra1 = bpresult[0]
        dec1 = bpresult[1]
    else:
        ra1 = ra
        dec1 = dec

    radeg = 180.0 / numpy.pi
    # I think, this is not needed in Python, even at this stage...
    # zparcheck,'HELIO_JD',date,1,[3,4,5],[0,1],'Reduced Julian Date'

    delta_t = (date - 33282.42345905) / 36525.0
    epsilon_sec = 44.836 - 46.8495 * delta_t - 0.00429 * delta_t**2 + 0.00181 * delta_t**3
    epsilon = (23.433333 + epsilon_sec / 3600.0) / radeg
    ra1 = ra1 / radeg
    dec1 = dec1 / radeg

    x, y, z, tmp, tmp, tmp = xyz(date)

    # Find extra distance light must travel in AU, multiply by 1.49598e13 cm/AU,
    # and divide by the speed of light, and multiply by 86400 second/year

    time = -499.00522*( cos(dec1)*cos(ra1)*x + \
                    (tan(epsilon)*sin(dec1) + cos(dec1)*sin(ra1))*y)

    if TIME_DIFF:
        return time
    else:
        return (date + time / 86400.0)
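A hedged usage sketch for helio_jd (coordinates and date are made up; the function and its dependencies are assumed to be imported as in the module above, and a reduced JD, i.e., JD - 2.4e6, is expected):

ra, dec = 302.28, 37.15     # made-up J2000 coordinates [deg]
rjd = 56123.5               # made-up reduced Julian date
hjd = helio_jd(rjd, ra, dec)
dt = helio_jd(rjd, ra, dec, TIME_DIFF=True)   # (heliocentric - geocentric) JD in seconds
print(hjd, dt)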
Example #44
 def fit(self, m, ds, objf="chisqr", initDelta=None, maxIter=1e4, callback=None, nmCritLim=None):
   """
     Carry out the model fit.
     
     After the iteration, the `iterCount` attribute contains the
     number of iterations. The `maxIterReached` attribute flag is
      False if the maximum number of iterations has not been reached
     and True otherwise. 
     
     Parameters
     ----------
     m : Instance of OneDFit
         The model to be fitted.
     ds : Instance of FufDS
         The data.
     objf : string
         The objective function to be used. Possible
         choices are "chisqr" (default), "sqrdiff", and
         "cash79".
     initDelta : dictionary, optional
         A dictionary mapping parameter names to the
         initial step width. This can be very useful, if
         the starting values are zero or very small. The
         here defined step will be added to the starting
         value to construct the simplex.
     maxIter : int, optional
         The maximum number of iterations. The default is
         10000.
     nmCritLim : float, optional
         Critical value for stopping criterion. The default is
         1e-8.
     callback : callable, optional
         If not None, "callback" will be called with the
         three parameters: number of iteration (int), current
         best parameter set (array), and current simplex (array).
     
     Returns
     -------
     Best-fit values : dictionary
         Maps parameter name to the best-fit value.
   """
   # Stopping criterion
   if not nmCritLim is None:
     self.nmCritLim = nmCritLim
   # Number of free parameters
   self._n = m.numberOfFreeParams()
   # Set objective function
   m.setObjectiveFunction(objf)
   # Assign data object
   m._fufDS = ds
   # Names of free parameters (order guaranteed)
   self._fpns = m.freeParamNames()
   # Initial simplex
   self._initSimplex(m, initDelta)
   # MaxIter flag
   self.maxIterReached = False
   
   self.iterCount = 0
   while (not self._stopCrit()) and (self.iterCount < maxIter):
     self.iterCount += 1
     self._step(m)
     if callback is not None:
       l = np.argmin(self._yi)
       callback(self.iterCount, self._simplex[l,::], self._simplex)
   
   # Find the optimum parameter set
   l = np.argmin(self._yi)
   m.pars.setFreeParams(self._simplex[l,::])
   # Evaluate model so that model attribute holds the best match
   m.evaluate(ds.x)
   
   if self.iterCount == maxIter:
     self.maxIterReached = True
     PE.warn(PE.PyAAlgorithmFailure("The maximum number of iterations has been reached.\n" + \
                                    "The fit may be inappropriate.", \
                                    where="NelderMead", \
                                    solution=["Increase number of iterations.", \
                                              "Change starting values.", \
                                              "Change algorithm parameters (e.g., alpha, beta, gamma)."]))
   # Return a dictionary with the best-fit parameters
   return dict(zip(self._fpns, self._simplex[l,::]))
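A minimal sketch of a callback matching the signature described in the docstring (iteration number, current best parameter vector, current simplex); it could be passed via the `callback` keyword of `fit`:

def reportProgress(iterCount, bestPars, simplex):
    # Print the iteration number and the current best free-parameter vector
    print(iterCount, bestPars)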
   
Example #45
def quadExtreme(x, y, mode="max", dp=(1,1), exInd=None, fullOutput=False, fullPoint=False):
  """
    Find the extreme (minimum or maximum) by means of a parabolic fit.
    
    This function searches for the maximum (or minimum) in the given
    ordinate values, fits a second-order polynomial to the
    surroundings of that point, and, finally, determines the
    thus approximated abscissa value of the extreme point. 
    
    Parameters
    ----------
    x : array
        Abscissa values.
    y : array
        Ordinate values.
    mode : string, {min, max}, optional
        Determines whether a minimum or a maximum is searched
        for. The default is a maximum.
    dp : tuple with two integers, optional
        Determines the width around the extreme point, which will
        be used in the fitting. The default is one point to the
        right and left, i.e., dp = (1,1).
    exInd : integer, optional
        If given, the function will assume that this is the
        index of the extreme point and not search for it.
    fullOutput : boolean, optional
        If True, the output will also cover the points used in
        the fitting and the resulting polynomial.
    fullPoint : boolean, optional
        If True, the return value `epos` will be a tuple holding
        the abscissa and ordinate values of the extreme point.
        The default is False.
    
    Returns
    -------
    epos : float or tuple
        Position of the extreme point. If `fullPoint` is True,
        a tuple with the abscissa and ordinate values of the
        extreme point.
    mi : int
        The index of the extreme point (maximum or minimum).
    xb : array, optional
        Only returned if `fullOutput` is True. The abscissa
        values used in the polynomial fit. Note that the
        x-value of the extreme point has been subtracted.
    yb : array, optional
        Only returned if `fullOutput` is True. The ordinate
        values used in the polynomial fit.
    p : numpy polynomial, optional
        Only returned if `fullOutput` is True. The best-fit
        polynomial. Note that the fit refers to the `xb` axis,
        where the x-value of the extreme point has been
        subtracted.
  """
  if exInd is None:
    if mode == "max":
      mi = np.argmax(y)
    elif mode == "min":
      mi = np.argmin(y)
    else:
      raise(PE.PyAValError("Unknown mode '" + str(mode) + "'.", \
                           solution="Choose 'min' or 'max'"))
  else:
    mi = exInd

  if ((mi-dp[0]) < 0) or ((mi+dp[1]+1) > len(x)):
    raise(PE.PyAValError("The requested fitting range around the extreme is not covered by the data.", \
                         solution=["Adapt 'dp'.", "Do you need to change the mode?"]))
  
  # Cut out the relevant range
  xb = x[mi-dp[0]:mi+dp[1]+1].copy()
  yb = y[mi-dp[0]:mi+dp[1]+1].copy()
  
  # Subtract the x-value of the extreme
  xm = x[mi]
  xb -= xm
  
  # Fit second-order polynomial
  p = np.polyfit(xb, yb, 2)
  
  # Check whether curvature agrees with mode
  signs = {"max":-1, "min":+1}
  if np.sign(p[0]) != signs[mode]:
    PE.warn(PE.PyAValError("The curvature of the fit does not match the requested mode of '" + str(mode) + "'.", \
                           solution="Did you choose the right mode (min/max)?"))
  
  # Location of the extreme
  epos = -p[1]/(2.0 * p[0])
  # Shift back the location
  epos += xm
  
  if fullPoint:
    epos = (epos, -p[1]**2/(4.0*p[0]) + p[2])
  
  if not fullOutput:
    return epos, mi
  else:
    return epos, mi, xb, yb, p
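A usage sketch on synthetic data, assuming quadExtreme and numpy are available as in the snippet above:

import numpy as np

x = np.linspace(0.0, 10.0, 51)
y = -(x - 3.7)**2 + 2.0        # parabola with maximum at x = 3.7
epos, mi = quadExtreme(x, y, mode="max", dp=(2, 2))
print(epos)                    # approximately 3.7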
Example #46
def write1dFitsSpec(fn,
                    flux,
                    wvl=None,
                    waveParams=None,
                    fluxErr=None,
                    header=None,
                    clobber=False,
                    refFileName=None,
                    refFileExt=0):
    """
    Write a 1d spectrum with equidistant binning to a fits file.

    Write a 1d-spectrum to a file. Wavelength axis and header keywords
    are related through the following expression:
    wvl = ((np.arange(N) + 1.0) - CRPIX1) * CDELT1 + CRVAL1,
    where CRPIX1, CDELT1, and CRVAL1 are the
    relevant header keywords.

    The function allows one to specify an existing fits extension, from
    which the header will be cloned. Alternatively, an arbitrary,
    user-defined header may be given.

    Parameters
    ----------
    fn : string
        Filename
    flux : array
        Flux array
    wvl : array, optional
        Wavelength array. Either the wavelength array or the header
        keywords have to be provided (see `waveParams`). 
    waveParams : dict, optional
        Wavelength information required in the fits-header.
        Required keys are CDELT, CRVAL, CRPIX or (CDELT1, CRVAL1, CRPIX1).
    fluxErr : array, optional
        Flux errors. If given, the error will be stored in an additional extension.
    header : dict, optional
        Dictionary with header information to be transferred to the new file.
        Note that the wavelength information will be overwritten
        by the information given to this routine.
        If both a reference file to clone the header from and header parameters
        are given, the cloned header from the reference file will be overwritten and
        extended by the keywords specified here.
    refFileName : string, optional
        Clone header keywords from a reference file.
        Note that the wavelength information will be overwritten by the
        information given to this routine.
    refFileExt : int, optional
        Reference-file extension to be used for cloning the header keywords.
        The default is 0.

    """

    reservedKeyWords = [
        "BITPIX", "SIMPLE", "NAXIS", "NAXIS1", "CDELT1", "CRPIX1", "CRVAL1",
        "EXTEND"
    ]

    if (not _ic.check["pyfits"]) and (not _ic.check["astropy.io.fits"]):
        raise (PE.PyARequiredImport(
            "Could neither import module 'pyfits' and 'astropy.io.fits'.",
            where="write1dFitsSpec",
            solution=
            "Install pyfits: http://www.stsci.edu/institute/software_hardware/pyfits"
        ))

    if wvl is None and waveParams is None:
        raise (PE.PyAValError(
            "The wavelength axis is not defined, i.e., neither \'wvl\' nor \'waveParams\' is specified.",
            where="write1dFitsSpec",
            solution=
            "Provide wavelength array as \'wvl\' or wavelength information as \'waveParams\'."
        ))

    if wvl is not None and waveParams is not None:
        raise (PE.PyAValError(
            "You provided the wavelength axis as \'wvl\' AND wavelength information as \'waveParams\'. Don't know which to use.",
            where="write1dFitsSpec",
            solution=
            "Provide only wavelength array via \'wvl\' or wavelength information via \'waveParams\'."
        ))

    if wvl is not None:
        # Check whether the wavelength axis is equidistant
        dwl = wvl[1:] - wvl[0:-1]
        if (np.max(dwl) - np.min(dwl)) / np.max(dwl) > 1e-6:
            raise (PE.PyAValError(
                "Wavelength axis seems not to be equidistant.",
                where="write1dFitsSpec",
                solution=[
                    "Check wavelength array.",
                    "Consider passing wavelength information via `waveParams`."
                ]))

    if os.path.isfile(fn) and not clobber:
        raise (PE.PyAFileError(
            fn,
            "ae",
            where="write1dFitsSpec",
            solution="File exists; set clobber=True to overwrite."))

    if _ic.check["pyfits"]:
        import pyfits
    else:
        import astropy.io.fits as pyfits

    # Put the flux into the output file.
    # Generate primary HDU
    hdu = pyfits.PrimaryHDU(flux)

    if refFileName:
        # If there is a reference file, its header will be used to populate
        # the header of the file to be written.
        if not os.path.isfile(refFileName):
            raise (PE.PyAFileError(str(refFileName),
                                   "ne",
                                   where="write1dFitsSpec",
                                   solution="Check file name."))
        ff = pyfits.open(refFileName)[refFileExt]

        for k in ff.header.keys():
            if k not in reservedKeyWords and k != "COMMENT":
                try:
                    if len(k) > 8:
                        hdu.header["HIERARCH " + k] = ff.header[k]
                    else:
                        hdu.header[k] = ff.header[k]
                except:
                    PE.warn(
                        PE.PyAValError("Cannot write keyword <" + str(k) +
                                       "> with content " + str(ff.header[k])))

    if header is not None:
        # A user-defined header was specified
        for k in header.keys():
            hdu.header[k] = header[k]

    # Header keywords relevant for wavelength axis
    hk = {}
    # The only allowed type here
    hk["CTYPE1"] = "Linear"
    # If wavelength array is provided, create header keywords:
    if wvl is not None:
        hk["CRPIX1"] = 1
        hk["CRVAL1"] = wvl[0]
        hk["CDELT1"] = float(wvl[-1] - wvl[0]) / (len(wvl) - 1)
    elif waveParams is not None:
        requiredKeys = ["CRPIX", "CRVAL", "CDELT"]
        # Counts whether all required keywords are provided
        count = 0
        for k in requiredKeys:
            subCount = 0
            for p in six.iterkeys(waveParams):
                if p.upper() == k or p.upper() == k + "1":
                    hk[k + "1"] = waveParams[p]
                    subCount += 1
            if subCount == 1:
                count += 1
            else:
                # This occurs if, e.g., both "CRVAL" and "CRVAL1" are present
                raise (PE.PyAValError(
                    "The wavelength parameters provided via \'waveParams\' contain ambiguous parameters. "
                    + "You provided multiple values for " + k + ".",
                    where="write1dFitsSpec",
                    solution=
                    "Check content of \'waveParams\' so that only one value for "
                    + k +
                    " is provided (including lower/upper case and presence of a trailing \'1\'."
                ))
        if count < 3:
            raise (PE.PyAValError(
                "You provided an incomplete set of waveParams.",
                where="write1dFitsSpec",
                solution="Required keywords are CRPIX, CRVAL, and CDELT."))

    # (Over-)Write wavelength-related header information
    for k in hk.keys():
        hdu.header[k] = hk[k]

    if not fluxErr is None:
        # An error on the flux was given
        hdue = pyfits.ImageHDU(fluxErr)
        for k in hk.keys():
            # Add wavelength information to error header
            hdue.header[k] = hk[k]
        hdulist = pyfits.HDUList([hdu, hdue])
    else:
        hdulist = pyfits.HDUList([hdu])

    hdulist.writeto(fn, overwrite=clobber)
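A usage sketch with a made-up file name and spectrum (the wavelength axis must be equidistant, as checked above):

import numpy as np

wvl = np.linspace(5000.0, 5100.0, 1001)   # equidistant wavelength axis [Angstrom]
flux = np.ones_like(wvl)                  # made-up flat spectrum
write1dFitsSpec("spec_demo.fits", flux, wvl=wvl, clobber=True)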
Example #47
def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):
  """
    Calculate barycentric velocity correction.
    
    This function calculates the motion of an observer in
    the direction of a star. In contrast to :py:func:`baryvel`
    and :py:func:`baryCorr`, the rotation of the Earth is
    taken into account.
    
    .. note:: This function was ported from the REDUCE IDL package.
              See Piskunov & Valenti 2002, A&A 385, 1095 for a detailed
              description of the package and/or visit
              http://www.astro.uu.se/~piskunov/RESEARCH/REDUCE/
    
    .. warning:: Contrary to the original implementation the longitude
                 increases toward the East and the right ascension is
                 given in degrees instead of hours. The JD is given as is,
                 in particular, nothing needs to be subtracted.
    
    Parameters
    ----------
    obs_long : float
        Longitude of observatory (degrees, **eastern** direction is positive)
    obs_lat : float
        Latitude of observatory [deg]
    obs_alt : float
        Altitude of observatory [m]
    ra2000 : float
        Right ascension of object for epoch 2000.0 [deg]
    dec2000 : float
        Declination of object for epoch 2000.0 [deg]
    jd : float
        Julian date for the middle of exposure.
    
    Returns
    -------
    Barycentric correction : float
        The barycentric correction accounting for the rotation
        of the Earth, the rotation of the Earth's center around
        the Earth-Moon barycenter, and the motion of the Earth-Moon 
        barycenter around the center of the Sun [km/s].
    HJD : float
        Heliocentric Julian date for middle of exposure.

    Notes
    -----

    :IDL REDUCE - Documentation:


    Calculates heliocentric Julian date, barycentric and heliocentric radial
    velocity corrections from:
    
    INPUT:
    <OBSLON> Longitude of observatory (degrees, western direction is positive)
    <OBSLAT> Latitude of observatory (degrees)
    <OBSALT> Altitude of observatory (meters)
    <RA2000> Right ascension of object for epoch 2000.0 (hours)
    <DE2000> Declination of object for epoch 2000.0 (degrees)
    <JD> Julian date for the middle of exposure
    [DEBUG=] set keyword to get additional results for debugging
    
    OUTPUT:
    <CORRECTION> barycentric correction - correction for rotation of earth,
       rotation of earth center about the earth-moon barycenter, earth-moon 
       barycenter about the center of the Sun.
    <HJD> Heliocentric Julian date for middle of exposure
    
    Algorithms used are taken from the IRAF task noao.astutils.rvcorrect
    and some procedures of the IDL Astrolib are used as well.
    Accuracy is about 0.5 seconds in time and about 1 m/s in velocity.
    
    History:
    written by Peter Mittermayer, Nov 8,2003
    2005-January-13   Kudryavtsev   Made more accurate calculation of the sidereal time.
                                    Conformity with MIDAS compute/barycorr is checked.
    2005-June-20      Kochukhov Included precession of RA2000 and DEC2000 to current epoch

"""
  from PyAstronomy.pyaC import degtorad

  # This reverts the original longitude convention. After this,
  # East longitudes are positive
  obs_long = -obs_long

  if jd < 2.4e6:
    PE.warn(PE.PyAValError("The given Julian Date (" + str(jd) + ") is exceedingly small. Did you subtract 2.4e6?"))

  # Convert JD to Gregorian calendar date
  xjd = jd
  
  year, month, day, ut = tuple(daycnv(xjd))

  # Current epoch
  epoch = year + month/12. + day/365.

  # Precess ra2000 and dec2000 to current epoch, resulting ra is in degrees
  ra = ra2000
  dec = dec2000
  ra, dec = precess(ra, dec, 2000.0, epoch)  

  # Calculate heliocentric julian date
  rjd = jd-2.4e6
  hjd = helio_jd(rjd, ra, dec) + 2.4e6

  # DIURNAL VELOCITY (see IRAF task noao.astutil.rvcorrect)
  # convert geodetic latitude into geocentric latitude to correct
  # for rotation of earth
  dlat = -(11.*60.+32.743)*np.sin(2.0*degtorad(obs_lat)) \
         +1.1633*np.sin(4.0*degtorad(obs_lat)) - 0.0026*np.sin(6.0*degtorad(obs_lat))
  lat = obs_lat + dlat/3600.0

  # Calculate distance of observer from earth center
  r = 6378160.0 * (0.998327073+0.001676438*np.cos(2.0*degtorad(lat)) \
     -0.00000351 * np.cos(4.0*degtorad(lat)) + 0.000000008*np.cos(6.0*degtorad(lat))) \
     + obs_alt

  # Calculate rotational velocity (perpendicular to the radius vector) in km/s
  # 23.934469591229 is the sidereal day in hours for 1986
  v = 2.*np.pi * (r/1000.) / (23.934469591229*3600.)

  # Calculating local mean sidereal time (see astronomical almanach)
  tu = (rjd-51545.0)/36525.0
  gmst = 6.697374558 + ut + \
        (236.555367908*(rjd-51545.0) + 0.093104*tu**2 - 6.2e-6*tu**3)/3600.0
  lmst = idlMod(gmst-obs_long/15, 24)

  # Projection of rotational velocity along the line of sight
  vdiurnal = v*np.cos(degtorad(lat))*np.cos(degtorad(dec))*np.sin(degtorad(ra-lmst*15))

  # BARICENTRIC and HELIOCENTRIC VELOCITIES
  vh, vb = baryvel(xjd,0)

  # Project to line of sight
  vbar = vb[0]*np.cos(degtorad(dec))*np.cos(degtorad(ra)) + vb[1]*np.cos(degtorad(dec))*np.sin(degtorad(ra)) + \
         vb[2]*np.sin(degtorad(dec))
  vhel = vh[0]*np.cos(degtorad(dec))*np.cos(degtorad(ra)) + vh[1]*np.cos(degtorad(dec))*np.sin(degtorad(ra)) + \
         vh[2]*np.sin(degtorad(dec))
  
  # Use barycentric velocity for correction
  corr = (vdiurnal + vbar) 

  if debug:
    print('')
    print('----- HELCORR.PRO - DEBUG INFO - START ----')
    print('(obs_long (East positive),obs_lat,obs_alt) Observatory coordinates [deg,m]: ', -obs_long, obs_lat, obs_alt)
    print('(ra,dec) Object coordinates (for epoch 2000.0) [deg]: ', ra, dec)
    print('(ut) Universal time (middle of exposure) [hrs]: ', ut)
    print('(jd) Julian date (middle of exposure) (JD): ', jd)
    print('(hjd) Heliocentric Julian date (middle of exposure) (HJD): ', hjd)
    print('(gmst) Greenwich mean sidereal time [hrs]: ', idlMod(gmst, 24))
    print('(lmst) Local mean sidereal time [hrs]: ', lmst)
    print('(dlat) Latitude correction [deg]: ', dlat)
    print('(lat) Geocentric latitude of observer [deg]: ', lat)
    print('(r) Distance of observer from center of earth [m]: ', r)
    print('(v) Rotational velocity of earth at the position of the observer [km/s]: ', v)
    print('(vdiurnal) Projected earth rotation and earth-moon revolution [km/s]: ', vdiurnal)
    print('(vbar) Barycentric velocity [km/s]: ', vbar)
    print('(vhel) Heliocentric velocity [km/s]: ', vhel)
    print('(corr) Vdiurnal+vbar [km/s]: ', corr)
    print('----- HELCORR.PRO - DEBUG INFO - END -----')
    print('')
  
  return corr, hjd
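A hedged usage sketch with illustrative observatory and target values (East longitude positive, RA/Dec in degrees, full JD, as described in the docstring); the function and its dependencies are assumed to be imported as in the module above:

corr, hjd = helcorr(obs_long=-70.40, obs_lat=-24.63, obs_alt=2635.0,
                    ra2000=30.0, dec2000=-10.0, jd=2456892.52)
print(corr, hjd)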
Example #48
 def setitem(specifier, value):
     if specifier == "T0pa":
         PE.warn(T0paE)
     PalLC.__setitem__(self, specifier, value)
Example #49
 def getitem(specifier, **kwargs):
     if specifier == "T0pa":
         PE.warn(T0paE)
     return PalLC.__getitem__(self, specifier, **kwargs)
Example #50
def quadExtreme(x,
                y,
                mode="max",
                dp=(1, 1),
                exInd=None,
                fullOutput=False,
                fullPoint=False):
    """
    Find the extreme (minimum or maximum) by means of a parabolic fit.
    
    This function searches for the maximum (or minimum) in the given
    ordinate values, fits a second-order polynomial to the
    surroundings of that point, and, finally, determines the
    thus approximated abscissa value of the extreme point. 
    
    Parameters
    ----------
    x : array
        Abscissa values.
    y : array
        Ordinate values.
    mode : string, {min, max}, optional
        Determines whether a minimum or a maximum is searched
        for. The default is a maximum.
    dp : tuple with two integers, optional
        Determines the width around the extreme point, which will
        be used in the fitting. The default is one point to the
        right and left, i.e., dp = (1,1).
    exInd : integer, optional
        If given, the function will assume that this is the
        index of the extreme point and not search for it.
    fullOutput : boolean, optional
        If True, the output will also cover the points used in
        the fitting and the resulting polynomial.
    fullPoint : boolean, optional
        If True, the return value `epos` will be a tuple holding
        the abscissa and ordinate values of the extreme point.
        The default is False.
    
    Returns
    -------
    epos : float or tuple
        Position of the extreme point. If `fullPoint` is True,
        a tuple with the abscissa and ordinate values of the
        extreme point.
    mi : int
        The index of the extreme point (maximum or minimum).
    xb : array, optional
        Only returned if `fullOutput` is True. The abscissa
        values used in the polynomial fit. Note that the
        x-value of the extreme point has been subtracted.
    yb : array, optional
        Only returned if `fullOutput` is True. The ordinate
        values used in the polynomial fit.
    p : numpy polynomial, optional
        Only returned if `fullOutput` is True. The best-fit
        polynomial. Note that the fit refers to the `xb` axis,
        where the x-value of the extreme point has been
        subtracted.
  """
    if exInd is None:
        if mode == "max":
            mi = np.argmax(y)
        elif mode == "min":
            mi = np.argmin(y)
        else:
            raise(PE.PyAValError("Unknown mode '" + str(mode) + "'.", \
                                 solution="Choose 'min' or 'max'"))
    else:
        mi = exInd

    if ((mi - dp[0]) < 0) or ((mi + dp[1] + 1) > len(x)):
        raise(PE.PyAValError("The requested fitting range around the extreme is not covered by the data.", \
                             solution=["Adapt 'dp'.", "Do you need to change the mode?"]))

    # Cut out the relevant range
    xb = x[mi - dp[0]:mi + dp[1] + 1].copy()
    yb = y[mi - dp[0]:mi + dp[1] + 1].copy()

    # Subtract the x-value of the extreme
    xm = x[mi]
    xb -= xm

    # Fit second-order polynomial
    p = np.polyfit(xb, yb, 2)

    # Check whether curvature agrees with mode
    signs = {"max": -1, "min": +1}
    if np.sign(p[0]) != signs[mode]:
        PE.warn(PE.PyAValError("The curvature of the fit does not match the requested mode of '" + str(mode) + "'.", \
                               solution="Did you choose the right mode (min/max)?"))

    # Location of the extreme
    epos = -p[1] / (2.0 * p[0])
    # Shift back the location
    epos += xm

    if fullPoint:
        epos = (epos, -p[1]**2 / (4.0 * p[0]) + p[2])

    if not fullOutput:
        return epos, mi
    else:
        return epos, mi, xb, yb, p
Example #51
# -*- coding: utf-8 -*-
from __future__ import print_function
from PyAstronomy.pyaC import pyaErrors as PE
from PyAstronomy.pyaC import ImportCheck
import sys

# Check Python version
_majV, _minV = sys.version_info[0:2]
if (_minV < 7) and (_majV == 2):
    PE.warn(
        "funcFit needs 2.7.x or greater. See documentation (Prerequisites) for explanation."
    )

ic = ImportCheck([
    "numpy", "scipy", "pymc", "matplotlib", "matplotlib.pylab", "pyfits",
    "emcee", "progressbar"
])

# Get out if numpy not present
if not ic.check["numpy"]:
    raise(PE.PyARequiredImport("Numpy cannot be imported.", solution="Install numpy (see http://numpy.scipy.org/, you probably should also install SciPy).", \
                               addInfo="The numpy package provides array support for Python and is indispensable in many scientific applications."))

# Check whether fitting modules can be imported
_scoImport = ic.check["scipy"]
_pymcImport = ic.check["pymc"]
_mplImport = ic.check["matplotlib"]

from PyAstronomy.funcFit.utils import *
from .modelRebin import turnIntoRebin, _ModelRebinDocu
from .onedfit import *
Example #52
def vactoair(wave, depWarn=True):
  """
    Convert vacuum wavelengths to air wavelengths

    .. warning::
       The conversion implemented here is based on the older formulae
       given by Edlen 1953. Furthermore, it seems that wave numbers in air
       are used, where vacuum wave numbers should be used, which, however,
       produces only a second-order deviation. Consider using
       :py:func:`vactoair2` instead.

    Parameters
    ----------
    wave : float, array
        The wavelength in vacuum [Angstrom]
    depWarn : boolean, optional
        If True (default), a deprecation warning will be
        given.
   
    Returns
    -------
    Wavelength : array
        Wavelength in air [Angstrom]
   
    Notes
    -----

    .. note:: This function was ported from the IDL Astronomy User's Library.
    
    :IDL - Documentation:

    NAME:
          VACTOAIR
    PURPOSE:
          Convert vacuum wavelengths to air wavelengths
    EXPLANATION:
          Corrects for the index of refraction of air under standard conditions.  
          Wavelength values below 2000 A will not be altered.  Accurate to 
          about 0.005 A 

    CALLING SEQUENCE:
          VACTOAIR, WAVE

    INPUT/OUTPUT:
          WAVE - Wavelength in Angstroms, scalar or vector
                  WAVE should be input as vacuum wavelength(s), it will be
                  returned as air wavelength(s).  WAVE is always converted to
                  double precision

    EXAMPLE:
          If the vacuum wavelength is  W = 2000, then 

          IDL> VACTOAIR, W 

          yields an air wavelength of W = 1999.353 Angstroms

    METHOD:
          An approximation to the 4th power of inverse wavenumber is used
          See IUE Image Processing Manual   Page 6-15.

    REVISION HISTORY
          Written, D. Lindler 1982 
          Documentation W. Landsman  Feb. 1989
          Converted to IDL V5.0   W. Landsman   September 1997
  """

  if depWarn:
    PE.warn(PE.PyADeprecationError("Note: vactoair is outdated; see documentation for reasons.", \
                                   solution="Consider using 'vactoair2'."))

  wave2 = wave**2.
  fact = 1. + 2.735182e-4 + 131.4182/wave2 + 2.76249e8/(wave2**2.)
  fact = fact * ( wave >= 2000. ) + 1.*( wave < 2000. )

  # Convert wavelengths
  
  wave = wave/fact

  return wave
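A short usage sketch; according to the IDL documentation quoted above, a vacuum wavelength of 2000 Angstrom corresponds to an air wavelength of about 1999.353 Angstrom:

w_air = vactoair(2000.0)   # about 1999.353; also emits a deprecation warning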
Example #53
 def getitem(specifier, **kwargs):
     if specifier == "T0pa":
         PE.warn(T0paE)
     return PalLC.__getitem__(self, specifier, **kwargs)
Example #54
def smooth(x, windowLen, window='flat'):
    """
    Smooth data using a window function.
    
    This method is based on the convolution of a window function with the signal.
    The window function is normalized so that the sum of its entries amounts to
    one. The signal is prepared by adding reflected copies of the signal 
    (with the window size) to both ends of the input array, so that the output
    array can have the same length as the input. Consequently the smoothing at
    the edges is actually based on extrapolation.
    
    .. note:: This algorithm was adopted from the scipy cookbook
              (http://www.scipy.org/Cookbook/SignalSmooth). The copyright
              of the original algorithm belongs to the authors of that
              cookbook algorithm.
    
    Parameters
    ----------
    x : array
        The input signal 
    windowLen : int
        The dimension of the smoothing window. It must be an odd integer.
    window : string, {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'}
        The window function to be used. A flat window will
        produce a moving average smoothing.

    Returns
    -------
    Smoothed signal : array
        The smoothed signal. Same length as input array.
  """

    if x.ndim != 1:
        raise(PE.PyAValError("Only one dimensional arrays can be smoothed. Dimension of " + \
              "given array is " + str(x.ndim),
              solution="Change dimension of input."))

    if x.size < windowLen:
        raise(PE.PyAValError("Input vector needs to be bigger than window size.", \
              solution="Check the length of the input array and window size."))

    if windowLen < 3:
        PE.warn(PE.PyAValError("Length of window is smaller then 3. No smoothing is done.", \
            solution="Check window size."))
        return x

    if windowLen % 2 != 1:
        raise (
            PE.PyAValError("Parameter `windowLen` should be an odd integer"))

    # Extend input array at the edges to have the same
    # length for the output array. Insert a mirrored version
    # of the first part of the data array in front of the
    # first data point; apply the same scheme to the end of the
    # data array.
    s = np.r_[x[windowLen - 1:0:-1], x, x[-1:-windowLen:-1]]

    if window == 'flat':
        # This is a moving average
        w = np.ones(windowLen, 'd')
    elif window == "hanning":
        w = np.hanning(windowLen)
    elif window == "hamming":
        w = np.hamming(windowLen)
    elif window == "bartlett":
        w = np.bartlett(windowLen)
    elif window == "blackman":
        w = np.blackman(windowLen)
    else:
        raise(PE.PyAValError("Current `window` parameter (" + str(window) + ") is not supported. " + \
                             "Must be one of: 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'", \
              solution="Choose one of the above window types."))

    y = np.convolve(w / w.sum(), s, mode='valid')
    return y[(windowLen // 2):-(windowLen // 2)]
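A usage sketch on made-up noisy data (the window length must be odd, as enforced above):

import numpy as np

x = np.linspace(0.0, 4.0 * np.pi, 200)
y = np.sin(x) + 0.2 * np.random.normal(size=x.size)
ys = smooth(y, 11, window="hamming")   # smoothed signal, same length as y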
Example #55
def transitTimes(tmin, tmax, planetData, obsOffset=0., hjd=True,
                 observatory=None, lon=None, lat=None, alt=None, minAltitude=None,
                 showTwilight="all", moonDist=None, nexaInput=False, fileOutput=None):
    """
    Calculate transit times for a given planet and a given period of time.

    The `planetData` dictionary must contain the following information:

    =======  =================================
    Key      Value
    -------  ---------------------------------
    ra       Right ascension of object [deg]
    dec      Declination of object [deg]
    T0       Time reference point (HJD)
    orbPer   Orbital period [d]
    orbInc   Orbital inclination [deg]
    SMA      Semi-major axis [AU]
    RpJ      Planetary radius [Jovian radii]
    RsSun    Stellar Radius [solar]
    Tdur     OPTIONAL, Transit duration [d]
    =======  =================================

    If the transit duration (Tdur) is not given, the duration will
    be estimated using pyasl's `transitDuration` function. 

    .. note:: The input times (`tmin` and `tmax`) are expected in JD (UT).
              Input times will be converted to HJD.
              Time output is in HJD.

    Parameters
    ----------
    tmin : float
        Start of time interval in Julian days (UT).
    tmax : float
        End of time interval in Julian days (UT).
    planetData : dictionary
        A dictionary containing the parameters of the exoplanet
        for which the transit times should be calculated.
        The required keys are specified above.
    obsOffset : float, optional
        Specifies additional time before AND after the transit in DAYS.
        This is useful if the observation should start and end
        some time before and after the actual transit.
    hjd : boolean, optional
        If True (default), the given Julian dates specifying the time
        interval (`tmin` and `tmax`) are automatically
        converted into the heliocentric frame (HJD).
    observatory : string, optional
        If given, pyasl's `observatory` function will be used to automatically
        resolve the name and obtain longitude, latitude, and altitude
        of the observatory.
        If `observatory` is given, `lon`, `lat`, and `alt` must not be specified.
    lon : float, optional
        East longitude of the observatory given in DEGREES.

        Longitude is positive in EASTWARD direction.
        If LON is not given, transitTimes will only return beginning and end
        of the observation and the time of mid transit.
    lat : float, optional
        Latitude of the observatory given in DEGREES
        (positive in NORTHWARD direction).
    alt : float, optional
        Altitude of the observatory given in METER.
    minAltitude : float, optional
        Minimum altitude of the object in DEGREES.

        If a minimum altitude is given, only transits for which the
        object is above the given altitude during the ENTIRE
        observation are included in the list
        created by `transitTimes`. Note that `minAltitude` can
        only be used if the observer's location has been specified
        either via `observatory` or via `lon`, `lat`, and `alt`.
    showTwilight : string, optional, {"all", "civil", "nautical", "astronomical", "night"}
        Specifies the twilight acceptable during the observation.
        By default all twilight conditions are acceptable.

        Only the transits for which the ENTIRE observation
        occurs during the specified or darker twilight conditions
        are listed.

        The choices are:
          - "all": all transits are shown (even during day)
          - "civil": only transits during civil twilight and better are shown
          - "nautical": only transits during nautical twilight and better are shown
          - "astronomical": only transits during astronomical twilight and better are shown
          - "night": only transits during night are shown

        Note that this can only have an effect, if the observer's location is
        specified.
    moonDist : float
        Minimum distance between the Moon and the target in DEGREES.
        By default all Moon distances are acceptable (moonDist=0.0).

        Only observations are listed for which the angular distance between
        the Moon and the target is larger
        than `moonDist` during the ENTIRE observation.

        Note that this can only have an effect, if the observer's location is
        specified.
    fileOutput : string or file, optional
        If a string is given, a file with the name will be created
        and the output will be written to that file. If a (writable)
        file object is given, the output will be written to that
        file. In both cases, no output will be given on screen. 

    Returns
    -------
    Transit times : dictionary
        Returns a dictionary containing the transit details. The dictionary key
        is a running number (starting with one), which is equivalent to that 
        listed in the first column of the table.

        For each transit, the function returns a dictionary with the transit
        details.

        If the observer's location was not specified, the dictionary has the
        following keys:

          ============    ====================================================
          Key             Description
          ------------    ----------------------------------------------------
          Planet name     Name of the planet
          Tmid            HJD of transit center
          Transit jd      Array giving JD of start, mid-time, and end of
                          transit.
          Obs jd          Array specifying the HJD of the start, center and
                          end of the observation.
          Obs cal         Equivalent to 'Obs jd', but in the form of the
                          calendar date. In particular, for each date, a list
                          containing [Year, month, day, fractional hours]
                          is given.

                          **Below follows optional output only present**
                          **if the observer's location is known**

          Obs coord       East longitude [deg], latitude [deg], and
                          altitude [m] of the observatory.
          Sun ra          Right ascension of the Sun at center of
                          observation.
          Sun dec         Declination of the Sun at center of
                          observation.
          Sun alt         Altitude of the Sun [deg] at begin, center, and
                          end of the observation.
          Sun az          Azimuth of the Sun [deg] at begin, center, and
                          end of the observation.
          Moon phase      Array giving lunar phase (in percent) at start,
                          center, and end of the observation.
          Moon AD         Angular distance between the target and the Moon
                          at begin, center, and end of the observation [deg].
          Moon ra         Right ascension of the Moon at begin, center, and
                          end of the observation [deg].
          Moon dec        Declination of the Moon at begin, center, and
                          end of the observation [deg].
          Star ra         Right ascension of the star [deg].
          Star dec        Declination of the star [deg].
          Star CP         Cardinal point of the star at begin, center, and
                          end of the observation.
          Star alt        Altitude of the star [deg] at begin, center, and
                          end of the observation.
          Star az         Azimuth of the star [deg] at begin, center, and
                          end of the observation.
          Twilight        The worst, i.e., brightest type of twilight
                          encountered during the observation.
          ============    ====================================================

    """

    if fileOutput is not None:
        oldStdout = sys.stdout
        if isinstance(fileOutput, six.string_types):
            sys.stdout = open(fileOutput, 'w')
        else:
            sys.stdout = fileOutput

    try:

        if tmin >= tmax:
            raise(PE.PyAValError("The given time range is inconsistent (tmin >= tmax)",
                                 where="transitTimes",
                                 solution="Adapt tmin and tmax."))

        # Copy input dictionary, because it may be changed
        planetData = planetData.copy()

        if nexaInput:
            pdin = planetData.copy()
            planetData = {}
            planetData["ra"] = pdin["ra"]
            planetData["dec"] = pdin["dec"]
            planetData["orbPer"] = pdin["pl_orbper"]
            planetData["T0"] = pdin["pl_tranmid"]
            planetData["orbInc"] = pdin["pl_orbincl"]
            planetData["SMA"] = pdin["pl_orbsmax"]
            planetData["RpJ"] = pdin["pl_radj"]
            planetData["RsSun"] = pdin["st_rad"]
            planetData["Tdur"] = pdin["pl_trandur"]
            planetData["plName"] = pdin["pl_name"]
            if np.isnan(planetData["Tdur"]):
                del planetData["Tdur"]

        # Check whether required keys are present
        reke = ["ra", "dec", "orbPer", "T0",
                "orbInc", "SMA", "RpJ", "RsSun", "plName"]
        msg = ""
        fail = False
        for key in reke:
            if (not key in planetData):
                msg += "The required key '" + key + "' is missing in the input data!\n"
                fail = True
                continue
            if isinstance(planetData[key], (tuple(six.integer_types) + (float,))):
                if np.isnan(planetData[key]):
                    msg += "The required key '" + key + "' has NaN value in the input data!\n"
                    fail = True
        if fail:
            raise(PE.PyAValError("The input `planetData` is inappropriate:\n" + msg,
                                 where="transitTimes",
                                 solution="Specify all required input values."))

        # Object position [degrees]
        ra = planetData["ra"]
        dec = planetData["dec"]

        if hjd:
            # Convert input times into heliocentric frame
            # Using 'reduced' JD in calculation
            tmin = helio_jd(tmin - 2.4e6, ra, dec) + 2.4e6
            tmax = helio_jd(tmax - 2.4e6, ra, dec) + 2.4e6

        print("Specified time span")
        print(
            "Start date (DDDD-MM-YY and fractional hours): {0:4d}-{1:02d}-{2:02d} {3:6.3f}".format(*daycnv(tmin)))
        print(
            "End date (DDDD-MM-YY and fractional hours): {0:4d}-{1:02d}-{2:02d} {3:6.3f}".format(*daycnv(tmax)))
        print()

        # Transit parameters
        # Orbital period in days
        period = planetData["orbPer"]
        # Transit reference time (should be HJD)
        T0 = planetData["T0"]

        if not "Tdur" in planetData:
            # No duration specified in the data
            inc = planetData["orbInc"]  # deg
            sma = planetData["SMA"]  # au
            rp = planetData["RpJ"]  # Mjup
            rs = planetData["RsSun"]  # Msun
            dur = transitDuration(sma, rp, rs, inc, period)
            print(
                "Estimating transit duration using orbital inclination, semi-major axis,")
            print("  planetary radius, and stellar radius")
        else:
            dur = planetData["Tdur"]

        print("Transit duration: ", dur * 24. * 60., " minutes")
        print("Off-transit time before and after transit: ",
              obsOffset * 24. * 60., " minutes")

        # First and last epoch contained in specified range
        trnum_start = np.floor((tmin - T0) / period)
        trnum_end = np.ceil((tmax - T0) / period)
        # Relevant transit epochs
        tr = np.arange(trnum_start, trnum_end, 1)

        if (observatory is not None) and \
                ((lon is not None) or (lat is not None) or (alt is not None)):
            raise(PE.PyAParameterConflict("You must either specify `observatory` OR `lon`, `lat`, and `alt`.",
                                          where="transitTimes",
                                          solution="Adapt function call."))

        if observatory is not None:
            # Resolve observatory string
            observatory_data = pyaobs.observatory(observatory)
            lon = observatory_data["longitude"]
            lat = observatory_data["latitude"]
            alt = observatory_data["altitude"]

        # Check if the observatory data are complete
        obsCompl = (lon is None) + (lat is None) + (alt is None)
        if (obsCompl == 1) or (obsCompl == 2):
            raise(PE.PyAValError("Observatory data is incomplete. `lon`, `lat`, and `alt` must all be specified.\n" +
                                 "Current values are: lon = " +
                                 str(lon) + ", lat = " + str(lat) +
                                 ", alt = " + str(alt),
                                 where="transitTimes",
                                 solution="Provide complete observatory information."))

        if (minAltitude is not None) and (lon is None):
            # Observer's location not given so minAltitude cannot have any effect
            raise(PE.PyAParameterConflict("The observer's location is not specified, but `minAltitude` is given.\n" +
                                          "This parameter can only be used, if the observer's location is known.",
                                          where="transitTimes",
                                          solution="Either specify the observer's location or set `minAltitude` to None."))

        if (showTwilight != "all") and (lon is None):
            # Observer's location not given so showTwilight cannot have any effect
            raise(PE.PyAParameterConflict("The observer's location is not specified, but `showTwilight` is given.\n" +
                                          "This parameter can only be used, if the observer's location is known.",
                                          where="transitTimes",
                                          solution="Either specify the observer's location or set `showTwilight` to \"all\"."))

        if (showTwilight != "all") and (showTwilight != "civil") and (showTwilight != "nautical") and \
           (showTwilight != "astronomical") and (showTwilight != "night"):
            # None of the possible choices for showTwilight have been used.
            raise(PE.PyAValError("Wrong keyword given for showTwilight.\n" +
                                 "Current keyword is " + showTwilight,
                                 where="transitTimes",
                                 solution="Select a valid keyword for showTwilight: `all', `civil', `nautical', `astronomical', or `night'."))

        if moonDist is None:
            # No limit on Moon distance
            moonDist = 0.0
        if (moonDist != 0.0) and (lon is None):
            # Observer's location not given so moonDist cannot have any effect
            raise(PE.PyAParameterConflict("The observer's location is not specified, but `moonDist` is given.\n" +
                                          "This parameter can only be used, if the observer's location is known.",
                                          where="transitTimes",
                                          solution="Either specify the observer's location or set `moonDist` to 0.0 or None."))

        if moonDist < 0.0:
            # A Moon distance below zero does not make sense
            PE.warn("The specified `moonDist' is below zero (" + str(moonDist) + "), which does not make sense.\n" +
                    "It was changed to 0.0.\n" +
                    "Please use a value >= 0.0 or None when specifying `moonDist'.")
            moonDist = 0.0

        print()
        if (lon is not None) and (lat is not None):
            print("No. Tmid [HJD]      Obs. start [UT] [ALT, DIR(AZI)]     Transit mid [UT] [ALT, DIR(AZI)]     Obs. end [UT] [ALT, DIR(AZI)]   twilight" +
                  " (SUN ALT)                   moon distance     moon phase")
        else:
            print(
                "No. Tmid [HJD]      Obs. start [UT]    Transit mid [UT]   Obs. end [UT]")

        allData = {}
        trcounter = 1
        for i in tr:
            trData = {}
            # Get times
            Tmid = T0 + float(i) * period
            if (Tmid < tmin) or (Tmid > tmax):
                # This may happen because the transit may occur in the first
                # relevant epoch but still before tmin. Likewise for tmax.
                continue
            obs_start_hjd = Tmid - (dur / 2.0) - obsOffset
            obs_start = daycnv(obs_start_hjd)
            obs_mid = daycnv(Tmid)
            obs_end_hjd = Tmid + (dur / 2.0) + obsOffset
            obs_end = daycnv(obs_end_hjd)
            time_temp = np.array([obs_start_hjd, Tmid, obs_end_hjd])
            transit_only = np.array(
                [Tmid - (dur / 2.0), Tmid, Tmid + (dur / 2.0)])

            # Get visibility
            if (lon is not None) and (lat is not None):
                # Get alt/az of object for current transit
                altaz = eq2hor.eq2hor(time_temp, np.ones(time_temp.size) * ra,
                                      np.ones(time_temp.size) * dec, lon=lon, lat=lat, alt=alt)
                # If minimum altitude is not fulfilled during observation,
                # do not show transit
                if minAltitude is not None:
                    minalt = np.where(altaz[0] >= minAltitude)[0]
                    if len(minalt) < time_temp.size:
                        # Skip this transit
                        continue
                # Get Sun position for current transit
                sunpos_radec = sunpos.sunpos(time_temp[1])
                sunpos_altaz = eq2hor.eq2hor(time_temp, np.ones(time_temp.size) * sunpos_radec[1],
                                             np.ones(time_temp.size) *
                                             sunpos_radec[2],
                                             lon=lon, lat=lat, alt=alt)
                twi = twilight.twilightName(max(sunpos_altaz[0]))
                # Check type of twilight -> if requirement not fulfilled, don't show transit
                if showTwilight == "civil":
                    # Show civil or better
                    if twi == "day":
                        continue
                if showTwilight == "nautical":
                    # Show nautical or better
                    if twi == "day":
                        continue
                    if twi == "civil twilight":
                        continue
                if showTwilight == "astronomical":
                    # Show astronomical or better
                    if twi == "day":
                        continue
                    if twi == "civil twilight":
                        continue
                    if twi == "nautical twilight":
                        continue
                if showTwilight == "night":
                    # Only show night
                    if twi != "night":
                        continue

                # Get Moon position for current transit
                mpos = moonpos(time_temp)
                mdists = []
                for j in range(time_temp.size):
                    mdists.append(getAngDist(mpos[0][j], mpos[1][j], ra, dec))
                mdist = min(mdists)
                # Check Moon distance, if not fulfilled, neglect the transit
                if mdist < moonDist:
                    continue
                # Get lunar phase in percent
                moonpha = moonphase(time_temp) * 100.
                print("%3d %10.5f   %2d.%2d. %2d:%02d    [%3d°,%s(%3d°)]      %2d.%2d. %2d:%02d     [%3d°,%s(%3d°)]      %2d.%2d. %2d:%02d  [%3d°,%s(%3d°)]   %18s (%3d°,%3d°,%3d°)   (%3d°,%3d°,%3d°)  %3d%%"
                      % (trcounter, Tmid, obs_start[2], obs_start[1], np.floor(obs_start[3]), (obs_start[3] - np.floor(obs_start[3])) * 60.,
                         altaz[0][0], getCardinalPoint(
                             altaz[1][0]), altaz[1][0],
                         obs_mid[2], obs_mid[1], np.floor(
                             obs_mid[3]), (obs_mid[3] - np.floor(obs_mid[3])) * 60.,
                         altaz[0][1], getCardinalPoint(
                             altaz[1][1]), altaz[1][1],
                         obs_end[2], obs_end[1], np.floor(
                             obs_end[3]), (obs_end[3] - np.floor(obs_end[3])) * 60.,
                         altaz[0][2], getCardinalPoint(
                             altaz[1][2]), altaz[1][2], twi, sunpos_altaz[0][0], sunpos_altaz[0][1], sunpos_altaz[0][2],
                         mdists[0], mdists[1], mdists[2], np.max(moonpha)))
                # Save transit data
                trData["Tmid"] = Tmid
                trData["Obs jd"] = time_temp
                trData["Obs cal"] = [obs_start, obs_mid, obs_end]
                trData["Star ra"] = ra
                trData["Star dec"] = dec
                trData["Star alt"] = altaz[0]
                trData["Star az"] = altaz[1]
                trData["Sun ra"] = sunpos_radec[1]
                trData["Sun dec"] = sunpos_radec[2]
                trData["Sun alt"] = sunpos_altaz[0]
                trData["Sun az"] = sunpos_altaz[1]
                trData["Twilight"] = twi
                trData["Moon ra"] = mpos[0]
                trData["Moon dec"] = mpos[1]
                trData["Moon AD"] = mdist
                trData["Moon phase"] = moonpha
                trData["Star CP"] = [getCardinalPoint(altaz[1][0]), getCardinalPoint(
                    altaz[1][1]), getCardinalPoint(altaz[1][2])]

                trData["Obs coord"] = [lon, lat, alt]
            else:
                # If you do not specify the observer's location, return all transits of the object
                print("%3d %10.5f   %2d.%2d. %2d:%02d       %2d.%2d. %2d:%02d       %2d.%2d. %2d:%02d"
                      % (trcounter, Tmid, obs_start[2], obs_start[1], np.floor(obs_start[3]), (obs_start[3] - np.floor(obs_start[3])) * 60.,
                         obs_mid[2], obs_mid[1], np.floor(
                             obs_mid[3]), (obs_mid[3] - np.floor(obs_mid[3])) * 60.,
                         obs_end[2], obs_end[1], np.floor(obs_end[3]), (obs_end[3] - np.floor(obs_end[3])) * 60.))
                trData["Tmid"] = Tmid
                trData["Obs jd"] = time_temp
                trData["Obs cal"] = [obs_start, obs_mid, obs_end]

            trData["Transit jd"] = transit_only
            trData["Planet name"] = planetData["plName"]
            allData[trcounter] = trData
            trcounter += 1

        if len(allData) == 0:
            print()
            print("------------------------------------------------------")
            print("!!! No transits found for the given restrictions. !!!")
            print("------------------------------------------------------")
            print()

    except:
        raise
    finally:
        if fileOutput is not None:
            if isinstance(fileOutput, six.string_types):
                sys.stdout.close()
            sys.stdout = oldStdout

    return allData
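
A minimal usage sketch follows. It assumes the routine is exposed as pyasl.transitTimes in PyAstronomy and that the planet-data dictionary carries the target coordinates under "ra" and "dec" (J2000, degrees) in addition to the keys read above; all numbers are illustrative, not a real ephemeris.

# Hedged usage sketch for transitTimes (illustrative values only)
from PyAstronomy import pyasl

# Keys accessed by the routine; "ra"/"dec" are assumed J2000 coordinates in degrees
planetData = {"ra": 294.0, "dec": 47.0,
              "orbPer": 2.21857,     # orbital period [d]
              "T0": 2454037.612,     # transit reference time [HJD]
              "Tdur": 0.0753,        # transit duration [d]
              "plName": "Example planet b"}

# Transits within an HJD range, seen from a site at lon/lat/alt,
# with 30 min of off-transit coverage before and after the transit
dat = pyasl.transitTimes(2457000.0, 2457030.0, planetData,
                         obsOffset=30. / (24. * 60.),
                         lon=6.88, lat=50.0, alt=100.,
                         minAltitude=30., showTwilight="astronomical")

for no, td in dat.items():
    print(no, td["Tmid"], td["Twilight"])
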
Example #56
0
    def downloadToFile(self,
                       url,
                       fn,
                       clobber=False,
                       verbose=True,
                       openMethod=open,
                       context=None):
        """
        Download content from URL.

        Parameters
        ----------
        url : string
            The location of the content.
        fn : string
            The relative or absolute name of the file
            to which the download shall be saved.
        clobber : boolean, optional
            If True, an existing file will be overwritten.
        verbose : boolean, optional
            If True, information on the download will be
            printed to the screen.
        openMethod : callable
            The method used to open the file to write to (default is
            open; other choices may be gzip.open or io.open).
        context : ssl context
            SSL context parameter handed to urlopen.
        """
        if self.fileExists(fn) and (not clobber):
            return

        if not pyaRC.supposedOnline():
            PE.warn(PE.PyANetworkError("Internet connection disallowed by pyaRC.", \
                                       solution="Use 'goOnline' method in pyaRC."))
            return

        def download(url, context, nocontext=False):
            if not nocontext:
                # Use context
                response = urllib.request.urlopen(url, context=context)
            else:
                # Disregard context
                response = urllib.request.urlopen(url)
            data = response.read()  # a `bytes` object
            self.requestFile(fn, 'wb', openMethod).write(data)

        ana = self._analyzeFilename(fn, True)
        self.touchFile(ana["fullname"])
        try:
            if verbose:
                print("PyA download info:")
                print("  - Downloading from URL: " + str(url))
            download(url, context)
        except (KeyboardInterrupt, SystemExit):
            self.removeFile(ana["fullname"])
            raise
        except TypeError as e:
            # Possibly, context is not supported
            cs = self._checkContext()
            if not cs:
                # Network is all right, but context parameter must
                # not be specified.
                if verbose:
                    print("PyA download info:")
                    print("  - Downloading from URL: " + str(url) +
                          ", (no context)")
                download(url, context, nocontext=True)
        except URLError as e:
            # Retry with an unverified SSL context (bypassing certificate verification!)
            if verbose:
                print("PyA download info:")
                print("  - Downloading from URL: " + str(url) +
                      ", (unverified context)")
            context = ssl._create_unverified_context()
            download(url, context)
        except Exception as e:
            self.removeFile(ana["fullname"])
            sols = ["Check whether URL exists and is spelled correctly."]
            # Check whether network can be reached.
            netreach = self._checkOnline(raiseNOC=False)
            if not netreach:
                sols.append(
                    "Network could not be reached. Check your network status.")
            raise (PE.PyADownloadError(
                "Could not download data from URL: " + str(url) + ".\n",
                solution=sols,
                tbfe=e,
                addInfo="Could network be reached (online)? " + {
                    True: "yes",
                    False: "No"
                }[netreach]))
        if verbose:
            print("  - Downloaded " +
                  str(os.path.getsize(ana["fullname"]) / 1000.0) + " kb")
            print("    to file: " + ana["fullname"])
Example #57
0
def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):
    """
    Calculate barycentric velocity correction.
    
    This function calculates the motion of an observer in
    the direction of a star. In contrast to :py:func:`baryvel`
    and :py:func:`baryCorr`, the rotation of the Earth is
    taken into account.
    
    .. note:: This function was ported from the REDUCE IDL package.
              See Piskunov & Valenti 2002, A&A 385, 1095 for a detailed
              description of the package and/or visit
              http://www.astro.uu.se/~piskunov/RESEARCH/REDUCE/
    
    .. warning:: Contrary to the original implementation the longitude
                 increases toward the East and the right ascension is
                 given in degrees instead of hours. The JD is given as is,
                 in particular, nothing needs to be subtracted.
    
    Parameters
    ----------
    obs_long : float
        Longitude of observatory (degrees, **eastern** direction is positive)
    obs_lat : float
        Latitude of observatory [deg]
    obs_alt : float
        Altitude of observatory [m]
    ra2000 : float
        Right ascension of object for epoch 2000.0 [deg]
    dec2000 : float
        Declination of object for epoch 2000.0 [deg]
    jd : float
        Julian date for the middle of exposure.
    
    Returns
    -------
    Barycentric correction : float
        The barycentric correction accounting for the rotation
        of the Earth, the rotation of the Earth's center around
        the Earth-Moon barycenter, and the motion of the Earth-Moon 
        barycenter around the center of the Sun [km/s].
    HJD : float
        Heliocentric Julian date for middle of exposure.

    Notes
    -----

    :IDL REDUCE - Documentation:


    Calculates heliocentric Julian date, barycentric and heliocentric radial
    velocity corrections from:
    
    INPUT:
    <OBSLON> Longitude of observatory (degrees, western direction is positive)
    <OBSLAT> Latitude of observatory (degrees)
    <OBSALT> Altitude of observatory (meters)
    <RA2000> Right ascension of object for epoch 2000.0 (hours)
    <DE2000> Declination of object for epoch 2000.0 (degrees)
    <JD> Julian date for the middle of exposure
    [DEBUG=] set keyword to get additional results for debugging
    
    OUTPUT:
    <CORRECTION> barycentric correction - correction for rotation of earth,
       rotation of earth center about the earth-moon barycenter, earth-moon 
       barycenter about the center of the Sun.
    <HJD> Heliocentric Julian date for middle of exposure
    
    Algorithms used are taken from the IRAF task noao.astutils.rvcorrect
    and some procedures of the IDL Astrolib are used as well.
    Accuracy is about 0.5 seconds in time and about 1 m/s in velocity.
    
    History:
    written by Peter Mittermayer, Nov 8,2003
    2005-January-13   Kudryavtsev   Made more accurate calculation of the sidereal time.
                                    Conformity with MIDAS compute/barycorr is checked.
    2005-June-20      Kochukhov Included precession of RA2000 and DEC2000 to current epoch

"""
    from PyAstronomy.pyaC import degtorad

    # This reverts the original longitude convention. After this,
    # East longitudes are positive
    obs_long = -obs_long

    if jd < 2.4e6:
        PE.warn(
            PE.PyAValError("The given Julian Date (" + str(jd) +
                           ") is exceedingly small. Did you subtract 2.4e6?"))

    # Convert JD to Gregorian calendar date
    xjd = jd

    year, month, day, ut = tuple(daycnv(xjd))

    # Current epoch
    epoch = year + month / 12. + day / 365.

    # Precess ra2000 and dec2000 to current epoch, resulting ra is in degrees
    ra = ra2000
    dec = dec2000
    ra, dec = precess(ra, dec, 2000.0, epoch)

    # Calculate heliocentric julian date
    rjd = jd - 2.4e6
    hjd = helio_jd(rjd, ra, dec) + 2.4e6

    # DIURNAL VELOCITY (see IRAF task noao.astutil.rvcorrect)
    # convert geodetic latitude into geocentric latitude to correct
    # for rotation of earth
    dlat = -(11.*60.+32.743)*np.sin(2.0*degtorad(obs_lat)) \
           +1.1633*np.sin(4.0*degtorad(obs_lat)) - 0.0026*np.sin(6.0*degtorad(obs_lat))
    lat = obs_lat + dlat / 3600.0

    # Calculate distance of observer from earth center
    r = 6378160.0 * (0.998327073+0.001676438*np.cos(2.0*degtorad(lat)) \
       -0.00000351 * np.cos(4.0*degtorad(lat)) + 0.000000008*np.cos(6.0*degtorad(lat))) \
       + obs_alt

    # Calculate rotational velocity (perpendicular to the radius vector) in km/s
    # 23.934469591229 is the sidereal day in hours for 1986
    v = 2. * np.pi * (r / 1000.) / (23.934469591229 * 3600.)

    # Calculating local mean sidereal time (see astronomical almanach)
    tu = (rjd - 51545.0) / 36525.0
    gmst = 6.697374558 + ut + \
          (236.555367908*(rjd-51545.0) + 0.093104*tu**2 - 6.2e-6*tu**3)/3600.0
    lmst = idlMod(gmst - obs_long / 15., 24)

    # Projection of rotational velocity along the line of sight
    vdiurnal = v * np.cos(degtorad(lat)) * np.cos(degtorad(dec)) * np.sin(
        degtorad(ra - lmst * 15))

    # BARICENTRIC and HELIOCENTRIC VELOCITIES
    vh, vb = baryvel(xjd, 0)

    # Project to line of sight
    vbar = vb[0]*np.cos(degtorad(dec))*np.cos(degtorad(ra)) + vb[1]*np.cos(degtorad(dec))*np.sin(degtorad(ra)) + \
           vb[2]*np.sin(degtorad(dec))
    vhel = vh[0]*np.cos(degtorad(dec))*np.cos(degtorad(ra)) + vh[1]*np.cos(degtorad(dec))*np.sin(degtorad(ra)) + \
           vh[2]*np.sin(degtorad(dec))

    # Use barycentric velocity for correction
    corr = (vdiurnal + vbar)

    if debug:
        print('')
        print('----- HELCORR.PRO - DEBUG INFO - START ----')
        print(
            '(obs_long (East positive),obs_lat,obs_alt) Observatory coordinates [deg,m]: ',
            -obs_long, obs_lat, obs_alt)
        print('(ra,dec) Object coordinates (for epoch 2000.0) [deg]: ', ra,
              dec)
        print('(ut) Universal time (middle of exposure) [hrs]: ', ut)
        print('(jd) Julian date (middle of exposure) (JD): ', jd)
        print('(hjd) Heliocentric Julian date (middle of exposure) (HJD): ',
              hjd)
        print('(gmst) Greenwich mean sidereal time [hrs]: ', idlMod(gmst, 24))
        print('(lmst) Local mean sidereal time [hrs]: ', lmst)
        print('(dlat) Latitude correction [deg]: ', dlat)
        print('(lat) Geocentric latitude of observer [deg]: ', lat)
        print('(r) Distance of observer from center of earth [m]: ', r)
        print(
            '(v) Rotational velocity of earth at the position of the observer [km/s]: ',
            v)
        print(
            '(vdiurnal) Projected earth rotation and earth-moon revolution [km/s]: ',
            vdiurnal)
        print('(vbar) Barycentric velocity [km/s]: ', vbar)
        print('(vhel) Heliocentric velocity [km/s]: ', vhel)
        print('(corr) Vdiurnal+vbar [km/s]: ', corr)
        print('----- HELCORR.PRO - DEBUG INFO - END -----')
        print('')

    return corr, hjd
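
A short usage sketch, assuming the function is available as pyasl.helcorr in PyAstronomy; the observatory coordinates, target position, and Julian date below are illustrative values.

# Usage sketch for helcorr (illustrative numbers)
from PyAstronomy import pyasl

# Observatory: longitude [deg, East positive], latitude [deg], altitude [m]
# Target: RA and Dec (J2000) in degrees; JD at mid-exposure (full JD, not reduced)
corr, hjd = pyasl.helcorr(-70.40, -24.62, 2635.,
                          30.0, -20.0, 2457000.5645)
print("Barycentric correction [km/s]: ", corr)
print("HJD at mid-exposure: ", hjd)
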
Example #58
0
 def steppar(self, pars, ranges, extractFctVal=None, quiet=False):
   """
      Step a parameter through a specified range.
      
      This function steps the specified parameters through the given
      ranges. During each step, all free parameters, except those
      being stepped, are fitted. The resulting contours allow one
      to estimate confidence intervals.
     
     This command uses the fitting parameters specified on a call
     to the `fit` method. In particular, the same values for `x`,
     `y`, `yerr`, `minAlgo`, `miniFunc`, `fminPars`, and `fminArgs`
     are used.
     
     .. note:: You need to have carried out a fit before you can
               use `steppar`.
     
     Parameters
     ----------
     pars : string or list of strings
         The parameter(s) which are to be stepped.
     ranges : dictionary
         A dictionary mapping parameter name to range specifier.
         The latter is a list containing [lower limit, upper limit,
         no. of steps, 'lin'/'log']. The fourth entry, which
         is optional, is a string specifying whether a constant
         linear step size ('lin') or a constant logarithmic
         step size ('log') shall be used.
     quiet : boolean, optional
         If True, output will be suppressed.
     extractFctVal : callable, optional
         A function specifying how the function value is extracted
         from the fit result. If standard settings are used, the
         default of None is adequate.
     
     Returns
     -------
     Parameter steps : list
         The return value is a list of lists. Each individual list
         contains the values of the stepped parameters as the first
         entries (same order as the input `pars` list), the
         following entry is the value of the objective function
         (e.g., chi square), and the last entry is a tuple
         containing the indices of the steps of the parameter values.
         This last entry can be useful to convert the result into
          an array to plot, e.g., contours. 
   """
   if not self._stepparEnabled:
     raise(PE.PyAOrderError("Before you can use steppar, you must call a function, which enables its use (e.g., `fit`).", \
           solution="Call the `fit` method first and then try again."))
   if isinstance(pars, six.string_types):
     # Make it a list
     pars = [pars]
   # Check parameter consistency
   for p in pars:
     # Check existence
     tmp = self[p]
      if p not in ranges:
       raise(PE.PyAValError("There is no range for parameter: " + p, \
                            solution="Specify a range; e.g., {'xyz':[0.5,1.9,20,'lin']}"))
   # Function to extract function value from the fit result
   if extractFctVal is None:
     self._extractFctVal = self.__extractFunctionValue
   else:
     if not hasattr(extractFctVal, "__call__"):
       raise(PE.PyAValError("`extractFctVal` needs to be callable!", \
                            solution="Specify a function here or try to use None."))
     self._extractFctVal = extractFctVal
   # Set up ranges
   rs = []
   for par in pars:
     r = ranges[par]
     if len(r) > 4:
       # Use the axis as given
       rs.append(r)
       continue
     if len(r) < 4:
       # By default, use linear spacing
       mode = 'lin'
     else:
       if not isinstance(r[3], six.string_types):
         raise(PE.PyAValError("If the range has 4 entries, the fourth must be a string specifying the mode.", \
                              solution="Use either 'lin' or 'log' as the fourth entry."))
       mode = r[3]
     if mode == 'lin':
       rs.append(numpy.linspace(r[0], r[1], r[2]))
     elif mode == 'log':
       # Calculate factor
       s = numpy.power((r[1]/r[0]), 1.0/r[2])
       rs.append( r[0] * numpy.power(s, numpy.arange(r[2])) )
     else:
       raise(PE.PyAValError("Unknown mode: " + str(mode), \
                            solution="Use either 'lin' or 'log'."))
   # Save state of object
   saveObj = self.saveState()
   saveFitResult = self.fitResult
   saveModels = {}
   for k in six.iterkeys(self._compos):
     saveModels[k] = self.models[k].copy()
   # Freeze parameters, which are affected
   self.freeze(pars)
   # Store result
   result = []
   # Loop over the axes
   nli = pyaC.NestedLoop(list(map(len, rs)))
   for index in nli:
     for i, p in enumerate(pars):
       self[p] = rs[i][index[i]]
     # Fit using previous setting
     # Note that mAA is dispensable, because self.minAlgo will be a callable.
     self.fit(None, None, minAlgo=self.minAlgo, miniFunc=self.miniFunc, \
              *self.fminPars, **self.fminArgs)
     # Build up result
     ppr = []
     for par in pars:
       ppr.append(self[par])
     try:
       ppr.append(self._extractFctVal(self.fitResult))
     except Exception as e:
       PE.warn(PE.PyAValError("The call to the `extractFctVal` function failed. Using full output." + \
                              "\n  Original message: " + str(e)))
       ppr.append(self.fitResult)
     if not quiet:
       print("Result from last iteration:")
       print("  ", ppr)
     ppr.append(index)
     result.append(ppr)
   # Restore old state of object
   self.restoreState(saveObj)
   self.fitResult = saveFitResult
   for k in six.iterkeys(self._compos):
     self.models[k] = saveModels[k]
   return result
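
steppar is a method of a fitted model object. The sketch below assumes PyAstronomy's funcFit package and its GaussFit1d model, neither of which appears in the snippet above, and uses synthetic data; treat it as illustrative only.

# Usage sketch for steppar with a Gaussian model (illustrative)
import numpy as np
from PyAstronomy import funcFit as fuf

# Synthetic Gaussian absorption-like data
x = np.linspace(-5., 5., 200)
gf = fuf.GaussFit1d()
gf["A"], gf["mu"], gf["sig"] = -1.0, 0.1, 0.5
y = gf.evaluate(x) + np.random.normal(0., 0.02, len(x))
yerr = np.ones(len(x)) * 0.02

# A fit must precede the use of steppar
gf.thaw(["A", "mu", "sig"])
gf.fit(x, y, yerr=yerr)

# Step the amplitude through 30 linearly spaced values around the best fit
sp = gf.steppar("A", ranges={"A": [gf["A"] - 0.2, gf["A"] + 0.2, 30, 'lin']})
for aval, chi2, idx in sp:
    print("A = %6.3f  ->  chi2 = %8.3f" % (aval, chi2))
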
Example #59
0
def helio_jd(date, ra, dec, B1950=False, TIME_DIFF=False):
    """
    Convert geocentric (reduced) Julian date to heliocentric Julian date

    Parameters
    ----------
    date : float
        (Reduced) Julian date (2.4e6 subtracted)
    ra, dec : float
        Right ascension and declination in degrees
    B1950 : boolean
        If True, input coordinates are assumed to be given in equinox
        1950 coordinates.
    TIME_DIFF : boolean
        If True, this function returns the time difference
        (heliocentric JD - geocentric JD ) in seconds

    Returns
    -------
    HJD : float
        The heliocentric Julian date.

    Notes
    -----

    .. note:: This function was ported from the IDL Astronomy User's Library.

    :IDL - Documentation:

    NAME:
         HELIO_JD
    PURPOSE:
         Convert geocentric (reduced) Julian date to heliocentric Julian date
    EXPLANATION:
         This procedure correct for the extra light travel time between the Earth 
         and the Sun.

          An online calculator for this quantity is available at 
          http://www.physics.sfasu.edu/astro/javascript/hjd.html
    CALLING SEQUENCE:
          jdhelio = HELIO_JD( date, ra, dec, /B1950, /TIME_DIFF)

    INPUTS
          date - reduced Julian date (= JD - 2400000), scalar or vector, MUST
                  be double precision
          ra,dec - scalars giving right ascension and declination in DEGREES
                  Equinox is J2000 unless the /B1950 keyword is set

    OUTPUTS:
          jdhelio - heliocentric reduced Julian date.  If /TIME_DIFF is set, then
                    HELIO_JD() instead returns the time difference in seconds
                    between the geocentric and heliocentric Julian date.

    OPTIONAL INPUT KEYWORDS 
          /B1950 - if set, then input coordinates are assumed to be in equinox 
                   B1950 coordinates.
          /TIME_DIFF - if set, then HELIO_JD() returns the time difference
                   (heliocentric JD - geocentric JD ) in seconds 

    EXAMPLE:
          What is the heliocentric Julian date of an observation of V402 Cygni
          (J2000: RA = 20 9 7.8, Dec = 37 09 07) taken June 15, 1973 at 11:40 UT?

          IDL> juldate, [1973,6,15,11,40], jd      ;Get geocentric Julian date
          IDL> hjd = helio_jd( jd, ten(20,9,7.8)*15., ten(37,9,7) )  

          ==> hjd = 41848.9881

    Wayne Warren (Raytheon ITSS) has compared the results of HELIO_JD with the
    FORTRAN subroutines in the STARLINK SLALIB library (see 
    http://star-www.rl.ac.uk/).    
                                                     Time Diff (sec)
         Date               RA(2000)   Dec(2000)  STARLINK      IDL

    1999-10-29T00:00:00.0  21 08 25.  -67 22 00.  -59.0        -59.0
    1999-10-29T00:00:00.0  02 56 33.4 +00 26 55.  474.1        474.1
    1940-12-11T06:55:00.0  07 34 41.9 -00 30 42.  366.3        370.2
    1992-02-29T03:15:56.2  12 56 27.4 +42 10 17.  350.8        350.9
    2000-03-01T10:26:31.8  14 28 36.7 -20 42 11.  243.7        243.7
    2100-02-26T09:18:24.2  08 26 51.7 +85 47 28.  104.0        108.8
    PROCEDURES CALLED:
          bprecess, xyz, zparcheck

    REVISION HISTORY:
          Algorithm from the book Astronomical Photometry by Henden, p. 114
          Written,   W. Landsman       STX     June, 1989 
          Make J2000 default equinox, add B1950, /TIME_DIFF keywords, compute
          variation of the obliquity      W. Landsman   November 1999
    """

    # Because XYZ uses default B1950 coordinates, we'll convert everything to B1950

    if date > 2.4e6:
        PE.warn(PE.PyAValError("The given Julian Date ( " + str(date) + ") is exceedingly large far a reduced JD.",
                               solution="Did you forget to subtract 2.4e6?",
                               where="helio_jd"))

    if not B1950:
        bpresult = bprecess(ra, dec)
        ra1 = bpresult[0]
        dec1 = bpresult[1]
    else:
        ra1 = ra
        dec1 = dec

    radeg = 180.0 / numpy.pi
    # This check is probably not needed in Python; the original IDL call is kept for reference:
    # zparcheck,'HELIO_JD',date,1,[3,4,5],[0,1],'Reduced Julian Date'

    delta_t = (date - 33282.42345905) / 36525.0
    epsilon_sec = 44.836 - 46.8495 * delta_t - \
        0.00429 * delta_t**2 + 0.00181 * delta_t**3
    epsilon = (23.433333 + epsilon_sec / 3600.0) / radeg
    ra1 = ra1 / radeg
    dec1 = dec1 / radeg

    x, y, z, _, _, _ = xyz(date)

    # Find extra distance light must travel in AU, multiply by 1.49598e13 cm/AU,
    # and divide by the speed of light, and multiply by 86400 second/year

    time = -499.00522 * (cos(dec1) * cos(ra1) * x +
                         (tan(epsilon) * sin(dec1) + cos(dec1) * sin(ra1)) * y)

    if TIME_DIFF:
        return time
    else:
        return (date + time / 86400.0)
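
A brief usage sketch, assuming the function is exposed as pyasl.helio_jd in PyAstronomy; the reduced Julian date and coordinates are illustrative.

# Usage sketch for helio_jd (illustrative values)
from PyAstronomy import pyasl

jd_red = 56123.5            # geocentric reduced JD (JD - 2.4e6)
ra, dec = 302.28, 37.15     # J2000 coordinates [deg]

hjd = pyasl.helio_jd(jd_red, ra, dec)
dt = pyasl.helio_jd(jd_red, ra, dec, TIME_DIFF=True)
print("Heliocentric reduced JD: ", hjd)
print("HJD - JD [s]: ", dt)
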