Example 1
 def get_session_files(self, date):
     """
 Get the files for the specified data
 """
     year, month, day = calendar_date(self.year, self.DOY)
     UNIXstart = calendar.timegm((self.year, month, day, 0, 0, 0, 0, 0))
     UNIXend = calendar.timegm((self.year, month, day, 23, 59, 59, 0, 0))
     filekeys = list(self.files.keys())
     if not filekeys:
         raise RuntimeError("no tipping files found")
     first_idx = nearest_index(filekeys, UNIXstart)
     last_idx = nearest_index(filekeys, UNIXend)
     self.logger.debug("get_session_files: index from %d to %d", first_idx,
                       last_idx)
     if filekeys[first_idx] < UNIXstart:
         first_idx += 1
     if filekeys[last_idx] < UNIXend:
         last_idx += 1
     files = []
     for index in range(first_idx, last_idx):
         files.append(self.files[filekeys[index]])
     files.sort()
     self.datefiles = files
     return files
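All of these examples rely on a nearest_index helper that is not shown on this page. A minimal sketch of what it could look like, assuming it takes a sequence and a target value and returns the position of the closest element; this hypothetical stand-in simply clamps out-of-range values to the nearest end, whereas the variant used in Example 5 evidently returns -1 for them:

import numpy as np

def nearest_index(data, value):
    # Position of the element of 'data' closest to 'value'.  Hypothetical
    # stand-in for the helper these examples call, not the original code.
    return int(np.abs(np.asarray(data) - value).argmin())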
Example 2
def polate_flux(Jname, datenum, freq):
    """
  Interpolate or extrapolate flux of source for given date and frequency
  """
    times, fluxes, sigflux = get_flux_data(table_links[Jname][1])
    # (inter/extra)polate time first
    guess = {}
    for key in list(times.keys()):
        # interpolate or extrapolate?
        datenums = array(times[key])
        if datenum > datenums[-1]:
            # extrapolate forward from the most recent points
            time_delta = datenum - datenums[-1]
            ref_index = nearest_index(datenums, datenums[-1] - time_delta)
            timedata = datenums[ref_index:]        # include the latest point
            fluxdata = fluxes[key][ref_index:]
            (ar, br) = polyfit(timedata, fluxdata, 1)
            guess[key] = polyval([ar, br], datenum)
        elif datenum < datenums[0]:
            # extrapolate backward from the earliest points
            time_delta = datenums[0] - datenum
            ref_index = nearest_index(datenums, datenums[0] + time_delta)
            timedata = datenums[0:ref_index]
            fluxdata = fluxes[key][0:ref_index]
            (ar, br) = polyfit(timedata, fluxdata, 1)
            guess[key] = polyval([ar, br], datenum)
        else:
            ref_index = nearest_index(datenums, datenum)
            max_index = len(datenums) - 1
            first_index = max(ref_index - 4, 0)
            last_index = min(ref_index + 4, max_index)
            timedata = datenums[first_index:last_index]
            fluxdata = fluxes[key][first_index:last_index]
            (ar, br) = polyfit(timedata, fluxdata, 1)
            guess[key] = polyval([ar, br], datenum)
    # Now we (inter/extra)polate frequency
    freqs = []
    fluxdata = []
    for key in list(times.keys()):
        freqs.append(float(key))
        fluxdata.append(guess[key])
    (ar, br) = polyfit(freqs, fluxdata, 1)
    return polyval([ar, br], freq)
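A hypothetical call, assuming that array, polyfit and polyval come from numpy, that table_links maps source names to entries whose second element is accepted by get_flux_data, and that dates are matplotlib datenums; the source name and frequency below are invented for illustration:

import datetime
from matplotlib.dates import date2num

when = date2num(datetime.datetime(2015, 3, 1))
flux = polate_flux('J0137+3309', when, 8.4)   # frequency in the table's units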
Example 3
 def set_averaging(self, num, no_smear=False, min_rms=False, most=False):
   """
   Selects the averaging option for power meter readings
   
   NOT FINISHED
   
    Sets the number of samples to average.  If no keyword argument is given,
    the averaging option chosen is the one whose number of averaged samples
    is closest to num.
   
   The acquisition speed and filter should be set in such a way that at least
   one full period of the modulation signal is measured. At 1Msps, the filter
   should be set to 5 or higher, which results in 1000 or more samples. At
   lower sampling speeds, for example 100ksps, the filter should be set to 3
   or higher to measure at least one full period of the envelope signal. 
   
   @param num : number of samples to average; calculate if 0
   @type  num : int
   
   @param no_smear : if True, num is the largest < reading_time/sampling_time
   @type  no_smear : bool
   
   @param min_rms : if True, num is the smallest > reading_time/sampling_time
   @type  min_rms : bool
   
   @param most: largest number of samples available
   @type  most: bool
   
   @return num_averaged
   """
   if self.model == 'RPR2006C':
      if num > 5000:
        raise RadipowerError(num, "averages is greater than 5000")
     elif num > 0:
       read_speed = self.get_read_speed() # ms
       self.logger.debug("set_averaging: read speed = %6.3f", read_speed)
       if read_speed < 0.5 :
         response = self.ask("BAUD 3")           # 0.3 ms/sample
       elif read_speed < 1:
         response = self.ask("BAUD 2")           # 0.6 ms/sample
       else:
         response = self.ask("BAUD 1")           # 1.25 ms/sample
      elif num == 0:
        read_speed = self.get_read_speed() # ms
        samples_per_read = read_speed*20
       bounds = self.filter_bounds(samples_per_read)
       if most:
         response = self.ask("FILTER 7")
       elif no_smear:
         response = self.ask("FILTER "+str(bounds[0]))
       elif min_rms:
         response = self.ask("FILTER "+str(bounds[1]))
        else:
          # 'num' is 0 in this branch, so this default case is incomplete
          # (the docstring flags this method as NOT FINISHED)
          response = self.ask("FILTER "+str(nearest_index(array(bounds), num)))
   elif self.model == 'RPR1018A':
      if num > 1000:
        raise RadipowerError(num, "averages is greater than 1000")
     elif num > 0:
       read_speed = self.get_read_speed() # ms
        if read_speed < 1:
          pass # TODO: filter selection for the RPR1018A is not implemented
    return self.get_averaging()
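The sampling guidance in the docstring is plain arithmetic: the averaged samples must span at least one full period of the modulation envelope. A small sketch of that check; the 1 kHz envelope frequency is an assumed example, chosen because 1 Msps / 1 kHz gives the 1000 samples the docstring mentions:

import math

def min_samples_for_envelope(mod_freq_hz, sample_rate_sps):
    # Samples needed to cover one full period of the modulation signal.
    return math.ceil(sample_rate_sps / mod_freq_hz)

print(min_samples_for_envelope(1000, 1000000))   # 1000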
Example 4
 def fit_gaussian(self, beam_limit=2.5):
     """
 Extract the appropriate data
 
 For raster scans, ``xdec`` means that ``xdec`` stays fixed while the
 antenna moves up and down; ``dec`` means that 'dec' stays fixed while the
 left and right.
     
 The Gaussian is assumed to fit the inner five beamwidths of the data,
 though that limit can be adjusted.  The baseline is the rest of the data,
 although the lower baseline includes at least ``data[:5]`` and the upper
 baseline includes ``data[-5:]``
 """
     self.logger.debug("fit_gaussian: direction is %s", self.axis)
     if self.axis.lower() == 'xdec':
         x = NP.array(self.ddecs)
     else:
         x = NP.array(self.dxdecs)
     self.logger.debug("fit_gaussian: selected x: %s", x)
     # define the domain of the Gaussian fit:
     beam_index = NP.array(self.data).argmax()
     self.logger.debug("fit_gaussian: peak at index %d", beam_index)
     beam_center = x[beam_index]
     self.logger.debug("fit_gaussian: peak at x = %f", beam_center)
     beamwidth = DSS28_beamwidth(self.freq / 1000)
     self.logger.debug("fit_gaussian: beamwidth = %f deg", beamwidth)
     lower_limit = beam_center - beam_limit * beamwidth
     upper_limit = beam_center + beam_limit * beamwidth
     self.logger.debug("fit_gaussian: center left: %f", lower_limit)
     self.logger.debug("fit_gaussian: center right: %f", upper_limit)
     # Define baseline ranges for the lower end and the upper end of the spectrum
     #  * 'lower_baseline' and 'upper_baseline' are 2-item lists
     #  * assume that there are at least 5 data points for each baseline section
     if x[0] < x[-1]:  # increasing X-coordinate
         # scans go from low sample to high sample
         if lower_limit < x[5]:  # lower baseline segment
             lower_baseline = [0, 5]
         else:
             lower_baseline = [0, support.nearest_index(x, lower_limit)]
         if upper_limit > x[-5]:  # upper baseline segment
             upper_baseline = [-6, -1]
         else:
             upper_baseline = [support.nearest_index(x, upper_limit), -1]
     else:
         # scans go from high sample to low sample
          if upper_limit > x[5]:  # upper baseline segment
              upper_baseline = [0, 5]
          else:
              upper_baseline = [0, support.nearest_index(x, upper_limit)]
          if lower_limit < x[-5]:  # lower baseline segment
              lower_baseline = [-6, -1]
          else:
              lower_baseline = [support.nearest_index(x, lower_limit), -1]
     self.logger.debug("fit_gaussian: lower baseline: %s", lower_baseline)
     self.logger.debug("fit_gaussian: upper baseline: %s", upper_baseline)
     # define the baseline data
     xdata = NP.append(x[lower_baseline[0]:lower_baseline[1]],
                       x[upper_baseline[0]:upper_baseline[1]]).astype(float)
     ydata = NP.append(
         self.tsys[lower_baseline[0]:lower_baseline[1]],
         self.tsys[upper_baseline[0]:upper_baseline[1]]).astype(float)
     #   Fit baseline
      self.baseline_pars = NP.polyfit(xdata, ydata, 1)
     self.logger.debug("fit_gaussian: baseline parameters: %s",
                       self.baseline_pars)
     #   Fit the beam
     zdata = NP.array(self.tsys).astype(float)
     self.logger.debug("fit_gaussian: zdata: %s", zdata)
      height = zdata[beam_index] - NP.polyval(self.baseline_pars,
                                              x[beam_index])
     self.logger.debug("fit_gaussian: height: %s", height)
     sigma = Mlsq.st_dev(beamwidth)
     initial_guess = [height, beam_center, sigma]
     # in this case we only fit out to one beamwidth
      if x[0] < x[-1]:
          first = support.nearest_index(x, beam_center - beamwidth)
          last = support.nearest_index(x, beam_center + beamwidth)
      else:
          first = support.nearest_index(x, beam_center + beamwidth)
          last = support.nearest_index(x, beam_center - beamwidth)
      xfit = x[first:last]
      y = zdata[first:last]
      self.pars, err = Mlsq.fit_gaussian(
          Mlsq.gaussian_error_function, initial_guess, xfit,
          y - NP.polyval(self.baseline_pars, xfit))
     return self.baseline_pars, self.pars, err
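DSS28_beamwidth and Mlsq.st_dev are not shown on this page. A plausible sketch under stated assumptions: the beamwidth from the usual 1.22*lambda/D diffraction estimate, taking the argument as a frequency in GHz (the callers divide an MHz value by 1000) and DSS-28 as a 34 m antenna, though the real function's coefficient may differ; and sigma from the standard FWHM-to-sigma conversion:

import math

def DSS28_beamwidth(freq_GHz, diameter_m=34.0):
    # Hypothetical stand-in: half-power beamwidth ~ 1.22 * lambda / D, in degrees.
    wavelength_m = 0.299792458 / freq_GHz          # c [m/ns] / f [GHz]
    return math.degrees(1.22 * wavelength_m / diameter_m)

def st_dev(fwhm):
    # Gaussian sigma from full width at half maximum: FWHM = 2*sqrt(2 ln 2)*sigma.
    return fwhm / (2.0 * math.sqrt(2.0 * math.log(2.0)))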
Example 5
def extract_data(datatype, wb, start, stop, meta_column, files):
  """
  Extract map from the data files and create/fill a metadata worksheet

  First, the first t-file is loaded to get the start and stop times, which
  are used to build the metadata sheet name.  If the sheet exists it is
  loaded; otherwise it is created, starting from a copy of the session
  metadata sheet.  'create_metadata_sheet' adds additional empty columns.
  Then for each row the appropriate t-file is read and the metadata are
  obtained.

  @param datatype : 'map', 'time-series', 'boresight', or None
  @type  datatype : str

  @param wb : observation spreadsheet
  @type  wb : workbook instance

  @param start : datetime of first data point
  @type  start : mpl datenum

  @param stop : datetime of last data point
  @type  stop : mpl datenum

  @param meta_column : maps metadata labels to worksheet column numbers
  @type  meta_column : dict

  @param files : t-file paths
  @type  files : list of str

  @return: metadata worksheet instance
  """
  # Get metadata from first data file
  data, labels, date_nums, ras, decs, azs, elevs, tsys = \
    load_tfile_data(files[0])
  # get the index for the start time and the end time
  start_hr  = num2date(start).hour
  start_min = num2date(start).minute
  stop_hr   = num2date(stop).hour
  stop_min  = num2date(stop).minute
  startstr = "%02d%02d" % (start_hr,start_min)
  stopstr  = "%02d%02d" % (stop_hr, stop_min)
  if datatype is None:
    sheetname = "Other-"
  elif datatype.lower() == 'map':
    sheetname = "Map-"
  elif datatype.lower() == 'time-series':
    sheetname = "Plot-"
  elif datatype.lower() == 'boresight':
    sheetname = "Bore-"
  else:
    sheetname = "Other-"
  sheetname += startstr+"-"+stopstr
  logger.debug("Looking for sheet %s", sheetname)
  metadata_sheet = wb.get_sheet_by_name(sheetname)
  if metadata_sheet is None:
    # Doesn't exist, make it.
    logger.debug("Attempting to create worksheet %s", sheetname)
    try:
      metasheet = wb.get_sheet_by_name('Metadata')
    except Exception as details:
      logger.error("Could not get spreadsheet metadata", exc_info=True)
      return None
    try:
      wb.add_sheet(copy.deepcopy(metasheet))
    except Exception as details:
      logger.error("Could not copy metadata sheet", exc_info=True)
      return None
    else:
      logger.debug("New sheets: %s", str(wb.get_sheet_names()))
      metadata_sheet = wb.get_sheet_by_name('Metadata')
      logger.debug("Active sheet: %s", metadata_sheet.title)
      metadata_sheet.title = sheetname
      logger.debug("Active sheet was renamed to %s", metadata_sheet.title)

    #logger.debug("Creating worksheet %s named %s",
    #               metadata_sheet,sheetname)
    #metadata_sheet = create_metadata_sheet(metadata_sheet,sheetname)
  else:
    logger.debug("%s already exists",metadata_sheet.title)
  for filename in files:
    bname = os.path.basename(filename)
    # find the row for this file or create it
    row = support.excel.get_row_number(metadata_sheet,meta_column['File'],bname)
    logger.debug("%s meta data will go into row %s", bname,row)
    if row is None:
      row = metadata_sheet.get_highest_row()
      logger.debug("%s meta data are not yet in %s", bname,sheetname)
      logger.debug("%s meta data will now go into row %d",bname,row)
    # get data for this file
    data, labels, date_nums, ras, decs, azs, elevs, tsys = \
          load_tfile_data(filename)
    start_index = support.nearest_index(date_nums,start)
    stop_index = support.nearest_index(date_nums,stop)
    if start_index == -1 or stop_index == -1:
      wb.remove_sheet(metadata_sheet)
      return None
    else:
      metadata_sheet.cell(row=row,
                        column=meta_column['File']).value = bname
      metadata_sheet.cell(row=row,
                        column=meta_column["First"]).value = start_index
      metadata_sheet.cell(row=row,
                        column=meta_column["Last"] ).value = stop_index
      metadata_sheet.cell(row=row,
                        column=meta_column['Start']).value = num2date(start)
      metadata_sheet.cell(row=row,
                        column=meta_column['Stop'] ).value = num2date(stop)
  set_column_dimensions(metadata_sheet)
  return metadata_sheet
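A hypothetical driver for extract_data, assuming an openpyxl-style workbook that contains the 'Metadata' template sheet the function copies, and matplotlib datenums for the times; the filename, column map, times, and t-file name below are all invented:

import datetime
from matplotlib.dates import date2num
import openpyxl

wb = openpyxl.load_workbook('session.xlsx')
meta_column = {'File': 1, 'First': 2, 'Last': 3, 'Start': 4, 'Stop': 5}
start = date2num(datetime.datetime(2014, 5, 10, 2, 15))
stop = date2num(datetime.datetime(2014, 5, 10, 3, 40))
sheet = extract_data('map', wb, start, stop, meta_column, ['t14130.1'])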
Example 6
 def fit_gaussian(self, channel, beam_limit=2.5):
   """
   Fit the scan to a Gaussian and a baseline
   
   Extract the appropriate data::
   
        For raster scans, 'xdec' means that 'xdec' stays fixed while the
        antenna moves up and down; 'dec' means that 'dec' stays fixed while
        the antenna moves left and right.
       
   The Gaussian is assumed to fit the inner five beamwidths of the data,
   though that limit can be adjusted.  The baseline is the rest of the data,
   although the lower baseline includes at least data[:5] and the upper
   baseline includes data[-5:]
   
   @param channel : channel whose data will be fit (required)
   @type  channel : int
   
   @param beam_limit : distance from the center included in Gaussian fit
    @type  beam_limit : float
    
    @return: (baseline parameters, Gaussian parameters, fit error)
    """
   self.logger.debug("fit_gaussian: direction is %s", self.axis)
   # get offsets if necessary
    if 'data' not in self.__dict__:
      self.get_offsets()
   # remember that GAVRT nomenclature seems backwards
   if self.axis.lower() == 'xdec':
     x = NP.array(self.data['dec_offset'])  # NP.array(self.ddecs)
   else:
     x = NP.array(self.data['xdec_offset']) # NP.array(self.dxdecs)
   self.logger.debug("fit_gaussian: selected x: %s", x)
   tsys = self.data['vfc_counts'][channel]
   # define the domain of the Gaussian fit:
   beam_index = tsys.argmax()                  # NP.array(self.data).argmax()
   self.logger.debug("fit_gaussian: peak at index %d", beam_index)
   beam_center = x[beam_index]
   self.logger.debug("fit_gaussian: peak at x = %f", beam_center)
   beamwidth = DSS28_beamwidth(self.data['freq'][channel]/1000)
   self.logger.debug("fit_gaussian: beamwidth = %f deg", beamwidth)
   lower_limit = beam_center - beam_limit*beamwidth # source scan starts here
   upper_limit = beam_center + beam_limit*beamwidth # source scan ends here
   self.logger.debug("fit_gaussian: scan lower limit: %f", lower_limit)
   self.logger.debug("fit_gaussian: scan upper limit: %f", upper_limit)
   # Define baseline ranges for the lower end and the upper end of the spectrum
   #  * 'lower_baseline' and 'upper_baseline' are 2-item lists
   #  * assume that there are at least 5 data points for each baseline section
   if x[0] < x[-1]: # increasing X-coordinate
     # scans go from low sample to high sample
     if lower_limit < x[5]: # source scan starts inside lower baseline segment
       lower_baseline = [0,5] # force 5 baseline points
     else:
       lower_baseline = [0, support.nearest_index(x, lower_limit)]
     if upper_limit > x[-5]: # source scan starts inside upper baseline segment
       upper_baseline = [-6,-1] # force 5 baseline points
     else:
       upper_baseline = [support.nearest_index(x, upper_limit), -1]
   else:
     # scans go from high sample to low sample
      if upper_limit > x[5]: # source scan starts inside upper baseline segment
        upper_baseline = [0, 5] # force 5 baseline points
      else:
        upper_baseline = [0, support.nearest_index(x, upper_limit)]
     if lower_limit < x[-5]:
       lower_baseline = [-6,-1]
     else:
       lower_baseline = [support.nearest_index(x,lower_limit), -1]
   self.logger.debug("fit_gaussian: lower baseline: %s", lower_baseline)
   self.logger.debug("fit_gaussian: upper baseline: %s", upper_baseline)
   # define the baseline data
   xdata = NP.append(x[lower_baseline[0]:lower_baseline[1]],
                     x[upper_baseline[0]:upper_baseline[1]]).astype(float)
   ydata = NP.append(tsys[lower_baseline[0]:lower_baseline[1]],
                     tsys[upper_baseline[0]:upper_baseline[1]]).astype(float)
   #   Fit baseline
   self.baseline_pars = NP.polyfit(xdata,ydata,1)
   self.logger.debug("fit_gaussian: baseline parameters: %s", self.baseline_pars)
   #   Fit the beam
   zdata = NP.array(tsys).astype(float)
   self.logger.debug("fit_gaussian: zdata: %s", zdata)
   height = zdata[beam_index] - NP.polyval(self.baseline_pars, x[beam_index])
   self.logger.debug("fit_gaussian: height: %s", height)
   sigma = Mlsq.st_dev(beamwidth)
   initial_guess = [height, beam_center, sigma]
   # in this case we only fit out to one beamwidth
    if x[0] < x[-1]:
      first = support.nearest_index(x, beam_center - beamwidth)
      last = support.nearest_index(x, beam_center + beamwidth)
    else:
      first = support.nearest_index(x, beam_center + beamwidth)
      last = support.nearest_index(x, beam_center - beamwidth)
    xfit = x[first:last]
    y = zdata[first:last]
   self.pars, err = Mlsq.fit_gaussian(Mlsq.gaussian_error_function,
                           initial_guess,
                           xfit,
                           y-NP.polyval(self.baseline_pars,xfit))
   return self.baseline_pars, self.pars, err
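The strategy in both fit_gaussian versions (a linear baseline fitted to the scan edges, then a Gaussian fitted to the baseline-subtracted center) can be reproduced without the Mlsq and support helpers. A self-contained sketch on synthetic data, using scipy.optimize.curve_fit in place of Mlsq.fit_gaussian:

import numpy as NP
from scipy.optimize import curve_fit

def gaussian(x, height, center, sigma):
    return height * NP.exp(-0.5 * ((x - center) / sigma) ** 2)

# synthetic scan: Gaussian source on a sloped baseline, plus noise
x = NP.linspace(-1.0, 1.0, 101)                  # scan offsets (deg)
rng = NP.random.default_rng(0)
tsys = (gaussian(x, 5.0, 0.1, 0.08) + 0.5 * x + 2.0
        + 0.05 * rng.standard_normal(x.size))

edges = NP.r_[0:5, -5:0]                         # 5 points from each scan end
baseline_pars = NP.polyfit(x[edges], tsys[edges], 1)
residual = tsys - NP.polyval(baseline_pars, x)
p0 = [residual.max(), x[residual.argmax()], 0.1] # height, center, sigma guess
pars, cov = curve_fit(gaussian, x, residual, p0=p0)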