def main(npyfile_1=None, npyfile_2=None, offset=None, work_dir=None, function=None, batchname=None, verbose=False):
  assert npyfile_1 and npyfile_2 and work_dir
  assert function in FUNCTIONS
  offset = int(offset)
  assert offset >= 0
  if batchname is None or batchname in ("None", "NONE", "none"):
    batchname = "%s_vs_%s_%s_%d" % \
      (os.path.basename(npyfile_1), os.path.basename(npyfile_2), function, offset)

  M1 = ma.load(npyfile_1) # these should be pickles
  M2 = ma.load(npyfile_2)
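  # numpy.ma.load is a thin pickle-based loader for MaskedArrays; it has been
  # removed from newer NumPy releases, where pickle.load(open(fname, 'rb'))
  # is the usual replacement.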
  assert offset < np.size(M1, 0)
  assert np.count_nonzero(np.isnan(M1.compressed())) == 0
  assert np.count_nonzero(np.isnan(M2.compressed())) == 0

  # Get batch function handler for this function.
  size = np.size(M2,0)
  F = FUNCTIONS[function](size)
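  # F appears to be a batch handler constructed with the number of pairs to
  # compute; it accumulates one result per pair via F.compute(...) and is
  # written out with F.save(...) below.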
  if verbose:
    print "Vebose: Try F on n=700 identity."
    print F.compute_one(np.arange(700), np.arange(700))

  # Compute pairs using batch handler `F`
  print "Starting to write %d pairs for %s" % (size, batchname)
  for i in xrange(size):
    if i % REPORT_N == 0:
      print "Generating pair %d (to %d) in %s..." % \
        (i, size, batchname)
    # Keep only samples (columns) that are unmasked in both rows.
    shared_mask = ~(M1[offset].mask | M2[i].mask)
    X, Y = M1[offset][shared_mask].data, M2[i][shared_mask].data
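    # Example: if M1[offset].mask == [False, True, False] and
    # M2[i].mask == [False, False, True], then shared_mask == [True, False, False]
    # and only the first column survives into X and Y.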
    assert np.size(X) == np.size(Y) <= np.size(M1,1)

    F.compute(X,Y,i)
    if verbose:
      d = F.get(i)
      s = " ".join(["%s=%f"%(k,v) for k,v in d.items()])
      print "%d: " % (i), s
    
  print "Computed %d pairs for %s using %s." % (n, batchname, function)
  n_nans = F.nans()
  print "%d nans" % (n_nans)
  if n_nans > 0:
    print "!!!WARNING: There exists at least one (%d) not-a-numbers (nans) in this batch." % (n_nans)

  out_names = F.save(work_dir, batchname)
  print "Saving %d results as:" % (n)
  for name, out_name in out_names.items():
    print "%s: %s" % (name, out_name)
 def __init__(self,
              rawspec,
              objectn='',
              objectd='',
              units=r'$\AA$',
              order=0,
              ref_fit=False):
     self.pick = 0
     self.order = order
     self.rawspec = ma.masked_array(rawspec)
     self.objectn = objectn
     self.objectd = objectd
     self.units = units
     self.norm = copy.deepcopy(self.rawspec)
     self.fit = copy.deepcopy(self.rawspec)
     self.ref_fit = ref_fit
     self.trimmed_spec = copy.deepcopy(self.rawspec)
     self.num_orders = len(self.rawspec)
     self.sm = [0] * self.num_orders
     self.fitpoints = [0] * self.num_orders
     self.spec_trim_points = [0] * self.num_orders
     self.fig1 = plt.figure(1)
     self.ax = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
     self.ax2 = self.fig1.add_subplot(3, 1, 3)
     self.cid = self.fig1.canvas.mpl_connect('button_press_event',
                                             self._click_event)
     self.cid2 = self.fig1.canvas.mpl_connect('key_press_event',
                                              self._key_press)
     fitted = [False] * self.num_orders
     trimmed = [False] * self.num_orders
     w_smoothed = [False] * self.num_orders
     show_ref = [0] * self.num_orders
      # read in a reference fit if supplied.
     self.state = {
         'fitted': fitted,
         'editting_fit': False,
         'w_smoothed': w_smoothed,
         'trimmed': trimmed,
         'edditing_trim': False,
         'trimming': False,
         'smoothed': False,
         'del_trim': False,
         'show_ref': show_ref,
         'has_ref_fit': False
     }
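      # ref_fit may be a path to a pickled reference fit (loaded below) or an
      # already-loaded fit / False.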
      if isinstance(self.ref_fit, str) and not self.state['has_ref_fit']:
          try:
              self.ref_fit = ma.load(open(self.ref_fit, 'rb'))
              self.state['has_ref_fit'] = True
          except Exception:
              print('Error: Could not read reference fit from file')
              self.state['has_ref_fit'] = False
     else:
         self.ref_fit = ref_fit
         self.state['has_ref_fit'] = False
 def read_pickle(self):
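      # Restore a previous session: normalized spectrum, fit, UI state, fit
      # points, and (if any orders were trimmed) the trimmed spectrum and its
      # trim points.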
     self.norm = ma.load(
         open(self.objectn + '-' + self.objectd + '-norm.p', 'rb'))
     self.fit = ma.load(
         open(self.objectn + '-' + self.objectd + '-fit.p', 'rb'))
     self.state = pickle.load(
         open(self.objectn + '-' + self.objectd + '-state.p', 'rb'))
     self.fitpoints = pickle.load(
         open(self.objectn + '-' + self.objectd + '-fitpoints.p', 'rb'))
     if True in self.state['trimmed']:
         self.rawspec = ma.load(
             open(self.objectn + '-' + self.objectd + '-trimmed-spec.p',
                  'rb'))
         self.spec_trim_points = pickle.load(
             open(self.objectn + '-' + self.objectd + '-spec-trim-points.p',
                  'rb'))
      # Reading the ref_fit has to come after reading self.state so that
      # has_ref_fit will be correct if normalization is started on a file that
      # was previously used with a ref_fit, but the ref_fit file was not
      # supplied when it was opened again.
     #if self.state['has_ref_fit'] == True:
     self.ref_fit = ma.load(
         open(self.objectn + '-' + self.objectd + '-fit-guide.p', 'rb'))
Example #6
#%%

#%%
for MONTH in [
        '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'
]:
    MMEAN = []
    for YEAR in np.arange(2003, 2017 + 1, 1):
        print YEAR
        if os.path.exists(path + '/' + str(YEAR) + '/monthly_mean_' + MONTH +
                          '.dat'):
            print MONTH
            fname = open(
                path + '/' + str(YEAR) + '/monthly_mean_' + MONTH + '.dat',
                'rb')
            dop = ma.load(fname)
            MMEAN.append(dop)

    if len(MMEAN) > 0:
        MMEAN = ma.average(MMEAN, axis=0)
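        # ma.average skips masked (missing) cells, so gaps in individual years
        # do not bias the multi-year monthly mean.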
        #prepare figure
        plt.close('all')
        plt.figure(figsize=(8, 10), dpi=defdpi)
        ax = plt.gca()

        # Do plot
        mymap.drawcoastlines(ax=ax, zorder=500)
        #mymap.fillcontinents('0.9',ax=ax,zorder=499)

        mymap.drawparallels(pars,
                            dashes=(1, 1),
Example #7
def main(npyfile=None, work_dir=None, function=None, n=None, start=None, end=None, batchname=None, verbose=False, *args, **kwds):
  
  assert npyfile and work_dir
  assert function in FUNCTIONS
  if start is None:
    start = 0
  else:
    start = int(start)
  if end is None:
    end = int(n) * (int(n) - 1) / 2
  else:
    end = int(end)
  n = int(n)
  assert n > 0 and start >= 0 and end > 0
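  # start/end index into the n*(n-1)/2 unordered row pairs of the n-row matrix.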
  
  if not os.path.exists(work_dir):
    print "WARNING: work_dir %s does not exist." % work_dir
    if verbose:
      print "Creating work_dir in verbose mode...."
      make_dir(work_dir)

  # If batchname not provided, compute default value. Use batchname in output file name.
  if batchname is None or batchname.lower() in ("false", "f", "none"):
    batchname = "%s_%s_%d_%d" % \
      (os.path.basename(npyfile), function, start, end)

  # Do not recreate existing batch output files.
  output_fname = os.path.join(work_dir, batchname+".npy")
  if os.path.exists(output_fname):
    print "%s already exists. Exiting..." % output_fname
    return 1
  
  # Load data file
  print "Loading %s..." % npyfile
  if npyfile.rpartition('.')[2].lower() == 'pkl':
    print "Loading as level 2 pickle"
    M = pickle.load(open(npyfile, 'rb'))
  else:
    print "Loading as level 0 numpy.MaskedArray pickle"
    M = ma.load(npyfile)

  # Get batch function handler for this function.
  size = end-start
  F = FUNCTIONS[function](size)
  if verbose:
    print "Vebose: Try F on n=700 identity."
    print F.compute_one(np.arange(700), np.arange(700))

  # Compute pairs using batch handler `F`
  print "Starting to write %d pairs for %s" % (size, batchname)
  for i, j in enumerate(xrange(start, end)):
    if i % REPORT_N == 0:
      print "Generating pair %d (to %d), (#%d of %d total) in %s..." % \
        (j, end-1, i+1, size, batchname)
    # Carefully check indexing schemes...
    x, y = inv_sym_idx(j, n)
    assert x >= 0 and y >= 0 and x < np.size(M,0) and y < np.size(M,0)
    idx_check = sym_idx(x,y,n)
    assert idx_check == j and idx_check >= start and idx_check < end
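    # sym_idx/inv_sym_idx are assumed to map between a pair (x, y), x < y, and
    # its linear index in the flattened upper triangle; a common convention is
    # j = x*n - x*(x+1)//2 + (y - x - 1), with inv_sym_idx as its inverse.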

    # Remove samples with at least one missing value.
    shared_mask = ~(M[x].mask | M[y].mask)
    X, Y = M[x][shared_mask].data, M[y][shared_mask].data
    assert np.size(X) == np.size(Y) <= np.size(M,1)

    F.compute(X,Y,i)
    if verbose:
      d = F.get(i)
      s = " ".join(["%s=%f"%(k,v) for k,v in d.items()])
      print "%d->%d: " % (i,j), s

  print "Computed %d pairs for %s" % (size, batchname)
  n_nans = F.nans()
  print "%d nans" % (n_nans)
  if n_nans > 0:
    print "!!!WARNING: There exists at least one (%d) not-a-numbers (nans) in this batch." % (n_nans)

  out_names = F.save(work_dir, batchname)
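  # Judging by the loop below, F.save writes the accumulated results under
  # work_dir and returns a mapping from result name to output filename.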
  print "Saved %d results, %d through %d, as %d matrices:" % (size, start, end-1, len(out_names))
  for name, out_name in out_names.items():
    print "%s: %s" % (name, out_name)
Example #8
    end = m*(m-1) / 2
  else:
    end = int(end)
  if start is None:
    start = 0
  else:
    start = int(start)
  assert start < end and start >= 0

  # if tmp directory does not exist, create it
  try:
    os.makedirs(TMP_DIR)
  except OSError, e:
    if e.errno != errno.EEXIST: raise
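  # Ignore the race where TMP_DIR already exists; re-raise any other OSError.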

  M = ma.load(npy_fname)

  if batchname is None or batchname in ("None", "NONE", "none"):
    batchname = "%s_%s_%d_%d" % \
      (os.path.basename(npy_fname), function, start, end)

  log_msg = LOG_MSG % {'npy_fname': npy_fname, 'function': function,
                       'start': start, 'end': end, 'm': m,
                       'date': datetime.datetime.now().isoformat(' ')}
  # Create output file in temporary directory
  output_fname = os.path.join(TMP_DIR, batchname+".txt")
  fp_out = open(output_fname, 'w')
  fp_out.write(log_msg + "\n")
  print "Started job...", log_msg
  
  f = FUNCTIONS[function]
  for i in xrange(start, end):