Example #1
    def SmoothData(self, numiters=10, lowreject=2, highreject=2):
        done = False
        data = self.smoothing_data.copy()
        iterations = 0
        while not done and iterations < numiters:
            iterations += 1
            done = True
            smoother = UnivariateSpline(data.x, data.y / data.cont, s=self.smoothing_factor)
            smoothed = DataStructures.xypoint(x=data.x)
            smoothed.y = smoother(smoothed.x)
            resid = data.y / data.cont - smoothed.y
            std = np.std(resid)
            badindices = np.where(np.logical_or(resid < -lowreject * std, resid > highreject * std))[0]
            # plt.figure(2)
            #plt.plot(data.x, resid, 'ro')
            #plt.plot(data.x, -lowreject*std*np.ones(data.x.size), 'b-')
            #plt.plot(data.x, highreject*std*np.ones(data.x.size), 'b-')
            #plt.show()
            if badindices.size > 0 and data.size() - badindices.size > 10:
                done = False
                data.x = np.delete(data.x, badindices)
                data.y = np.delete(data.y, badindices)
                data.cont = np.delete(data.cont, badindices)

        return DataStructures.xypoint(x=self.smoothing_data.x, y=smoother(self.smoothing_data.x))
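
A minimal, self-contained sketch of the same iterative sigma-clipping spline fit, using plain NumPy arrays instead of the xypoint container (the synthetic data and the smoothing parameter s below are illustrative assumptions):

import numpy as np
from scipy.interpolate import UnivariateSpline

def sigma_clip_spline(x, y, numiters=10, lowreject=2, highreject=2, s=None):
    # Fit a smoothing spline, drop points deviating by more than
    # lowreject/highreject standard deviations, and repeat until
    # nothing is rejected or the iteration limit is reached.
    x, y = x.copy(), y.copy()
    for _ in range(numiters):
        smoother = UnivariateSpline(x, y, s=s)
        resid = y - smoother(x)
        std = np.std(resid)
        bad = np.where((resid < -lowreject * std) | (resid > highreject * std))[0]
        if bad.size == 0 or x.size - bad.size <= 10:
            break
        x, y = np.delete(x, bad), np.delete(y, bad)
    return smoother

x = np.linspace(0, 10, 500)
y = np.sin(x) + np.random.normal(0, 0.05, x.size)
y[::50] += 1.0                        # inject a few outliers
fit = sigma_clip_spline(x, y, s=0.5)
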
Example #2
 def GetSpectrum(self, T, metal, logg, vmicro):
     #Make sure it is not already read in
     if self.read_dict[T][metal][logg][vmicro]:
         if self.debug:
              print("Model already read. Skipping...")
         return self.models[T][metal][logg][vmicro]
     elif self.model_dict[T][metal][logg][vmicro] == '':
         if self.debug:
              print("No model found with: \n\tT=%f\n\t[Fe/H]=%f\n\tlog(g)=%f\n\tvmicro=%f" % (T, metal, logg, vmicro))
         raise ValueError
     else:
         #If we get here, the model does exist and has not yet been read in.
         if self.debug:
              print("Reading model with: \n\tT=%f\n\t[Fe/H]=%f\n\tlog(g)=%f\n\tvmicro=%f" % (T, metal, logg, vmicro))
         fluxfile = self.model_dict[T][metal][logg][vmicro]
         contfile = fluxfile.replace("vis.7", "vis.17")
         x, y = np.loadtxt(fluxfile, usecols=(0, 1), unpack=True)
         x2, c = np.loadtxt(contfile, usecols=(0, 1), unpack=True)
         flux = DataStructures.xypoint(x=x * units.angstrom.to(units.nm), y=y)
         cont = DataStructures.xypoint(x=x2 * units.angstrom.to(units.nm), y=c)
         flux = RebinData(flux, self.xgrid)
         cont = RebinData(cont, self.xgrid)
         model = DataStructures.xypoint(x=self.xgrid, y=flux.y, cont=cont.y)
         self.read_dict[T][metal][logg][vmicro] = True
         self.models[T][metal][logg][vmicro] = model
         return model
Example #3
def MakeXYpoints(datafile, extensions=False, x=None, y=None, cont=None, errors=None):
    print("Reading in file %s: " % datafile)

    orders = []
    if extensions:
        #This means the data is in fits extensions, with one order per extension
        #At least x and y should be given (and should be strings to identify the field in the table record array)
        if type(x) != str:
            x = input("Give name of the field which contains the x array: ")
        if type(y) != str:
            y = input("Give name of the field which contains the y array: ")
        hdulist = pyfits.open(datafile)
        if cont is None:
            if errors is None:
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y))
                    orders.append(xypt)
            else:
                if type(errors) != str:
                    errors = input("Give name of the field which contains the errors/sigma array: ")
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), err=data.field(errors))
                    orders.append(xypt)
        elif type(cont) == str:
            if errors is None:
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), cont=data.field(cont))
                    orders.append(xypt)
            else:
                if type(errors) != str:
                    errors = input("Give name of the field which contains the errors/sigma array: ")
                for i in range(1, len(hdulist)):
                    data = hdulist[i].data
                    xypt = DataStructures.xypoint(x=data.field(x), y=data.field(y), cont=data.field(cont),
                                                  err=data.field(errors))
                    orders.append(xypt)
        hdulist.close()

    else:
        #Data is in a big array.
        hdulist = pyfits.open(datafile)
        data = hdulist[0].data
        hdulist.close()
        numorders = data.shape[0]
        for i in range(numorders):
            wave = data[..., 0][i]
            flux = data[..., 1][i]
            error = np.ones(wave.size) * 1e9
            error[flux > 0] = np.sqrt(flux[flux > 0])
            orders.append(DataStructures.xypoint(x=wave, y=flux, err=error))

    return orders
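
pyfits has since been absorbed into astropy; a hedged sketch of the same extension-per-order reading pattern with astropy.io.fits (the file name and field names below are assumptions, not part of the original code):

import numpy as np
from astropy.io import fits

orders = []
with fits.open('spectrum.fits') as hdulist:      # hypothetical input file
    for hdu in hdulist[1:]:                      # one spectral order per extension
        wave = hdu.data['wavelength']            # assumed field names
        flux = hdu.data['flux']
        orders.append(np.column_stack((wave, flux)))
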
Example #4
def OptimalSmooth(order, normalize=True):
  """
    Determine the best window size with cross-validation
  """

  #Flatten the spectrum
  order.y /= order.cont/order.cont.mean()
  order.err /= order.cont/order.cont.mean()

  #Remove outliers (telluric residuals)
  smoothed = SmoothData(order, windowsize=41, normalize=False)
  temp = smoothed.copy()
  temp.y = order.y/smoothed.y
  temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
  outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
  data = order.copy()
  if len(outliers) > 0:
    #order.y[outliers] = order.cont[outliers]
    order.y[outliers] = smoothed.y[outliers]
    order.err[outliers] = 9e9

  #Make cross-validation sets
  inp = np.transpose((order.x, order.err, order.cont))
  X_train, X_test, y_train, y_test = cross_validation.train_test_split(inp, order.y, test_size=0.2)
  X_train = X_train.transpose()
  X_test = X_test.transpose()
  sorter_train = np.argsort(X_train[0])
  sorter_test = np.argsort(X_test[0])
  training = DataStructures.xypoint(x=X_train[0][sorter_train], y=y_train[sorter_train], err=X_train[1][sorter_train], cont=X_train[2][sorter_train])
  validation = DataStructures.xypoint(x=X_test[0][sorter_test], y=y_test[sorter_test], err=X_test[1][sorter_test], cont=X_test[2][sorter_test])

  """
  #Try each smoothing parameter
  s_array = np.logspace(-3, 1, 100)
  chisq = []
  for s in s_array:
    fcn = smooth(training.x, training.y, w=1.0/training.err, s=s)
    prediction = fcn(validation.x)
    chisq.append(cost(validation.y, prediction, validation.err))
    print s, chisq[-1]


  idx = np.argmin(np.abs(np.array(chisq) - 1.0))
  s = s_array[idx]
  """

  s = 0.9*order.size()
  smoothed = order.copy()
  fcn = smooth(smoothed.x, smoothed.y, w=1.0/smoothed.err, s=s)
  smoothed.y = fcn(smoothed.x)
  plt.plot(order.x, order.y)
  plt.plot(smoothed.x, smoothed.y)
  plt.show()
  return smoothed, s
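
The commented-out block above picks the spline smoothing parameter whose validation chi-squared is closest to 1. Note that the sklearn.cross_validation module used above has since been removed; its replacement is sklearn.model_selection. A minimal sketch of the same idea on synthetic data (all values illustrative):

import numpy as np
from scipy.interpolate import UnivariateSpline
from sklearn.model_selection import train_test_split

x = np.linspace(0, 10, 1000)
sigma = 0.1
y = np.sin(x) + np.random.normal(0, sigma, x.size)
x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.2)
srt = np.argsort(x_tr)                         # splines require sorted x
x_tr, y_tr = x_tr[srt], y_tr[srt]

best_s, best_dist = None, np.inf
for s in np.logspace(-2, 2, 30):
    fcn = UnivariateSpline(x_tr, y_tr, s=s)
    chisq = np.mean(((fcn(x_te) - y_te) / sigma) ** 2)
    if abs(chisq - 1.0) < best_dist:           # reduced chi^2 closest to 1 wins
        best_s, best_dist = s, abs(chisq - 1.0)
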
Example #5
def fix_chip_wavelength(model_orders, data_orders, band_cutoff=1870):
    """ Adjust the wavelength in data_orders to be self-consistent
    """
    # H band
    model_orders_H = [o.copy() for o in model_orders if o.x[-1] < band_cutoff]
    data_orders_H = [o.copy() for o in data_orders if o.x[-1] < band_cutoff]
    ordernums_H = 121.0 - np.arange(len(model_orders_H))
    p_H = fit_wavelength(model_orders_H, ordernums_H, first_order=3, last_order=len(ordernums_H) - 4)

    # K band
    model_orders_K = [o.copy() for o in model_orders if o.x[-1] > band_cutoff]
    data_orders_K = [o.copy() for o in data_orders if o.x[-1] > band_cutoff]
    ordernums_K = 92.0 - np.arange(len(model_orders_K))
    p_K = fit_wavelength(model_orders_K, ordernums_K, first_order=7, last_order=len(ordernums_K) - 4)

    new_orders = []
    for i, order in enumerate(data_orders):
        pixels = np.arange(order.size(), dtype=float)
        if order.x[-1] < band_cutoff:
            # H band
            ordernum = ordernums_H[i] * np.ones_like(pixels)
            wave = p_H(pixels, ordernum) / ordernum
        else:
            # K band
            ordernum = ordernums_K[i-len(ordernums_H)] * np.ones_like(pixels)
            wave = p_K(pixels, ordernum) / ordernum
            
        new_orders.append(DataStructures.xypoint(x=wave, y=order.y, cont=order.cont, err=order.err))
    return new_orders
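
fit_wavelength is defined elsewhere in this codebase, but the pattern of fitting p(pixel, m) and then dividing by the order number m follows from the grating equation: in an echelle spectrograph the product m*lambda varies smoothly with pixel and order. A hedged sketch of that idea with a 2D Chebyshev fit (all numbers below are made up for illustration):

import numpy as np
from numpy.polynomial import chebyshev as cheb

npix, orders = 2048, np.array([121.0, 120.0, 119.0])
pix = np.tile(np.arange(float(npix)), orders.size)
m = np.repeat(orders, npix)
wave = 1.7e5 / m + 2e-3 * pix / m            # fake per-order wavelength solutions

# Fit m*wave as a smooth 2D function of (pixel, order number) ...
A = cheb.chebvander2d(pix, m, [3, 2])
coeffs, *_ = np.linalg.lstsq(A, m * wave, rcond=None)
# ... then evaluate and divide by m to recover wavelength, as above.
wave_fit = (A @ coeffs) / m
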
Example #6
    def Interpolate_Old(self, dictionary, SpT):
        #First, we must convert the relations above into a monotonically increasing system
        #Just add ten when we get to each new spectral type
        relation = DataStructures.xypoint(len(dictionary))

        # Strip the spectral type of the luminosity class information
        SpT = re.search('[A-Z]([0-9]\.?[0-9]*)', SpT).group()

        xpoints = []
        ypoints = []
        for key in dictionary:
            #Convert key to a number
            xpoints.append(self.SpT_To_Number(key))
            ypoints.append(dictionary[key])

        sorting_indices = [i[0] for i in sorted(enumerate(xpoints), key=lambda x: x[1])]
        for index in range(len(dictionary)):
            i = sorting_indices[index]
            relation.x[index] = xpoints[i]
            relation.y[index] = ypoints[i]

        RELATION = UnivariateSpline(relation.x, relation.y, s=0)

        spnum = self.SpT_To_Number(SpT)
        if spnum > 0:
            return RELATION(spnum)
        else:
            return np.nan
Example #7
def CombineIntervals(intervals, overlap=0):
    iteration = 0
    print("\n\n")
    for interval in intervals:
        lastindex = interval.x.size - overlap

        if iteration == 0:
            firstindex = 0
            master_x = interval.x[firstindex:lastindex]
            master_y = interval.y[firstindex:lastindex]
            master_cont = interval.cont[firstindex:lastindex]
        else:
            firstindex = np.searchsorted(interval.x, master_x[-1]) + 1
            master_x = np.append(master_x, interval.x[firstindex:lastindex])
            master_y = np.append(master_y, interval.y[firstindex:lastindex])
            master_cont = np.append(master_cont, interval.cont[firstindex:lastindex])
        iteration += 1

    output = DataStructures.xypoint(master_x.size)
    output.x = master_x.copy()
    output.y = master_y.copy()
    output.cont = master_cont.copy()

    #Scale continuum so the highest normalized flux = 1.0
    maxindex = np.argmax(output.y / output.cont)
    factor = output.y[maxindex] / output.cont[maxindex]
    output.cont *= factor

    return output
Example #8
    def __getitem__(self, item):
        if item not in self.valid_keys:
            raise KeyError('{} not a valid item for CCFContainer!'.format(item))

        if item == 'ml' and self.ml is not None:
            return DataStructures.xypoint(x=self.x, y=self.ml)
        elif item == 'dc' and self.dc is not None:
            return DataStructures.xypoint(x=self.x, y=self.dc)
        elif item == 'simple' and self.simple is not None:
            return DataStructures.xypoint(x=self.x, y=self.simple)
        elif item == 'weighted' and self.weighted is not None:
            return DataStructures.xypoint(x=self.x, y=self.weighted)
        elif item == 'simple-weighted' and self.simple_weighted is not None:
            return DataStructures.xypoint(x=self.x, y=self.simple_weighted)

        return None  # We should never get here...
Example #9
    def ConvolveSmooth(self, numiters=10, lowreject=2, highreject=3):
        done = False
        data = self.smoothing_data.copy()
        # data.y /= data.cont
        iterations = 0
        if self.window_size % 2 == 0:
            self.window_size += 1

        while not done and iterations < numiters:
            iterations += 1
            done = True
            y = FittingUtilities.savitzky_golay(data.y, self.window_size, 5)
            #s = np.r_[data.y[self.window_size/2:0:-1], data.y, data.y[-1:-self.window_size/2:-1]]
            #y = np.convolve(window/window.sum(), s, mode='valid')

            reduced = data.y / y
            sigma = np.std(reduced)
            mean = np.mean(reduced)
            badindices = np.where(np.logical_or((reduced - mean) / sigma < -lowreject,
                                                (reduced - mean) / sigma > highreject))[0]
            if badindices.size > 0:
                done = False
                data.y[badindices] = y[badindices]

        return DataStructures.xypoint(x=self.smoothing_data.x, y=y / self.smoothing_data.cont)
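
FittingUtilities.savitzky_golay is this codebase's own helper; the same filter ships with SciPy as scipy.signal.savgol_filter. A small sketch of the smoothing step alone, with synthetic data and the window/polynomial order taken from the code above:

import numpy as np
from scipy.signal import savgol_filter

x = np.linspace(0, 1, 501)
y = np.sin(20 * x) + np.random.normal(0, 0.1, x.size)
window_size = 41                       # must be odd, as enforced above
smoothed = savgol_filter(y, window_size, polyorder=5)
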
Example #10
def main2():
    vsini = 250
    fname = "/Users/kgulliks/t15000g40a00p00_w300_700_r1.dat"
    x, y, c = np.loadtxt(fname, usecols=(0, 1, 2), unpack=True)
    import DataStructures
    import Broaden

    order = DataStructures.xypoint(x=x, y=y, cont=c)
    order = Broaden.RotBroad(order, vsini * units.km.to(units.cm))
    left = np.searchsorted(order.x, 4780)
    right = np.searchsorted(order.x, 4960)
    arr = order[left:right].toarray(norm=True)
    syn = s4.synthesis.Synplot(15000,
                               4.0,
                               idl="/Applications/itt/idl/bin/idl",
                               wstart=order.x[left],
                               wend=order.x[right],
                               observ=arr,
                               relative=1,
                               vrot=vsini,
                               rv=83)
    syn.run()
    syn.plot()
    plt.figure(2)
    # spec = syn.spectrum
    #plt.plot(spec[:,0], spec[:,1])
    #plt.plot(arr[:,0], arr[:,1])
    plt.show()
Example #11
    def onclick(self, event):
        print(event.xdata, event.ydata)
        self.clicks.append((event.xdata, event.ydata))

        if len(self.clicks) < 2:
            return
        else:
            # Perform fit. Try just fitting splines?
            x1, y1 = self.clicks[0]  #Left-hand continuum
            x2, y2 = self.clicks[1]  #Right-hand continuum
            #x3, y3 = self.clicks[2]    #Line depth
            self.clicks = []
            left = np.searchsorted(self.current_order.x, x1)
            right = np.searchsorted(self.current_order.x, x2)
            y1 = np.median(self.current_order.y[max(0, left - 2):min(self.current_order.size(), left + 2)])
            y2 = np.median(self.current_order.y[max(0, right - 2):min(self.current_order.size(), right + 2)])
            cont = np.poly1d(np.polyfit((x1, x2), (y1, y2), 1))
            self.smoothing_data = DataStructures.xypoint(x=self.current_order.x[left:right],
                                                         y=self.current_order.y[left:right],
                                                         cont=cont(self.current_order.x[left:right]))
            self.smoothing_factor *= self.smoothing_data.size()
            smoothed = self.SmoothData()
            #smoothed = UnivariateSpline(data.x, data.y/data.cont, s=6e-4 ) #np.median(data.y)/10000.0)
            #mean = data.x.mean()
            mean = 0.0
            #smoothed = np.poly1d(np.polyfit(data.x - mean, data.y/data.cont, 7) )

            self.PlotArrays(((self.smoothing_data.x, self.smoothing_data.y / self.smoothing_data.cont, "Data"),
                             (smoothed.x, smoothed.y, "Smoothed")), self.fitaxis)
            #plt.show()

            return
Example #12
 def GetSpectrum(self, T, logg, metal):
     retval = self.ReadFile(T, logg, metal)
     if retval == 0:
         wave = self.read_dict[T][metal][0]
         flux = self.read_dict[T][metal][1][logg]
         return DataStructures.xypoint(x=wave, y=flux)
     else:
         return retval
Example #13
def main1():
    model_dir = "%s/School/Research/Models/Sorted/Stellar/Vband/" % (os.environ["HOME"])
    modelfile = "%slte86-4.00+0.0-alpha0.KURUCZ_MODELS.dat.sorted" % model_dir
    threshold = 0.90
    print("Reading stellar model")
    x, y = np.loadtxt(modelfile, usecols=(0, 1), unpack=True)
    x *= units.angstrom.to(units.nm)
    y = 10 ** y
    model = DataStructures.xypoint(x=x, y=y)
    model.cont = FittingUtilities.Continuum(model.x, model.y, fitorder=21, lowreject=1.5, highreject=20)
    plt.plot(model.x, model.y)
    plt.plot(model.x, model.cont)
    plt.show()

    print("Finding lines")
    linepoints = np.where(model.y / model.cont < threshold)[0]
    points = []
    lines = []
    strengths = []
    for line in linepoints:
        # print len(points)
        if len(points) == 0 or int(line) - 1 == points[-1]:
            points.append(int(line))
        else:
            index = int(np.median(points) + 0.5)
            if len(points) > 1:
                minindex = model.y[points[0]:points[-1]].argmin() + points[0]
            else:
                minindex = points[0]
            lines.append(model.x[minindex])
            yval = model.y[minindex] / model.cont[minindex]
            strengths.append(yval)
            points = [int(line), ]

    """
    #Make sure there are no points too close to each other
    tol = 0.05
    lines = sorted(lines)
    for i in range(len(lines) - 2, 0, -1):
      if np.abs(lines[i] - lines[i-1]) < tol:
        del lines[i]
        del lines[i-1]
      elif np.abs(lines[i] - lines[i+1]) < tol:
        del lines[i+1]
        del lines[i]
      else:
        index = np.searchsorted(x,lines[i]) - 1
        yval = trans[index]
        plt.plot((lines[i], lines[i]), (yval-0.05, yval-0.1), 'r-')
    """
    plt.plot(model.x, model.y / model.cont, 'k-')
    for line in lines:
        idx = np.searchsorted(model.x, line)
        x = model.x[idx]
        y = model.y[idx] / model.cont[idx]
        plt.plot([x, x], [y - 0.05, y - 0.1], 'r-')
    plt.show()
    np.savetxt("Linelist3.dat", np.transpose((lines, strengths)), fmt="%.8f")
Example #14
def Broaden2(model, vsini, intervalsize=50.0, epsilon=0.5, linear=False, findcont=False):
    """
      model:           input filename of the spectrum. The continuum data is assumed to be in filename[:-1]+".17"
                       model can also be a DataStructures.xypoint containing the already-read model (must include continuum!)
      vsini:           the velocity (times sin(i) ) of the star
      intervalsize:    The size (in nm) of the interval to use for broadening. Since it depends on wavelength, you don't want to do it all at once
      epsilon:          Linear limb darkening. I(u) = 1-epsilon + epsilon*u
      linear:          flag for if the x-spacing is already linear. If true, we don't need to make UnivariateSplines and linearize
      findcont:        flag to decide if the continuum needs to be found
    """

    if type(model) == str:
        model = ReadFile(model)

    if not findcont:
        cont_fcn = UnivariateSpline(model.x, model.cont, s=0)

    if not linear:
        model_fcn = UnivariateSpline(model.x, model.y, s=0)
        x = np.linspace(model.x[0], model.x[-1], model.size())
        model = DataStructures.xypoint(x=x, y=model_fcn(x))
        if not findcont:
            model.cont = cont_fcn(model.x)
        else:
            model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)
    elif findcont:
        model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)


    #Convert to velocity space
    wave0 = model.x[model.size() // 2]
    model.x = constants.c.cgs.value * (model.x - wave0) / wave0

    #Make broadening profile
    left = np.searchsorted(model.x, -2 * vsini)
    right = np.searchsorted(model.x, 2 * vsini)
    profile = model[left:right]
    profile.y = np.zeros(profile.size())
    dv = profile.x / vsini
    indices = np.where(np.abs(dv) < 1.0)[0]
    profile.y[indices] = 1.0 / (vsini * (1 - epsilon / 3.0)) * (
        2 * (1 - epsilon) / np.pi * np.sqrt(1 - dv[indices] ** 2)
        + epsilon / 2.0 * (1 - dv[indices] ** 2))

    #Extend interval to reduce edge effects (basically turn convolve into circular convolution)
    before = model.y[-int(profile.size()):]
    after = model.y[:int(profile.size())]
    extended = np.append(np.append(before, model.y), after)

    if profile.size() % 2 == 0:
        left, right = int(profile.size() * 1.5), int(profile.size() * 1.5) - 1
    else:
        left, right = int(profile.size() * 1.5), int(profile.size() * 1.5)

    model.y = np.convolve(extended, profile.y / profile.y.sum(), mode="full")[left:-right]

    #Return back to wavelength space
    model.x = wave0 * (1 + model.x / constants.c.cgs.value)
    return model
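
The profile constructed above is the standard linear limb-darkened rotational broadening kernel, G(dv) = [2(1-eps)sqrt(1-(dv/vsini)^2)/pi + (eps/2)(1-(dv/vsini)^2)] / [vsini(1-eps/3)] for |dv| < vsini. A standalone sketch of the kernel on a velocity grid (cm/s, matching the constants.c.cgs usage above; the grid and vsini are illustrative):

import numpy as np

def rot_kernel(v, vsini, epsilon=0.5):
    # Gray-style rotational broadening profile with linear limb darkening.
    dv = v / vsini
    g = np.zeros_like(dv)
    inside = np.abs(dv) < 1.0
    g[inside] = (2 * (1 - epsilon) / np.pi * np.sqrt(1 - dv[inside] ** 2)
                 + epsilon / 2.0 * (1 - dv[inside] ** 2)) / (vsini * (1 - epsilon / 3.0))
    return g

v = np.linspace(-5e6, 5e6, 1001)         # velocity grid in cm/s
kernel = rot_kernel(v, vsini=1.5e6)      # vsini = 15 km/s
kernel /= kernel.sum()                   # normalize before convolving
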
Example #15
def HighPassFilter(data, vel, width=5, linearize=False):
    """
    Function to apply a high-pass filter to data.
      Data must be in an xypoint container, and have linear wavelength spacing
      vel is the width of the features you want to remove, in velocity space (in cm/s)
      width is how long it takes the filter to cut off, in units of wavenumber
    """

    if linearize:
        original_data = data.copy()
        datafcn = spline(data.x, data.y, k=3)
        errorfcn = spline(data.x, data.err, k=3)
        contfcn = spline(data.x, data.cont, k=3)
        linear = DataStructures.xypoint(data.x.size)
        linear.x = np.linspace(data.x[0], data.x[-1], linear.size())
        linear.y = datafcn(linear.x)
        linear.err = errorfcn(linear.x)
        linear.cont = contfcn(linear.x)
        data = linear

    # Figure out cutoff frequency from the velocity.
    featuresize = 2 * data.x.mean() * vel / constants.c.cgs.value  # vel MUST be given in units of cm
    dlam = data.x[1] - data.x[0]  # data.x MUST have constant x-spacing
    Npix = featuresize / dlam

    nsamples = data.size()
    sample_rate = 1.0 / dlam
    nyq_rate = sample_rate / 2.0  # The Nyquist rate of the signal.
    width /= nyq_rate
    cutoff_hz = min(1.0 / featuresize, nyq_rate - width * nyq_rate / 2.0)  # Cutoff frequency of the filter

    # The desired attenuation in the stop band, in dB.
    ripple_db = 60.0

    # Compute the order and Kaiser parameter for the FIR filter.
    N, beta = kaiserord(ripple_db, width)
    if N % 2 == 0:
        N += 1

    # Use firwin with a Kaiser window to create a lowpass FIR filter.
    taps = firwin(N, cutoff_hz / nyq_rate, window=('kaiser', beta), pass_zero=False)

    # Extend data to prevent edge effects
    y = np.r_[data.y[::-1], data.y, data.y[::-1]]

    # Use lfilter to filter data with the FIR filter.
    smoothed_y = lfilter(taps, 1.0, y)

    # The phase delay of the filtered signal.
    delay = 0.5 * (N - 1) / sample_rate
    delay_idx = np.searchsorted(data.x, data.x[0] + delay)
    smoothed_y = smoothed_y[data.size() + delay_idx:-data.size() + delay_idx]
    if linearize:
        fcn = spline(data.x, smoothed_y)
        return fcn(original_data.x)
    else:
        return smoothed_y
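
A self-contained sketch of the Kaiser-window FIR design used above (kaiserord + firwin + lfilter); the sample rate, transition width, and cutoff below are illustrative, not taken from real data:

import numpy as np
from scipy.signal import kaiserord, firwin, lfilter

sample_rate = 100.0                          # samples per unit of x
nyq_rate = sample_rate / 2.0
N, beta = kaiserord(60.0, 5.0 / nyq_rate)    # 60 dB stop-band attenuation
if N % 2 == 0:
    N += 1                                   # odd length: required for a high-pass, integer delay
taps = firwin(N, 0.1, window=('kaiser', beta), pass_zero=False)   # cutoff relative to Nyquist

t = np.arange(0, 10, 1.0 / sample_rate)
y = np.sin(2 * np.pi * 0.5 * t) + 0.1 * np.sin(2 * np.pi * 20 * t)
filtered = lfilter(taps, 1.0, y)             # slow component removed; output delayed by (N-1)/2 samples
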
Example #16
    def __call__(self, T, logg, metal, alpha, return_xypoint=True):
        """
        Given parameters, return an interpolated spectrum

        If return_xypoint is False, then it will only return
          a numpy.ndarray with the spectrum

        Before interpolating, we will do some error checking to make
        sure the requested values fall within the grid
        """

        # Scale the requested values
        T = (T - self.T_scale[0]) / self.T_scale[1]
        logg = (logg - self.logg_scale[0]) / self.logg_scale[1]
        metal = (metal - self.metal_scale[0]) / self.metal_scale[1]
        alpha = (alpha - self.alpha_scale[0]) / self.alpha_scale[1]

        # Get the minimum and maximum values in the grid
        T_min = min(self.grid[:, 0])
        T_max = max(self.grid[:, 0])
        logg_min = min(self.grid[:, 1])
        logg_max = max(self.grid[:, 1])
        metal_min = min(self.grid[:, 2])
        metal_max = max(self.grid[:, 2])
        alpha_min = min(self.grid[:, 3])
        alpha_max = max(self.grid[:, 3])

        # Check to make sure the requested values fall within the grid
        if (
            T_min <= T <= T_max
            and logg_min <= logg <= logg_max
            and metal_min <= metal <= metal_max
            and alpha_min <= alpha <= alpha_max
        ):

            y = self.interpolator((T, logg, metal, alpha))
        else:
            warnings.warn("The requested parameters fall outside the model grid. Results may be unreliable!")
            print(T, T_min, T_max)
            print(logg, logg_min, logg_max)
            print(metal, metal_min, metal_max)
            print(alpha, alpha_min, alpha_max)
            y = self.NN_interpolator((T, logg, metal, alpha))

        # Test to make sure the result is valid. If the requested point is
        # outside the Delaunay triangulation, it will return NaN's
        if np.any(np.isnan(y)):
            warnings.warn("Found NaNs in the interpolated spectrum! Falling back to Nearest Neighbor")
            y = self.NN_interpolator((T, logg, metal, alpha))

        # Return the appropriate object
        if return_xypoint:
            return DataStructures.xypoint(x=self.xaxis, y=y)
        else:
            return y
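
A hedged sketch of the interpolator pair this class appears to rely on: SciPy's LinearNDInterpolator returns NaN outside the convex hull of the grid points, and NearestNDInterpolator is the natural fallback. The random grid and spectra below are placeholders:

import numpy as np
from scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator

grid = np.random.uniform(0, 1, size=(200, 4))        # scaled (T, logg, [Fe/H], alpha)
spectra = np.random.random((200, 1000))              # one spectrum per grid point
interpolator = LinearNDInterpolator(grid, spectra)
nn_interpolator = NearestNDInterpolator(grid, spectra)

point = (0.5, 0.5, 0.5, 0.5)
y = interpolator(point)
if np.any(np.isnan(y)):                              # outside the Delaunay triangulation
    y = nn_interpolator(point)
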
Example #17
 def _read_fits_file(self, fname):
     orders = []
     hdulist = fits.open(fname)
     for i, hdu in enumerate(hdulist[1:]):
         xypt = DataStructures.xypoint(x=hdu.data['wavelength'], 
                                       y=hdu.data['flux'], 
                                       cont=hdu.data['continuum'], 
                                       err=hdu.data['error'])
         xypt.x *= 10 #Convert from nanometers to angstrom
         orders.append(xypt)
     return orders
Example #18
def CombineXYpoints(xypts, snr=None, xspacing=None, numpoints=None, interp_order=3):
    """
    Function to combine a list of xypoints into a single
      xypoint. Useful for combining several orders/chips
      or for coadding spectra

    Warning! This function is basically untested!

      ***Optional keywords***
      snr: the spectra will be weighted by the signal-to-noise ratio
           before adding
      xspacing: the x-spacing in the final array
      numpoints: the number of points in the final array. If neither
                 numpoints nor xspacing is given, the x-spacing in the
                 final array will be determined by averaging the spacing
                 in each of the xypoints.
      interp_order: the interpolation order. Default is cubic
    """

    if snr is None or type(snr) != list:
        snr = [1.0] * len(xypts)

    # Find the maximum range of the x data:
    first = np.min([o.x[0] for o in xypts])
    last = np.max([o.x[-1] for o in xypts])
    avg_spacing = np.mean([(o.x[-1] - o.x[0]) / float(o.size() - 1) for o in xypts])

    if numpoints is None:
        if xspacing is None:
            xspacing = avg_spacing
        numpoints = int(round((last - first) / xspacing))
    x = np.linspace(first, last, numpoints)

    full_array = DataStructures.xypoint(x=x, y=np.zeros(x.size), err=np.zeros(x.size))
    numvals = np.zeros(x.size, dtype=float)  # The number of arrays each x point is in
    normalization = 0.0
    for xypt in xypts:
        #interpolator = ErrorPropagationSpline(xypt.x, xypt.y / xypt.cont, xypt.err / xypt.cont, k=interp_order)
        interpolator = spline(xypt.x, xypt.y/xypt.cont, k=interp_order)
        err_interpolator = spline(xypt.x, xypt.err/xypt.cont, k=interp_order)
        left = np.searchsorted(full_array.x, xypt.x[0])
        right = np.searchsorted(full_array.x, xypt.x[-1], side='right')
        if right < full_array.size():
            right += 1
        numvals[left:right] += 1.0
        val, err = interpolator(full_array.x[left:right]), err_interpolator(full_array.x[left:right])
        full_array.y[left:right] += val
        full_array.err[left:right] += err ** 2

    full_array.err = np.sqrt(full_array.err)
    full_array.y[numvals > 0] /= numvals[numvals > 0]
    return full_array
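
A compact NumPy-only sketch of the coadding logic above (interpolate each piece onto the common grid, accumulate, then divide by the per-pixel overlap count); the two toy "orders" are placeholders:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline

x1, x2 = np.linspace(500, 510, 300), np.linspace(508, 518, 300)   # overlapping grids
y1, y2 = np.ones_like(x1), np.ones_like(x2)
xspacing = np.mean([np.diff(x1).mean(), np.diff(x2).mean()])
x = np.arange(x1[0], x2[-1], xspacing)
total, nvals = np.zeros_like(x), np.zeros_like(x)
for xi, yi in [(x1, y1), (x2, y2)]:
    lo, hi = np.searchsorted(x, xi[0]), np.searchsorted(x, xi[-1], side='right')
    total[lo:hi] += spline(xi, yi, k=3)(x[lo:hi])
    nvals[lo:hi] += 1.0
total[nvals > 0] /= nvals[nvals > 0]
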
Example #19
def Process(filename, data, vsini, resolution):
    # Read in the model
    print("Reading in the input model from %s" % filename)
    x, y = np.loadtxt(filename, usecols=(0, 1), unpack=True)
    x = x * units.angstrom.to(units.nm)
    y = 10 ** y
    left = np.searchsorted(x, data[0].x[0] - 10)
    right = np.searchsorted(x, data[-1].x[-1] + 10)
    model = DataStructures.xypoint(x=x[left:right], y=y[left:right])


    #Linearize the x-axis of the model
    print("Linearizing model")
    xgrid = np.linspace(model.x[0], model.x[-1], model.size())
    model = FittingUtilities.RebinData(model, xgrid)


    #Broaden
    print("Rotationally broadening model to vsini = %g km/s" % (vsini * units.cm.to(units.km)))
    if vsini > 1.0 * units.km.to(units.cm):
        model = RotBroad.Broaden(model, vsini, linear=True)


    #Reduce resolution
    print("Convolving to the detector resolution of %g" % resolution)
    model = FittingUtilities.ReduceResolution(model, resolution)


    # Rebin subsets of the model to the same spacing as the data
    model_orders = []
    for i, order in enumerate(data):
        sys.stdout.write("\rGenerating model subset for order %i in the input data" % (i + 1))
        sys.stdout.flush()
        # Find how much to extend the model so that we can get maxvel range.
        dlambda = order.x[order.size() // 2] * maxvel * 1.5 / 3e5
        left = np.searchsorted(model.x, order.x[0] - dlambda)
        right = np.searchsorted(model.x, order.x[-1] + dlambda)

        # Figure out the log-spacing of the data
        start = np.log(order.x[0])
        end = np.log(order.x[-1])
        xgrid = np.logspace(start, end, order.size(), base=np.e)
        logspacing = np.log(xgrid[1] / xgrid[0])

        # Finally, space the model segment with the same log-spacing
        start = np.log(model.x[left])
        end = np.log(model.x[right])
        xgrid = np.exp(np.arange(start, end + logspacing, logspacing))

        segment = FittingUtilities.RebinData(model.copy(), xgrid)
        segment.cont = FittingUtilities.Continuum(segment.x, segment.y, lowreject=1.5, highreject=5, fitorder=2)
        model_orders.append(segment)

    return model_orders
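
The log-spaced grid built here gives constant velocity sampling: between neighbouring pixels, Delta(ln lambda) = dv/c, so every pixel spans the same velocity interval regardless of wavelength. A tiny worked sketch (wavelength range illustrative):

import numpy as np

start, end = np.log(500.0), np.log(510.0)       # nm
xgrid = np.logspace(start, end, 2048, base=np.e)
logspacing = np.log(xgrid[1] / xgrid[0])
velocity_per_pixel = logspacing * 299792.458    # km/s; ~2.9 km/s here
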
Example #20
def Correct(original, corrected, offset=None):
  #Read in the data and model
  original_orders = HelperFunctions.ReadFits(original, extensions=True, x="wavelength", y="flux", errors="error", cont="continuum")
  corrected_orders, corrected_headers = ReadCorrectedFile(corrected)
  test_orders, header = ReadCorrectedFile(corrected, yaxis="flux")

  if plot:
    order = test_orders[plotorder]
    model = corrected_orders[plotorder]
    #for order, model in zip(test_orders, corrected_orders):
    plt.plot(order.x, order.y/order.cont)
    plt.plot(model.x, model.y)
    plt.title("Correction in corrected file only")
    plt.show()

  
  print(len(original_orders), len(corrected_orders))
  if offset is None:
    offset = len(original_orders) - len(corrected_orders)
  offset = 0
  for i in range(offset, len(original_orders)):
    data = original_orders[i]
    data.cont = FittingUtilities.Continuum(data.x, data.y)
    try:
      model = corrected_orders[i-offset]
      header = corrected_headers[i-offset]
      print("Order = %i\nHumidity: %g\nO2 concentration: %g\n" % (i, header['h2oval'], header['o2val']))
    except IndexError:
      model = DataStructures.xypoint(x=data.x, y=numpy.ones(data.x.size))
      print("Warning!!! Telluric Model not found for order %i" % i)

    if plot and i == plotorder:
      plt.figure(1)
      plt.plot(data.x, data.y/data.cont)
      plt.plot(model.x, model.y)

    if model.size() < data.size():
      left = numpy.searchsorted(data.x, model.x[0])
      right = numpy.searchsorted(data.x, model.x[-1])
      if right < data.size():
        right += 1
      data = data[left:right]
    elif model.size() > data.size():
      sys.exit("Error! Model size (%i) is larger than data size (%i)" %(model.size(), data.size()))

    badindices = numpy.where(numpy.logical_or(data.y <= 0, model.y < 0.05))[0]
    model.y[badindices] = data.y[badindices]/data.cont[badindices]
    
    data.y /= model.y
    original_orders[i] = data.copy()
  if plot:
    plt.show()
  return original_orders
Example #21
def ReadFile(filename):
    #Read in file
    x, y = np.loadtxt(filename, unpack=True)
    model = DataStructures.xypoint(x.size)
    model.x = x.copy() * units.angstrom.to(units.nm)
    model.y = y.copy()
    #Read in continuum
    x, y = np.loadtxt(filename[:-1] + "17", unpack=True)
    cont_fcn = UnivariateSpline(x * units.angstrom.to(units.nm), y, s=0)
    model.cont = cont_fcn(model.x)

    return model
Example #22
def HighPass():
    fileList = []
    vsini = 40.0
    for arg in sys.argv[1:]:
        if 'vsini' in arg:
            vsini = float(arg.split("=")[-1])
        else:
            fileList.append(arg)

    for fname in fileList:
        column_list = []
        fig = plt.figure(1)
        plotgrid = gridspec.GridSpec(3, 1)
        mainaxis = plt.subplot(plotgrid[0:2])
        reducedaxis = plt.subplot(plotgrid[2], sharex=mainaxis)
        orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
                                        cont="continuum")
        for order in orders:
            # Linearize
            datafcn = interp(order.x, order.y, k=1)
            errorfcn = interp(order.x, order.err, k=1)
            linear = DataStructures.xypoint(order.x.size)
            linear.x = np.linspace(order.x[0], order.x[-1], linear.size())
            linear.y = datafcn(linear.x)
            linear.err = errorfcn(linear.x)
            linear.cont = FittingUtilities.Continuum(linear.x, linear.y)
            smoothed = HelperFunctions.HighPassFilter(linear, vsini * units.km.to(units.cm))
            mean = np.mean(smoothed)
            std = np.std(smoothed)
            badindices = np.where(np.abs((smoothed - mean) / std) > 3.0)[0]
            plt.figure(2)
            plt.plot(linear.x, (smoothed - mean) / std)
            plt.figure(3)
            plt.plot(linear.x, linear.y - smoothed)
            plt.figure(1)
            smoothed[badindices] = 0.0
            smoothed += np.median(linear.cont)
            smoothed /= np.median(linear.cont)
            #linear.y[badindices] = smoothed[badindices]
            mainaxis.plot(linear.x, linear.y / linear.cont, 'k-')
            mainaxis.plot(linear.x, smoothed, 'r-', linewidth=1)
            reducedaxis.plot(linear.x, smoothed)
            columns = {"wavelength": linear.x,
                       "flux": smoothed,
                       "error": linear.err,
                       "continuum": FittingUtilities.Continuum(linear.x, linear.y, fitorder=3, lowreject=3,
                                                               highreject=3)}
            column_list.append(columns)
        outfilename = "%s_filtered.fits" % (fname.split(".fits")[0])
        print("Outputting to %s" % outfilename)
        plt.show()
        HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new')
Example #23
def GetModel(Temperature, grid):
    filename = "%st%.5i_g+4.0_p00p00_hr.fits" % (grid, Temperature)
    print("Model file name: ", filename)
    hdu = fits.open(filename)[0]
    data = hdu.data
    header = hdu.header
    wave_A = np.arange(data.size) * header['CDELT1'] + header['CRVAL1']
    x = wave_A * units.angstrom.to(units.nm)
    y = data
    left = np.searchsorted(x, 400)
    right = np.searchsorted(x, 900)
    model = DataStructures.xypoint(x=x[left:right], y=y[left:right])
    return model
Example #24
def make_synthetic_observation(model, rv, snr, x0, x1, dx=0.0):
    c = constants.c.to(u.km / u.s).value
    fcn = spline(model.x, model.y / model.cont)
    left = np.searchsorted(model.x, x0)
    right = np.searchsorted(model.x, x1)
    x = model.x[left:right] + dx
    d_logx = np.log(model.x[1] / model.x[0])
    x = np.exp(np.arange(np.log(model.x[left] + dx), np.log(model.x[right] + dx), d_logx))
    # x = np.logspace(np.log(model.x[left]+dx), np.log(model.x[right]+dx), right - left + 1, base=np.e)
    print((model.x[left:right] + dx) - x)
    data = DataStructures.xypoint(x=x, y=fcn(x * (1 + rv / c)))
    data.y += np.random.normal(loc=0, scale=1.0 / snr, size=data.size())
    return data
Example #25
    def __call__(self, T, metal, vsini=0.0, return_xypoint=True, **kwargs):
        """
        Given parameters, return an interpolated spectrum

        If return_xypoint is False, then it will only return
          a numpy.ndarray with the spectrum

        Before interpolating, we will do some error checking to make
        sure the requested values fall within the grid
        """

        # Scale the requested values
        T = (T - self.T_scale[0]) / self.T_scale[1]
        metal = (metal - self.metal_scale[0]) / self.metal_scale[1]

        # Get the minimum and maximum values in the grid
        T_min = min(self.grid[:, 0])
        T_max = max(self.grid[:, 0])
        metal_min = min(self.grid[:, 1])
        metal_max = max(self.grid[:, 1])
        input_list = (T, metal)

        # Check to make sure the requested values fall within the grid
        if (T_min <= T <= T_max and
                        metal_min <= metal <= metal_max):

            y = self.interpolator(input_list)
        else:
            if self.debug:
                warnings.warn("The requested parameters fall outside the model grid. Results may be unreliable!")
            print(T, T_min, T_max)
            print(metal, metal_min, metal_max)
            y = self.NN_interpolator(input_list)

        # Test to make sure the result is valid. If the requested point is
        # outside the Delaunay triangulation, it will return NaN's
        if np.any(np.isnan(y)):
            if self.debug:
                warnings.warn("Found NaNs in the interpolated spectrum! Falling back to Nearest Neighbor")
            y = self.NN_interpolator(input_list)

        model = DataStructures.xypoint(x=self.xaxis, y=y)
        vsini *= units.km.to(units.cm)
        model = Broaden.RotBroad(model, vsini, linear=self.rebin)


        # Return the appropriate object
        if return_xypoint:
            return model
        else:
            return model.y
Example #26
def GenerateModel(lines, strengths, xgrid, vsini, epsilon, resolution, vsys):
    # Make spectrum of delta functions with same xgrid as given
    strengths = 1.0 - strengths
    model = DataStructures.xypoint(x=xgrid, y=np.zeros(xgrid.size))
    delta_v = (xgrid[1] - xgrid[0]) / xgrid[xgrid.size // 2] * 3e5
    factor = 10. / delta_v
    for i, line in enumerate(lines):
        idx = np.searchsorted(model.x, line * (1 + vsys / constants.c.cgs.value))
        model.y[idx] = -strengths[i] * factor
    model.y += 1.0
    if vsini > 10.0:
        model = RotBroad.Broaden2(model.copy(), vsini * units.km.to(units.cm), linear=True, epsilon=epsilon)
    model = FittingUtilities.ReduceResolution(model, resolution)
    return model
Example #27
def Correlate(data, model_orders):
    corrlist = []
    normalization = 0.0
    for ordernum, order in enumerate(data):
        # print "Cross-correlating order %i" %(ordernum+1)
        model = model_orders[ordernum]
        reduceddata = order.y
        reducedmodel = model.y / model.cont
        meandata = reduceddata.mean()
        meanmodel = reducedmodel.mean()
        data_rms = np.std(reduceddata)
        model_rms = np.std(reducedmodel)
        left = np.searchsorted(model.x, order.x[0])
        right = model.x.size - np.searchsorted(model.x, order.x[-1])
        delta = left - right

        #ycorr = np.correlate(reduceddata - meandata, reducedmodel - meanmodel, mode='valid')
        ycorr = fftconvolve((reduceddata - meandata), (reducedmodel - meanmodel)[::-1], mode='valid')
        xcorr = np.arange(ycorr.size)
        lags = xcorr - right
        distancePerLag = np.log(model.x[1] / model.x[0])
        offsets = -lags * distancePerLag
        velocity = offsets * constants.c.cgs.value * units.cm.to(units.km)
        corr = DataStructures.xypoint(velocity.size)
        corr.x = velocity[::-1]
        corr.y = ycorr[::-1] / (data_rms * model_rms * float(ycorr.size))

        # Only save part of the correlation
        left = np.searchsorted(corr.x, minvel)
        right = np.searchsorted(corr.x, maxvel)
        corr = corr[left:right]

        normalization += float(order.size())

        # Save correlation
        corrlist.append(corr.copy())


    # Add up the individual CCFs (use the Maximum Likelihood method from Zucker 2003, MNRAS, 342, 1291)
    total = corrlist[0].copy()
    total.y = np.ones(total.size())
    for i, corr in enumerate(corrlist):
        correlation = spline(corr.x, corr.y, k=1)
        N = data[i].size()
        total.y *= np.power(1.0 - correlation(total.x) ** 2, float(N) / normalization)
    master_corr = total.copy()
    master_corr.y = 1.0 - np.power(total.y, 1.0 / float(len(corrlist)))

    return master_corr
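
The combination step follows the maximum-likelihood prescription of Zucker (2003, MNRAS, 342, 1291): each order contributes (1 - C_i(v)^2) weighted by its share of the total pixel count, and the product is mapped back to a single correlation-like quantity (the extra 1/M root is this code's own choice). A toy sketch with fake per-order CCFs:

import numpy as np

ccfs = [np.random.uniform(-0.1, 0.1, 200) for _ in range(5)]   # placeholder per-order CCFs
npix = [2048] * 5                                              # pixels per order
total = np.ones(200)
for ccf, n in zip(ccfs, npix):
    total *= np.power(1.0 - ccf ** 2, float(n) / sum(npix))
master = 1.0 - np.power(total, 1.0 / len(ccfs))
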
Example #28
def GetModel(spline, data, RV, vsini):
    #We first need to make an evenly-sampled x grid that is wider than the final one, so edge effects are not an issue
    xgrid = data.x
    xspacing = (xgrid[-1] - xgrid[0]) / float(xgrid.size - 1)
    first = 2 * xgrid[0] - xgrid[-1]
    last = 2 * xgrid[-1] - xgrid[0]
    x = np.arange(first, last, xspacing)

    z = RV / Units.c
    unbroadened = DataStructures.xypoint(x=x, y=spline(x * (1 + z)), cont=np.ones(x.size))

    broadened = RotBroad.Broaden(unbroadened, vsini, linear=True, alpha=0.1)

    #Now, we must spline the broadened function onto the xgrid
    fcn = UnivariateSpline(broadened.x, broadened.y / broadened.cont, s=0)
    a, b, c = AdjustWaveScale(data, fcn)
    #retarray = DataStructures.xypoint(x=xgrid, y=fcn(xgrid), cont=np.ones(xgrid.size))
    retarray = DataStructures.xypoint(x=xgrid, y=fcn(a + b * xgrid + c * xgrid ** 2), cont=np.ones(xgrid.size))

    #Adjust model continuum
    corrected = data.y / (data.cont * retarray.y)
    temp = DataStructures.xypoint(x=data.x, y=corrected)
    done = False
    while not done:
        done = True
        contfcn = np.poly1d(np.polyfit(temp.x, temp.y, 2))
        residuals = temp.y - contfcn(temp.x)
        sigma = np.std(residuals)
        badindices = np.where(residuals < -2 * sigma)[0]
        if badindices.size > 0 and badindices.size < temp.x.size:
            temp.x = np.delete(temp.x, badindices)
            temp.y = np.delete(temp.y, badindices)
            done = False
    retarray.cont = 1.0 / contfcn(retarray.x)

    return retarray
Example #29
def ReadCorrectedFile(fname, yaxis="model"):
    orders = []
    headers = []
    hdulist = pyfits.open(fname)
    numorders = len(hdulist)
    for i in range(1, numorders):
        order = hdulist[i].data
        xypt = DataStructures.xypoint(x=order.field("wavelength"),
                                      y=order.field(yaxis),
                                      cont=order.field("continuum"),
                                      err=order.field("error"))

        orders.append(xypt)
        headers.append(hdulist[i].header)
    return orders, headers
Example #30
    def __init__(self, infilename,
                 telluricfile="/Users/kgulliks/School/Research/aerlbl_v12.2/rundir2/OutputModels/transmission-792.30-290.93-45.0-7.4-368.50-4.00-10.00-1.50",
                 telluric=False, default_windowsize=200):


        print("Reading data")
        self.orders = FitsUtils.MakeXYpoints(infilename, errors="error", extensions=True, x="wavelength", y="flux")

        for i, order in enumerate(self.orders):
            self.orders[i].cont = FindContinuum.Continuum(order.x, order.y, lowreject=3, highreject=3, fitorder=2)

        if telluric:
            print("Reading telluric model from database")
            x, y = np.loadtxt(telluricfile, unpack=True)
            self.model = DataStructures.xypoint(x=x[::-1], y=y[::-1])
        else:
            x = np.arange(self.orders[0].x[0] - 20.0, self.orders[-1].x[-1] + 20.0, 0.001)
            y = np.ones(x.size)
            self.model = DataStructures.xypoint(x=x, y=y)

        # Make outfilename
        if "-" in infilename:
            num = int(infilename.split("-")[-1].split(".fits")[0])
            outfilename = "%s-%i.fits" % (infilename.split("-")[0], num + 1)
        else:
            outfilename = "%s-0.fits" % (infilename.split(".fits")[0])
        cmdstring = "cp %s %s" % (infilename, outfilename)
        command = subprocess.check_call(cmdstring, shell=True)

        self.fitmode = False
        self.mode = "convolution"
        self.clicks = []
        self.template = infilename
        self.infilename = infilename
        self.outfilename = outfilename
        self.default_windowsize = default_windowsize
Example #31
def ensure_nn_index(hs, qdat, dcxs):
    # NNIndexes depend on the data cxs AND feature / chip configs
    feat_uid = qdat.cfg._feat_cfg.get_uid()
    dcxs_uid = helpers.hashstr_arr(dcxs, 'dcxs') + feat_uid
    if dcxs_uid not in qdat._dcxs2_index:
        # Make sure the features are all computed first
        print('[mc3] qdat._data_index[dcxs_uid]... cache miss')
        print('[mc3] dcxs_ is not in qdat cache')
        print('[mc3] hashstr(dcxs_) = %r' % dcxs_uid)
        print('[mc3] REFRESHING FEATURES')
        hs.refresh_features(dcxs)
        # Compute the FLANN Index
        data_index = ds.NNIndex(hs, dcxs)
        qdat._dcxs2_index[dcxs_uid] = data_index
    else:
        print('[mc3] qdat._data_index[dcxs_uid]... cache hit')
    qdat._data_index = qdat._dcxs2_index[dcxs_uid]
Example #32
def createAllCEByChannelVars(scenario, allCEVarList):
    ceByChannelVarList = []
    ceByChannelVarNameList = []

    for ceVar in allCEVarList:
        ceVarNameSplit = ceVar.name.split('_')
        ceVarName = "CEs_NO_CANAL_" + ceVarNameSplit[2]

        if (ceVarName not in ceByChannelVarNameList):
            ceByChannelVarNameList.append(ceVarName)

    for ceVarName in ceByChannelVarNameList:
        ceByChannelVarList.append(
            DataStructures.LPVar(
                ceVarName,
                "Number of CEs in channel=" + ceVarName.split('_')[3]))

    return ceByChannelVarList
Example #33
    def __init__(self, parent, app):
        super(GameField, self).__init__(parent)
        self.parent = parent
        self.app = app

        self.moves = 0
        self.messy = False

        self.width = 30
        self.height = 20
        self.mines = 70
        self.remainingMines = 0

        self.movesHistory = []

        # Time counters #
        self.s = 0
        self.m = 0
        self.h = 0
        self.timeSpeed = 1

        gameStructure = DataStructures.GameStructures(self.width, self.height,
                                                      self.mines)
        self.gameStructure = gameStructure
        self.dataList = gameStructure.matrix
        self.visibleList = gameStructure.visibleMatrix
        self.VP = gameStructure.virtualPlayer

        self.setWindowTitle("Fair minesweeper")

        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                           QtWidgets.QSizePolicy.Preferred)
        self.setSizePolicy(sizePolicy)

        self.initiateWidgets()

        self.loadTopResults()
Example #34
def CombineInChars():
    global masterLineList
    tempLine = makeLineObj(masterLineList[0])
    previousLine = LineObj(
        tempLine.script, 0, 0, "DIRECTION",
        tempLine.script)  #to keep lines in pairs, call and response
    for jline in masterLineList:
        line = makeLineObj(jline)
        #print "checking Line"
        found = False
        AddToStructures(line)
        tempPlay = getPlayinList(scriptWatcher)
        for actor in tempPlay.charList:
            if actor.name == line.character:  #Find the character in the char list
                found = True
                actor.lines.append([previousLine, line])
        if not found:
            tempPlay.charList.append(
                DataStructures.CharacterObject(line.character))
            for actor in tempPlay.charList:
                if actor.name == line.character:  #Find the character in the char list
                    found = True
                    actor.lines.append([previousLine, line])
        previousLine = line
Example #35
    def start(self, load=False):
        try:
            self.width, self.height = int(self.columnsWidget.text()), int(
                self.rowsWidget.text())
            self.mines = int(self.minesWidget.text())
            if self.width > 50 or self.height > 30:
                QtWidgets.QMessageBox.warning(
                    self, "Too big field",
                    "Cannot set width > 50 or height > 30.",
                    QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
                return
            if self.mines >= self.width * self.height:
                QtWidgets.QMessageBox.warning(
                    self, "Too many mines",
                    "Cannot set mines >= width*height.",
                    QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
                return
            else:
                self.moves = 0

                if not load:
                    gameStructure = DataStructures.GameStructures(
                        self.width, self.height, self.mines)
                    self.gameStructure = gameStructure
                    self.remaining.setText(str(self.mines))
                    self.resetTiming()
                else:
                    self.remaining.setText(self.remainingMines)
                self.dataList = self.gameStructure.matrix
                self.visibleList = self.gameStructure.visibleMatrix
                self.VP = self.gameStructure.virtualPlayer
                self.initiateGameField()

        except Exception as e:
            print(e)
            print('Cannot set given field dimension.')
Example #36
import DataStructures

linkedList = DataStructures.LinkedList()
linkedQueue = DataStructures.LinkedQueue()
linkedStack = DataStructures.LinkedStack()

node = DataStructures.Node(15)
node2 = DataStructures.Node("SIXTEEN")
node3 = DataStructures.Node(17)

print("------------------LINKED LIST TESTING------------------")
linkedList.add(node)
linkedList.add(node2)
linkedList.add(node3)
linkedList.add(DataStructures.Node(18))
print(linkedList)

print("AT INDEX 1:" + str(linkedList.get(1).getData()))

print("inserting at index 1")
linkedList.insert(DataStructures.Node(20), 1)
print(linkedList)
print("AT INDEX 1:" + str(linkedList.get(1).getData()))

print("removing index 1")
linkedList.remove(1)
print(linkedList)
print("removing index 1")
linkedList.remove(1)
print(linkedList)
Example #37
def chip_interaction(hs, cx, notes, fnum=1, **kwargs):
    chip_info_locals = dev.chip_info(hs, cx)
    chip_title = chip_info_locals['cidstr']+' '+chip_info_locals['name']
    chip_xlabel = chip_info_locals['gname']
    class State(DynStruct):
        def __init__(state):
            super(State, state).__init__()
            state.reset()
        def reset(state):
            state.res = None
            state.scale_min = None
            state.scale_max = None
            state.fnum = 1
            state.fnum_offset = 1

    state = State()
    state.fnum = fnum
    fx_ptr = [0]
    hprint = helpers.horiz_print
    kpts = hs.get_kpts(cx)
    scale = np.sqrt(kpts.T[2]*kpts.T[4])
    desc = hs.get_desc(cx)
    rchip = hs.get_chip(cx)
    # Start off keypoints with no filters
    is_valid = np.ones(len(kpts), dtype=bool)

    def update_valid(reset=False):
        print('[interact] updating valid')
        if reset is True:
            state.reset()
            is_valid[:] = True
        if state.scale_min:
            is_valid[:] = np.bitwise_and(scale >= state.scale_min, is_valid)
        if state.scale_max:
            is_valid[:] = np.bitwise_and(scale <= state.scale_max, is_valid)
        print(state)
        print('%d valid keypoints' % sum(is_valid))
        print('kpts scale ' + helpers.printable_mystats(scale[is_valid]))
        select_ith_keypoint(fx_ptr[0])

    def keypoint_info(fx):
        kp = kpts[fx]
        print(kp)
        x,y,a,c,d = kp
        A = np.array(([a,0],[c,d]))
        print('--kp info--')
        invA = np.linalg.inv(A)

    def select_ith_keypoint(fx):
        print('-------------------------------------------')
        print('[interact] viewing ith=%r keypoint' % fx)
        kp = kpts[fx]
        sift = desc[fx]
        np.set_printoptions(precision=5)
        df2.cla()
        fig1 = df2.figure(state.fnum, **kwargs)
        df2.imshow(rchip, pnum=(2,1,1))
        #df2.imshow(rchip, pnum=(1,2,1), title='inv(sqrtm(invE*)')
        #df2.imshow(rchip, pnum=(1,2,2), title='inv(A)')
        ell_args = {'ell_alpha':.4, 'ell_linewidth':1.8, 'rect':False}
        df2.draw_kpts2(kpts[is_valid], ell_color=df2.ORANGE, **ell_args)
        df2.draw_kpts2(kpts[fx:fx+1], ell_color=df2.BLUE, **ell_args)
        ax = df2.gca()
        #ax.set_title(str(fx)+' old=b(inv(sqrtm(invE*)) and new=o(A=invA)')
        scale = np.sqrt(kp[2]*kp[4])
        printops = np.get_printoptions()
        np.set_printoptions(precision=1)
        ax.set_title(chip_title)
        ax.set_xlabel(chip_xlabel)

        extract_patch.draw_keypoint_patch(rchip, kp, sift, pnum=(2,2,3))
        ax = df2.gca()
        ax.set_title('affine feature\nfx=%r scale=%.1f' % (fx, scale))
        extract_patch.draw_keypoint_patch(rchip, kp, sift, warped=True, pnum=(2,2,4))
        ax = df2.gca()
        ax.set_title('warped feature\ninvA=%r ' % str(kp))
        golden_wh = lambda x: [int(round(x * .618)), int(round(x * .312))]
        Ooo_50_50 = {'num_rc':(1,1), 'wh':golden_wh(1400*2)}
        np.set_printoptions(**printops)
        #df2.present(**Ooo_50_50)
        #df2.update()
        fig1.show()
        fig1.canvas.draw()
        #df2.show()

    fig = df2.figure(state.fnum )
    xy = kpts.T[0:2].T
    # Flann doesn't help here at all
    use_flann = False
    flann_ptr = [None]
    def on_click(event):
        if event.xdata is None: return
        print('[interact] button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (
            event.button, event.x, event.y, event.xdata, event.ydata))
        x,y = event.xdata, event.ydata
        if not use_flann:
            dist = (kpts.T[0] - x)**2 + (kpts.T[1] - y)**2
            fx_ptr[0] = dist.argsort()[0]
            select_ith_keypoint(fx_ptr[0])
        else:
            flann, = flann_ptr
            if flann is None:
                flann = pyflann.FLANN()
                flann.build_index(xy, algorithm='kdtree', trees=1)
            query = np.array(((x,y)))
            knnx, kdist = flann.nn_index(query, 1, checks=8)
            fx_ptr[0]=knnx[0]
            select_ith_keypoint(fx_ptr[0])
        print('>>>')
    callback_id = fig.canvas.mpl_connect('button_press_event', on_click)

    select_ith_keypoint(fx_ptr[0])
    query_cfg = ds.QueryConfig(hs, **kwargs)
    while True:
      try:
        print('>>>')
        raw = input('enter a chip-interaction command (q to exit, h for help)\n>>>')
        print('>>>')
        ans = raw.split(' ')
        if len(ans) == 0: continue
        cmd = ans[0]
        if cmd in ['e', 'exit']: break;
        elif cmd == 'n':
            fx_ptr[0] += 1
            select_ith_keypoint(fx_ptr[0])
        elif cmd in ['q', 'query']:
            print(query_cfg)
            print(query_cfg.get_uid())
            res = hs.query(cx, query_cfg=query_cfg, use_cache=False)
            state.res = res
            resfnum = state.fnum + state.fnum_offset
            res.show_topN(hs, fnum=resfnum)
            df2.update()
            #fig_res = df2.figure(fnum=resfnum)
            #fig_res.show()
            #fig_res.canvas.draw()
        elif cmd == 'K':
            query_cfg.update_cfg(K=int(ans[1]))
        elif cmd == 'svoff':
            query_cfg.update_cfg(sv_on=False)
        elif cmd == 'svon':
            query_cfg.update_cfg(sv_on=True)
        elif cmd == 'test':
            query_cfg.update_cfg(sv_on=True, K=20, use_chip_extent=True)
        elif cmd in ['m', 'mytest']:
            mycmd = open('mytest.py').read()
            print(mycmd)
            exec(mycmd, locals(), globals())
            print(query_cfg)
            res = hs.query(cx, query_cfg=query_cfg, use_cache=False)
            state.res = res
            resfnum = state.fnum + state.fnum_offset
            res.show_topN(hs, fnum=resfnum)
            df2.update()
        elif cmd == 'test2':
            query_cfg.update_cfg(sv_on=True, K=20, use_chip_extent=True, xy_thresh=.1)
            #query_cfg.update_cfg(sv_on=True, K=20, use_chip_extent=False)
        elif cmd == 'reset':
            update_valid(reset=True)
        elif cmd in ['fig']:
            state.fnum_offset += 1
        elif cmd in ['smin', 'scale_min']:
            state.scale_min = int(ans[1])
            update_valid()
        elif cmd in ['smax', 'scale_max']:
            state.scale_max = int(ans[1])
            update_valid()
        else:
            print("I don't understand the answer. I hope you know what you are doing")
            print(raw)
            exec raw in globals(), locals()
        print('>>>')
      except Exception as ex:
          print(repr(ex))
          if 'doraise' in vars():
            raise
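
The click handler above picks the keypoint nearest the cursor, either by a brute-force squared-distance scan or, optionally, a FLANN kd-tree. A minimal self-contained sketch of the brute-force path, assuming each row of kpts begins with an (x, y) location (the sample data below is hypothetical):

# Minimal sketch: pick the keypoint closest to a click (hypothetical data).
import numpy as np

kpts = np.array([[10.0, 20.0], [50.0, 60.0], [51.0, 59.0]])  # rows start with (x, y)
click_x, click_y = 52.0, 58.0
dist = (kpts.T[0] - click_x)**2 + (kpts.T[1] - click_y)**2   # squared distance to each keypoint
fx = dist.argsort()[0]                                       # index of the nearest keypoint
print(fx)  # -> 2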
Example #38
0
            #a = input("Correct Sequence?")
            ##if not a == "c":
            ##    dot_description = a.split(",")
            ##    description = str()
            ##    for dotqualifier in dot_description:
            ##        description += dotqualifier
            ##    updated_dot_code_id = self.dotcodereader.translator.get(description, "Error! Try again")
            ##    print("Corrected: ", updated_dot_code_id)
            ##    tray_id = updated_dot_code_id
            ###th2 = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
            #print(tray_counter % 3)
            #q = input("Dot Code")
            current_tray_object = DataStructures.Tray(tray_id, section_types[tray_counter % 3])
            current_tray_object.scan_in_pots(pots)
            tray_struct.append(current_tray_object)
            #plt.imshow(current_tray_object.get_pot_position(3).get_image())
            #plt.show()
            tray_counter += 1
        except IndexError as e:
            errors += 1
            tray_counter += 1
            print("dots_not_located")
            record = ErrorHandler.ErroneousImage(i, "dots_not_located", None,
                                                 tray_img_path.split(".")[0] + ".shelf",
                                                 None, section_types[tray_counter % 3])
            all_errors.append(record)
        except statistics.StatisticsError as e:
            print(e)
Example #39
0
def detect_version(db_dir):
    '''
    Attempt to detect the version of the database
    Input:  db_dir - the directory of the database
    Output: version_info - dict with the detected db_version, csv header
            format regex, chip csv format, and table filenames
    '''
    printDBG('[ld3] detect_version(%r)' % db_dir)
    hs_dirs = ds.HotspotterDirs(db_dir)
    # --- Directories ---
    db_dir = hs_dirs.db_dir
    img_dir = hs_dirs.img_dir
    internal_dir = hs_dirs.internal_dir

    # --- Table File Names ---
    chip_table = join(internal_dir, CHIP_TABLE_FNAME)
    name_table = join(internal_dir, NAME_TABLE_FNAME)
    image_table = join(internal_dir, IMAGE_TABLE_FNAME)  # TODO: Make optional

    # --- CHECKS ---
    has_dbdir = detect_checkpath(db_dir)
    has_imgdir = detect_checkpath(img_dir)
    has_chiptbl = detect_checkpath(chip_table)
    has_nametbl = detect_checkpath(name_table)
    has_imgtbl = detect_checkpath(image_table)

    # ChipTable Header Markers and ChipTable Header Variables
    header_numdata = '# NumData '
    header_csvformat_re = '# *ChipID,'
    chip_csv_format = [
        'ChipID', 'ImgID', 'NameID', 'roi[tl_x  tl_y  w  h]', 'theta'
    ]
    vss_csvformat_re = '#imgindex,'
    v12_csvformat_re = r'#[ 0-9]*\) '
    v12_csv_format = ['instance_id', 'image_id', 'name_id', 'roi']

    db_version = 'current'
    isCurrentVersion = all(
        [has_dbdir, has_imgdir, has_chiptbl, has_nametbl, has_imgtbl])
    printDBG('[ld3] isCurrentVersion=%r' % isCurrentVersion)

    if not isCurrentVersion:

        def assign_alternate(tblname, optional=False):
            # Checks several places for target file
            path = join(db_dir, tblname)
            if detect_checkpath(path):
                return path
            path = join(db_dir, '.hs_internals', tblname)
            if detect_checkpath(path):
                return path
            if optional:
                return None
            else:
                raise AssertionError('bad state=%r' % tblname)

        # Assign the following:
        # db_version : database version,
        # header_csvformat_re : Header format regex (to locate the # header)
        # chip_csv_format : Default header order
        # chip_table, name_table, image_table

        # HOTSPOTTER VERSION 2
        if db_info.has_v2_gt(db_dir):
            db_version = 'hotspotter-v2'
            header_csvformat_re = v12_csvformat_re
            chip_csv_format = 'MULTILINE'
            chip_table = assign_alternate('instance_table.csv')
            name_table = assign_alternate('name_table.csv')
            image_table = assign_alternate('image_table.csv')
        # HOTSPOTTER VERSION 1
        elif db_info.has_v1_gt(db_dir):
            db_version = 'hotspotter-v1'
            header_csvformat_re = v12_csvformat_re
            chip_csv_format = 'MULTILINE'
            chip_table = assign_alternate('animal_info_table.csv')
            name_table = assign_alternate('name_table.csv', optional=True)
            image_table = assign_alternate('image_table.csv', optional=True)
        # STRIPESPOTTER VERSION
        elif db_info.has_ss_gt(db_dir):
            db_version = 'stripespotter'
            header_csvformat_re = vss_csvformat_re
            chip_csv_format = [
                'imgindex', 'original_filepath', 'roi', 'animal_name'
            ]
            chip_table = join(db_dir, 'SightingData.csv')
            name_table = None
            image_table = None
            if not detect_checkpath(chip_table):
                msg = 'chip_table=%r must exist to convert stripespotter db' % chip_table
                raise AssertionError(msg)
        else:
            try:
                # ALTERNATIVE CURRENT VERSION
                db_version = 'current'  # Well almost
                chip_table = assign_alternate(CHIP_TABLE_FNAME)
                name_table = assign_alternate(NAME_TABLE_FNAME)
                image_table = assign_alternate(IMAGE_TABLE_FNAME)
            except AssertionError:
                # CORRUPTED CURRENT VERSION
                if db_info.has_partial_gt(db_dir):
                    db_version = 'partial'
                    chip_table = join(db_dir, 'flat_table.csv')
                    name_table = None
                    image_table = None
                # XLSX VERSION
                elif db_info.has_xlsx_gt(db_dir):
                    db_version = 'xlsx'
                    chip_table = None
                    name_table = None
                    image_table = None
                # NEW DATABASE
                else:
                    db_version = 'newdb'
                    chip_table = None
                    name_table = None
                    image_table = None
    version_info = {
        'db_version': db_version,
        'chip_csv_format': chip_csv_format,
        'header_csvformat_re': header_csvformat_re,
        'tables_fnames': (chip_table, name_table, image_table)
    }
    print('[ld3] has %s database format' % db_version)
    return version_info
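
A short usage sketch for detect_version; the directory path is hypothetical, and the keys follow the version_info dict built above:

# Hypothetical usage: inspect the detected format of a database directory.
version_info = detect_version('/path/to/my_database')
print(version_info['db_version'])  # e.g. 'hotspotter-v2'
chip_table, name_table, image_table = version_info['tables_fnames']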
Example #40
0
import DataStructures

print "TESTING: STACK\n----------"

testStack = DataStructures.Stack()
print "pushing 5"
testStack.push(5)
print "pushing 6"
testStack.push(6)
print "pushing 7"
testStack.push(7)
print testStack.pop()
print testStack.pop()
print "pushing 8"
testStack.push(8)
print testStack.pop()
print testStack.pop()

print "\nTESTING: QUEUE\n----------"
testQueue = DataStructures.Queue()
print "queueing 0"
testQueue.enqueue(0)
print "queueing 1"
testQueue.enqueue(1)
print "queueing 2"
testQueue.enqueue(2)
print testQueue.dequeue()
print "queueing 3"
testQueue.enqueue(3)
print testQueue.dequeue()
print testQueue.dequeue()
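
For reference, a minimal list-backed sketch of the interface the test above assumes; the real DataStructures module may differ:

# Minimal sketches of the assumed Stack and Queue interfaces (an assumption).
class Stack(object):
    def __init__(self):
        self.items = []

    def push(self, item):  # add to the top
        self.items.append(item)

    def pop(self):  # remove from the top (LIFO)
        return self.items.pop()


class Queue(object):
    def __init__(self):
        self.items = []

    def enqueue(self, item):  # add to the back
        self.items.append(item)

    def dequeue(self):  # remove from the front (FIFO)
        return self.items.pop(0)

With these definitions the stack test prints 7, 6, 8, 5 and the queue test prints 0, 1, 2.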
Example #41
0
    test_df = pd.read_csv("test.csv")
    airport_df = pd.read_csv("airport.csv")
    country_currency_df = pd.read_csv("countrycurrency.csv")
    currency_rates_df = pd.read_csv("currencyrates.csv")
    aircraft_df = pd.read_csv("aircraft.csv")

    airport_currencies_df = airport_df.merge(
        country_currency_df.rename(columns={"name": "Country"}), on="Country")
    merged_df = airport_currencies_df.merge(currency_rates_df.rename(
        columns={"CurrencyCode": "currency_alphabetic_code"}),
                                            on="currency_alphabetic_code")
else:
    print("Error, csv file(s) missing:", missingFiles)
    sys.exit()

g = DataStructures.Graph()

airports = DataStructures.create_airport_list(merged_df)
aircrafts = DataStructures.createAircrafts(aircraft_df)

for i in range(len(test_df)):
    route = DataStructures.getRoute(test_df, i)
    aircraft = test_df["Airplane"][i]
    checkRoute = DataStructures.checkRoute(route, airports)
    if not checkRoute[0]:
        print("Error: Airport,", checkRoute[1], ", doesn't exist in route:",
              route, "\n")
        continue
    if not DataStructures.checkAircraftExist(aircraft, aircrafts):
        print("Error: Aircraft doesn't exist:", aircraft, "\n")
        continue
Example #42
0
 def read(self, cm_xml_path, cdis_xml_path):
     xmlTree = et.parse(cm_xml_path)
     xmlRoot = xmlTree.getroot()
     
     ceList = []
     
     for ceElement in xmlRoot.findall('ceList'):
         ceClientList = []
         
         for ceClientElement in ceElement.findall('CEClientList'):
             id = ceClientElement.find('id').text
             latitude = float(ceClientElement.find('latitude').text)
             longitude = float(ceClientElement.find('longitude').text)
             signal = int(ceClientElement.find('nivelSinal').text)
             interference = float(ceClientElement.find('interferenciaCocanal').text)
             
             ceClient = DataStructures.CEClient(id, DataStructures.GeoPoint(latitude, longitude), signal, interference)
             
             ceClientList.append(ceClient)
         
         id = ceElement.find('id').text
         antenna = int(ceElement.find('antena').text)
         channel = int(ceElement.find('device').find('canal').text)
         latitude = float(ceElement.find('latitude').text)
         longitude = float(ceElement.find('longitude').text)
         potency = int(ceElement.find('potencia').text)
         maxPotency = int(ceElement.find('potenciaMax').text)
         
         ce = DataStructures.CE(id, antenna, channel, DataStructures.GeoPoint(latitude, longitude), potency, maxPotency, ceClientList)
         ceList.append(ce)
     
     id = xmlRoot.find('id').text  
     self.cm = DataStructures.CM(id, ceList)
     
     channelList = []
     
     for channelElement in xmlRoot.findall('channels'):
         name = channelElement.find('nome').text
         number = int(channelElement.find('numCanal').text)
         frequency = float(channelElement.find('frequencia').text)
         state = DataStructures.ChannelState[channelElement.find('estado').text]
         
         channel = DataStructures.Channel(name, number, frequency, state)
         channelList.append(channel)
     
     self.channels = channelList 
     
     xmlTree = et.parse(cdis_xml_path)
     xmlRoot = xmlTree.getroot()
     
     cdisList = []
     
     for cdisElement in xmlRoot.findall('cdis'):
         cdisChannelList = []
         
         for channelElement in cdisElement.findall('canaisTvdb'):
             name = channelElement.find('nome').text
             number = int(channelElement.find('numCanal').text)
             frequency = float(channelElement.find('frequencia').text)
             state = DataStructures.ChannelState[channelElement.find('estado').text]
             
             channel = DataStructures.Channel(name, number, frequency, state)                
             cdisChannelList.append(channel)
             
         cdis = DataStructures.CDIS(cdisChannelList)
         cdisList.append(cdis)
             
     self.cdisList = cdisList
Example #43
0
    def test(self):
        self.items = []
        static = DataStructures.Object(
            "Static", DataStructures.createRect(0, 0, 800, 200))
        static.add_children(DataStructures.Object("child_1"))
        static.add_children(DataStructures.Object("child_2"))
        static.add_children(DataStructures.Object("child_3"))
        static.color = QColor(200, 0, 0).name()
        static.childrens[0].add_children(
            DataStructures.Object("child_1.1",
                                  DataStructures.createRect(40, 40, 80, 40)))

        self.items.append(static)

        dynamic = DataStructures.Object(
            "Dynamic", DataStructures.createRect(0, 0, 200, 800))
        dynamic.add_children(DataStructures.Object("child_1"))
        dynamic.add_children(DataStructures.Object("child_2"))
        dynamic.add_children(DataStructures.Object("child_3"))
        dynamic.childrens[2].add_children(DataStructures.Object("child_2.1"))
        dynamic.color = QColor(0, 0, 200).name()
        self.items.append(dynamic)

        self.rebuildModel()
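
A minimal sketch of the Object/createRect interface this test exercises; the names mirror the calls above, but the real module may differ:

# Minimal sketch of the assumed tree-node interface (an assumption).
def createRect(x, y, w, h):
    return {'x': x, 'y': y, 'w': w, 'h': h}


class Object(object):
    def __init__(self, name='', rect=None):
        self.name = name
        self.rect = rect
        self.color = ''
        self.childrens = []  # spelling kept to match the test above

    def add_children(self, child):
        self.childrens.append(child)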
Example #44
0
 def initProperties(self, object=DataStructures.Object()):
     self.beginResetModel()
     self.object = object
     self.endResetModel()
Example #45
0
import sys
import datetime

import requests

import DataStructures

crypto_symbols = ('BTC', 'ETH', 'LTC')
warning_tolerance = (10000, 400, 90)
tol_frac = .012
frequency = 2000
duration = 100
tol_dict = dict(zip(crypto_symbols, warning_tolerance))
sys.stdout.write("%s\n\n" % tol_dict)
url_dict = dict()
window_dict = dict()

for sym in crypto_symbols:
    url_dict[sym] = 'https://api.gdax.com/products/%s-USD/ticker' % sym
    window_dict[sym] = DataStructures.CircularQueue(60)


def main():
    while True:
        sys.stdout.write("%s     " % datetime.datetime.now())
        for sym in crypto_symbols:
            url = url_dict[sym]
            # Query CoinDesk Current price API
            try:
                response = requests.get(url).json()
                price = round(float(response['bid']), 2)
                window_dict[sym].push(price)
                cur_mean = window_dict[sym].mean()
                dev_frac = (price - cur_mean) / cur_mean
                #if price < tol_dict[sym] or dev_frac > tol_frac or dev_frac < -tol_frac:
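
The watcher relies on a fixed-capacity CircularQueue with push() and mean() to keep a sliding-window average of recent prices. Since that class is not shown, here is a minimal sketch of the assumed interface:

# Minimal sketch (an assumption): fixed-size window that drops the oldest value.
class CircularQueue(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.items = []

    def push(self, item):
        if len(self.items) == self.capacity:
            self.items.pop(0)  # overwrite-on-full: discard the oldest entry
        self.items.append(item)

    def mean(self):
        return sum(self.items) / float(len(self.items))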
Example #46
0
    def MakeModel(self,
                  pressure=795.0,
                  temperature=283.0,
                  lowfreq=4000,
                  highfreq=4600,
                  angle=45.0,
                  humidity=50.0,
                  co2=368.5,
                  o3=3.9e-2,
                  n2o=0.32,
                  co=0.14,
                  ch4=1.8,
                  o2=2.1e5,
                  no=1.1e-19,
                  so2=1e-4,
                  no2=1e-4,
                  nh3=1e-4,
                  hno3=5.6e-4,
                  lat=30.6,
                  alt=2.1,
                  wavegrid=None,
                  resolution=None,
                  save=False,
                  libfile=None,
                  vac2air=True):
        """
        Here is the important function! All of the variables have default values,
          which you will want to override for any realistic use.

        :param pressure:       Pressure at telescope altitude (hPa)
        :param temperature:    Temperature at telescope altitude (Kelvin)
        :param lowfreq:        The starting wavenumber (cm^-1)
        :param highfreq:       The ending wavenumber (cm^-1)
        :param angle:          The zenith distance of the telescope (degrees). This is related to
                               the airmass (z) through z = sec(angle)
        :param humidity:       Percent relative humidity at the telescope altitude.
        :param co2:            Mixing ratio of this molecule (parts per million by volume)
        :param o3:             Mixing ratio of this molecule (parts per million by volume)
        :param n2o:            Mixing ratio of this molecule (parts per million by volume)
        :param co:             Mixing ratio of this molecule (parts per million by volume)
        :param ch4:            Mixing ratio of this molecule (parts per million by volume)
        :param o2:             Mixing ratio of this molecule (parts per million by volume)
        :param no:             Mixing ratio of this molecule (parts per million by volume)
        :param so2:            Mixing ratio of this molecule (parts per million by volume)
        :param no2:            Mixing ratio of this molecule (parts per million by volume)
        :param nh3:            Mixing ratio of this molecule (parts per million by volume)
        :param hno3:           Mixing ratio of this molecule (parts per million by volume)
        :param lat:            The latitude of the observatory (degrees)
        :param alt:            The altitude of the observatory above sea level (km)
        :param wavegrid:       If given, the model will be resampled to this grid.
                               Should be a 1D np array
        :param resolution:     If given, it will reduce the resolution by convolving
                               with a gaussian of appropriate width. Should be a float
                               with R=lam/dlam
        :param save:           If true, the generated model is saved. The filename will be
                               printed to the screen.
        :param libfile:        Useful if generating a telluric library. The filename of the
                               saved file will be written to this filename. Should be a string
                               variable. Ignored if save==False
        :param vac2air:        If True (default), it converts the wavelengths from vacuum to air
        
        :return:               DataStructures.xypoint instance with the telluric model. The x-axis
                               is in nanometers and the y-axis is in fractional transmission.
        """

        self.FindWorkingDirectory()

        #Make the class variables local
        TelluricModelingDir = self.TelluricModelingDir
        debug = self.debug
        lock = self.lock
        layers = np.array(self.layers)
        ModelDir = self.ModelDir

        #Make a deep copy of atmosphere so that I don't repeatedly modify it
        Atmosphere = copy.deepcopy(self.Atmosphere)

        #Convert from relative humidity to concentration (ppm)
        h2o = humidity_to_ppmv(humidity, temperature, pressure)

        #Start by scaling the abundances from those at 'alt' km
        #  (linearly interpolate)
        keys = sorted(Atmosphere.keys())
        lower = max(0, np.searchsorted(keys, alt) - 1)
        upper = min(lower + 1, len(keys) - 1)
        if lower == upper:
            raise ZeroDivisionError(
                "Observatory altitude of %g results in the surrounding layers being the same!"
                % alt)
        scale_values = list(Atmosphere[lower])
        scale_values[2] = list(Atmosphere[lower][2])
        scale_values[0] = (Atmosphere[upper][0] - Atmosphere[lower][0]) / (
            keys[upper] - keys[lower]) * (alt -
                                          keys[lower]) + Atmosphere[lower][0]
        scale_values[1] = (Atmosphere[upper][1] - Atmosphere[lower][1]) / (
            keys[upper] - keys[lower]) * (alt -
                                          keys[lower]) + Atmosphere[lower][1]
        for mol in range(len(scale_values[2])):
            scale_values[2][mol] = (
                Atmosphere[upper][2][mol] -
                Atmosphere[lower][2][mol]) / (keys[upper] - keys[lower]) * (
                    alt - keys[lower]) + Atmosphere[lower][2][mol]

        #Do the actual scaling
        pressure_scalefactor = (scale_values[0] - pressure) * np.exp(
            -(layers - alt)**2 / (2.0 * 10.0**2))
        temperature_scalefactor = (scale_values[1] - temperature) * np.exp(
            -(layers - alt)**2 / (2.0 * 10.0**2))
        for i, layer in enumerate(layers):
            # Atmosphere[layer][0] -= pressure_scalefactor[i]
            Atmosphere[layer][0] *= pressure / scale_values[0]
            Atmosphere[layer][1] -= temperature_scalefactor[i]
            Atmosphere[layer][2][0] *= h2o / scale_values[2][0]
            Atmosphere[layer][2][1] *= co2 / scale_values[2][1]
            Atmosphere[layer][2][2] *= o3 / scale_values[2][2]
            Atmosphere[layer][2][3] *= n2o / scale_values[2][3]
            Atmosphere[layer][2][4] *= co / scale_values[2][4]
            Atmosphere[layer][2][5] *= ch4 / scale_values[2][5]
            Atmosphere[layer][2][6] *= o2 / scale_values[2][6]
            Atmosphere[layer][2][7] *= no / scale_values[2][7]
            Atmosphere[layer][2][8] *= so2 / scale_values[2][8]
            Atmosphere[layer][2][9] *= no2 / scale_values[2][9]
            Atmosphere[layer][2][10] *= nh3 / scale_values[2][10]
            Atmosphere[layer][2][11] *= hno3 / scale_values[2][11]

        #Now, Read in the ParameterFile and edit the necessary parameters
        parameters = MakeTape5.ReadParFile(parameterfile=TelluricModelingDir +
                                           "ParameterFile")
        parameters[48] = "%.1f" % lat
        parameters[49] = "%.1f" % alt
        parameters[51] = "%.5f" % angle
        parameters[17] = lowfreq
        freq, transmission = np.array([]), np.array([])

        #Need to run lblrtm several times if the wavelength range is too large.
        maxdiff = 1999.9
        if (highfreq - lowfreq > maxdiff):
            while lowfreq + maxdiff <= highfreq:
                parameters[18] = lowfreq + maxdiff

                MakeTape5.WriteTape5(parameters,
                                     output=TelluricModelingDir + "TAPE5",
                                     atmosphere=Atmosphere)

                #Run lblrtm
                cmd = "cd " + TelluricModelingDir + ";sh runlblrtm_v3.sh"
                try:
                    if self.print_lblrtm_output:
                        command = subprocess.check_call(cmd, shell=True)
                    if not self.print_lblrtm_output:
                        command = subprocess.check_call(
                            cmd,
                            shell=True,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)

                except subprocess.CalledProcessError:
                    raise subprocess.CalledProcessError(
                        "Error: Command '{}' failed in directory {}".format(
                            cmd, TelluricModelingDir))

                #Read in TAPE12, which is the output of LBLRTM
                freq, transmission = self.ReadTAPE12(TelluricModelingDir,
                                                     appendto=(freq,
                                                               transmission))
                lowfreq = lowfreq + 2000.00001
                parameters[17] = lowfreq

        parameters[18] = highfreq
        MakeTape5.WriteTape5(parameters,
                             output=TelluricModelingDir + "TAPE5",
                             atmosphere=Atmosphere)

        #Run lblrtm for the last time
        cmd = "cd " + TelluricModelingDir + ";sh runlblrtm_v3.sh"
        try:
            if self.print_lblrtm_output:
                command = subprocess.check_call(cmd, shell=True)
            if not self.print_lblrtm_output:
                command = subprocess.check_call(cmd,
                                                shell=True,
                                                stdout=subprocess.DEVNULL,
                                                stderr=subprocess.DEVNULL)

        except subprocess.CalledProcessError:
            raise subprocess.CalledProcessError(
                "Error: Command '{}' failed in directory {}".format(
                    cmd, TelluricModelingDir))

        #Read in TAPE12, which is the output of LBLRTM
        freq, transmission = self.ReadTAPE12(TelluricModelingDir,
                                             appendto=(freq, transmission))

        #Convert from frequency to wavelength units
        wavelength = units.cm.to(units.nm) / freq

        #Correct for index of refraction of air (use IAU standard conversion from
        #  Morton, D. C. 1991, ApJS, 77, 119)
        if vac2air:
            wave_A = wavelength * units.nm.to(
                units.angstrom)  # Wavelength in angstroms
            n = 1.0 + 2.735182e-4 + 131.4182 / wave_A**2 + 2.76249e8 / wave_A**4
            wavelength /= n

        if save:
            #Output filename
            model_name = ModelDir + "transmission" + "-%.2f" % pressure + "-%.2f" % temperature + "-%.1f" % humidity + "-%.1f" % angle + "-%.2f" % (
                co2) + "-%.2f" % (o3 * 100) + "-%.2f" % ch4 + "-%.2f" % (co *
                                                                         10)
            logging.info(
                "All done! Output Transmission spectrum is located in the file below:\n\t\t{}"
                .format(model_name))

            np.savetxt(model_name,
                       np.transpose((wavelength[::-1], transmission[::-1])),
                       fmt="%.8g")
            if libfile is not None:
                infile = open(libfile, "a")
                infile.write(model_name + "\n")
                infile.close()

        self.Cleanup()  #Un-lock the working directory

        if wavegrid is not None:
            model = DataStructures.xypoint(x=wavelength[::-1],
                                           y=transmission[::-1])
            return FittingUtilities.RebinData(model, wavegrid)

        return DataStructures.xypoint(x=wavelength[::-1], y=transmission[::-1])
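
A hedged usage sketch: the enclosing class and its constructor are not shown in this snippet, so the 'modeler' instance below is an assumption.

# Hypothetical usage: build a telluric model over the default 4000-4600 cm^-1 range.
telluric = modeler.MakeModel(pressure=780.0, temperature=275.0,
                             angle=30.0, humidity=40.0)
# telluric is a DataStructures.xypoint: x in nm, y in fractional transmission.
print(telluric.x.min(), telluric.x.max())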
Example #47
0
def load_csv_tables(db_dir, allow_new_dir=True):
    '''
    Big function which loads the csv tables from a database directory
    Returns HotspotterDirs and HotspotterTables
    '''
    if 'vdd' in sys.argv:
        helpers.vd(db_dir)
    print('\n=============================')
    print('[ld2] Loading hotspotter csv tables: %r' % db_dir)
    print('=============================')
    hs_dirs = ds.HotspotterDirs(db_dir)
    hs_tables = ds.HotspotterTables()
    #exec(hs_dirs.execstr('hs_dirs'))
    #print(hs_dirs.execstr('hs_dirs'))
    img_dir = hs_dirs.img_dir
    internal_dir = hs_dirs.internal_dir
    db_dir = hs_dirs.db_dir
    # --- Table Names ---
    chip_table = join(internal_dir, CHIP_TABLE_FNAME)
    name_table = join(internal_dir, NAME_TABLE_FNAME)
    image_table = join(internal_dir, IMAGE_TABLE_FNAME)  # TODO: Make optional
    # --- CHECKS ---
    has_dbdir = helpers.checkpath(db_dir)
    has_imgdir = helpers.checkpath(img_dir)
    has_chiptbl = helpers.checkpath(chip_table)
    has_nametbl = helpers.checkpath(name_table)
    has_imgtbl = helpers.checkpath(image_table)

    # ChipTable Header Markers
    header_numdata = '# NumData '
    header_csvformat_re = '# *ChipID,'
    v12_csvformat_re = r'#[0-9]*\) '
    # Default ChipTable Header Variables
    chip_csv_format = [
        'ChipID', 'ImgID', 'NameID', 'roi[tl_x  tl_y  w  h]', 'theta'
    ]
    v12_csv_format = ['instance_id', 'image_id', 'name_id', 'roi']

    # TODO DETECT OLD FORMATS HERE
    db_version = 'current'
    isCurrentVersion = all(
        [has_dbdir, has_imgdir, has_chiptbl, has_nametbl, has_imgtbl])
    print('[ld2] isCurrentVersion=%r' % isCurrentVersion)
    IS_VERSION_1_OR_2 = False

    if not isCurrentVersion:
        helpers.checkpath(db_dir, verbose=True)
        helpers.checkpath(img_dir, verbose=True)
        helpers.checkpath(chip_table, verbose=True)
        helpers.checkpath(name_table, verbose=True)
        helpers.checkpath(image_table, verbose=True)
        import db_info

        def assign_alternate(tblname):
            path = join(db_dir, tblname)
            if helpers.checkpath(path, verbose=True):
                return path
            path = join(db_dir, '.hs_internals', tblname)
            if helpers.checkpath(path, verbose=True):
                return path
            raise Exception('bad state=%r' % tblname)

        #
        if db_info.has_v2_gt(db_dir):
            IS_VERSION_1_OR_2 = True
            db_version = 'hotspotter-v2'
            chip_csv_format = []
            header_csvformat_re = v12_csvformat_re
            chip_table = assign_alternate('instance_table.csv')
            name_table = assign_alternate('name_table.csv')
            image_table = assign_alternate('image_table.csv')
        #
        elif db_info.has_v1_gt(db_dir):
            IS_VERSION_1_OR_2 = True
            db_version = 'hotspotter-v1'
            chip_csv_format = []
            header_csvformat_re = v12_csvformat_re
            chip_table = assign_alternate('animal_info_table.csv')
            name_table = assign_alternate('name_table.csv')
            image_table = assign_alternate('image_table.csv')
        #
        elif db_info.has_ss_gt(db_dir):
            db_version = 'stripespotter'
            chip_table = join(db_dir, 'SightingData.csv')

            chip_csv_format = [
                'imgindex', 'original_filepath', 'roi', 'animal_name'
            ]
            header_csvformat_re = '#imgindex,'
            #raise NotImplementedError('stripe spotter conversion')
            if not helpers.checkpath(chip_table, verbose=True):
                raise Exception('bad state chip_table=%r' % chip_table)
        else:
            try:
                db_version = 'current'  # Well almost
                chip_table = assign_alternate(CHIP_TABLE_FNAME)
                name_table = assign_alternate(NAME_TABLE_FNAME)
                image_table = assign_alternate(IMAGE_TABLE_FNAME)
            except Exception:
                if db_info.has_partial_gt(db_dir):
                    print('[ld2] detected incomplete database')
                    raise NotImplementedError('partial database recovery')
                elif allow_new_dir:
                    print('[ld2] detected new dir')
                    hs_dirs.ensure_dirs()
                    return hs_dirs, hs_tables, 'newdb'
                else:
                    import traceback
                    print(traceback.format_exc())
                    print('[ld2] I AM IN A BAD STATE!')
                    errmsg = ''
                    errmsg += ('\n\n!!!!!\n\n')
                    errmsg += ('  ! The data tables seem to not be loaded')
                    errmsg += (' Files in internal dir: %r' % internal_dir)
                    for fname in os.listdir(internal_dir):
                        errmsg += ('   ! %s' % fname)
                    errmsg += ('\n\n!!!!!\n\n')
                    print(errmsg)
                    raise Exception(errmsg)
    if not helpers.checkpath(chip_table):
        raise Exception('bad state chip_table=%r' % chip_table)
    print('[ld2] detected %r' % db_version)
    hs_dirs.ensure_dirs()
    print('-------------------------')
    print('[ld2] Loading database tables: ')
    cid_lines = []
    line_num = 0
    csv_line = ''
    csv_fields = []

    # RCOS TODO: We need a more general csv read function
    # which can handle all of these little corner cases dealt with here.

    try:
        # ------------------
        # --- READ NAMES ---
        # ------------------
        print('[ld2] Loading name table: %r' % name_table)
        nx2_name = [UNKNOWN_NAME, UNKNOWN_NAME]
        nid2_nx = {0: 0, 1: 1}
        name_lines = open(name_table, 'r')
        for line_num, csv_line in enumerate(name_lines):
            csv_line = csv_line.strip('\n\r\t ')
            if len(csv_line) == 0 or csv_line.find('#') == 0:
                continue
            csv_fields = [
                _.strip(' ') for _ in csv_line.strip('\n\r ').split(',')
            ]
            nid = int(csv_fields[0])
            name = csv_fields[1]
            nid2_nx[nid] = len(nx2_name)
            nx2_name.append(name)
        name_lines.close()
        if VERBOSE_LOAD_DATA:
            print('[ld2] * Loaded %r names (excluding unknown names)' %
                  (len(nx2_name) - 2))
            print('[ld2] * Done loading name table')
    except IOError as ex:
        print('IOError: %r' % ex)
        print('[ld2.name] loading without name table')
        #raise
    except Exception as ex:
        print('[ld2.name] ERROR %r' % ex)
        #print('[ld2.name] ERROR name_tbl parsing: %s' % (''.join(cid_lines)))
        print('[ld2.name] ERROR on line number:  %r' % (line_num))
        print('[ld2.name] ERROR on line:         %r' % (csv_line))
        print('[ld2.name] ERROR on fields:       %r' % (csv_fields))

    try:
        # -------------------
        # --- READ IMAGES ---
        # -------------------
        gx2_gname = []
        gx2_aif = []
        gid2_gx = {}  # this is not used. It can probably be removed

        def add_image(gname, aif, gid):
            gx = len(gx2_gname)
            gx2_gname.append(gname)
            gx2_aif.append(aif)
            if gid is not None:
                # this is not used. It can probably be removed
                gid2_gx[gid] = gx

        print('[ld2] Loading images')
        # Load Image Table
        # <LEGACY CODE>
        if VERBOSE_LOAD_DATA:
            print('[ld2] * Loading image table: %r' % image_table)
        gid_lines = open(image_table, 'r').readlines()
        for line_num, csv_line in enumerate(gid_lines):
            csv_line = csv_line.strip('\n\r\t ')
            if len(csv_line) == 0 or csv_line.find('#') == 0:
                continue
            csv_fields = [
                _.strip(' ') for _ in csv_line.strip('\n\r ').split(',')
            ]
            gid = int(csv_fields[0])
            # You have 3 csv fields. Format == gid, gname.ext, aif
            if len(csv_fields) == 3:
                gname = csv_fields[1]
                aif = csv_fields[2].lower() in ['true', '1']  # convert to bool correctly
            # You have 4 csv fields. Format == gid, gname, ext, aif
            if len(csv_fields) == 4:
                gname = '.'.join(csv_fields[1:3])
                aif = csv_fields[3].lower() in ['true', '1']
            add_image(gname, aif, gid)
        nTableImgs = len(gx2_gname)
        fromTableNames = set(gx2_gname)
        if VERBOSE_LOAD_DATA:
            print('[ld2] * table specified %r images' % nTableImgs)
            # </LEGACY CODE>
            # Load Image Directory
            print('[ld2] * Loading image directory: %r' % img_dir)
        nDirImgs = 0
        nDirImgsAlready = 0
        for fname in os.listdir(img_dir):
            if fname.lower().endswith(('.jpg', '.png', '.tiff')):
                if fname in fromTableNames:
                    nDirImgsAlready += 1
                    continue
                add_image(fname, False, None)
                nDirImgs += 1
        if VERBOSE_LOAD_DATA:
            print('[ld2] * dir specified %r images' % nDirImgs)
            print('[ld2] * %r were already specified in the table' %
                  nDirImgsAlready)
            print('[ld2] * Loaded %r images' % len(gx2_gname))
            print('[ld2] * Done loading images')
    except IOError as ex:
        print('IOError: %r' % ex)
        print('[ld2.img] loading without image table')
        #raise
    except Exception as ex:
        print('[ld2!.img] ERROR %r' % ex)
        #print('[ld2.img] ERROR image_tbl parsing: %s' % (''.join(cid_lines)))
        print('[ld2!.img] ERROR on line number:  %r' % (line_num))
        print('[ld2!.img] ERROR on line:         %r' % (csv_line))
        print('[ld2!.img] ERROR on fields:       %r' % (csv_fields))
        raise

    try:
        # ------------------
        # --- READ CHIPS ---
        # ------------------
        print('[ld2] Loading chip table: %r' % chip_table)
        # Load Chip Table Header
        cid_lines = open(chip_table, 'r').readlines()
        num_data = -1
        # Parse Chip Table Header
        for line_num, csv_line in enumerate(cid_lines):
            #print('[LINE %4d] %r' % (line_num, csv_line))
            csv_line = csv_line.strip('\n\r\t ')
            if len(csv_line) == 0:
                #print('[LINE %4d] BROKEN' % (line_num))
                continue
            csv_line = csv_line.strip('\n')
            if csv_line.find('#') != 0:
                #print('[LINE %4d] BROKEN' % (line_num))
                break  # Break after header
            if re.search(header_csvformat_re, csv_line) is not None:
                #print('[LINE %4d] SEARCH' % (line_num))
                # Specified Header Variables
                if IS_VERSION_1_OR_2:
                    #print(csv_line)
                    end_ = csv_line.find('-')
                    if end_ != -1:
                        end_ = end_ - 1
                        #print('end_=%r' % end_)
                        fieldname = csv_line[5:end_]
                    else:
                        fieldname = csv_line[5:]
                    #print(fieldname)
                    chip_csv_format += [fieldname]

                else:
                    chip_csv_format = [
                        _.strip() for _ in csv_line.strip('#').split(',')
                    ]
                #print('[ld2] read chip_csv_format: %r' % chip_csv_format)
            if csv_line.find(header_numdata) == 0:
                #print('[LINE %4d] NUM_DATA' % (line_num))
                num_data = int(csv_line.replace(header_numdata, ''))
        if IS_VERSION_1_OR_2 and len(chip_csv_format) == 0:
            chip_csv_format = v12_csv_format
        if VERBOSE_LOAD_DATA:
            print('[ld2] * num_chips: %r' % num_data)
            print('[ld2] * chip_csv_format: %r ' % chip_csv_format)
        #print('[ld2.chip] Header Columns: %s\n    ' % '\n   '.join(chip_csv_format))
        cid_x = tryindex(chip_csv_format, 'ChipID', 'imgindex', 'instance_id')
        gid_x = tryindex(chip_csv_format, 'ImgID', 'image_id')
        nid_x = tryindex(chip_csv_format, 'NameID', 'name_id')
        roi_x = tryindex(chip_csv_format, 'roi[tl_x  tl_y  w  h]', 'roi')
        theta_x = tryindex(chip_csv_format, 'theta')
        # new fields
        gname_x = tryindex(chip_csv_format, 'Image', 'original_filepath')
        name_x = tryindex(chip_csv_format, 'Name', 'animal_name')
        required_x = [cid_x, gid_x, gname_x, nid_x, name_x, roi_x, theta_x]
        # Hotspotter Chip Tables
        cx2_cid = []
        cx2_nx = []
        cx2_gx = []
        cx2_roi = []
        cx2_theta = []
        # x is a csv field index in this context
        # get csv indexes which are unknown properties
        prop_x_list = np.setdiff1d(range(len(chip_csv_format)),
                                   required_x).tolist()
        px2_prop_key = [chip_csv_format[x] for x in prop_x_list]
        prop_dict = {}
        for prop in iter(px2_prop_key):
            prop_dict[prop] = []
        if VERBOSE_LOAD_DATA:
            print('[ld2] * num_user_properties: %r' % (len(prop_dict.keys())))
        # Parse Chip Table
        for line_num, csv_line in enumerate(cid_lines):
            csv_line = csv_line.strip('\n\r\t ')
            if len(csv_line) == 0 or csv_line.find('#') == 0:
                continue
            csv_fields = [
                _.strip(' ') for _ in csv_line.strip('\n\r ').split(',')
            ]
            #
            # Load Chip ID
            try:
                cid = int(csv_fields[cid_x])
            except ValueError:
                print('[ld2!] cid_x = %r' % cid_x)
                print('[ld2!] csv_fields = %r' % csv_fields)
                print('[ld2!] csv_fields[cid_x] = %r' % csv_fields[cid_x])
                print(chip_csv_format)
                raise
            #
            # Load Chip ROI Info
            if roi_x != -1:
                roi_str = csv_fields[roi_x].strip('[').strip(']')
                roi = [int(round(float(_))) for _ in roi_str.split()]
            #
            # Load Chip theta Info
            if theta_x != -1:
                theta = float(csv_fields[theta_x])
            else:
                theta = 0
            #
            # Load Image ID/X
            if gid_x != -1:
                gid = int(csv_fields[gid_x])
                gx = gid2_gx[gid]
            elif gname_x != -1:
                gname = csv_fields[gname_x]
                if db_version == 'stripespotter':
                    if not exists(gname):
                        gname = 'img-%07d.jpg' % cid
                        gpath = join(db_dir, 'images', gname)
                        w, h = Image.open(gpath).size
                        roi = [1, 1, w, h]
                try:
                    gx = gx2_gname.index(gname)
                except ValueError:
                    gx = len(gx2_gname)
                    gx2_gname.append(gname)
            #
            # Load Name ID/X
            if nid_x != -1:
                #print('namedbg csv_fields=%r' % csv_fields)
                #print('namedbg nid_x = %r' % nid_x)
                nid = int(csv_fields[nid_x])
                #print('namedbg %r' % nid)
                nx = nid2_nx[nid]
            elif name_x != -1:
                name = csv_fields[name_x]
                try:
                    nx = nx2_name.index(name)
                except ValueError:
                    nx = len(nx2_name)
                    nx2_name.append(name)
            # Append info to cid lists
            cx2_cid.append(cid)
            cx2_gx.append(gx)
            cx2_nx.append(nx)
            cx2_roi.append(roi)
            cx2_theta.append(theta)
            for px, x in enumerate(prop_x_list):
                prop = px2_prop_key[px]
                prop_val = csv_fields[x]
                prop_dict[prop].append(prop_val)
    except Exception as ex:
        print('[chip.ld2] ERROR %r' % ex)
        #print('[chip.ld2] ERROR parsing: %s' % (''.join(cid_lines)))
        print('[chip.ld2] ERROR reading header:  %r' % (line_num))
        print('[chip.ld2] ERROR on line number:  %r' % (line_num))
        print('[chip.ld2] ERROR on line:         %r' % (csv_line))
        print('[chip.ld2] ERROR on fields:       %r' % (csv_fields))
        raise

    if VERBOSE_LOAD_DATA:
        print('[ld2] * Loaded: %r chips' % (len(cx2_cid)))
        print('[ld2] * Done loading chip table')

    # Return all information from load_tables
    #hs_tables.gid2_gx = gid2_gx
    #hs_tables.nid2_nx  = nid2_nx
    hs_tables.init(gx2_gname, gx2_aif, nx2_name, cx2_cid, cx2_nx, cx2_gx,
                   cx2_roi, cx2_theta, prop_dict)

    print('[ld2] Done Loading hotspotter csv tables: %r' % (db_dir))
    if 'vcd' in sys.argv:
        helpers.vd(hs_dirs.computed_dir)
    return hs_dirs, hs_tables, db_version
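
Usage follows directly from the docstring and the return statement; the directory path is hypothetical:

# Hypothetical usage: load the tables and branch on the detected version.
hs_dirs, hs_tables, db_version = load_csv_tables('/path/to/db_dir')
if db_version == 'newdb':
    print('starting from an empty database')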
Example #48
0
            self.prop9 = Pref.Pref()
            self.prop9.subprop = ''

    guitools.configure_matplotlib()
    app, is_root = guitools.init_qtapp()
    backend = None
    #guitools.make_dummy_main_window()
    prefs = Preferences.Pref()
    r = prefs
    r.a = Preferences.Pref()
    r.b = Preferences.Pref('pref value 1')
    r.c = Preferences.Pref('pref value 2')
    r.a.d = Preferences.Pref('nested1')
    r.a.e = Preferences.Pref()
    r.a.f = Preferences.Pref('nested3')
    r.a.e.g = Preferences.Pref('nested4')

    pref2 = Pref2()

    r.pref2 = pref2
    chip_cfg = ds.make_chip_cfg()
    r.chip_cfg = chip_cfg
    #feat_cfg = ds.make_feat_cfg()
    #r.feat_cfg = feat_cfg
    #query_cfg = ds.make_vsmany_cfg()
    #r.query_cfg = query_cfg

    print(prefs)
    prefWidget = prefs.createQWidget()
    guitools.run_main_loop(app, is_root, backend)
Example #49
0
def make_nn_index(hs, sx2_cx=None):
    if sx2_cx is None:
        sx2_cx = hs.indexed_sample_cx
    data_index = ds.NNIndex(hs, sx2_cx)
    return data_index
Example #50
0
 def __init__(self, numberOfVertices):
     self.MST = []
     self.DS = DS.CreateGraphUsingDisjointSets(numberOfVertices)
     self.time = ""
Example #51
0
def main(cm_file, cdis_file, output_dir):
    #------------------------------ BEGIN LOADING ------------------------------#

    print(">>>: Carregando cenário")
    scenario = Scenario.Scenario()
    scenario.read(cm_file, cdis_file)

    scenario_name = os.path.splitext(os.path.basename(cm_file))[0]
    cdis_info_name = os.path.splitext(os.path.basename(cdis_file))[0]

    scenario_path = os.path.join(output_dir, scenario_name + cdis_info_name)

    result_log_path = os.path.join(scenario_path, "result_log.csv")
    result_fig_path = os.path.join(scenario_path, "figures")
    result_res_path = os.path.join(scenario_path, "results")

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if not os.path.exists(scenario_path):
        os.makedirs(scenario_path)
    else:
        shutil.rmtree(scenario_path)
        os.makedirs(scenario_path)

    if not os.path.exists(result_fig_path):
        os.makedirs(result_fig_path)

    if not os.path.exists(result_res_path):
        os.makedirs(result_res_path)

    Visualization.visualize(scenario, scenario.cm.ceList, result_fig_path,
                            "00")

    #------------------------------ END LOADING ------------------------------#

    with open(result_log_path, "w+") as resultLogFile:
        resultLogFile.write(
            "Data_e_Hora;Nome_do_cenário;Caminho_do_arquivo_do_cenário;Nome_das_informações_do_CDIS;Caminho_do_arquivo_das_informações_do_cdis;Tempo_de_execução\n"
        )
        resultLogFile.write(datetime.now().strftime("%d/%m/%Y %H:%M:%S") +
                            ";" + scenario_name + ";" + cm_file + ";" +
                            cdis_info_name + ";" + cdis_file)

        startTime = time.time()
        totalIteration = len(scenario.cdisList)

        for iteration in range(0, totalIteration):
            print("\n>>>: Resolvendo cenário com informações do CDIS=" +
                  str(iteration) + "/" + str(totalIteration))
            scenario.updateChannels(scenario.cdisList[iteration].channelList)

            #------------------------------ BEGIN PREPROCESSING ------------------------------#

            print(">>>: Pré-processando entrada")
            (allCEVarList, ceVarList, ceByChannelVarList,
             interferenceList) = PreProcessing.process(scenario)

            #------------------------------ END PREPROCESSING ------------------------------#

            #------------------------------ BEGIN SOLVER ------------------------------#

            print(">>>: Criando modelo")
            model = gb.Model('cognitive-networks')

            print(">>>: Adicionando variáveis ao modelo")
            allCEModelVarList = []

            for ceVar in allCEVarList:
                allCEModelVarList.append(
                    model.addVar(name=ceVar.name, vtype=gb.GRB.BINARY))

            ceByChannelModelVarList = []

            for ceByChannelVar in ceByChannelVarList:
                ceByChannelModelVarList.append(
                    model.addVar(name=ceByChannelVar.name))

            model.update()

            ceModelVarList = []

            for ceVars in ceVarList:
                modelVarList = []

                for ceVar in ceVars:
                    modelVarList.append(model.getVarByName(ceVar.name))

                ceModelVarList.append(modelVarList)

            print(">>>: Adicionando restrições ao modelo")
            ceId = 0

            for ceModelVars in ceModelVarList:
                model.addConstr(gb.quicksum(ceModelVars), gb.GRB.EQUAL, 1,
                                "Única_configuração_para_CE_" + str(ceId))

                ceId += 1

            interferenceModelVarList = []

            for interference in interferenceList:
                ceVar = interference[0]
                ceTotalInterference = interference[1]
                ceInterferenceList = interference[2]

                if (ceTotalInterference > 0):
                    interferenceModelVar = model.addVar(
                        name="Interferência-devido-" + ceVar.name)
                    interferenceModelVarList.append(interferenceModelVar)
                    model.update()

                    ceInterferenceModelVarList = []

                    for ceInterference in ceInterferenceList:
                        ceInterferenceModelVarList.append(
                            ceInterference * model.getVarByName(ceVar.name))

                    model.addConstr(
                        gb.quicksum(ceInterferenceModelVarList), gb.GRB.EQUAL,
                        interferenceModelVar,
                        "Interferência_provocada_por_" + ceVar.name)

                    model.addConstr(
                        interferenceModelVar, gb.GRB.LESS_EQUAL,
                        args.max_interference,
                        "Máximo_de_interferência_tolerada_de_" + ceVar.name)

            for ceByChannelModelVar in ceByChannelModelVarList:
                ceByChannelVarNameSplit = ceByChannelModelVar.varName.split(
                    '_')

                channel = int(ceByChannelVarNameSplit[3])

                filtredCEModelVarList = PreProcessing.filterCEByChannelModelVar(
                    allCEModelVarList, channel)

                model.addConstr(gb.quicksum(filtredCEModelVarList),
                                gb.GRB.EQUAL, ceByChannelModelVar,
                                "Qtd_de_CE_no_canal_" + str(channel))

                model.addConstr(
                    ceByChannelModelVar, gb.GRB.LESS_EQUAL,
                    min(len(scenario.cm.ceList),
                        ((len(scenario.cm.ceList) /
                          PreProcessing.countAvailableChannels(scenario)) +
                         1)), "Máximo_de_CEs_no_canal_" + str(channel))

            ceId = 0

            for ceModelVars in ceModelVarList:
                potencyList = []

                for ceModelVar in ceModelVars:
                    ceModelVarNameSplit = ceModelVar.varName.split("_")
                    cePotency = int(ceModelVarNameSplit[3])
                    potencyList.append(cePotency * ceModelVar)

                model.addConstr(
                    gb.quicksum(potencyList), gb.GRB.GREATER_EQUAL,
                    args.min_potency,
                    "Mínimo_de_potência_para_máxima_cobertura_do_CE_" +
                    str(ceId))

                ceId += 1

            print(">>>: Definindo a função objetivo")
            model.setObjective(gb.quicksum(interferenceModelVarList),
                               gb.GRB.MINIMIZE)

            model.write(
                os.path.join(result_res_path,
                             "model_it_" + str(iteration) + ".lp"))
            print(">>>: Modelo salvo")

            print(">>>: Otimizando modelo")
            model.optimize()

            resultCEVarList = []

            with open(
                    os.path.join(result_res_path,
                                 "it_" + str(iteration) + ".txt"),
                    "w") as resultFile:
                if (model.status == gb.GRB.Status.OPTIMAL):
                    resultFile.write(">>>: Optimal result:\n")
                    print(">>>: Optimal result:")

                    for ceModelVar in allCEModelVarList:
                        if (ceModelVar.x == 1.0):
                            resultCEVarList.append(ceModelVar.varName)
                            resultFile.write("%s\n" % ceModelVar.varName)
                            print("%s" % ceModelVar.varName)

                    for interferenceModelVar in interferenceModelVarList:
                        ceModelVar = model.getVarByName(
                            interferenceModelVar.varName.split("-")[2])

                        if ((ceModelVar.x == 1.0)
                                and (interferenceModelVar.x > 0.0)):
                            resultFile.write("%s %s\n" %
                                             (interferenceModelVar.varName,
                                              interferenceModelVar.x))
                            print("%s %s" % (interferenceModelVar.varName,
                                             interferenceModelVar.x))

                    for ceByChannelModelVar in ceByChannelModelVarList:
                        resultFile.write("%s %s\n" %
                                         (ceByChannelModelVar.varName,
                                          ceByChannelModelVar.x))
                        print("%s %s" % (ceByChannelModelVar.varName,
                                         ceByChannelModelVar.x))
                elif (model.status == gb.GRB.Status.INFEASIBLE):
                    resultFile.write(">>>: The model is infeasible!\n")
                    print(">>>: The model is infeasible!")

                    print(">>>: Computing IIS")
                    model.computeIIS()

                    resultFile.write(
                        "\n>>>: The following constraints were not satisfied:\n"
                    )
                    print(">>>: The following constraints were not satisfied:")
                    for c in model.getConstrs():
                        if c.IISConstr:
                            resultFile.write("%s\n" % c.constrName)
                            print("%s" % c.constrName)

                    print(">>>: Otimizando modelo relaxado")
                    model.feasRelaxS(0, False, False, True)
                    model.optimize()

                    if (model.status == gb.GRB.Status.OPTIMAL):
                        resultFile.write(
                            "\n>>>: Optimal result of the relaxed model:\n")
                        print(">>>: Optimal result of the relaxed model:")

                        for ceModelVar in allCEModelVarList:
                            if (ceModelVar.x == 1.0):
                                resultCEVarList.append(ceModelVar.varName)
                                resultFile.write("%s\n" % ceModelVar.varName)
                                print("%s" % ceModelVar.varName)

                        for interferenceModelVar in interferenceModelVarList:
                            ceModelVar = model.getVarByName(
                                interferenceModelVar.varName.split("-")[2])

                            if ((ceModelVar.x == 1.0)
                                    and (interferenceModelVar.x > 0.0)):
                                resultFile.write("%s %s\n" %
                                                 (interferenceModelVar.varName,
                                                  interferenceModelVar.x))
                                print("%s %s" % (interferenceModelVar.varName,
                                                 interferenceModelVar.x))

                        for ceByChannelModelVar in ceByChannelModelVarList:
                            resultFile.write("%s %s\n" %
                                             (ceByChannelModelVar.varName,
                                              ceByChannelModelVar.x))
                            print("%s %s" % (ceByChannelModelVar.varName,
                                             ceByChannelModelVar.x))
                    elif (model.status in (gb.GRB.Status.INF_OR_UNBD,
                                           gb.GRB.Status.UNBOUNDED,
                                           gb.GRB.Status.INFEASIBLE)):
                        print(
                            ">>>: The relaxed model cannot be solved because it is unbounded or infeasible"
                        )
                    else:
                        resultFile.write(
                            ">>>: Optimization stopped with status: %d\n" %
                            model.status)
                        print(">>>: Optimization stopped with status: %d" %
                              model.status)
                elif (model.status == gb.GRB.Status.UNBOUNDED):
                    resultFile.write(
                        ">>>: The model cannot be solved because it is unbounded\n"
                    )
                    print(
                        ">>>: The model cannot be solved because it is unbounded"
                    )
                else:
                    resultFile.write(
                        ">>>: Optimization stopped with status: %d\n" %
                        model.status)
                    print(">>>: Optimization stopped with status: %d" %
                          model.status)

            resultCEList = []

            for resultCEVar in resultCEVarList:
                ceVarNameSplit = resultCEVar.split('_')

                ceId = int(ceVarNameSplit[1])
                resultCEChannelNumber = int(ceVarNameSplit[2])
                resultCEPotency = int(ceVarNameSplit[3])

                ce = scenario.cm.ceList[ceId]

                resultCEList.append(
                    DataStructures.CE(ceId, ce.antenna, resultCEChannelNumber,
                                      ce.geoPoint, resultCEPotency,
                                      ce.maxPotency, ce.clientList))

            #------------------------------ END SOLVER ------------------------------#

            #------------------------------ BEGIN VISUALIZATION ------------------------------#

            if (len(resultCEVarList) > 0):
                Visualization.visualize(scenario, resultCEList,
                                        result_fig_path, str(iteration))

            #------------------------------ END VISUALIZATION ------------------------------#

        resultLogFile.write(";" + str((time.time() - startTime)))