Example #1
 def noisefloor(self):
     if self._noisefloor is None:
         if self.config.start_slice.start == 0:
             self._noisefloor = np.max(gauss(self._original[self.config.line_trim:-4], self.config.gauss))
         else:
             self._noisefloor = np.max(gauss(self._original[:self.config.start_slice.start], self.config.gauss))
     return self._noisefloor
Example #2
 def noisefloor(self):
     if self._noisefloor is None:
         if self.config.start_slice.start == 0:
             self._noisefloor = np.max(gauss(self._resampled[self.config.line_trim:-4], self.config.gauss))
         else:
             self._noisefloor = np.max(gauss(self._resampled[:self.config.start_slice.start], self.config.gauss))
     return self._noisefloor
Example #3
    def find_offset_and_scale(self):
        '''Tries to find the offset of the vbi data in the raw samples.'''

        # Split into chunks and check that each chunk contains something "interesting"
        # (enough variation); bail out if any chunk is essentially flat.
        target = gauss(self.vbi, self.gauss_sd_offset)
        d = [np.std(target[x:x+128]) < 5.0 for x in range(64, 1440, 128)]
        if any(d):
            return False

        low = 64
        high = 256
        target = gauss(self.vbi[low:high], self.gauss_sd_offset)

        def _inner(offset):
            self.g.set_offset(offset)

            self.g.update_cri(low, high)
            guess_scaled = self.g.convolved[low:high]
            mask_scaled = self.g.mask[low:high]

            a = guess_scaled*mask_scaled
            b = np.clip(target*mask_scaled, self.black, 256)

            scale = a.std()/b.std()
            b -= self.black
            b *= scale
            a = np.clip(a, 0, 256*scale)

            return np.sum(np.square(b-a))

        offset = fminbound(_inner, self.offset_low, self.offset_high)

        # Call _inner once more at the best offset so that self.offset and self.scale are set.
        return (_inner(offset) < 10)
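For reference, scipy.optimize.fminbound (used above) performs bounded scalar minimisation; a minimal standalone sketch with a hypothetical quadratic objective in place of _inner:

import numpy as np
from scipy.optimize import fminbound

def objective(offset):
    # Hypothetical stand-in for _inner(): squared error with a minimum at offset = 3.2.
    return np.square(offset - 3.2)

best = fminbound(objective, 0.0, 10.0)   # search the interval [0, 10]
print(best)                              # ~3.2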
Example #4
# Assumed import: gauss is taken here to be scipy.ndimage.gaussian_filter
# (the per-axis sigma sequences below imply the N-dimensional filter).
from scipy.ndimage import gaussian_filter as gauss

def Apply_Smoothing_To_Shear_Channels(Data_Type, Data, sigma_pxl):
    # Read Data_Type to determine whether Data contains shear only
    # or shear+kappa. Smoothing is only applied to the shear maps.

    if Data_Type=="Shear":
        Data = gauss(Data, sigma=[0,0,sigma_pxl,sigma_pxl])
        
    elif Data_Type=="ShearKappa":
        # Omit every 3rd map from smoothing (it's a kappa map, pre-smoothed in mass recon).
        for j in range( Data.shape[1] ):
            if (j+1)%3 != 0:
                Data[:,j,:,:] = gauss(Data[:,j,:,:], sigma=[0,sigma_pxl,sigma_pxl])

    return Data
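A minimal sketch of the per-axis sigma idiom used above, assuming a 4-D stack where only the last two (spatial) axes should be smoothed; the array shape is illustrative:

import numpy as np
from scipy.ndimage import gaussian_filter as gauss

stack = np.random.rand(3, 6, 64, 64)             # e.g. (realisation, channel, y, x)
smoothed = gauss(stack, sigma=[0, 0, 2.0, 2.0])  # sigma=0 leaves an axis untouched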
Example #5
 def fft(self):
     """The FFT of the original line."""
     if self._fft is None:
         # This test only looks at the bins for the harmonics.
         # It could be made smarter by looking at all bins.
         self._fft = normalise(gauss(np.abs(np.fft.fft(np.diff(self._original, n=1))[:256]), 4))
     return self._fft
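Read from the inside out, the one-liner differentiates the line, takes the magnitude of the first 256 FFT bins, smooths it, and normalises it. A step-by-step sketch; the input array and the gauss alias (scipy.ndimage.gaussian_filter1d) are assumptions, and normalise is a project helper:

import numpy as np
from scipy.ndimage import gaussian_filter1d as gauss

samples = np.random.randn(2048)            # stand-in for self._original
d = np.diff(samples, n=1)                  # first difference of the sampled line
spectrum = np.abs(np.fft.fft(d))[:256]     # magnitude of the first 256 FFT bins
smoothed = gauss(spectrum, 4)              # smooth with sigma = 4 bins
# normalise(smoothed) would then rescale the result (project-specific helper).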
Example #7
 def showTextLabel(self, x, y, secure=25):
     """
     add labels of principle peaks of spectrum or chroma
     on the plot, return the labels, that we can show hide
     
     """
     maxis = []  #will contain tuple(rt, intens)
     indexes = []
     #from core.MetObjects import MSAbstractTypes
     from scipy.ndimage import gaussian_filter1d as gauss
     z = gauss(y, 1)
     #z = MSAbstractTypes.computeBaseLine(z, 92., 0.8)
     i = 0
     while i < len(z) - 1:
         while z[i + 1] >= z[i] and i < len(y) - 2:
             i += 1
         maxis.append((x[i], y[i]))
         indexes.append(i)
         while z[i + 1] <= z[i] and i < len(z) - 2:
             i += 1
         i += 1
     labels = []
     for t in sorted(maxis, key=lambda x: x[1])[-5:]:
         g = QGraphicsTextItem(str(t[0]))
         g.setFlag(QGraphicsItem.ItemIgnoresTransformations)
         font = QApplication.font()
         font.setPointSizeF(6.5)
         g.setFont(font)
         g.setDefaultTextColor(Qt.black)
         g.setPos(t[0], t[1])
         labels.append(g)
         self.pw.addItem(g)
     return labels
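The smooth-then-walk loop above is looking for local maxima of the lightly smoothed trace; scipy.signal.argrelmax performs the same sign-change test. A minimal sketch with illustrative arrays:

import numpy as np
from scipy.ndimage import gaussian_filter1d as gauss
from scipy.signal import argrelmax

y = np.abs(np.random.randn(500))          # stand-in for the intensity trace
z = gauss(y, 1)                           # light smoothing, as in the method above
peaks = argrelmax(z)[0]                   # indices of local maxima of the smoothed trace
top5 = peaks[np.argsort(y[peaks])[-5:]]   # the five most intense peaks, as labelled above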
Example #8
 def is_teletext(self):
     """Determine whether the VBI data in this line contains a teletext signal."""
     if self._is_teletext is None:
         # First try to detect by comparing pre-start noise floor to post-start levels.
         # Store self._gstart so that self.start can re-use it.
         self._gstart = gauss(self._original[Line.config.start_slice],
                              Line.config.gauss)
         smax = np.max(self._gstart)
         if smax < 64:
             self._is_teletext = False
             self._reason = f'Signal max is {smax}'
         elif self.noisefloor > 80:
             self._is_teletext = False
             self._reason = f'Noise is {self.noisefloor}'
         elif smax < (self.noisefloor + 16):
             # There is no interesting signal in the start_slice.
             self._is_teletext = False
             self._reason = f'Noise is higher than signal {smax} {self.noisefloor}'
         else:
             # There is some kind of signal in the line. Check if
             # it is teletext by looking for harmonics of teletext
             # symbol rate.
             fftchop = np.add.reduceat(self.fft, self.config.fftbins)
             self._is_teletext = np.sum(fftchop[1:-1:2]) > 1000
     return self._is_teletext
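The harmonic test above relies on np.add.reduceat, which sums the spectrum over contiguous bins defined by a list of start indices; a tiny sketch with illustrative values:

import numpy as np

spectrum = np.arange(12, dtype=float)   # stand-in for self.fft
bin_starts = [0, 3, 6, 9]               # stand-in for self.config.fftbins
chopped = np.add.reduceat(spectrum, bin_starts)
# chopped == [sum(spectrum[0:3]), sum(spectrum[3:6]), sum(spectrum[6:9]), sum(spectrum[9:])]
# chopped[1:-1:2] then selects alternate interior bins, as in the harmonic check above.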
Example #10
    def find_start(self):
        # First try to detect by comparing pre-start noise floor to post-start levels.
        # Store self._gstart so that self.start can re-use it.
        self._gstart = gauss(self._resampled[self.config.start_slice],
                             Line.config.gauss)
        smax = np.max(self._gstart)
        if smax < 64:
            self._is_teletext = False
            self._reason = f'Signal max is {smax}'
        elif self.noisefloor > 80:
            self._is_teletext = False
            self._reason = f'Noise is {self.noisefloor}'
        elif smax < (self.noisefloor + 16):
            # There is no interesting signal in the start_slice.
            self._is_teletext = False
            self._reason = f'Noise is higher than signal {smax} {self.noisefloor}'
        else:
            # There is some kind of signal in the line. Check if
            # it is teletext by looking for harmonics of teletext
            # symbol rate.
            fftchop = np.add.reduceat(self.fft, self.config.fftbins)
            self._is_teletext = np.sum(fftchop[1:-1:2]) > 1000
        if not self._is_teletext:
            return

        # Find the steepest part of the line within start_slice.
        # This gives a rough location of the start.
        self._start = np.argmax(
            np.gradient(np.maximum.accumulate(
                self._gstart))) + self.config.start_slice.start
        # Now find the extra roll needed to lock in the clock run-in and framing code.
        confidence = []

        for roll in range(max(-30, 8 - self._start), 20):
            self.roll = roll
            # 15:20 is the last bit of CRI and first 4 bits of FC - 01110.
            # This is the most distinctive part of the CRI/FC to look for.
            c = self.chop(15, 21)
            confidence.append((c[1] + c[2] + c[3] - c[0] - c[4] - c[5], roll))
            #confidence.append((np.sum(self.chop(15, 20) * self.config.crifc[15:20]), roll))

        self._start += max(confidence)[1]
        self.roll = 0

        # Use the observed CRIFC to lock to the framing code
        confidence = []
        for roll in range(-4, 4):
            self.roll = roll
            x = np.gradient(self.fchop(8, 24))
            c = np.sum(np.square(x - self.config.observed_crifc_gradient))
            confidence.append((c, roll))

        self._start += min(confidence)[1]
        self.roll = 0

        self._start += self.config.extra_roll
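The rough start position above comes from the steepest point of the running maximum of the smoothed slice; a small sketch of that idiom on a synthetic rising edge:

import numpy as np

signal = np.concatenate([np.zeros(50), np.linspace(0.0, 1.0, 20), np.ones(50)])
envelope = np.maximum.accumulate(signal)   # monotone non-decreasing envelope
edge = np.argmax(np.gradient(envelope))    # index of the steepest rise (~ the signal start)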
Example #11
import numpy as np
from scipy.ndimage import gaussian_filter as gauss

def destar(I, sigma, t):
    # Replace "starred" pixels, whose residual above the Gaussian-blurred
    # background exceeds t times the pixel value, with the blurred background.
    D = np.zeros_like(I)
    B = gauss(I, sigma)
    M = I - B
    for i in range(len(I)):
        for j in range(len(I[0])):
            if M[i][j] > t * I[i][j]:
                D[i][j] = B[i][j]
            else:
                D[i][j] = I[i][j]
    return D
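The same per-pixel rule can be written without the explicit loops; a vectorised sketch equivalent to the function above:

import numpy as np
from scipy.ndimage import gaussian_filter as gauss

def destar_vectorised(I, sigma, t):
    # Where the residual above the blurred background exceeds t*I, use the background;
    # otherwise keep the original pixel.
    B = gauss(I, sigma)
    return np.where(I - B > t * I, B, I)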
Example #12
    def deconvolve(self):
        target = gauss(self.vbi, self.gauss_sd)
        self.target = normalise(target)

        self.make_guess_mask()
        self.make_possible_bytes(Vbi.possible_bytes)

        self._oldbytes = np.zeros(42, dtype=np.uint8)

        self._deconvolve()

        packet = "".join([chr(x) for x in self.g.bytes])

        F = finders.test(self.finders, packet)
        if F:
                sys.stderr.write("matched by finder "+F.name+"\n");
                sys.stderr.flush()               
                self.make_possible_bytes(F.possible_bytes)
                self._deconvolve()
                F.find(self.g.bytes)
                packet = F.fixup()
                return packet

        # if the packet did not match any of the finders then it isn't 
        # a packet 0 (or 30). if the packet still claims to be a packet 0 it 
        # will mess up the page splitter. so redo the deconvolution but with 
        # packet 0 (and 30) header removed from possible bytes.

        # note: this doesn't work. i am not sure why. a packet in 63322
        # does not match the finders but still passes through this next check
        # with r=0. which should be impossible.
        ((m,r),e) = mrag(self.g.bytes[:2])
        if r == 0:
            sys.stderr.write("packet falsely claimed to be packet %d\n" % r);
            sys.stderr.flush()
            if not self.allow_unmatched:
                self._nzdeconvolve()
            packet = "".join([chr(x) for x in self.g.bytes])
        # if it's a link packet, it is completely hammed
        elif r == 27:
            self.make_possible_bytes([hammbytes]*42)
            self._deconvolve()
            packet = "".join([chr(x) for x in self.g.bytes])

        return packet
Example #13
# Assumed imports for this snippet: pkl is the standard pickle module and gauss is
# scipy.ndimage.gaussian_filter1d (a 1-D PDF is smoothed with a scalar sigma).
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.ndimage import gaussian_filter1d as gauss

def main():

    fig = plt.figure(figsize=(10, 10))

    gs = gridspec.GridSpec(10, 1)

    axisA = plt.subplot(gs[0:7, 0])
    axisB = plt.subplot(gs[7:, 0])

    colors = ['r', 'b', 'g', 'c', 'orange', 'k']

    ratioYearToMonth = []
    pklFileName = '../output/CDM/combinedPDF_100.0.pkl'
    finalMergedPDFdict = pkl.load(open(pklFileName, 'rb'))

    finalMergedPDFdict['y'] /= np.max(finalMergedPDFdict['y'])
    convolvedWithObservationalNoise = gauss(finalMergedPDFdict['y'],
                                            np.log10(100.))

    axisA.plot(finalMergedPDFdict['x']+2.56, finalMergedPDFdict['y'], \
                   label=r"Without Obs", color='green')


    axisA.plot(finalMergedPDFdict['x']+2.56, convolvedWithObservationalNoise,\
                   label=r"With Obs", color='red')
    axisB.plot(finalMergedPDFdict['x'] + 2.56,
               finalMergedPDFdict['y'] / convolvedWithObservationalNoise)
    axisB.plot([0, 4], [1, 1], 'k--')
    axisA.legend()
    axisA.set_yscale('log')

    axisB.set_xlabel(r'log($\Delta T$/ days)')
    axisA.set_ylabel(r'P(log($\Delta T$/ days))')
    axisA.set_xlim(0.6, 3.)
    axisB.set_xlim(0.6, 3.)
    axisA.set_ylim(2e-3, 1.2)
    axisB.set_ylim(0.5, 2)
    axisB.set_yscale('log')
    axisA.set_xticklabels([])

    plt.show()
Example #15
 def gradient(self):
     return (np.gradient(gauss(self.rolled, 12))[20:300]>0)*255
Example #16
    def gen_images(self,
                   image_shape,
                   min_angle,
                   max_angle,
                   line_width,
                   curve_count,
                   clean_curves=1,
                   fadeout=1.0,
                   weight=0.5,
                   overlapping=True,
                   intensity=100,
                   directed=None):
        '''
        Method which creates a corresponding pair of ground truth and degraded image.
        :param image_shape: tuple(int, int), contains shape of image
        :param min_angle: int, minimum angle for bezier arc generation
        :param max_angle: int, maximum angle for bezier arc generation
        :param line_width: int, thickness of line of arc
        :param curve_count: int, number of curves in final image
        :param clean_curves: int, number of curves which are not to be convolved
        :param fadeout: float, level for the Gaussian filter
        :param weight: float, middle control point weight, tension of arc
        :param overlapping: overlapping structures' intensity will not get summed
        :param intensity: int in range 0-255, optional, intensity of curves
        :param directed: int, radius of size of starting and destination patch
        :return: ground truth image (2d array), degraded image (2d array), ground truth in-focus image (2d array)
        '''

        # draw arcs, generate stacks
        self.image_shape = image_shape
        clean_img_stack = np.ndarray(
            (curve_count, image_shape[0], image_shape[1]))
        if directed is None:
            for curve in range(curve_count):
                clean_img_stack[curve] = Arc.draw_arc(min_angle=min_angle,
                                                      max_angle=max_angle,
                                                      array_shape=image_shape,
                                                      intensity=intensity,
                                                      weight=weight)
                clean_img_stack[curve] = Arc.thickness(clean_img_stack[curve],
                                                       line_width=line_width,
                                                       substitute=intensity)
        else:
            center1 = (random.randint(directed,
                                      self.image_shape[0] - directed),
                       random.randint(directed,
                                      self.image_shape[1] - directed))
            center2 = (random.randint(directed,
                                      self.image_shape[0] - directed),
                       random.randint(directed,
                                      self.image_shape[1] - directed))
            for curve in range(curve_count):
                clean_img_stack[curve] = Arc.draw_arc(min_angle=min_angle,
                                                      max_angle=max_angle,
                                                      array_shape=image_shape,
                                                      intensity=intensity,
                                                      weight=weight,
                                                      directed=(center1,
                                                                center2,
                                                                directed))
                clean_img_stack[curve] = Arc.thickness(clean_img_stack[curve],
                                                       line_width=line_width,
                                                       substitute=intensity)
        convolved_img_stack = deepcopy(clean_img_stack)

        # create convolution strength levels
        for curve_ in range(curve_count):
            self.conv_strength.append(random.randint(2, 10))
        for clean in range(clean_curves):
            self.conv_strength[clean] = random.uniform(0.1, 1)

        # convolve curves
        for curve in range(curve_count):
            convolved_img_stack[curve] = gauss(
                convolved_img_stack[curve],
                fadeout * self.conv_strength[curve])

        # generate clean in-focus image
        self.infocus_image = np.zeros((self.image_shape))
        for clean in range(clean_curves):
            self.infocus_image += clean_img_stack[clean]

        # turn stacks into images
        self.clean_image = ImagePair.__stack_to_img(clean_img_stack,
                                                    count=curve_count,
                                                    shape=image_shape,
                                                    overlapping=overlapping)
        self.degraded_image = ImagePair.__stack_to_img(convolved_img_stack,
                                                       count=curve_count,
                                                       shape=image_shape,
                                                       overlapping=overlapping)
Example #17
	def Run_Analysis(self):
		# Overall function to scroll through the designated statistics and combinations of statistics,
		# computing the 2D/1D likelihoods for the specified data vectors
		# and produce a plot at the end.		

		from Functions_4_Lhd import LogLhd_Gauss, Return_Contours, Return_Contour_Areas

		# Scroll through the statistics we're reading in & calculating likelihoods for
		Use_Stats = self.Use_Stats()
		Log_Lhds_Stats = []    # store natural log of likelihoods per statistic
		Contours_Stats = []    # store 1&2sigma contour levels per statistic
		Areas_Stats = []       # store the areas of the 1&2 sigma contours per statistic
		Constraints_Stats = [] # store the 1D constraints on the x&y axes: [(Mean,1sig,2sig)_x,(Mean,1sig,2sig)_y] 

		for stat in Use_Stats:
			print("Producing likelihood for statistic number %s of " %stat, Use_Stats)

			# Load the cov, predictions, data & calc the likelihood on the grid
			cov = self.LoadCov(stat)
			preds = self.LoadPred(stat)[1]
			data = self.LoadData(stat)[1]
			LogL = LogLhd_Gauss(preds, data, cov)
			# apply an S8 prior to the likelihood if specified.
			if self.Apply_S8Prior():
				LogL = self.Implement_S8Prior(LogL, stat)

			if self.OneD_TwoD_Or_nD() == "2D":
				# reshape the likelihood to 2D
				LogL = np.reshape(LogL, (-1, self.x_Res() ))

			if self.SmoothContour(stat):
				SS = self.SmoothScale(stat)
				print("Smoothing contour for statistic %s with sigma=%s [pxls]" %(stat, SS))
				from scipy.ndimage import gaussian_filter as gauss
				LogL = gauss(LogL, sigma=[SS,SS])


			# Store the 68% & 95% contour areas, and the Log-likelihood
			contours = Return_Contours(LogL)
			Contours_Stats.append( contours )
			Areas_Stats.append( Return_Contour_Areas(LogL, contours) )
			Log_Lhds_Stats.append( LogL )

			# get and store the 1D x & y constraints
			x_constraints = self.Marginalise_Over_Dimension(LogL, 0, stat)
			y_constraints = self.Marginalise_Over_Dimension(LogL, 1, stat)  
			Constraints_Stats.append( np.vstack((x_constraints,y_constraints)) )

		# Now scroll through the combinations of statistics, if any
		Combine_Stats = self.Combine_Stats()
		Log_Lhds_Comb = []    # store natural log of likelihoods per combination of statistics
		Contours_Comb = []    # store 1&2sigma contour levels per combination of statistics
		Areas_Comb = []       # store the areas of the 1&2 sigma contours per combination of statistics
		Constraints_Comb = [] # store the 1D constraints on the x&y axes: [(Mean,1sig,2sig)_x,(Mean,1sig,2sig)_y] 

		for i in range(len(Combine_Stats)):
			print("Producing likelihood for %s'th combination of statistics "%(i+1), Combine_Stats[i])
			cov = self.LoadCovCombined(i+1)
			preds = self.CombinePreds( Combine_Stats[i] )
			data = self.CombineData( Combine_Stats[i] )
			LogL = LogLhd_Gauss(preds, data, cov)
			if self.Apply_S8Prior():
				LogL = self.Implement_S8Prior(LogL, Combine_Stats[i][0])

			if self.OneD_TwoD_Or_nD() == "2D":
				# reshape the likelihood to 2D
				LogL = np.reshape(LogL, (-1, self.x_Res() ))

			if self.SmoothCombinedContour(i+1):
				SS = self.SmoothCombinedScale(i+1)
				print("Smoothing contour for combination %s with sigma=%s [pxls]" %(i+1, SS))
				from scipy.ndimage import gaussian_filter as gauss
				LogL = gauss(LogL, sigma=[SS,SS])

			# Store the 68% & 95% contour areas, and the Log-likelihood
			contours = Return_Contours(LogL)
			Contours_Comb.append( contours )
			Areas_Comb.append( Return_Contour_Areas(LogL, contours) )
			Log_Lhds_Comb.append( LogL )

			# get and store the 1D x & y constraints
			x_constraints = self.Marginalise_Over_Dimension(LogL, 0, Combine_Stats[i][0])
			y_constraints = self.Marginalise_Over_Dimension(LogL, 1, Combine_Stats[i][0])  
			Constraints_Comb.append( np.vstack((x_constraints,y_constraints)) )

	
		# Plot the likelihood
		self.Plot_2D_Lhd(Log_Lhds_Stats, Contours_Stats, 
					     Log_Lhds_Comb,  Contours_Comb)

		return Log_Lhds_Stats, Contours_Stats, Areas_Stats, Constraints_Stats, Log_Lhds_Comb,  Contours_Comb, Areas_Comb, Constraints_Comb
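For the 2D case above, the flattened likelihood grid is reshaped to (y_res, x_res) and then smoothed along both axes; a small standalone sketch in which the grid size and sigma are illustrative:

import numpy as np
from scipy.ndimage import gaussian_filter as gauss

x_res, y_res = 50, 40
LogL_flat = np.random.rand(x_res * y_res)       # stand-in for the gridded log-likelihood
LogL = np.reshape(LogL_flat, (-1, x_res))       # -> shape (y_res, x_res)
LogL_smooth = gauss(LogL, sigma=[2, 2])         # smooth by 2 pixels along each axis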
Example #18
# Assumed imports for this snippet: gauss is scipy.ndimage.gaussian_filter1d,
# fit is scipy.optimize.curve_fit, and minimize is scipy.optimize.minimize.
import sys
import numpy as np
import h5py
from scipy.ndimage import gaussian_filter1d as gauss
from scipy.optimize import curve_fit as fit
from scipy.optimize import minimize


def main():
    if len(sys.argv) != 2:
        print('.')
        print('.')
        print('.')
        print(
            'Number of parameters does not add up. Please enter command e.g. like this:'
        )
        print('python transform_h5_to_csv.py test')
        print(' ')
        print('Nothing was done')
        return -1

    filename = sys.argv[1]
    if filename[-3:] == '.h5':
        filename = filename[:-3]

    import os.path

    if not os.path.isfile(filename + '.h5'):
        print('.')
        print('.')
        print('.')
        print("The following file did not exist:")
        print(filename + '.h5')
        print(' ')
        print('Nothing was done')
        return -2

    f = h5py.File(filename + '.h5', 'r')

    t_data = np.array(f['t'])
    t_0 = np.array(f['t_0'])
    x_data = np.array(f['x_data'])

    y_data = np.array(f['y_data'])

    samples = y_data.shape[1]
    datapoints = y_data.shape[0]

    assert np.max(np.std(x_data, 0)) < 1e-10
    # If this assertion catches, your x-axis changes during the experiment!

    # Gaussian blur in order to find potential approximate minima
    gauss_data = np.zeros_like(y_data)
    sigma = 10
    print("Standard deviation for gaussian bluring: " +
          str(np.round(sigma * (x_data[0, 1] - x_data[0, 0]), 3)) + " MHz")
    for i in range(y_data.shape[0]):
        gauss_data[i, :] = gauss(y_data[i, :], sigma)

    # Get all minima. Do so by looking at the change in sign of the derivative.
    # A minimum needs to be below -0.2 to be relevant.
    diff_data = gauss_data[:, 1:] - gauss_data[:, :-1]
    minima = np.zeros_like(y_data)
    minima[:, 1:-1] = ((diff_data[:, :-1] < 0) * 1.) * (
        (diff_data[:, 1:] > 0) * 1.) * ((y_data[:, 1:-1] < -0.2) * 1.)

    # Calculate here the approximate values based on the minima approach
    approx_minima = np.ones_like(y_data[:, 0]) * -1.
    for i in range(0, y_data.shape[0], 1):
        # Get index of all the minima in ith trial
        temp_minima = np.argwhere(minima[i, :])
        temp_minima = np.array(
            [temp_minima[index][0] for index in range(temp_minima.shape[0])])

        if temp_minima.shape[0] > 0:
            # Get the blurred values of these minima
            values_minima = gauss_data[i, temp_minima]

            # Get the smallest minimum in case of multiple minima
            arg_smallest = np.argmin(values_minima)

            # Get the index of the smallest minimum
            smallest_min = temp_minima[arg_smallest]

            approx_minima[i] = smallest_min

    # Define the pdf of a Polynomial
    def poly_pdf(x, a, b, c, d, e, f):
        return a + b * x + c * (x**2) + d * (x**3) + e * (x**4) + f * (x**5)

    # Now calculate the minima from a fit around the data points near the approximate values
    slength = 10  # Defines the region where to search for lowest point first (2*slength+1)
    dlength = 6  # Defines how many data points to consider left and right to the minimum (2*dlength+1)

    fit_minima = np.copy(approx_minima)

    for i in range(fit_minima.shape[0]):
        approx_i = approx_minima[i]
        if approx_i >= 0:  # This means the minimum is relevant
            # Find the actual minimum in the data first, since blurring shifts the peak
            lower_bound = max(approx_i - slength, 0)
            upper_bound = min(approx_i + slength + 1, x_data.shape[1])

            x_data_i = np.arange(int(lower_bound), int(upper_bound))
            y_data_i = y_data[i, int(lower_bound):int(upper_bound)]

            approx_i = np.argmin(y_data_i) + lower_bound
            approx_minima[i] = approx_i  # Update minimum to actual minimum

            lower_bound = max(approx_i - dlength, 0)
            upper_bound = min(approx_i + dlength + 1, x_data.shape[1])

            x_data_i = np.arange(int(lower_bound), int(upper_bound))
            y_data_i = y_data[i, int(lower_bound):int(upper_bound)]

            p, _ = fit(poly_pdf, x_data_i - approx_i,
                       y_data_i)  #,p0=[1,1,1,approx_i,1])

            def current_polynomial(x):
                return p[0] + p[1] * x + p[2] * (x**2) + p[3] * (
                    x**3) + p[4] * (x**4) + p[5] * (x**5)

            #assert i!=2260

            f = minimize(current_polynomial, x0=0)
            fit_minima[i] = f.x + approx_i

    # Transform the fitted positions to actual frequencies
    resonant_freq = np.zeros_like(fit_minima)
    for i in range(resonant_freq.shape[0]):
        if fit_minima[i] > 0:
            min_full = int(fit_minima[i] // 1 + 0.5)
            min_diff = fit_minima[i] - min_full
            resonant_freq[i] = x_data[i, min_full] + min_diff * (
                x_data[i, min_full + 1] - x_data[i, min_full])

    # Write data into file
    with open(filename + '.csv', 'w') as f:
        # Write header
        s = str(t_0)
        for i in range(samples):
            s += ',' + str(x_data[0, i])
        s += ',f0'
        f.write(s)
        f.write('\n')

        for j in range(datapoints):
            s = str(t_data[j])
            for i in range(samples):
                s += ',' + str(y_data[j, i])
            s += ',' + str(resonant_freq[j])
            f.write(s)
            f.write('\n')
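The smooth-then-look-for-sign-changes step above is essentially what scipy.signal.argrelmin provides; a minimal sketch on a synthetic trace where the names and the -0.2 depth threshold are illustrative:

import numpy as np
from scipy.ndimage import gaussian_filter1d as gauss
from scipy.signal import argrelmin

y = np.cos(np.linspace(0, 6 * np.pi, 600)) + 0.05 * np.random.randn(600)
z = gauss(y, 10)                               # smooth so noise does not create spurious minima
candidates = argrelmin(z)[0]                   # indices of local minima of the smoothed trace
relevant = candidates[y[candidates] < -0.2]    # keep only sufficiently deep minima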
Example #19
def imprep(red,
           green=None,
           blue=None,
           xcen=None,
           ycen=None,
           xdim=None,
           ydim=None,
           scaletype="lin",
           scalepow=0.5,
           zlo=None,
           zhi=None,
           scalemode=100,
           scalelo=None,
           scalehi=None,
           sigmaclip=0,
           colmap="rgb",
           colinvert=False,
           alpha=1,
           smoothfwhm=0,
           block=1):
    """Prepare image for plotting."""
    # imports
    import numpy as np
    from scipy.ndimage import gaussian_filter as gauss
    import matplotlib
    import copy
    import astropy.stats as stats
    # setup
    if blue is None:
        blue = copy.deepcopy(red)
    if green is None:
        green = (copy.deepcopy(red) + copy.deepcopy(blue)) / 2
    if xcen is None:
        xcen = red.shape[0] / 2
    if ycen is None:
        ycen = red.shape[1] / 2
    if xdim is None:
        xdim = red.shape[0]
    if ydim is None:
        ydim = red.shape[1]
    xcen = np.tile(xcen, 3)[:3]
    ycen = np.tile(ycen, 3)[:3]
    xdim = np.tile(xdim, 3)[:3]
    ydim = np.tile(ydim, 3)[:3]
    zlo = np.tile(zlo, 3)[:3]
    zhi = np.tile(zhi, 3)[:3]
    scalemode = np.tile(scalemode, 3)[:3]
    scalelo = np.tile(scalelo, 3)[:3]
    scalehi = np.tile(scalehi, 3)[:3]
    # trim image to dimensions
    dats = [np.array([]), np.array([]), np.array([])]
    for i, dat in enumerate([red, green, blue]):
        xlo = int(xcen[i] - (xdim[i] / 2))
        xhi = int(xcen[i] + (xdim[i] / 2))
        ylo = int(ycen[i] - (ydim[i] / 2))
        yhi = int(ycen[i] + (ydim[i] / 2))
        dat = dat[xlo:xhi, ylo:yhi]
        dats[i] = dat
    # zscales, smoothing and blocking
    for i in range(len(dats)):
        if block > 1:
            new_shape = tuple(np.array(dats[i].shape) / block / block)
            dats[i] = rebin(dats[i], shape=new_shape)
        if smoothfwhm > 0:
            sigma = smoothfwhm / (2 * np.sqrt(2 * np.log(2)))
            dats[i] = gauss(input=dats[i], sigma=sigma)
        scaledat = copy.deepcopy(dats[i])[~np.isnan(dats[i])]
        if sigmaclip > 0:
            scaledat = stats.sigma_clip(scaledat,
                                        sigma=sigmaclip,
                                        masked=False)
        if zlo[i] is None:
            if scalelo[i] is None:
                scalelo[i] = (50 - (scalemode[i] / 2))
            zlo[i] = np.nanquantile(scaledat, scalelo[i] / 100)
        if zhi[i] is None:
            if scalehi[i] is None:
                scalehi[i] = (50 + (scalemode[i] / 2))
            zhi[i] = np.nanquantile(scaledat, scalehi[i] / 100)
    # generate average map
    avgs = [np.array([]), np.array([]), np.array([])]
    notnans = [np.array([]), np.array([]), np.array([])]
    for i, dat in enumerate(dats):
        tempavg = (dat - zlo[i]) / (zhi[i] - zlo[i])
        notnans[i] = ~np.isnan(tempavg)
        tempavg[np.isnan(tempavg)] = 0
        avgs[i] = tempavg
    avg = sum(avgs) / np.clip(sum(notnans), 1, 3)
    avg[avg == 0] = np.nan
    # apply scaling function
    scaled = tonemap(data=avg,
                     lo=0,
                     hi=1,
                     scaletype=scaletype,
                     scalepow=scalepow)
    # rescaled input images
    for i, dat in enumerate(dats):
        dats[i] = np.clip(
            (scaled * (((dats[i] - zlo[i]) / (zhi[i] - zlo[i])) / avg)), 0, 1)
        dats[i][np.isnan(dats[i])] = 0
        if colinvert:
            dats[i] = 1 - dats[i]
    # generate colour map
    if colmap == "rgb":
        alphas = np.full_like(dats[0], alpha)
        out = np.stack((dats[0], dats[1], dats[2], alphas), axis=2)
    elif colmap == "grey":
        cmap = matplotlib.cm.get_cmap("Greys_r")
        out = cmap(dats[0], alpha=alpha)
    elif colmap == "sls":
        out = sls(dats[0], alpha=alpha)
    else:
        cmap = matplotlib.cm.get_cmap(colmap)
        out = cmap(dats[0], alpha=alpha)
    return out, zlo, zhi
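The smoothfwhm branch above converts a full width at half maximum into the Gaussian sigma expected by gaussian_filter via sigma = FWHM / (2*sqrt(2 ln 2)) ≈ FWHM / 2.355; a one-line check with an illustrative value:

import numpy as np

fwhm = 3.0                                    # illustrative FWHM in pixels
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))   # ≈ 1.274 pixels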