def evaluate_bioid(loss, load_batch, n_batches):
    from scipy.ndimage.measurements import center_of_mass, maximum_position
    from numpy.linalg import norm
    L = []
    for batch in xrange(n_batches()):
        load_batch(batch)
        output = loss.get_sink("output")
        out = output.evaluate().np
        target = loss.get_parameter("target")
        tch = target.data.np
        out = np.rollaxis(out, 3)
        for i in xrange(out.shape[0]):
            o0, o1 = out[i].reshape(2, 30, 30)
            t0, t1 = tch[i].reshape(2, 30, 30)
            mo0 = np.array(maximum_position(o0))
            mo1 = np.array(maximum_position(o1))
            mt0 = np.array(center_of_mass(t0))
            mt1 = np.array(center_of_mass(t1))
            mask0 = np.zeros_like(o0)
            mask1 = np.zeros_like(o1)
            region0 = np.clip((mo0 - 2, mo0 + 3), 0, 30)
            region1 = np.clip((mo1 - 2, mo1 + 3), 0, 30)
            mask0[region0[0, 0]:region0[1, 0], region0[0, 1]:region0[1, 1]] = 1
            mask1[region1[0, 0]:region1[1, 0], region1[0, 1]:region1[1, 1]] = 1
            mo0 = center_of_mass(o0, mask0)
            mo1 = center_of_mass(o1, mask1)
            dst = max(norm(mo0 - mt0), norm(mo1 - mt1)) / norm(mt0 - mt1)
            L.append(dst)
    L = np.array(L)
    print "n =", len(L)
    print "Average normalized distance: ", L.mean()
    print "Correct: ", np.sum(L < 0.25) / float(len(L))
def run_analyzer(run):
    ds = run.primary.read()
    if "adsimdet_image" in ds:
        print("image")
        np_frame = ds["adsimdet_image"][0][0].to_masked_array()
        com = center_of_mass(np_frame)
        maxp = maximum_position(np_frame)
        print(f"{com = }")
        print(f"{maxp = }")
    elif "noisy" in ds:
        print("1-D scan")
        y_arr = ds["noisy"].to_masked_array()
        x_arr = ds["m1"].to_masked_array()
        n_arr = np.arange(len(y_arr))
        com = np.interp(center_of_mass(y_arr), n_arr, x_arr)
        maxp = np.interp(maximum_position(y_arr), n_arr, x_arr)
        print(f"{com = }")
        print(f"{maxp = }")
        print(f"{len(y_arr) = }")
    elif "temperature_readback" in ds:
        print("temperatures")
        y_arr = ds["temperature_readback"]
        print(f"{y_arr.mean().values = }")
        print(f"{y_arr.std().values = }")
        print(f"{len(y_arr) = }")
    else:
        print(f"Not recognized: {ds = }")
def translate_back_locations(outputs, threshold=0.5): """ Translates back the network output to a class sequence. Thresholds on class 0, then assigns the maximum (non-zero) class to each region. Difference to translate_back is the output region not just the maximum's position is returned. Args: Returns: A list with tuples (class, start, end, max). max is the maximum value of the softmax layer in the region. """ labels, n = measurements.label(outputs[:,0] < threshold) mask = np.tile(labels.reshape(-1,1), (1,outputs.shape[1])) maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1)) p = 0 start = None x = [] for idx, val in enumerate(labels): if val != 0 and start is None: start = idx p += 1 if val == 0 and start: if maxima[p-1][1] == 0: start = None else: x.append((maxima[p-1][1], start, idx, outputs[maxima[p-1]])) start = None # append last non-zero region to list of no zero region occurs after it if start: x.append((maxima[p-1][1], start, len(outputs), outputs[maxima[p-1]])) return x
def find_local_peaks_new(scoremap: np.ndarray,
                         local_reference: np.ndarray,
                         animal_number: int,
                         config: dict) -> dict:
    """
    Function for finding local peaks for each joint on the provided scoremap

    :param scoremap: scmap from get_pose function
    :param local_reference: locref from get_pose function
    :param animal_number: number of animals for which we need to find peaks,
        also used for critical joints. Critical joints are used to define the
        skeleton of an animal; there cannot be more than animal_number points
        for each critical joint.
    :param config: DeepLabCut config from load_deeplabcut()
    :returns: all_peaks dictionary with coordinates as a list of tuples for each joint
    """
    # loading animal joints from config
    all_joints_names = config['all_joints_names']
    # critical_joints = ['neck', 'tailroot']
    all_peaks = {}
    # loading stride from config
    stride = config['stride']
    # filtering scoremap
    scoremap[scoremap < 0.1] = 0
    for joint_num, joint in enumerate(all_joints_names):
        all_peaks[joint] = []
        # selecting the joint in scoremap and locref
        lr_joint = local_reference[:, :, joint_num]
        sm_joint = scoremap[:, :, joint_num]
        # applying maximum filter with footprint
        neighborhood = generate_binary_structure(2, 1)
        sm_max_filter = maximum_filter(sm_joint, footprint=neighborhood)
        # eroding filtered scoremap
        erosion_structure = generate_binary_structure(2, 3)
        sm_max_filter_eroded = binary_erosion(
            sm_max_filter, structure=erosion_structure).astype(sm_max_filter.dtype)
        # labeling eroded filtered scoremap
        labeled_sm_eroded, num = label(sm_max_filter_eroded)
        # if joint is 'critical' and we have too few labels then we try a workaround
        # to ensure maximum found peaks
        # for all other joints - normal procedure with cutoff point at animal_number
        peaks = maximum_position(sm_joint, labels=labeled_sm_eroded, index=range(1, num + 1))
        if num != animal_number:
            peaks = [
                tuple(peak) for peak in peak_local_max(
                    sm_joint, min_distance=4, num_peaks=animal_number)
            ]
        if len(peaks) > animal_number:
            peaks = peaks[:animal_number]  # keep at most animal_number peaks
        # using scoremap peaks to get the coordinates on the original image
        for peak in peaks:
            offset = lr_joint[peak]
            prob = sm_joint[peak]  # not used
            # some weird DLC magic with stride and offsets
            coordinates = np.floor(
                np.array(peak)[::-1] * stride + 0.5 * stride + offset)
            all_peaks[joint].append([tuple(coordinates.astype(int)), joint])
    return all_peaks
def align_converge(y_LR, size=64):
    """iterate until offsets converge"""
    (h, w) = y_LR.shape
    # split image
    y_L = y_LR[:, :w/2]
    y_R = y_LR[:, w/2:]
    (h, w) = y_L.shape
    s = size / 2
    # now find n offsets
    rand = RandomState(0)
    prev_dx, prev_dy = 0, 0
    series = []
    while True:
        # at a random location in y_L
        y = rand.randint(h/4, h*3/4)
        x = rand.randint(w/4, w*3/4)
        it = y_L[y:y+s, x:x+s]          # take an s x s chunk there
        tm = match_template(y_R, it)    # match it against y_R
        ry, rx = maximum_position(tm)   # max value is location
        series += [((y-ry), (x-rx))]    # accumulate
        print series
        n = len(series)
        if n % 2 == 0:
            # take the median
            dy, dx = np.median(np.asarray(series), axis=0).astype(int)
            if n > 100 or (abs(dy-prev_dy) == 0 and abs(dx-prev_dx) == 0):
                return dy, dx
            prev_dy, prev_dx = dy, dx
def hotonelist_to_intlist0(outputs, threshold=0.7, pos=0):
    """Helper function for LSTM-based OCR: decode LSTM outputs.

    Translate back. Thresholds on class 0, then assigns the maximum class to
    each region.

    ``pos`` determines the depth of character information returned:
        - `pos=0`: Return list of recognized characters
        - `pos=1`: Return list of position-character tuples
        - `pos=2`: Return list of character-probability tuples

    :param outputs: 2D array containing posterior probabilities
    :param threshold: posterior probability threshold
    :param pos: what to return

    :returns: decoded hot one outputs
    """
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask) + 1))
    if pos == 1:
        return maxima  # include character position
    if pos == 2:
        return [(r, c, outputs[r, c]) for (r, c) in maxima]  # include character probabilities
    return [c for (r, c) in maxima]  # only recognized characters
# imports needed by this snippet (measurements is scipy.ndimage's legacy namespace)
import numpy as np
from scipy.ndimage import measurements


def peak_finder(arr, thresh):
    '''Absolute amplitude-based peak finding algorithm.

    Parameters
    ----------
    arr : array, shape=(n_samples,)
        1-d timeseries of extracellular recording.
    thresh : scalar
        Amplitude threshold. Only samples above this value will be included
        in clusters.

    Returns
    -------
    peak_loc : array
        Indices of cluster peaks.
    peak_mag : array
        Magnitude of cluster peaks.
    '''
    assert arr.ndim == 1

    clusters, ix = measurements.label(arr > thresh)
    if not ix:
        return np.array([]), np.array([])

    peak_loc = np.concatenate(
        measurements.maximum_position(arr, labels=clusters, index=np.arange(ix) + 1))
    peak_mag = measurements.maximum(arr, labels=clusters, index=np.arange(ix) + 1)
    return peak_loc, peak_mag
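# --- Usage sketch (illustrative, not from the original project): exercise the
# --- peak_finder above on a synthetic trace with two suprathreshold events.
# --- The toy data, threshold, and expected outputs below are assumptions.
trace = np.zeros(20)
trace[4:7] = [1.0, 3.0, 2.0]     # first event, peak 3.0 at index 5
trace[12:15] = [2.0, 5.0, 1.5]   # second event, peak 5.0 at index 13
loc, mag = peak_finder(trace, thresh=0.5)
print(loc)   # expected: [ 5 13]
print(mag)   # expected: [3. 5.]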
# imports needed by this snippet
import numpy as np
from scipy.ndimage.measurements import maximum_position


def pad_and_center_psf(psf, s):
    ss = psf.shape
    psf = np.pad(psf, ((0, s[0]-ss[0]), (0, s[1]-ss[1])),
                 mode='constant', constant_values=(0.0, 0.0))
    # roll the PSF so that its maximum ends up at the (0, 0) corner
    mp = maximum_position(psf)
    psf = np.roll(np.roll(psf, -mp[0], axis=0), -mp[1], axis=1)
    return psf
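# --- Usage sketch (illustrative assumption, not from the original source):
# --- pad a small 2-D PSF to an 8x8 grid and roll its peak to the (0, 0) corner,
# --- the layout that FFT-based deconvolution typically expects.
psf_small = np.array([[0., 1., 0.],
                      [1., 4., 1.],
                      [0., 1., 0.]])
psf_padded = pad_and_center_psf(psf_small, (8, 8))
print(psf_padded.shape)               # expected: (8, 8)
print(maximum_position(psf_padded))   # expected: (0, 0) -- peak now at the origin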
def translate_back(outputs, threshold=0.7):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region."""
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, arange(1, amax(mask)+1))
    return [c for (r, c) in maxima]
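# --- Toy walk-through (illustrative, not from the original source): the core of
# --- translate_back applied to a tiny (timesteps x classes) softmax matrix,
# --- written with explicit numpy/scipy imports so it runs standalone.
import numpy as np
from scipy.ndimage import measurements

toy = np.array([[0.9, 0.05, 0.05],   # blank (class 0 dominates)
                [0.1, 0.8,  0.1 ],   # region assigned to class 1
                [0.2, 0.7,  0.1 ],
                [0.9, 0.05, 0.05],   # blank
                [0.1, 0.1,  0.8 ],   # region assigned to class 2
                [0.9, 0.05, 0.05]])  # blank
lab, _ = measurements.label(toy[:, 0] < 0.7)
msk = np.tile(lab.reshape(-1, 1), (1, toy.shape[1]))
mx = measurements.maximum_position(toy, msk, np.arange(1, np.amax(msk) + 1))
print([c for (r, c) in mx])   # expected: [1, 2]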
def find(self, alpha=5):
    """
    Takes an image and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when the pixel's value is
    the neighborhood maximum, 0 otherwise).

    Taken from http://stackoverflow.com/questions/9111711/
    get-coordinates-of-local-maxima-in-2d-array-above-certain-value
    """
    self.alpha = alpha

    image_max = maximum_filter(self.image_conv, self.win_size)
    maxima = (self.image_conv == image_max)

    self.mean = np.mean(self.image_conv)
    self.std = np.sqrt(np.mean((self.image_conv - self.mean)**2))
    self.threshold = self.alpha * self.std + self.mean

    diff = (image_max > self.threshold)
    maxima[diff == 0] = 0

    labeled, num_objects = label(maxima)
    if num_objects > 0:
        self.positions = maximum_position(self.image, labeled,
                                          range(1, num_objects + 1))
        self.positions = np.array(self.positions).astype(int)
        self.drop_overlapping()
        self.drop_border()
    else:
        self.positions = np.zeros((0, 2), dtype=int)
def peak_finder(X, thresh):
    """Simple peak finding algorithm.

    Parameters
    ----------
    X : array_like, shape (n_times,)
        Raw data trace.
    thresh : float
        Amplitude threshold.

    Returns
    -------
    peak_loc : array_like, shape (n_clusters,)
        Index of peak amplitudes.
    peak_mag : array_like, shape (n_clusters,)
        Magnitude of peak amplitudes.
    """
    ## Error-catching.
    assert X.ndim == 1

    ## Identify clusters.
    clusters, ix = measurements.label(X > thresh)

    ## Identify index of peak amplitudes.
    peak_loc = np.concatenate(
        measurements.maximum_position(X, labels=clusters, index=np.arange(ix) + 1))

    ## Identify magnitude of peak amplitudes.
    peak_mag = measurements.maximum(X, labels=clusters, index=np.arange(ix) + 1)

    return peak_loc, peak_mag
def channels_edges(bg, approxwidth, angle=None, std=10, Nwalls=8):
    """
    Get the position of the edges

    Parameters
    ----------
    bg: 2d array
        image containing the 4 channels
    approxwidth: integer
        the approximate width
    angle: float
        if given, angle at which the edges are
    std: integer
        Tolerance on wall position in pixels

    Returns
    -------
    edges: 1d integer array
        Position in the rotated image of the edges in pixels
    """
    bg = bg / rmbg.polyfit2d(bg)
    if angle is not None:
        bg = ir.rotate_scale(bg, -angle, 1, borderValue=np.nan)

    prof = gfilter(np.nanmean(bg, 0), 3)
    edges = np.abs(np.diff(prof))
    edges[np.isnan(edges)] = 0
    # create approximate walls
    x = np.arange(len(edges))
    gwalls = np.zeros(len(edges), dtype=float)
    for center in (1 + np.arange(Nwalls)) * approxwidth:
        gwalls += edges.max() * np.exp(-(x - center)**2 / (2 * std**2))
    # Get best fit for approximate walls
    c = int(np.correlate(edges, gwalls, mode='same').argmax() - len(gwalls) / 2)
    '''
    from matplotlib.pyplot import plot, figure, imshow
    figure()
    imshow(bg)
    figure()
    plot(edges)
    plot(gwalls)
    figure()
    plot(np.correlate(edges, gwalls, mode='same'))
    #'''
    # Roll
    gwalls = np.roll(gwalls, c)
    if c < 0:
        gwalls[c:] = 0
    else:
        gwalls[:c] = 0
    # label wall position
    label, n = msr.label(gwalls > .1 * gwalls.max())
    # Get the positions
    edges = np.squeeze(msr.maximum_position(edges, label, range(1, n + 1)))
    assert len(edges) == 8, 'Did not detect 8 edges'
    return edges
def fring_filter(a):
    ny, nx = a.shape
    iy, ix = measurements.maximum_position(a)
    for i in range(0, max(0, ix - 3)) + range(min(nx - 1, ix + 3), nx - 1):
        a[iy, i] = np.median(a[:, i])
    for i in range(0, max(0, iy - 3)) + range(min(ny - 1, iy + 3), ny - 1):
        a[i, ix] = np.median(a[i, :])
    return a
def translate_back(outputs, threshold=0.7, pos=0):
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, arange(1, amax(mask) + 1))
    if pos == 1:
        return maxima
    if pos == 2:
        return [(c, outputs[r, c]) for (r, c) in maxima]
    return [c for (r, c) in maxima]
def amplitude_thresholding(x0, thresh):
    '''Simple peak finding algorithm.'''
    assert x0.ndim == 1
    clusters, ix = measurements.label(x0 > thresh)
    peak_loc = np.concatenate(
        measurements.maximum_position(x0, labels=clusters, index=np.arange(ix) + 1))
    peak_mag = measurements.maximum(x0, labels=clusters, index=np.arange(ix) + 1)
    return peak_loc, peak_mag
def translate_back(outputs, threshold=0.7, pos=0):
    """Translate back. Thresholds on class 0, then assigns the maximum class to
    each region.

    ``pos`` determines the depth of character information returned:
        * `pos=0`: Return list of recognized characters
        * `pos=1`: Return list of position-character tuples
        * `pos=2`: Return list of character-probability tuples
    """
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1))
    if pos == 1:
        return maxima  # include character position
    if pos == 2:
        return [(c, outputs[r, c]) for (r, c) in maxima]  # include character probabilities
    return [c for (r, c) in maxima]  # only recognized characters
def pad_and_center_psf(psf, s):
    ss = psf.shape
    if len(ss) == 2:
        psf = np.pad(psf, ((0, s[0] - ss[0]), (0, s[1] - ss[1])),
                     mode='constant', constant_values=(0.0, 0.0))
    elif len(ss) == 3:
        psf = np.pad(psf, ((0, s[0] - ss[0]), (0, s[1] - ss[1]), (0, s[2] - ss[2])),
                     mode='constant')
    mp = maximum_position(psf)
    for idx in xrange(len(ss)):
        psf = np.roll(psf, -mp[idx], axis=idx)
    return psf
def translate_back(outputs, threshold=0.5):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region."""
    # BGR
    # here I tried changing threshold to 0.4 to make it more liberal
    # but as the training set increased in size, it gave more uc erroneous
    # doubles
    # for a while I stipulated 0.5, which might be a better compromise
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, arange(1, amax(mask)+1))
    #print "maxima:"
    #for (r, c) in maxima:
    #    print r, c
    return maxima
def getting_maximum(image, theta_ini, theta_fin, r_ini, r_fin, delta_r):
    lim_rad = len(image[:, 0])
    lim_theta = len(image[0, :])
    nt_ini = int(theta_ini * lim_theta / 360.0)
    nt_fin = int(theta_fin * lim_theta / 360.0)
    nr_ini = int(r_ini * lim_rad)
    nr_fin = int(r_fin * lim_rad)
    rango = nt_fin - nt_ini
    r_max_pos = []
    theta_max_pos = []
    for i in range(rango):
        nr_ini = nr_ini + delta_r * i
        r_max_pos = np.append(
            r_max_pos, meas.maximum_position(image[nr_ini:nr_fin, nt_ini + i]))
        theta_max_pos = np.append(theta_max_pos, nt_ini + i)
    return (theta_max_pos, nr_ini + r_max_pos)
def connected_structure(self, structure=None, mask=None, feature_filter=None):
    img = self.img.data
    if mask is not None:
        img = img * mask
    self.labels, n = measurements.label(img, structure=structure)
    self.labels += 1
    for id in range(2, n + 2):
        coord = measurements.maximum_position(img, self.labels, id)
        intensity = img[tuple(coord)]
        segment = Segment(coord, intensity, id, self)
        if feature_filter is not None and feature_filter.filter(segment) is False:
            self.labels[self.labels == id] = 0
            continue
        self.add_feature(segment)
def detect(self):
    """
    a method for detecting peaks, trying to speed things up!
    Run this after running the thresholding (which sets the pk_mask)!
    """
    self.pos = []
    self.intens = []

    sz = self.pk_par["sz"]
    sig_G = self.pk_par["sig_G"]
    min_conn = self.pk_par["min_conn"]
    max_conn = self.pk_par["max_conn"]
    peak_COM = self.pk_par["peak_COM"]

    self.lab_img, _ = measurements.label(self.img * self.pk_mask)
    obs = measurements.find_objects(self.lab_img)
    for i, (sy, sx) in enumerate(obs):
        lab_idx = i + 1
        y1 = max(0, sy.start - sz)
        y2 = min(self.img_sh[0], sy.stop + sz)
        x1 = max(0, sx.start - sz)
        x2 = min(self.img_sh[1], sx.stop + sz)

        l = self.lab_img[y1:y2, x1:x2]
        nconn = np.sum(l == (lab_idx))
        if nconn < min_conn:
            continue
        if nconn > max_conn:
            continue

        pix = self.img[y1:y2, x1:x2]
        self.intens.append(measurements.maximum(pix, l, lab_idx))
        if peak_COM:
            y, x = measurements.center_of_mass(pix, l, lab_idx)
        else:
            y, x = measurements.maximum_position(pix, l, lab_idx)
        self.pos.append((y + y1, x + x1))

    self.intens = np.array(self.intens)
def blank_threshold_decoder(
        outputs: np.ndarray,
        threshold: float = 0.5) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence as the original
    ocropy/clstm.

    Thresholds on class 0, then assigns the maximum (non-zero) class to each
    region.

    Args:
        outputs: (C, W) shaped softmax output tensor
        threshold: Threshold for 0 class when determining possible label
            locations.

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    outputs = outputs.T
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask) + 1))
    p = 0
    start = None
    x = []
    for idx, val in enumerate(labels):
        if val != 0 and start is None:
            start = idx
            p += 1
        if val == 0 and start is not None:
            if maxima[p - 1][1] == 0:
                start = None
            else:
                x.append(
                    (maxima[p - 1][1], start, idx, outputs[maxima[p - 1]]))
                start = None
    # append the last non-zero region if no zero region occurs after it
    if start:
        x.append(
            (maxima[p - 1][1], start, len(outputs), outputs[maxima[p - 1]]))
    # drop any region whose assigned class is the blank class 0
    return [y for y in x if y[0] != 0]
def gen_feature_mfcc(x):
    v, asp = x
    try:
        if np.isnan(v).any() or np.isinf(v).any():
            raise Exception('MFCC contains NaN or Inf')
        xn, (xmin, xmax), xmean, xvar, xskew, xkurt = stats.describe(v)
        d = np.diff(v, 1, 0)
        dmean = np.mean(d, 0)
        feat = np.reshape((measurements.center_of_mass(v)[0],
                           measurements.center_of_mass(asp)[0],
                           measurements.maximum_position(asp)[0]), (3,))
        hist = 1.0 * measurements.histogram(asp, 0, 1, 4) / asp.shape[0] / asp.shape[1]
        x = np.concatenate((xmean, xmin, xmax, xvar, dmean, feat, hist[-1:]))
        if np.isnan(x).any() or np.isinf(x).any():
            raise Exception('Feature vector contains NaN or Inf')
    except:
        #print d, dd
        print xmin.shape
        raise
    return x.T
def detect_beam_peak(filename):
    img_info = read_image(filename)
    img = img_info.image
    img_array = numpy.fromstring(img.tostring(), numpy.uint32)
    img_array.shape = img.size[1], img.size[0]

    # filter the array so that features less than 8 pixels wide are blurred out
    # assumes that beam center is at least 8 pixels wide
    arr = filters.gaussian_filter(img_array, 8)
    beam_y, beam_x = measurements.maximum_position(arr)

    # valid beam centers must be within the center 1/5 region of the detector surface
    shape = img_array.shape
    cmin = [2 * v / 5 for v in shape]
    cmax = [3 * v / 5 for v in shape]
    good = False
    if cmin[0] < beam_y < cmax[0] and cmin[1] < beam_x < cmax[1]:
        good = True

    return beam_x, beam_y, good
def align(y_LR, size=64, n=12):
    (h, w) = y_LR.shape
    # split image
    y_L = y_LR[:, :w/2]
    y_R = y_LR[:, w/2:]
    (h, w) = y_L.shape
    s = size / 2
    # now find n offsets
    R = np.zeros((n, 2))
    rand = RandomState(0)
    for i in range(n):  # to find each offset
        # at a random location in y_L
        y = rand.randint(h/4, h*3/4)
        x = rand.randint(w/4, w*3/4)
        it = y_L[y:y+s, x:x+s]          # take an s x s chunk there
        tm = match_template(y_R, it)    # match it against y_R
        ry, rx = maximum_position(tm)   # max value is location
        R[i, :] = ((y-ry), (x-rx))      # accumulate
    # take the median
    dy, dx = np.median(R, axis=0).astype(int)
    return dy, dx
def align_better(y_LR,n=4,max_dy=10): # copy y_LR = np.copy(y_LR) # normalize y_LR /= y_LR.max() # unsharp mask us = 3 um = gaussian_filter(y_LR,30) y_LR = ((us / (us - 1)) * (y_LR - um / us)).clip(0.,1.) size = 256 (h,w) = y_LR.shape # split image y_L = y_LR[:,:w/2] y_R = y_LR[:,w/2:] (h,w) = y_L.shape s = size / 2 # align n times rand = RandomState(0) R = np.zeros((n,2)) i,j = 0,n*10 # give up if you can't get good offsets while n > 0 and j > 0: y = rand.randint(h/3,h*2/3) x = rand.randint(w/3,w*2/3) it = y_L[y:y+s,x:x+s] # take an s x s chunk there tm = match_template(y_R,it,pad_input=True) # match it against y_R ry, rx = maximum_position(tm) dy = y - (ry - s/2) dx = x - (rx - s/2) if dy > 0 and dx > 0 and dy < max_dy: R[i,:] = (dy, dx) # accumulate i += 1 n -= 1 j -= 1 # take the median dy, dx = np.median(R,axis=0).astype(int) return dy, dx
def _find_positions(self, image, labeled_array):
    return maximum_position(image, labels=labeled_array, index=self.label)
def vol2iso_viz(vol_file, bg_file, bgcolor=(0,0,0), bgslice='', auto_slice=True, show_label=1, force_show_fig=False, save_fig=None, show_outline=False, plane_orientation='z_axes', slice_index=0, size=(1024,768),vmin=0.2,nb_labels=10,nb_colors=10,label_orientation='vertical'): def getNiftiAsScalarField(filename): fimg = nib.load(filename) hdr = fimg.get_header() data = fimg.get_data() print data.shape af = fimg.get_affine() origin = af[:3,3] stride = [af[0,0],af[1,1],af[2,2]] print stride if stride[0] < 0: print 'flipX' data = data[::-1,:,:] #data = np.flipud(data) if stride[1] < 0: print 'flipY' data = data[:,::-1,:] if stride[2] < 0: print 'flipZ' data = data[:,:,::-1] src = mlab.pipeline.scalar_field(data) src.spacing = hdr.get_zooms() src.origin = origin src.update_image_data = True return src, data if save_fig: mlab.options.offscreen=True mlab.figure(bgcolor=bgcolor, size=size) # try tp turn on depth peeling # https://dl.dropboxusercontent.com/u/1200872/mayavi/depth_peeling.py scene = mlab.gcf().scene scene.renderer.render_window.set(alpha_bit_planes=1,multi_samples=0) scene.renderer.set(use_depth_peeling=True,maximum_number_of_peels=4,occlusion_ratio=0.1) # print "Traits for the renderer" # scene.renderer.print_traits() # print "Traits for the render window" # scene.renderer.render_window.print_traits() src, data = getNiftiAsScalarField(vol_file) iso = mlab.pipeline.iso_surface(src, opacity=0.2, contours=10, vmin=0.2, vmax=1.0) iso if show_label==1: mlab.colorbar(object=iso, nb_labels=10, nb_colors=10, orientation='vertical') if plane_orientation=='z_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=0, elevation=180, distance='auto', focalpoint='auto') elif plane_orientation=='x_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=180, elevation=90, distance='auto', focalpoint='auto') elif plane_orientation=='y_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=90, elevation=90, distance='auto', focalpoint='auto') else: mlab.gcf().scene.isometric_view() if bg_file != '': mrsrc, data = getNiftiAsScalarField(bg_file) orie = plane_orientation if plane_orientation=='iso': orie = 'z_axes' # from scipy import stats # data = stats.threshold(data, threshmin=0.5, threshmax=1, newval=0) # print data.shape from scipy.ndimage.measurements import center_of_mass, maximum_position com = maximum_position(data) print '# max pos = ',com if auto_slice: if orie=='x_axes': slice_index = com[0] elif orie=='y_axes': slice_index = com[1] elif orie=='z_axes': slice_index = com[2] else: slice_index = com[2] opacity=0.5 slice_index = int(slice_index) # auto flip z-index to below center # if plane_orientation=='iso': # opacity=0.5 # center = data.shape[2]/2 # if slice_index > center: # d = abs(center-slice_index) # slice_index = center-d # slice_index = com[2] mlab.pipeline.image_plane_widget(mrsrc, opacity=opacity, plane_orientation=orie, slice_index=int(slice_index), colormap='black-white', line_width=0, reset_zoom=False) if show_outline: mlab.outline() if save_fig: mlab.savefig(save_fig) if force_show_fig: mlab.show() else: mlab.close() else: mlab.show()
def fp_volume_image(fname, fp, tags): """Fingerprint for volumetric images """ # this version needs an increment whenever this implementation changes fp['__version__'] = 0 import nibabel as nb import numpy as np from scipy.ndimage import measurements as msr from scipy.stats import describe img = nb.load(fname) img_data = img.get_data().astype('float') # float for z-score # cleanup the original image to get a leaner footprint del img # keep a map where the original data is larger than zero zero_thresh = img_data > 0 # basic descriptive stats img_size, img_minmax, img_mean, img_var, img_skew, img_kurt = \ describe(img_data, axis=None) img_std = np.sqrt(img_var) fp['std'] = img_std fp['mean'] = img_mean fp['min'] = img_minmax[0] fp['max'] = img_minmax[1] fp['skewness'] = img_skew fp['kurtosis'] = img_kurt if not 'zscores' in tags and not 'tscores' in tags: # unknown distribution of values -> global zscore to normalize img_data -= img_mean img_data /= img_std zmin = img_data.min() zmax = img_data.max() # normalized luminance histogram luminance_hist_params = (-10, 10, 21) fp['histogram_[%i,%i,%i]' % luminance_hist_params] = \ np.histogram(img_data, normed=True, bins=np.linspace(*luminance_hist_params))[0] if not img_std: # no variance, nothing to do return if not '3D image' in tags: # the following clustering can become insane for 4D and higher images return # perform thresholding at various levels and compute descriptive # stats of the resulting clusters clusters = np.empty(img_data.shape, dtype=np.int32) for thresh in (zero_thresh, 2.0, 4.0, 8.0, -2.0, -4.0, -8.0): # create a binarized map for the respective threshold if not isinstance(thresh, float): thresh_map = thresh thresh = 'orig_zero' else: if thresh < 0: if thresh < zmin: # thresholding would yield nothing continue thresh_map = img_data < thresh else: if thresh > zmax: # thresholding would yield nothing continue thresh_map = img_data > thresh nclusters = msr.label(thresh_map, output=clusters) # sort by cluster size cluster_sizes = [(cl, np.sum(clusters == cl)) for cl in xrange(1, nclusters + 1)] cluster_sizes = sorted(cluster_sizes, key=operator.itemgetter(1), reverse=True) # how many clusters to report max_nclusters = 3 if not len(cluster_sizes): # nothing to report, do not clutter the dict continue clinfo = {} fp['thresh_%s' % thresh] = clinfo clinfo['nclusters'] = nclusters # only for the biggest clusters cl_id = 0 for cl_label, cl_size in cluster_sizes[:max_nclusters]: cl_id += 1 cli = dict(size=cl_size) clinfo['cluster_%i' % cl_id] = cli # center of mass of the cluster extent (ignoring actual values) cli['extent_ctr_of_mass'] = \ msr.center_of_mass(thresh_map, labels=clusters, index=cl_label) # center of mass of the cluster considering actual values cli['ctr_of_mass'] = msr.center_of_mass(img_data, labels=clusters, index=cl_label) if isinstance(thresh, float) and thresh < 0: # position of minima pos = msr.minimum_position(img_data, labels=clusters, index=cl_label) cli['min_pos'] = pos cli['min'] = img_data[pos] else: # position of maxima pos = msr.maximum_position(img_data, labels=clusters, index=cl_label) cli['max_pos'] = pos cli['max'] = img_data[pos]
def _call(self, ds): if len(ds) > 1: # average all samples into one, assuming we got something like one # sample per subject as input avgr = mean_sample() ds = avgr(ds) # threshold input; at this point we only have one sample left thrd = ds.samples[0] > self._thrmap # mapper default mapper = IdentityMapper() # overwrite if possible if hasattr(ds, 'a') and 'mapper' in ds.a: mapper = ds.a.mapper # reverse-map input othrd = _verified_reverse1(mapper, thrd) # TODO: what is your purpose in life osamp? ;-) osamp = _verified_reverse1(mapper, ds.samples[0]) # prep output dataset outds = ds.copy(deep=False) outds.fa['featurewise_thresh'] = self._thrmap # determine clusters labels, num = measurements.label(othrd,structure=np.ones([3,3,3])) area = measurements.sum(othrd, labels, index=np.arange(1, num + 1)).astype(int) com = measurements.center_of_mass( osamp, labels=labels, index=np.arange(1, num + 1)) maxpos = measurements.maximum_position( osamp, labels=labels, index=np.arange(1, num + 1)) # for the rest we need the labels flattened labels = mapper.forward1(labels) # relabel clusters starting with the biggest and increase index with # decreasing size ordered_labels = np.zeros(labels.shape, dtype=int) ordered_area = np.zeros(area.shape, dtype=int) ordered_com = np.zeros((num, len(osamp.shape)), dtype=float) ordered_maxpos = np.zeros((num, len(osamp.shape)), dtype=float) for i, idx in enumerate(np.argsort(area)): ordered_labels[labels == idx + 1] = num - i # kinda ugly, but we are looping anyway ordered_area[i] = area[idx] ordered_com[i] = com[idx] ordered_maxpos[i] = maxpos[idx] labels = ordered_labels area = ordered_area[::-1] com = ordered_com[::-1] maxpos = ordered_maxpos[::-1] del ordered_labels # this one can be big # store cluster labels after forward-mapping outds.fa['clusters_featurewise_thresh'] = labels.copy() # location info outds.a['clusterlocations'] = \ np.rec.fromarrays( [com, maxpos], names=('center_of_mass', 'max')) # update cluster size histogram with the actual result to get a # proper lower bound for p-values # this will make a copy, because the original matrix is int cluster_probs_raw = _transform_to_pvals( area, self._null_cluster_sizes.astype('float')) clusterstats = ( [area, cluster_probs_raw], ['size', 'prob_raw'] ) # evaluate a bunch of stats for all clusters morestats = {} for cid in xrange(len(area)): # keep clusters on outer loop, because selection is more expensive clvals = ds.samples[0, labels == cid + 1] for id_, fx in ( ('mean', np.mean), ('median', np.median), ('min', np.min), ('max', np.max), ('std', np.std)): stats = morestats.get(id_, []) stats.append(fx(clvals)) morestats[id_] = stats for k, v in morestats.items(): clusterstats[0].append(v) clusterstats[1].append(k) if self.params.multicomp_correction is not None: # do a local import as only this tiny portion needs statsmodels import statsmodels.stats.multitest as smm rej, probs_corr = smm.multipletests( cluster_probs_raw, alpha=self.params.fwe_rate, method=self.params.multicomp_correction)[:2] # store corrected per-cluster probabilities clusterstats[0].append(probs_corr) clusterstats[1].append('prob_corrected') # remove cluster labels that did not pass the FWE threshold for i, r in enumerate(rej): if not r: labels[labels == i + 1] = 0 outds.fa['clusters_fwe_thresh'] = labels outds.a['clusterstats'] = \ np.rec.fromarrays(clusterstats[0], names=clusterstats[1]) return outds
# Check the dominant modes

# In[13]:

from scipy.ndimage.measurements import maximum_position

nmodes = 5
dominant_wn = np.zeros(nmodes)
search = power
zelab = search > 0  # (all elements)
itit = 1
# -- while loop
while itit <= nmodes:
    # -- find dominant mode
    ij = maximum_position(search, labels=zelab)
    dominant_wn[itit-1] = 2.*np.pi / periods[ij[0]]
    spower = search[ij]
    # -- print result
    print("%2i %8.2f %8.2f" % (itit, dominant_wn[itit-1], spower))
    # -- iterate
    zelab = search < spower  # remove maximum found
    itit += 1


# Display Lomb-Scargle periodogram with a vertical line for the dominant mode. Save figure.

# In[17]:

mpl.plot(wavenumber, power)
mpl.xlabel("Zonal wavenumber")
def vol2iso_viz(vol_file, bg_file, bgcolor=(0,0,0), bgslice='', auto_slice=True, show_label=1, force_show_fig=False, save_fig=None, show_outline=False, plane_orientation='z_axes', slice_index=0, size=(1024,768),vmin=0,nb_labels=10,nb_colors=10,label_orientation='vertical'): from mayavi import mlab if save_fig: mlab.options.offscreen=True mlab.figure(bgcolor=bgcolor, size=size) # try tp turn on depth peeling # https://dl.dropboxusercontent.com/u/1200872/mayavi/depth_peeling.py scene = mlab.gcf().scene scene.renderer.render_window.set(alpha_bit_planes=1,multi_samples=0) scene.renderer.set(use_depth_peeling=True,maximum_number_of_peels=4,occlusion_ratio=0.1) # print "Traits for the renderer" # scene.renderer.print_traits() # print "Traits for the render window" # scene.renderer.render_window.print_traits() _plasma_data = [[0.050383, 0.029803, 0.527975, 1.0], [0.063536, 0.028426, 0.533124, 1.0], [0.075353, 0.027206, 0.538007, 1.0], [0.086222, 0.026125, 0.542658, 1.0], [0.096379, 0.025165, 0.547103, 1.0], [0.105980, 0.024309, 0.551368, 1.0], [0.115124, 0.023556, 0.555468, 1.0], [0.123903, 0.022878, 0.559423, 1.0], [0.132381, 0.022258, 0.563250, 1.0], [0.140603, 0.021687, 0.566959, 1.0], [0.148607, 0.021154, 0.570562, 1.0], [0.156421, 0.020651, 0.574065, 1.0], [0.164070, 0.020171, 0.577478, 1.0], [0.171574, 0.019706, 0.580806, 1.0], [0.178950, 0.019252, 0.584054, 1.0], [0.186213, 0.018803, 0.587228, 1.0], [0.193374, 0.018354, 0.590330, 1.0], [0.200445, 0.017902, 0.593364, 1.0], [0.207435, 0.017442, 0.596333, 1.0], [0.214350, 0.016973, 0.599239, 1.0], [0.221197, 0.016497, 0.602083, 1.0], [0.227983, 0.016007, 0.604867, 1.0], [0.234715, 0.015502, 0.607592, 1.0], [0.241396, 0.014979, 0.610259, 1.0], [0.248032, 0.014439, 0.612868, 1.0], [0.254627, 0.013882, 0.615419, 1.0], [0.261183, 0.013308, 0.617911, 1.0], [0.267703, 0.012716, 0.620346, 1.0], [0.274191, 0.012109, 0.622722, 1.0], [0.280648, 0.011488, 0.625038, 1.0], [0.287076, 0.010855, 0.627295, 1.0], [0.293478, 0.010213, 0.629490, 1.0], [0.299855, 0.009561, 0.631624, 1.0], [0.306210, 0.008902, 0.633694, 1.0], [0.312543, 0.008239, 0.635700, 1.0], [0.318856, 0.007576, 0.637640, 1.0], [0.325150, 0.006915, 0.639512, 1.0], [0.331426, 0.006261, 0.641316, 1.0], [0.337683, 0.005618, 0.643049, 1.0], [0.343925, 0.004991, 0.644710, 1.0], [0.350150, 0.004382, 0.646298, 1.0], [0.356359, 0.003798, 0.647810, 1.0], [0.362553, 0.003243, 0.649245, 1.0], [0.368733, 0.002724, 0.650601, 1.0], [0.374897, 0.002245, 0.651876, 1.0], [0.381047, 0.001814, 0.653068, 1.0], [0.387183, 0.001434, 0.654177, 1.0], [0.393304, 0.001114, 0.655199, 1.0], [0.399411, 0.000859, 0.656133, 1.0], [0.405503, 0.000678, 0.656977, 1.0], [0.411580, 0.000577, 0.657730, 1.0], [0.417642, 0.000564, 0.658390, 1.0], [0.423689, 0.000646, 0.658956, 1.0], [0.429719, 0.000831, 0.659425, 1.0], [0.435734, 0.001127, 0.659797, 1.0], [0.441732, 0.001540, 0.660069, 1.0], [0.447714, 0.002080, 0.660240, 1.0], [0.453677, 0.002755, 0.660310, 1.0], [0.459623, 0.003574, 0.660277, 1.0], [0.465550, 0.004545, 0.660139, 1.0], [0.471457, 0.005678, 0.659897, 1.0], [0.477344, 0.006980, 0.659549, 1.0], [0.483210, 0.008460, 0.659095, 1.0], [0.489055, 0.010127, 0.658534, 1.0], [0.494877, 0.011990, 0.657865, 1.0], [0.500678, 0.014055, 0.657088, 1.0], [0.506454, 0.016333, 0.656202, 1.0], [0.512206, 0.018833, 0.655209, 1.0], [0.517933, 0.021563, 0.654109, 1.0], [0.523633, 0.024532, 0.652901, 1.0], [0.529306, 0.027747, 0.651586, 1.0], [0.534952, 0.031217, 0.650165, 1.0], [0.540570, 0.034950, 0.648640, 1.0], [0.546157, 0.038954, 
0.647010, 1.0], [0.551715, 0.043136, 0.645277, 1.0], [0.557243, 0.047331, 0.643443, 1.0], [0.562738, 0.051545, 0.641509, 1.0], [0.568201, 0.055778, 0.639477, 1.0], [0.573632, 0.060028, 0.637349, 1.0], [0.579029, 0.064296, 0.635126, 1.0], [0.584391, 0.068579, 0.632812, 1.0], [0.589719, 0.072878, 0.630408, 1.0], [0.595011, 0.077190, 0.627917, 1.0], [0.600266, 0.081516, 0.625342, 1.0], [0.605485, 0.085854, 0.622686, 1.0], [0.610667, 0.090204, 0.619951, 1.0], [0.615812, 0.094564, 0.617140, 1.0], [0.620919, 0.098934, 0.614257, 1.0], [0.625987, 0.103312, 0.611305, 1.0], [0.631017, 0.107699, 0.608287, 1.0], [0.636008, 0.112092, 0.605205, 1.0], [0.640959, 0.116492, 0.602065, 1.0], [0.645872, 0.120898, 0.598867, 1.0], [0.650746, 0.125309, 0.595617, 1.0], [0.655580, 0.129725, 0.592317, 1.0], [0.660374, 0.134144, 0.588971, 1.0], [0.665129, 0.138566, 0.585582, 1.0], [0.669845, 0.142992, 0.582154, 1.0], [0.674522, 0.147419, 0.578688, 1.0], [0.679160, 0.151848, 0.575189, 1.0], [0.683758, 0.156278, 0.571660, 1.0], [0.688318, 0.160709, 0.568103, 1.0], [0.692840, 0.165141, 0.564522, 1.0], [0.697324, 0.169573, 0.560919, 1.0], [0.701769, 0.174005, 0.557296, 1.0], [0.706178, 0.178437, 0.553657, 1.0], [0.710549, 0.182868, 0.550004, 1.0], [0.714883, 0.187299, 0.546338, 1.0], [0.719181, 0.191729, 0.542663, 1.0], [0.723444, 0.196158, 0.538981, 1.0], [0.727670, 0.200586, 0.535293, 1.0], [0.731862, 0.205013, 0.531601, 1.0], [0.736019, 0.209439, 0.527908, 1.0], [0.740143, 0.213864, 0.524216, 1.0], [0.744232, 0.218288, 0.520524, 1.0], [0.748289, 0.222711, 0.516834, 1.0], [0.752312, 0.227133, 0.513149, 1.0], [0.756304, 0.231555, 0.509468, 1.0], [0.760264, 0.235976, 0.505794, 1.0], [0.764193, 0.240396, 0.502126, 1.0], [0.768090, 0.244817, 0.498465, 1.0], [0.771958, 0.249237, 0.494813, 1.0], [0.775796, 0.253658, 0.491171, 1.0], [0.779604, 0.258078, 0.487539, 1.0], [0.783383, 0.262500, 0.483918, 1.0], [0.787133, 0.266922, 0.480307, 1.0], [0.790855, 0.271345, 0.476706, 1.0], [0.794549, 0.275770, 0.473117, 1.0], [0.798216, 0.280197, 0.469538, 1.0], [0.801855, 0.284626, 0.465971, 1.0], [0.805467, 0.289057, 0.462415, 1.0], [0.809052, 0.293491, 0.458870, 1.0], [0.812612, 0.297928, 0.455338, 1.0], [0.816144, 0.302368, 0.451816, 1.0], [0.819651, 0.306812, 0.448306, 1.0], [0.823132, 0.311261, 0.444806, 1.0], [0.826588, 0.315714, 0.441316, 1.0], [0.830018, 0.320172, 0.437836, 1.0], [0.833422, 0.324635, 0.434366, 1.0], [0.836801, 0.329105, 0.430905, 1.0], [0.840155, 0.333580, 0.427455, 1.0], [0.843484, 0.338062, 0.424013, 1.0], [0.846788, 0.342551, 0.420579, 1.0], [0.850066, 0.347048, 0.417153, 1.0], [0.853319, 0.351553, 0.413734, 1.0], [0.856547, 0.356066, 0.410322, 1.0], [0.859750, 0.360588, 0.406917, 1.0], [0.862927, 0.365119, 0.403519, 1.0], [0.866078, 0.369660, 0.400126, 1.0], [0.869203, 0.374212, 0.396738, 1.0], [0.872303, 0.378774, 0.393355, 1.0], [0.875376, 0.383347, 0.389976, 1.0], [0.878423, 0.387932, 0.386600, 1.0], [0.881443, 0.392529, 0.383229, 1.0], [0.884436, 0.397139, 0.379860, 1.0], [0.887402, 0.401762, 0.376494, 1.0], [0.890340, 0.406398, 0.373130, 1.0], [0.893250, 0.411048, 0.369768, 1.0], [0.896131, 0.415712, 0.366407, 1.0], [0.898984, 0.420392, 0.363047, 1.0], [0.901807, 0.425087, 0.359688, 1.0], [0.904601, 0.429797, 0.356329, 1.0], [0.907365, 0.434524, 0.352970, 1.0], [0.910098, 0.439268, 0.349610, 1.0], [0.912800, 0.444029, 0.346251, 1.0], [0.915471, 0.448807, 0.342890, 1.0], [0.918109, 0.453603, 0.339529, 1.0], [0.920714, 0.458417, 0.336166, 1.0], [0.923287, 0.463251, 0.332801, 1.0], [0.925825, 0.468103, 
0.329435, 1.0], [0.928329, 0.472975, 0.326067, 1.0], [0.930798, 0.477867, 0.322697, 1.0], [0.933232, 0.482780, 0.319325, 1.0], [0.935630, 0.487712, 0.315952, 1.0], [0.937990, 0.492667, 0.312575, 1.0], [0.940313, 0.497642, 0.309197, 1.0], [0.942598, 0.502639, 0.305816, 1.0], [0.944844, 0.507658, 0.302433, 1.0], [0.947051, 0.512699, 0.299049, 1.0], [0.949217, 0.517763, 0.295662, 1.0], [0.951344, 0.522850, 0.292275, 1.0], [0.953428, 0.527960, 0.288883, 1.0], [0.955470, 0.533093, 0.285490, 1.0], [0.957469, 0.538250, 0.282096, 1.0], [0.959424, 0.543431, 0.278701, 1.0], [0.961336, 0.548636, 0.275305, 1.0], [0.963203, 0.553865, 0.271909, 1.0], [0.965024, 0.559118, 0.268513, 1.0], [0.966798, 0.564396, 0.265118, 1.0], [0.968526, 0.569700, 0.261721, 1.0], [0.970205, 0.575028, 0.258325, 1.0], [0.971835, 0.580382, 0.254931, 1.0], [0.973416, 0.585761, 0.251540, 1.0], [0.974947, 0.591165, 0.248151, 1.0], [0.976428, 0.596595, 0.244767, 1.0], [0.977856, 0.602051, 0.241387, 1.0], [0.979233, 0.607532, 0.238013, 1.0], [0.980556, 0.613039, 0.234646, 1.0], [0.981826, 0.618572, 0.231287, 1.0], [0.983041, 0.624131, 0.227937, 1.0], [0.984199, 0.629718, 0.224595, 1.0], [0.985301, 0.635330, 0.221265, 1.0], [0.986345, 0.640969, 0.217948, 1.0], [0.987332, 0.646633, 0.214648, 1.0], [0.988260, 0.652325, 0.211364, 1.0], [0.989128, 0.658043, 0.208100, 1.0], [0.989935, 0.663787, 0.204859, 1.0], [0.990681, 0.669558, 0.201642, 1.0], [0.991365, 0.675355, 0.198453, 1.0], [0.991985, 0.681179, 0.195295, 1.0], [0.992541, 0.687030, 0.192170, 1.0], [0.993032, 0.692907, 0.189084, 1.0], [0.993456, 0.698810, 0.186041, 1.0], [0.993814, 0.704741, 0.183043, 1.0], [0.994103, 0.710698, 0.180097, 1.0], [0.994324, 0.716681, 0.177208, 1.0], [0.994474, 0.722691, 0.174381, 1.0], [0.994553, 0.728728, 0.171622, 1.0], [0.994561, 0.734791, 0.168938, 1.0], [0.994495, 0.740880, 0.166335, 1.0], [0.994355, 0.746995, 0.163821, 1.0], [0.994141, 0.753137, 0.161404, 1.0], [0.993851, 0.759304, 0.159092, 1.0], [0.993482, 0.765499, 0.156891, 1.0], [0.993033, 0.771720, 0.154808, 1.0], [0.992505, 0.777967, 0.152855, 1.0], [0.991897, 0.784239, 0.151042, 1.0], [0.991209, 0.790537, 0.149377, 1.0], [0.990439, 0.796859, 0.147870, 1.0], [0.989587, 0.803205, 0.146529, 1.0], [0.988648, 0.809579, 0.145357, 1.0], [0.987621, 0.815978, 0.144363, 1.0], [0.986509, 0.822401, 0.143557, 1.0], [0.985314, 0.828846, 0.142945, 1.0], [0.984031, 0.835315, 0.142528, 1.0], [0.982653, 0.841812, 0.142303, 1.0], [0.981190, 0.848329, 0.142279, 1.0], [0.979644, 0.854866, 0.142453, 1.0], [0.977995, 0.861432, 0.142808, 1.0], [0.976265, 0.868016, 0.143351, 1.0], [0.974443, 0.874622, 0.144061, 1.0], [0.972530, 0.881250, 0.144923, 1.0], [0.970533, 0.887896, 0.145919, 1.0], [0.968443, 0.894564, 0.147014, 1.0], [0.966271, 0.901249, 0.148180, 1.0], [0.964021, 0.907950, 0.149370, 1.0], [0.961681, 0.914672, 0.150520, 1.0], [0.959276, 0.921407, 0.151566, 1.0], [0.956808, 0.928152, 0.152409, 1.0], [0.954287, 0.934908, 0.152921, 1.0], [0.951726, 0.941671, 0.152925, 1.0], [0.949151, 0.948435, 0.152178, 1.0], [0.946602, 0.955190, 0.150328, 1.0], [0.944152, 0.961916, 0.146861, 1.0], [0.941896, 0.968590, 0.140956, 1.0], [0.940015, 0.975158, 0.131326, 1.0]] # colormap for the new viridis viridis_data = [[0.267004, 0.004874, 0.329415, 1.0], [0.268510, 0.009605, 0.335427, 1.0], [0.269944, 0.014625, 0.341379, 1.0], [0.271305, 0.019942, 0.347269, 1.0], [0.272594, 0.025563, 0.353093, 1.0], [0.273809, 0.031497, 0.358853, 1.0], [0.274952, 0.037752, 0.364543, 1.0], [0.276022, 0.044167, 0.370164, 1.0], [0.277018, 
0.050344, 0.375715, 1.0], [0.277941, 0.056324, 0.381191, 1.0], [0.278791, 0.062145, 0.386592, 1.0], [0.279566, 0.067836, 0.391917, 1.0], [0.280267, 0.073417, 0.397163, 1.0], [0.280894, 0.078907, 0.402329, 1.0], [0.281446, 0.084320, 0.407414, 1.0], [0.281924, 0.089666, 0.412415, 1.0], [0.282327, 0.094955, 0.417331, 1.0], [0.282656, 0.100196, 0.422160, 1.0], [0.282910, 0.105393, 0.426902, 1.0], [0.283091, 0.110553, 0.431554, 1.0], [0.283197, 0.115680, 0.436115, 1.0], [0.283229, 0.120777, 0.440584, 1.0], [0.283187, 0.125848, 0.444960, 1.0], [0.283072, 0.130895, 0.449241, 1.0], [0.282884, 0.135920, 0.453427, 1.0], [0.282623, 0.140926, 0.457517, 1.0], [0.282290, 0.145912, 0.461510, 1.0], [0.281887, 0.150881, 0.465405, 1.0], [0.281412, 0.155834, 0.469201, 1.0], [0.280868, 0.160771, 0.472899, 1.0], [0.280255, 0.165693, 0.476498, 1.0], [0.279574, 0.170599, 0.479997, 1.0], [0.278826, 0.175490, 0.483397, 1.0], [0.278012, 0.180367, 0.486697, 1.0], [0.277134, 0.185228, 0.489898, 1.0], [0.276194, 0.190074, 0.493001, 1.0], [0.275191, 0.194905, 0.496005, 1.0], [0.274128, 0.199721, 0.498911, 1.0], [0.273006, 0.204520, 0.501721, 1.0], [0.271828, 0.209303, 0.504434, 1.0], [0.270595, 0.214069, 0.507052, 1.0], [0.269308, 0.218818, 0.509577, 1.0], [0.267968, 0.223549, 0.512008, 1.0], [0.266580, 0.228262, 0.514349, 1.0], [0.265145, 0.232956, 0.516599, 1.0], [0.263663, 0.237631, 0.518762, 1.0], [0.262138, 0.242286, 0.520837, 1.0], [0.260571, 0.246922, 0.522828, 1.0], [0.258965, 0.251537, 0.524736, 1.0], [0.257322, 0.256130, 0.526563, 1.0], [0.255645, 0.260703, 0.528312, 1.0], [0.253935, 0.265254, 0.529983, 1.0], [0.252194, 0.269783, 0.531579, 1.0], [0.250425, 0.274290, 0.533103, 1.0], [0.248629, 0.278775, 0.534556, 1.0], [0.246811, 0.283237, 0.535941, 1.0], [0.244972, 0.287675, 0.537260, 1.0], [0.243113, 0.292092, 0.538516, 1.0], [0.241237, 0.296485, 0.539709, 1.0], [0.239346, 0.300855, 0.540844, 1.0], [0.237441, 0.305202, 0.541921, 1.0], [0.235526, 0.309527, 0.542944, 1.0], [0.233603, 0.313828, 0.543914, 1.0], [0.231674, 0.318106, 0.544834, 1.0], [0.229739, 0.322361, 0.545706, 1.0], [0.227802, 0.326594, 0.546532, 1.0], [0.225863, 0.330805, 0.547314, 1.0], [0.223925, 0.334994, 0.548053, 1.0], [0.221989, 0.339161, 0.548752, 1.0], [0.220057, 0.343307, 0.549413, 1.0], [0.218130, 0.347432, 0.550038, 1.0], [0.216210, 0.351535, 0.550627, 1.0], [0.214298, 0.355619, 0.551184, 1.0], [0.212395, 0.359683, 0.551710, 1.0], [0.210503, 0.363727, 0.552206, 1.0], [0.208623, 0.367752, 0.552675, 1.0], [0.206756, 0.371758, 0.553117, 1.0], [0.204903, 0.375746, 0.553533, 1.0], [0.203063, 0.379716, 0.553925, 1.0], [0.201239, 0.383670, 0.554294, 1.0], [0.199430, 0.387607, 0.554642, 1.0], [0.197636, 0.391528, 0.554969, 1.0], [0.195860, 0.395433, 0.555276, 1.0], [0.194100, 0.399323, 0.555565, 1.0], [0.192357, 0.403199, 0.555836, 1.0], [0.190631, 0.407061, 0.556089, 1.0], [0.188923, 0.410910, 0.556326, 1.0], [0.187231, 0.414746, 0.556547, 1.0], [0.185556, 0.418570, 0.556753, 1.0], [0.183898, 0.422383, 0.556944, 1.0], [0.182256, 0.426184, 0.557120, 1.0], [0.180629, 0.429975, 0.557282, 1.0], [0.179019, 0.433756, 0.557430, 1.0], [0.177423, 0.437527, 0.557565, 1.0], [0.175841, 0.441290, 0.557685, 1.0], [0.174274, 0.445044, 0.557792, 1.0], [0.172719, 0.448791, 0.557885, 1.0], [0.171176, 0.452530, 0.557965, 1.0], [0.169646, 0.456262, 0.558030, 1.0], [0.168126, 0.459988, 0.558082, 1.0], [0.166617, 0.463708, 0.558119, 1.0], [0.165117, 0.467423, 0.558141, 1.0], [0.163625, 0.471133, 0.558148, 1.0], [0.162142, 0.474838, 0.558140, 1.0], [0.160665, 
0.478540, 0.558115, 1.0], [0.159194, 0.482237, 0.558073, 1.0], [0.157729, 0.485932, 0.558013, 1.0], [0.156270, 0.489624, 0.557936, 1.0], [0.154815, 0.493313, 0.557840, 1.0], [0.153364, 0.497000, 0.557724, 1.0], [0.151918, 0.500685, 0.557587, 1.0], [0.150476, 0.504369, 0.557430, 1.0], [0.149039, 0.508051, 0.557250, 1.0], [0.147607, 0.511733, 0.557049, 1.0], [0.146180, 0.515413, 0.556823, 1.0], [0.144759, 0.519093, 0.556572, 1.0], [0.143343, 0.522773, 0.556295, 1.0], [0.141935, 0.526453, 0.555991, 1.0], [0.140536, 0.530132, 0.555659, 1.0], [0.139147, 0.533812, 0.555298, 1.0], [0.137770, 0.537492, 0.554906, 1.0], [0.136408, 0.541173, 0.554483, 1.0], [0.135066, 0.544853, 0.554029, 1.0], [0.133743, 0.548535, 0.553541, 1.0], [0.132444, 0.552216, 0.553018, 1.0], [0.131172, 0.555899, 0.552459, 1.0], [0.129933, 0.559582, 0.551864, 1.0], [0.128729, 0.563265, 0.551229, 1.0], [0.127568, 0.566949, 0.550556, 1.0], [0.126453, 0.570633, 0.549841, 1.0], [0.125394, 0.574318, 0.549086, 1.0], [0.124395, 0.578002, 0.548287, 1.0], [0.123463, 0.581687, 0.547445, 1.0], [0.122606, 0.585371, 0.546557, 1.0], [0.121831, 0.589055, 0.545623, 1.0], [0.121148, 0.592739, 0.544641, 1.0], [0.120565, 0.596422, 0.543611, 1.0], [0.120092, 0.600104, 0.542530, 1.0], [0.119738, 0.603785, 0.541400, 1.0], [0.119512, 0.607464, 0.540218, 1.0], [0.119423, 0.611141, 0.538982, 1.0], [0.119483, 0.614817, 0.537692, 1.0], [0.119699, 0.618490, 0.536347, 1.0], [0.120081, 0.622161, 0.534946, 1.0], [0.120638, 0.625828, 0.533488, 1.0], [0.121380, 0.629492, 0.531973, 1.0], [0.122312, 0.633153, 0.530398, 1.0], [0.123444, 0.636809, 0.528763, 1.0], [0.124780, 0.640461, 0.527068, 1.0], [0.126326, 0.644107, 0.525311, 1.0], [0.128087, 0.647749, 0.523491, 1.0], [0.130067, 0.651384, 0.521608, 1.0], [0.132268, 0.655014, 0.519661, 1.0], [0.134692, 0.658636, 0.517649, 1.0], [0.137339, 0.662252, 0.515571, 1.0], [0.140210, 0.665859, 0.513427, 1.0], [0.143303, 0.669459, 0.511215, 1.0], [0.146616, 0.673050, 0.508936, 1.0], [0.150148, 0.676631, 0.506589, 1.0], [0.153894, 0.680203, 0.504172, 1.0], [0.157851, 0.683765, 0.501686, 1.0], [0.162016, 0.687316, 0.499129, 1.0], [0.166383, 0.690856, 0.496502, 1.0], [0.170948, 0.694384, 0.493803, 1.0], [0.175707, 0.697900, 0.491033, 1.0], [0.180653, 0.701402, 0.488189, 1.0], [0.185783, 0.704891, 0.485273, 1.0], [0.191090, 0.708366, 0.482284, 1.0], [0.196571, 0.711827, 0.479221, 1.0], [0.202219, 0.715272, 0.476084, 1.0], [0.208030, 0.718701, 0.472873, 1.0], [0.214000, 0.722114, 0.469588, 1.0], [0.220124, 0.725509, 0.466226, 1.0], [0.226397, 0.728888, 0.462789, 1.0], [0.232815, 0.732247, 0.459277, 1.0], [0.239374, 0.735588, 0.455688, 1.0], [0.246070, 0.738910, 0.452024, 1.0], [0.252899, 0.742211, 0.448284, 1.0], [0.259857, 0.745492, 0.444467, 1.0], [0.266941, 0.748751, 0.440573, 1.0], [0.274149, 0.751988, 0.436601, 1.0], [0.281477, 0.755203, 0.432552, 1.0], [0.288921, 0.758394, 0.428426, 1.0], [0.296479, 0.761561, 0.424223, 1.0], [0.304148, 0.764704, 0.419943, 1.0], [0.311925, 0.767822, 0.415586, 1.0], [0.319809, 0.770914, 0.411152, 1.0], [0.327796, 0.773980, 0.406640, 1.0], [0.335885, 0.777018, 0.402049, 1.0], [0.344074, 0.780029, 0.397381, 1.0], [0.352360, 0.783011, 0.392636, 1.0], [0.360741, 0.785964, 0.387814, 1.0], [0.369214, 0.788888, 0.382914, 1.0], [0.377779, 0.791781, 0.377939, 1.0], [0.386433, 0.794644, 0.372886, 1.0], [0.395174, 0.797475, 0.367757, 1.0], [0.404001, 0.800275, 0.362552, 1.0], [0.412913, 0.803041, 0.357269, 1.0], [0.421908, 0.805774, 0.351910, 1.0], [0.430983, 0.808473, 0.346476, 1.0], [0.440137, 
0.811138, 0.340967, 1.0], [0.449368, 0.813768, 0.335384, 1.0], [0.458674, 0.816363, 0.329727, 1.0], [0.468053, 0.818921, 0.323998, 1.0], [0.477504, 0.821444, 0.318195, 1.0], [0.487026, 0.823929, 0.312321, 1.0], [0.496615, 0.826376, 0.306377, 1.0], [0.506271, 0.828786, 0.300362, 1.0], [0.515992, 0.831158, 0.294279, 1.0], [0.525776, 0.833491, 0.288127, 1.0], [0.535621, 0.835785, 0.281908, 1.0], [0.545524, 0.838039, 0.275626, 1.0], [0.555484, 0.840254, 0.269281, 1.0], [0.565498, 0.842430, 0.262877, 1.0], [0.575563, 0.844566, 0.256415, 1.0], [0.585678, 0.846661, 0.249897, 1.0], [0.595839, 0.848717, 0.243329, 1.0], [0.606045, 0.850733, 0.236712, 1.0], [0.616293, 0.852709, 0.230052, 1.0], [0.626579, 0.854645, 0.223353, 1.0], [0.636902, 0.856542, 0.216620, 1.0], [0.647257, 0.858400, 0.209861, 1.0], [0.657642, 0.860219, 0.203082, 1.0], [0.668054, 0.861999, 0.196293, 1.0], [0.678489, 0.863742, 0.189503, 1.0], [0.688944, 0.865448, 0.182725, 1.0], [0.699415, 0.867117, 0.175971, 1.0], [0.709898, 0.868751, 0.169257, 1.0], [0.720391, 0.870350, 0.162603, 1.0], [0.730889, 0.871916, 0.156029, 1.0], [0.741388, 0.873449, 0.149561, 1.0], [0.751884, 0.874951, 0.143228, 1.0], [0.762373, 0.876424, 0.137064, 1.0], [0.772852, 0.877868, 0.131109, 1.0], [0.783315, 0.879285, 0.125405, 1.0], [0.793760, 0.880678, 0.120005, 1.0], [0.804182, 0.882046, 0.114965, 1.0], [0.814576, 0.883393, 0.110347, 1.0], [0.824940, 0.884720, 0.106217, 1.0], [0.835270, 0.886029, 0.102646, 1.0], [0.845561, 0.887322, 0.099702, 1.0], [0.855810, 0.888601, 0.097452, 1.0], [0.866013, 0.889868, 0.095953, 1.0], [0.876168, 0.891125, 0.095250, 1.0], [0.886271, 0.892374, 0.095374, 1.0], [0.896320, 0.893616, 0.096335, 1.0], [0.906311, 0.894855, 0.098125, 1.0], [0.916242, 0.896091, 0.100717, 1.0], [0.926106, 0.897330, 0.104071, 1.0], [0.935904, 0.898570, 0.108131, 1.0], [0.945636, 0.899815, 0.112838, 1.0], [0.955300, 0.901065, 0.118128, 1.0], [0.964894, 0.902323, 0.123941, 1.0], [0.974417, 0.903590, 0.130215, 1.0], [0.983868, 0.904867, 0.136897, 1.0], [0.993248, 0.906157, 0.143936, 1.0]] cmap_256 = np.array(viridis_data) * 255 src, data = getNiftiAsScalarField(vol_file) iso = mlab.pipeline.iso_surface(src, opacity=0.2, contours=10, vmin=0, vmax=1.0) iso.module_manager.scalar_lut_manager.lut.table = cmap_256 if show_label==1: mlab.colorbar(object=iso, nb_labels=10, nb_colors=10, orientation='vertical') if plane_orientation=='z_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=0, elevation=180, distance='auto', focalpoint='auto') elif plane_orientation=='x_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=180, elevation=90, distance='auto', focalpoint='auto') elif plane_orientation=='y_axes': mlab.gcf().scene.camera.parallel_projection=True mlab.view(azimuth=90, elevation=90, distance='auto', focalpoint='auto') else: mlab.gcf().scene.isometric_view() if bg_file != '': mrsrc, data = getNiftiAsScalarField(bg_file) orie = plane_orientation if plane_orientation=='iso': orie = 'z_axes' # from scipy import stats # data = stats.threshold(data, threshmin=0.5, threshmax=1, newval=0) # print data.shape from scipy.ndimage.measurements import center_of_mass, maximum_position com = maximum_position(data) print '# max pos = ',com if auto_slice: if orie=='x_axes': slice_index = com[0] elif orie=='y_axes': slice_index = com[1] elif orie=='z_axes': slice_index = com[2] else: slice_index = com[2] opacity=0.5 slice_index = int(slice_index) # auto flip z-index to below center # if plane_orientation=='iso': # opacity=0.5 # 
center = data.shape[2]/2 # if slice_index > center: # d = abs(center-slice_index) # slice_index = center-d # slice_index = com[2] mlab.pipeline.image_plane_widget(mrsrc, opacity=opacity, plane_orientation=orie, slice_index=int(slice_index), colormap='black-white', line_width=0, reset_zoom=False) if show_outline: mlab.outline() if save_fig: mlab.savefig(save_fig) if force_show_fig: mlab.show() else: mlab.close() else: mlab.show()
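# A minimal, self-contained sketch of the auto_slice idea used above: pick the slice index
# along the requested axis from the voxel of peak intensity. Only NumPy/SciPy are needed here;
# getNiftiAsScalarField and the mayavi calls are not required for this step, and the helper
# name is illustrative rather than part of the original pipeline.
import numpy as np
from scipy.ndimage import maximum_position

def auto_slice_index(volume, plane_orientation='z_axes'):
    """Return the slice index along the chosen axis that contains the brightest voxel."""
    peak = maximum_position(volume)                       # (i, j, k) of the global maximum
    axis = {'x_axes': 0, 'y_axes': 1, 'z_axes': 2}.get(plane_orientation, 2)
    return int(peak[axis])

# toy usage: a synthetic volume with a single bright voxel
vol = np.zeros((32, 32, 32))
vol[10, 20, 5] = 1.0
print(auto_slice_index(vol, 'z_axes'))                    # -> 5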
def translate_back_locations_extended(network, threshold=0.7): """ Translates the network output back into a class sequence. Thresholds on class 0, then assigns the maximum (non-zero) class to each region. Unlike translate_back, the whole output region is returned rather than just the maximum's position, and alternative classes are collected for each region. Args: network: network object exposing the softmax activations as `outputs` and a label-to-symbol mapping `l2s`. threshold: probability below which class 0 (the blank class) is considered inactive. Returns: A list with tuples (class, start, end, max, alternatives). max is the maximum value of the softmax layer in the region; alternatives is a list of [label, probability] pairs for competing classes. """ outputs = network.outputs labels, n = measurements.label(outputs[:, 0] < threshold) mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1])) maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask) + 1)) p = 0 start = None x = [] def find_alternatives(char, start, end, maxProb): alt = [] symbols = "" for lbl in range(len(outputs[0])): alt.append([lbl, 0]) symbols += network.l2s([lbl]) for pos in range(start, end): for lbl in range(len(outputs[pos, :])): if lbl != char: prob = outputs[pos, lbl] if prob > 0.01: if prob > alt[lbl][1]: alt[lbl][1] = prob #alt[lbl][1] += prob #else: # prob = outputs[pos, char] # if prob > 0.01: # maxProb += prob return char, start, end, maxProb, alt for idx, val in enumerate(labels): if val != 0 and start is None: start = idx p += 1 if val == 0 and start is not None: if maxima[p - 1][1] == 0: start = None else: x.append( find_alternatives(maxima[p - 1][1], start, idx, outputs[maxima[p - 1]])) start = None # append the last non-zero region if no zero region occurs after it if start: x.append( find_alternatives(maxima[p - 1][1], start, len(outputs), outputs[maxima[p - 1]])) return x
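# A toy illustration of the label/mask/maximum_position combination used by the two
# translate_back_locations variants above, on a tiny synthetic (time, classes) softmax matrix.
# The numbers are made up; the calls mirror the measurements.* usage above but go through
# scipy.ndimage directly.
import numpy as np
from scipy.ndimage import label, maximum_position

outputs = np.array([[0.9, 0.05, 0.05],
                    [0.2, 0.70, 0.10],    # region 1: class 1 dominates
                    [0.1, 0.80, 0.10],
                    [0.9, 0.05, 0.05],
                    [0.3, 0.10, 0.60],    # region 2: class 2 dominates
                    [0.9, 0.05, 0.05]])
threshold = 0.5

labels, n = label(outputs[:, 0] < threshold)                  # runs where the blank class is weak
mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))  # broadcast the run labels over classes
maxima = maximum_position(outputs, mask, np.arange(1, mask.max() + 1))
print(maxima)   # one (row, class) per region, e.g. [(2, 1), (4, 2)]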
def forwardModel(file, out='Data', wavelength=None, gain=3.1, size=10, burn=500, spotx=2888, spoty=3514, run=700, simulation=False, truths=None): """ Forward models the spot data found from the input file. Can be used with simulated and real data. Notes: - emcee is run three times as it is important to have a good starting point for the final run. """ print '\n\n\n' print '_'*120 print 'Processing:', file #get data and convert to electrons o = pf.getdata(file)*gain if simulation: data = o else: #roughly the correct location - to avoid identifying e.g. cosmic rays data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy() #maximum position within the cutout y, x = m.maximum_position(data) #spot and the peak pixel within the spot, this is also the CCD kernel position spot = data[y-size:y+size+1, x-size:x+size+1].copy() CCDy, CCDx = m.maximum_position(spot) print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy #bias estimate if simulation: bias = 9000. rn = 4.5 else: bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20]) print 'Readnoise (e):', rn if rn < 2. or rn > 6.: print 'NOTE: suspicious readout noise estimate...' print 'ADC offset (e):', bias #remove bias spot -= bias #save to file fileIO.writeFITS(spot, out+'small.fits', int=False) #make a copy ot generate error array data = spot.copy().flatten() #assume that uncertanties scale as sqrt of the values + readnoise #sigma = np.sqrt(data/gain + rn**2) tmp = data.copy() tmp[tmp + rn**2 < 0.] = 0. #set highly negative values to zero var = tmp.copy() + rn**2 #Gary B. said that actually this should be from the model or is biased, #so I only pass the readout noise part now #fit a simple model print 'Least Squares Fitting...' gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5) gaus.theta.fixed = True #fix angle p_init = gaus fit_p = fitting.LevMarLSQFitter() stopy, stopx = spot.shape X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1)) p = fit_p(p_init, X, Y, spot) print p model = p(X, Y) fileIO.writeFITS(model, out+'BasicModel.fits', int=False) fileIO.writeFITS(model - spot, out+'BasicModelResidual.fits', int=False) #goodness of fit gof = (1./(np.size(data) - 5.)) * np.sum((model.flatten() - data)**2 / var) print 'GoF:', gof print 'Done\n\n' #maximum value max = np.max(spot) peakrange = (0.9*max, 1.7*max) sum = np.sum(spot) print 'Maximum Value:', max print 'Sum of the values:', sum print 'Peak Range:', peakrange #MCMC based fitting print 'Bayesian Model Fitting...' nwalkers = 1000 # Initialize the sampler with the chosen specs. #Create the coordinates x and y x = np.arange(0, spot.shape[1]) y = np.arange(0, spot.shape[0]) #Put the coordinates in a mesh xx, yy = np.meshgrid(x, y) #Flatten the arrays xx = xx.flatten() yy = yy.flatten() print 'Fitting full model...' 
ndim = 7 #Choose an initial set of positions for the walkers - fairly large area not to bias the results p0 = np.zeros((nwalkers, ndim)) #peak, center_x, center_y, radius, focus, width_x, width_y = theta p0[:, 0] = np.random.normal(max, max/100., size=nwalkers) # peak value p0[:, 1] = np.random.normal(p.x_mean.value, 0.1, size=nwalkers) # x p0[:, 2] = np.random.normal(p.y_mean.value, 0.1, size=nwalkers) # y print 'Using initial guess [radius, focus, width_x, width_y]:', [1.5, 0.6, 0.02, 0.03] p0[:, 3] = np.random.normal(1.5, 0.01, size=nwalkers) # radius p0[:, 4] = np.random.normal(0.6, 0.01, size=nwalkers) # focus p0[:, 5] = np.random.normal(0.02, 0.0001, size=nwalkers) # width_x p0[:, 6] = np.random.normal(0.03, 0.0001, size=nwalkers) # width_y #initiate sampler pool = Pool(cores) #A hack Dan gave me to not have ghost processes running as with threads keyword #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var, peakrange, spot.shape], sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, rn**2, peakrange, spot.shape], pool=pool) # Run a burn-in and set new starting position print "Burning-in..." pos, prob, state = sampler.run_mcmc(p0, burn) maxprob_index = np.argmax(prob) params_fit = pos[maxprob_index] print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) print 'Estimate:', params_fit sampler.reset() print "Running MCMC..." pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state) print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) #Get the index with the highest probability maxprob_index = np.argmax(prob) #Get the best parameters and their respective errors and print best fits params_fit = pos[maxprob_index] errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)] _printResults(params_fit, errors_fit) #Best fit model peak, center_x, center_y, radius, focus, width_x, width_y = params_fit amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=CCDx, y_0=CCDy) airy = models.AiryDisk2D(amplitude, center_x, center_y, radius) adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape) f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.) 
focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape) foc = signal.convolve2d(adata, focusdata, mode='same') CCDdata = np.array([[0.0, width_y, 0.0], [width_x, (1.-width_y-width_y-width_x-width_x), width_x], [0.0, width_y, 0.0]]) fileIO.writeFITS(CCDdata, 'kernel.fits', int=False) model = signal.convolve2d(foc, CCDdata, mode='same') #save model fileIO.writeFITS(model, out+'model.fits', int=False) #residuals fileIO.writeFITS(model - spot, out+'residual.fits', int=False) fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False) # a simple goodness of fit gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var) maxdiff = np.max(np.abs(model - spot)) print 'GoF:', gof, ' Maximum difference:', maxdiff if maxdiff > 2e3 or gof > 4.: print '\nFIT UNLIKELY TO BE GOOD...\n' print 'Amplitude estimate:', amplitude #plot samples = sampler.chain.reshape((-1, ndim)) extents = None if simulation: extents = [(0.91*truth, 1.09*truth) for truth in truths] extents[1] = (truths[1]*0.995, truths[1]*1.005) extents[2] = (truths[2]*0.995, truths[2]*1.005) extents[3] = (0.395, 0.425) extents[4] = (0.503, 0.517) truths[0] = _peakFromTruth(truths) print truths fig = triangle.corner(samples, labels=['peak', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'], truths=truths)#, extents=extents) fig.savefig(out+'Triangle.png') plt.close() pool.close()
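# A condensed, runnable sketch of the forward-model chain assembled above: an Airy disc for the
# optics, convolved with a symmetric Gaussian for defocus, convolved with a small cross-shaped
# CCD charge-diffusion kernel. All parameter values here are illustrative, not fitted results.
import numpy as np
from astropy.modeling import models
from scipy import signal

amplitude, center_x, center_y, radius = 1.0e3, 10., 10., 0.5   # Airy disc parameters
focus = 0.4                                                    # defocus Gaussian sigma
width_x, width_y = 0.1, 0.15                                   # CCD kernel weights
size = 21

yy, xx = np.mgrid[0:size, 0:size]

adata = models.AiryDisk2D(amplitude, center_x, center_y, radius)(xx, yy)
focusdata = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)(xx, yy)
foc = signal.convolve2d(adata, focusdata, mode='same')

# the centre element keeps the charge not diffused to the four nearest neighbours,
# so the kernel sums to one by construction
CCDdata = np.array([[0.0,     width_y,                       0.0],
                    [width_x, 1. - 2.*width_y - 2.*width_x,  width_x],
                    [0.0,     width_y,                       0.0]])
model = signal.convolve2d(foc, CCDdata, mode='same')
print(model.shape, CCDdata.sum())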
def forwardModelTest(file, CCDPSFmodel='Gaus', out='Data', gain=3.1, size=10, spotx=2888, spoty=3514, burn=100, run=200, nwalkers=1000): """ A single file to quickly test if the method works """ #get data and convert to electrons print '\n\n\n' print '_'*120 print 'Processing:', file o = pf.getdata(file)*gain #roughly the correct location - to avoid identifying e.g. cosmic rays data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy() #maximum position within the cutout y, x = m.maximum_position(data) #spot and the peak pixel within the spot, this is also the CCD kernel position spot = data[y-size:y+size+1, x-size:x+size+1].copy() CCDy, CCDx = m.maximum_position(spot) bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20]) print 'Readnoise (e):', rn if rn < 2. or rn > 6.: print 'NOTE: suspicious readout noise estimate...' print 'ADC offset (e):', bias #remove bias spot -= bias #save to file fileIO.writeFITS(spot, out+'small.fits', int=False) #make a copy ot generate error array data = spot.copy().flatten() data[data + rn**2 < 0.] = 0. #set highly negative values to zero #assume errors scale as sqrt of the values + readnoise sigma = np.sqrt(data + rn**2) #variance is the true noise model var = sigma**2 #maximum value max = np.max(spot) print 'Maximum Value:', max #fit a simple model print 'Least Squares Fitting...' gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5) gaus.theta.fixed = True #fix angle p_init = gaus fit_p = fitting.LevMarLSQFitter() stopy, stopx = spot.shape X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1)) p = fit_p(p_init, X, Y, spot) print p model = p(X, Y) fileIO.writeFITS(model, out+'BasicModelG.fits', int=False) fileIO.writeFITS(model - spot, out+'BasicModelResidualG.fits', int=False) airy = models.AiryDisk2D(spot.max(), size, size, 0.6) p_init = airy fit_p = fitting.LevMarLSQFitter() a = fit_p(p_init, X, Y, spot) print a model = p(X, Y) fileIO.writeFITS(model, out+'BasicModelA.fits', int=False) fileIO.writeFITS(model - spot, out+'BasicModelResidualA.fits', int=False) #goodness of fit gof = (1./(len(data)-5.)) * np.sum((model.flatten() - data)**2 / var) print 'GoF:', gof print 'Done' #MCMC based fitting if 'Gaus' in CCDPSFmodel: ndim = 7 print 'Model with a Gaussian CCD PSF, %i dimensions' % ndim #Choose an initial set of positions for the walkers - fairly large area not to bias the results #amplitude, center_x, center_y, radius, focus, width_x, width_y = theta p0 = np.zeros((nwalkers, ndim)) p0[:, 0] = np.random.uniform(max, 2.*max, size=nwalkers) # amplitude p0[:, 1] = np.random.uniform(7., 14., size=nwalkers) # x p0[:, 2] = np.random.uniform(7., 14., size=nwalkers) # y p0[:, 3] = np.random.uniform(.1, 1., size=nwalkers) # radius p0[:, 4] = np.random.uniform(.1, 1., size=nwalkers) # focus p0[:, 5] = np.random.uniform(.1, 0.5, size=nwalkers) # width_x p0[:, 6] = np.random.uniform(.1, 0.5, size=nwalkers) # width_y # Initialize the sampler with the chosen specs. 
#Create the coordinates x and y x = np.arange(0, spot.shape[1]) y = np.arange(0, spot.shape[0]) #Put the coordinates in a mesh xx, yy = np.meshgrid(x, y) #Flatten the arrays xx = xx.flatten() yy = yy.flatten() #initiate sampler pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorG, args=[xx, yy, data, var], pool=pool) # Run a burn-in and set new starting position print "Burning-in..." pos, prob, state = sampler.run_mcmc(p0, burn) best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()] pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers) # Reset the chain to remove the burn-in samples. sampler.reset() # Starting from the final position in the burn-in chain print "Running MCMC..." pos, prob, state = sampler.run_mcmc(pos, burn) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state) # Print out the mean acceptance fraction print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) #Get the index with the highest probability maxprob_index = np.argmax(prob) #Get the best parameters and their respective errors and print best fits params_fit = pos[maxprob_index] errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)] _printResults2(params_fit, errors_fit, model=CCDPSFmodel) #Best fit model amplitude, center_x, center_y, radius, focus, width_x, width_y = params_fit airy = models.AiryDisk2D(amplitude, center_x, center_y, radius) adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape) f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.) focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape) foc = signal.convolve2d(adata, focusdata, mode='same') CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.) CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape) model = signal.convolve2d(foc, CCDdata, mode='same') #save model fileIO.writeFITS(model, out+'model.fits', int=False) #residuals fileIO.writeFITS(model - spot, out+'residual.fits', int=False) fileIO.writeFITS(((model-spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False) #results _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6]) #plot samples = sampler.chain[:, burn:, :].reshape((-1, ndim)) fig = triangle.corner(samples, labels=['amplitude', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y']) fig.savefig(out+'Triangle.png') elif 'Cross' in CCDPSFmodel: ndim = 8 print 'Model with a Cross CCD PSF, %i dimensions' % ndim #amplitude, center_x, center_y, radius, focus, width_x, width_y, width_d = theta # Choose an initial set of positions for the walkers using the Gaussian fit p0 = [np.asarray([1.3*max,#p.amplitude.value, p.x_mean.value, p.y_mean.value, np.max([p.x_stddev.value, p.y_stddev.value]), 0.5, 0.08, 0.1, 0.01]) + 1e-3*np.random.randn(ndim) for i in xrange(nwalkers)] # Initialize the sampler with the chosen specs. #Create the coordinates x and y x = np.arange(0, spot.shape[1]) y = np.arange(0, spot.shape[0]) #Put the coordinates in a mesh xx, yy = np.meshgrid(x, y) #Flatten the arrays xx = xx.flatten() yy = yy.flatten() #initiate sampler pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorC, args=[xx, yy, data, var], pool=pool) # Run a burn-in and set new starting position print "Burning-in..." 
pos, prob, state = sampler.run_mcmc(p0, burn) best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()] pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers) # Reset the chain to remove the burn-in samples. sampler.reset() # Starting from the final position in the burn-in chain print "Running MCMC..." pos, prob, state = sampler.run_mcmc(pos, burn) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state) # Print out the mean acceptance fraction print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) #Get the index with the highest probability maxprob_index = np.argmax(prob) #Get the best parameters and their respective errors and print best fits params_fit = pos[maxprob_index] errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)] _printResults2(params_fit, errors_fit, model=CCDPSFmodel) #Best fit model amplitude, center_x, center_y, radius, focus, width_x, width_y, width_d = params_fit airy = models.AiryDisk2D(amplitude, center_x, center_y, radius) adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape) f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.) focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape) foc = signal.convolve2d(adata, focusdata, mode='same') #3)Apply CCD diffusion kernel kernel = np.array([[width_d, width_y, width_d], [width_x, 1., width_x], [width_d, width_y, width_d]]) kernel /= kernel.sum() model = signal.convolve2d(foc, kernel, mode='same') #save model fileIO.writeFITS(model, out+'model.fits', int=False) #residuals fileIO.writeFITS(model - spot, out+'residual.fits', int=False) fileIO.writeFITS(((model-spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False) #results print kernel gaus = models.Gaussian2D(kernel.max(), 1.5, 1.5, x_stddev=0.3, y_stddev=0.3) gaus.theta.fixed = True p_init = gaus fit_p = fitting.LevMarLSQFitter() stopy, stopx = kernel.shape X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1)) p = fit_p(p_init, X, Y, kernel) #print p _printFWHM(p.x_stddev.value, p.y_stddev.value, errors_fit[5], errors_fit[6]) #plot samples = sampler.chain[:, burn:, :].reshape((-1, ndim)) fig = triangle.corner(samples, labels=['amplitude', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y', 'width_d']) fig.savefig(out+'Triangle.png') # a simple goodness of fit gof = (1./(len(data)-ndim)) * np.sum((model.flatten() - data)**2 / var) print 'GoF:', gof, ' Maximum difference:', np.max(np.abs(model - spot))
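# The spot-location step shared by the fitters above, isolated as a small helper: find the
# brightest pixel inside a rough cutout with maximum_position, then extract a
# (2*size+1) x (2*size+1) postage stamp around it. The helper name and the synthetic frame are
# illustrative only.
import numpy as np
from scipy.ndimage import maximum_position

def extract_spot(image, spotx, spoty, size=10):
    # rough cutout around the nominal position, to avoid locking onto e.g. cosmic rays elsewhere
    rough = image[spoty - 3*size:spoty + 3*size + 1,
                  spotx - 3*size:spotx + 3*size + 1]
    y, x = maximum_position(rough)
    spot = rough[y - size:y + size + 1, x - size:x + size + 1].copy()
    ccd_y, ccd_x = maximum_position(spot)        # peak pixel inside the stamp (CCD kernel position)
    return spot, (ccd_x, ccd_y)

frame = np.random.normal(100., 4.5, size=(256, 256))   # fake offset + readnoise background
frame[120, 130] += 5.0e3                                # the "spot"
spot, peak = extract_spot(frame, spotx=128, spoty=128, size=10)
print(spot.shape, peak)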
def forwardModel(file, out='Data', gain=3.1, size=10, burn=20, spotx=2888, spoty=3514, run=50, simulation=False, truths=None): """ Forward models the spot data found from the input file. Can be used with simulated and real data. Notes: - The emcee is run three times as it is important to have a good starting point for the final run. - It is very important to have the amplitude well estimated, otherwise it is difficult to get good parameter estimates. """ print '\n\n\n' print '_'*120 print 'Processing:', file #get data and convert to electrons o = pf.getdata(file)*gain if simulation: data = o else: #roughly the correct location - to avoid identifying e.g. cosmic rays data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy() #maximum position within the cutout y, x = m.maximum_position(data) #spot and the peak pixel within the spot, this is also the CCD kernel position spot = data[y-size:y+size+1, x-size:x+size+1].copy() CCDy, CCDx = m.maximum_position(spot) print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy #bias estimate if simulation: bias = 9000. rn = 4.5 else: bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20]) print 'Readnoise (e):', rn if rn < 2. or rn > 6.: print 'NOTE: suspicious readout noise estimate...' print 'ADC offset (e):', bias #remove bias spot -= bias #save to file fileIO.writeFITS(spot, out+'small.fits', int=False) #make a copy ot generate error array data = spot.copy().flatten() data[data + rn**2 < 0.] = 0. #set highly negative values to zero #assume errors scale as sqrt of the values + readnoise #sigma = np.sqrt(data/gain + rn**2) var = data.copy() + rn**2 #maximum value max = np.max(spot) print 'Maximum Value:', max #MCMC based fitting print 'Bayesian Fitting...' ndim = 7 nwalkers = 1000 #Choose an initial set of positions for the walkers - fairly large area not to bias the results #amplitude, center_x, center_y, radius, focus, width_x, width_y = theta p0 = np.zeros((nwalkers, ndim)) p0[:, 0] = np.random.uniform(max, 2.*max, size=nwalkers) # amplitude p0[:, 1] = np.random.uniform(7., 14., size=nwalkers) # x p0[:, 2] = np.random.uniform(7., 14., size=nwalkers) # y p0[:, 3] = np.random.uniform(.1, 1., size=nwalkers) # radius p0[:, 4] = np.random.uniform(.1, 1., size=nwalkers) # focus p0[:, 5] = np.random.uniform(.1, 0.5, size=nwalkers) # width_x p0[:, 6] = np.random.uniform(.1, 0.5, size=nwalkers) # width_y # Initialize the sampler with the chosen specs. #Create the coordinates x and y x = np.arange(0, spot.shape[1]) y = np.arange(0, spot.shape[0]) #Put the coordinates in a mesh xx, yy = np.meshgrid(x, y) #Flatten the arrays xx = xx.flatten() yy = yy.flatten() #initiate sampler pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var], pool=pool) # Run a burn-in and set new starting position print "Burning-in..." pos, prob, state = sampler.run_mcmc(p0, burn) best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()] pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers) # Reset the chain to remove the burn-in samples. sampler.reset() # Starting from the final position in the burn-in chain print "Running MCMC..." 
pos, prob, state = sampler.run_mcmc(pos, burn) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state) # Print out the mean acceptance fraction print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) #Get the index with the highest probability maxprob_index = np.argmax(prob) #Get the best parameters and their respective errors and print best fits params_fit = pos[maxprob_index] errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)] amplitudeE, center_xE, center_yE, radiusE, focusE, width_xE, width_yE = errors_fit _printResults(params_fit, errors_fit) #Best fit model amplitude, center_x, center_y, radius, focus, width_x, width_y = params_fit airy = models.AiryDisk2D(amplitude, center_x, center_y, radius) adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape) f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.) focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape) foc = signal.convolve2d(adata, focusdata, mode='same') CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.) CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape) model = signal.convolve2d(foc, CCDdata, mode='same') #save model fileIO.writeFITS(model, out+'model.fits', int=False) #residuals fileIO.writeFITS(model - spot, out+'residual.fits', int=False) fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False) # a simple goodness of fit gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var) print 'GoF:', gof, ' Maximum difference:', np.max(np.abs(model - spot)) #results and save results _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6]) res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, out=out, peakvalue=max, CCDmodel=CCD, CCDmodeldata=CCDdata, GoF=gof) fileIO.cPickleDumpDictionary(res, out+'.pkl') #plot samples = sampler.chain.reshape((-1, ndim)) extents = None if simulation: extents = [(0.91*truth, 1.09*truth) for truth in truths] extents[1] = (truths[1]*0.995, truths[1]*1.005) extents[2] = (truths[2]*0.995, truths[2]*1.005) extents[3] = (0.395, 0.425) extents[4] = (0.503, 0.517) fig = triangle.corner(samples, labels=['amplitude', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'], truths=truths)#, extents=extents) fig.savefig(out+'Triangle.png') pool.close()
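# The three-stage sampling pattern used by the forwardModel variants above, reduced to a
# self-contained sketch: a first burn-in, walkers re-seeded in a tight ball around the best
# sample so far, a second burn-in, then the production run. This assumes the emcee 2.x-style
# API that the code above calls (run_mcmc returning (pos, prob, state), flatlnprobability,
# emcee.utils.sample_ball); the toy Gaussian posterior stands in for log_posterior.
import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)          # stand-in posterior, just to make this runnable

ndim, nwalkers, burn, run = 3, 50, 100, 200
p0 = np.random.uniform(-1., 1., size=(nwalkers, ndim))
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)

pos, prob, state = sampler.run_mcmc(p0, burn)                          # stage 1: burn-in
best = sampler.flatchain[sampler.flatlnprobability.argmax()]
pos = emcee.utils.sample_ball(best, np.abs(best) / 100. + 1e-4, size=nwalkers)
sampler.reset()
pos, prob, state = sampler.run_mcmc(pos, burn)                         # stage 2: refine
sampler.reset()
pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)           # stage 3: production
print("mean acceptance fraction:", np.mean(sampler.acceptance_fraction))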
def forwardModelJointFit(files, out, wavelength, gain=3.1, size=10, burn=50, run=100, spotx=2888, spoty=3514, simulated=False, truths=None): """ Forward models the spot data found from the input files. Models all data simultaneously so that the Airy disc centroid and shift from file to file. Assumes that the spot intensity, focus, and the CCD PSF kernel are the same for each file. Can be used with simulated and real data. """ print '\n\n\n' print '_'*120 images = len(files) orig = [] image = [] noise = [] peakvalues = [] for file in files: print file #get data and convert to electrons o = pf.getdata(file)*gain if simulated: data = o else: #roughly the correct location - to avoid identifying e.g. cosmic rays data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy() #maximum position within the cutout y, x = m.maximum_position(data) #spot and the peak pixel within the spot, this is also the CCD kernel position spot = data[y-size:y+size+1, x-size:x+size+1].copy() orig.append(spot.copy()) #bias estimate if simulated: bias = 9000. rn = 4.5 else: bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20]) print 'Readnoise (e):', rn if rn < 2. or rn > 6.: print 'NOTE: suspicious readout noise estimate...' print 'ADC offset (e):', bias #remove bias spot -= bias #set highly negative values to zero spot[spot + rn**2 < 0.] = 0. max = np.max(spot) print 'Maximum Value:', max peakvalues.append(max) #noise model variance = spot.copy() + rn**2 #save to a list image.append(spot) noise.append(variance) #sensibility test, try to check if all the files in the fit are of the same dataset if np.std(peakvalues) > 5*np.sqrt(np.median(peakvalues)): #check for more than 5sigma outliers, however, this is very sensitive to the centroiding of the spot... print 'POTENTIAL OUTLIER, please check the input files...' print np.std(peakvalues), 5*np.sqrt(np.median(peakvalues)) #MCMC based fitting ndim = 2*images + 5 #xpos, ypos for each image and single amplitude, radius, focus, and sigmaX and sigmaY nwalkers = 1000 print 'Bayesian Fitting, model has %i dimensions' % ndim # Choose an initial set of positions for the walkers using the Gaussian fit p0 = np.zeros((nwalkers, ndim)) for x in xrange(images): p0[:, 2*x] = np.random.uniform(7., 14., size=nwalkers) # x p0[:, 2*x+1] = np.random.uniform(7., 14., size=nwalkers) # y p0[:, -5] = np.random.uniform(max, 2.*max, size=nwalkers) # amplitude p0[:, -4] = np.random.uniform(.1, 1., size=nwalkers) # radius p0[:, -3] = np.random.uniform(.1, 1., size=nwalkers) # focus p0[:, -2] = np.random.uniform(.1, 0.5, size=nwalkers) # width_x p0[:, -1] = np.random.uniform(.1, 0.5, size=nwalkers) # width_y # Initialize the sampler with the chosen specs. #Create the coordinates x and y x = np.arange(0, spot.shape[1]) y = np.arange(0, spot.shape[0]) #Put the coordinates in a mesh xx, yy = np.meshgrid(x, y) #Flatten the arrays xx = xx.flatten() yy = yy.flatten() #initiate sampler pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorJoint, args=[xx, yy, image, noise], pool=pool) # Run a burn-in and set new starting position print "Burning-in..." pos, prob, state = sampler.run_mcmc(p0, burn) best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()] pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers) # Reset the chain to remove the burn-in samples. 
sampler.reset() # Starting from the final position in the burn-in chain print "Running MCMC..." pos, prob, state = sampler.run_mcmc(pos, burn) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state) # Print out the mean acceptance fraction print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction) #Get the index with the highest probability maxprob_index = np.argmax(prob) #Get the best parameters and their respective errors and print best fits params_fit = pos[maxprob_index] errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)] print params_fit #unpack the fixed parameters amplitude, radius, focus, width_x, width_y = params_fit[-5:] amplitudeE, radiusE, focusE, width_xE, width_yE = errors_fit[-5:] #print results _printFWHM(width_x, width_y, width_xE, width_yE) #save the best models per file size = size*2 + 1 gofs = [] for index, file in enumerate(files): #path, file = os.path.split(file) id = 'test/' + out + str(index) #X and Y are always in pairs center_x = params_fit[2*index] center_y = params_fit[2*index+1] #1)Generate a model Airy disc airy = models.AiryDisk2D(amplitude, center_x, center_y, radius) adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape((size, size)) #2)Apply Focus f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.) focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape((size, size)) model = signal.convolve2d(adata, focusdata, mode='same') #3)Apply CCD diffusion, approximated with a Gaussian CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.) CCDdata = CCD.eval(xx, yy, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size)) model = signal.convolve2d(model, CCDdata, mode='same') #save the data, model and residuals fileIO.writeFITS(orig[index], id+'data.fits', int=False) fileIO.writeFITS(image[index], id+'datafit.fits', int=False) fileIO.writeFITS(model, id+'model.fits', int=False) fileIO.writeFITS(model - image[index], id+'residual.fits', int=False) fileIO.writeFITS(((model - image[index])**2 / noise[index]), id+'residualSQ.fits', int=False) #a simple goodness of fit gof = (1./(np.size(image[index])*images - ndim)) * np.sum((model - image[index])**2 / noise[index]) print 'GoF:', gof, ' Max difference', np.max(np.abs(model - image[index])) gofs.append(gof) #save results res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, files=files, out=out, wavelength=wavelength, peakvalues=np.asarray(peakvalues), CCDmodel=CCD, CCDmodeldata=CCDdata, GoFs=gofs) fileIO.cPickleDumpDictionary(res, 'test/' + out + '.pkl') #plot samples = sampler.chain.reshape((-1, ndim)) #extents = None #if simulated: # extents = [(0.9*truth, 1.1*truth) for truth in truths] # print extents fig = triangle.corner(samples, labels=['x', 'y']*images + ['amplitude', 'radius', 'focus', 'width_x', 'width_y'], truths=truths)#, extents=extents) fig.savefig('test/' + out + 'Triangle.png') pool.close()
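# How the joint parameter vector above is laid out and unpacked: an (x, y) centroid pair per
# image followed by the five parameters shared across all images (amplitude, radius, focus,
# width_x, width_y). The helper is illustrative; the original code indexes the vector directly.
import numpy as np

def unpack_joint_theta(theta, n_images):
    theta = np.asarray(theta)
    assert theta.size == 2 * n_images + 5
    centers = theta[:2 * n_images].reshape(n_images, 2)      # (x, y) per exposure
    amplitude, radius, focus, width_x, width_y = theta[-5:]  # shared parameters
    return centers, amplitude, radius, focus, width_x, width_y

theta = np.concatenate([np.tile([10.2, 9.8], 3), [5.0e4, 0.45, 0.40, 0.2, 0.3]])
centers, amp, radius, focus, wx, wy = unpack_joint_theta(theta, n_images=3)
print(centers)
print(amp, radius, focus, wx, wy)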
def fitgauss (infile,gu=np.array([]),o=[],docrt=0,dotv=1,ngauss=1,tiewidth=0): a = getdata (infile) b = edgenoise(a,5) a -= b[1] if (dotv): plt.imshow(a) cur = np.around(pcurs()) ngauss = cur.shape[0] else: # find brightest ngauss separated points, will be # overwritten if user has supplied something aa = np.copy (a) temp = measurements.maximum_position (aa) cur=np.array([[float(temp[1]),float(temp[0])]]) for i in range (1,ngauss): for j in range (aa.shape[0]): for k in range (aa.shape[1]): xdist = cur[i-1][0]-k ydist = cur[i-1][1]-j if np.sqrt(xdist*xdist+ydist*ydist)<3.0: aa[j,k]=0.0 temp = measurements.maximum_position (aa) cur=np.vstack((cur,[temp[1],temp[0]])) if docrt > -1: print (cur) # ---------------------------------------------------------------- fluxes = np.zeros_like(cur[:,0]) widths = np.zeros_like(fluxes) for i in range (ngauss): widths[i] = np.median([a[int(cur[i,1])+1,int(cur[i,0])],\ a[int(cur[i,1])-1,int(cur[i,0])],a[int(cur[i,1]),int(cur[i,0])+1],\ a[int(cur[i,1]),int(cur[i,0])-1]]) widths[i] = np.sqrt(1.0/np.log(a[int(cur[i,1]),int(cur[i,0])]/widths[i])) if widths[i]<1.5 or np.isnan(widths[i]): widths[i]=2.0 fluxes[i] = a[int(cur[i,1]),int(cur[i,0])]*np.pi*widths[i]*widths[i] g = np.hstack ((cur, np.array(fluxes,ndmin=2).transpose(),\ np.array(widths,ndmin=2).transpose())) for i in range (gu.shape[0]): for j in range (4): try: if gu[i,j] != 0.0: g[i,j]=gu[i,j] except IndexError: pass if docrt > -1: print ('Initial guesses:',g) if o==[]: o=[0,0] for i in range(ngauss): for j in range(4): if i or j: o=np.vstack((o,[i,j])) x0=[] for i in range (len(o)): x0 = np.append(x0,g[o[i][0],o[i][1]]) # ---------------------------------------------------------------- xopt = fmin (fg_func,x0,args=[a.ravel(),a.shape,g,b,o,tiewidth],disp=0) m = np.zeros_like(a) for i in range (g.shape[0]): for j in range (len(o)): if o[j][0]==i: g[i][o[j][1]]=xopt[j] m = m+mkgauss(a.shape[::-1], (g[i][0],g[i][1]), g[i][2], g[i][3]) if (docrt>0): print (xopt, ((a-m)*(a-m)).sum(), (a-m).max(),(a-m).min()) plt.subplot(221);plt.imshow(a);plt.colorbar() plt.subplot(222);plt.imshow(m);plt.colorbar() plt.subplot(223);plt.imshow(a-m);plt.colorbar();plt.show() # ---------------------------------------------------------------- if docrt > -1: print ('Final array:',g) return xopt, g
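# A compact restatement of the peak search inside fitgauss above: take the global maximum,
# blank a small disc around it, and repeat. The explicit double loop over pixels is replaced by
# a coordinate-grid mask; the function name and the toy image are illustrative.
import numpy as np
from scipy.ndimage import maximum_position

def brightest_separated_points(image, n, min_sep=3.0):
    """Return (x, y) positions of the n brightest pixels at least min_sep apart."""
    work = image.copy()
    yy, xx = np.indices(work.shape)
    points = []
    for _ in range(n):
        y, x = maximum_position(work)
        points.append((float(x), float(y)))                        # (x, y) ordering, as in fitgauss
        work[(xx - x) ** 2 + (yy - y) ** 2 < min_sep ** 2] = 0.0   # blank the neighbourhood
    return np.array(points)

img = np.zeros((40, 40))
img[10, 12] = 5.0
img[25, 30] = 4.0
print(brightest_separated_points(img, n=2))   # -> [[12. 10.] [30. 25.]]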
# move "band below" up tho current band band_below_tray[topo] = ll # find and label connected components topo_con[:, :, ll], ncomponents[ll] = label(topo, structure) # record band_below for each peak peaklist.loc[:, "band_below"] = band_below_tray[peaks_xy] print("Calculating prominence by band") print("=============================") col_elev_tray = elev.copy() # record approximate elevation of col # for each topo_con layer (top to bottom) for ll in range(len(topo_elev) - 1, -1, -1): # find maximum within each neighborhood comp_peaks = maximum_position(elev, labels=topo_con[:, :, ll], index=list(range(1, ncomponents[ll] + 1))) # adjust estimation of col for current peaks col_elev_tray[tuple(np.array(list(zip(*comp_peaks))))] = topo_elev[ll] print(len(topo_elev) - ll, " of ", len(topo_elev)) # record col_elev for each peak peaklist.loc[:, "col_elev"] = col_elev_tray[peaks_xy] # estimate prominence by distance from peak height to col (assume col halfway between topo layers) peaklist.loc[:, "prom_expected"] = peaklist.elev - (peaklist.col_elev - z_step / 2) print("Calculating isolation by band") print("=============================") # estimate isolation iso_init = np.sqrt(ras.rows**2 + ras.cols**2) * ras.T0[ 0] # isolation = size of site unless found otherwise
"intr. period (" + opt.unit + ")", "log(SP)")) else: fifi.write("%4s & %8s & %8s & %8s \\\\ \hline \n" % ("$s$", "$\sigma \, (" + unitlon + "$/" + opt.unit + ")", "period (" + opt.unit + ")", "log(SP)")) #fifi.write("---------------------------------------\n") # -- initialize while loop search = np.empty_like(spec) search[:, :] = spec[:, :] zelab = search > 0 # (all elements) itit = 1 spowermax = -9999. # -- while loop while itit <= opt.ndom: # -- find dominant mode ij = maximum_position(search, labels=zelab) dominant_wn = specx[ij[0]] dominant_fq = spect[ij[1]] spower = search[ij] # -- break if a given orders of magnitude difference in power if spower < spowermax / 100.: break if (1. / dominant_fq) < lowerperiod: reliable = "x" else: reliable = "o" # -- print result if reliable == "o": # -- correct by zonalwind if needed if opt.zonalwind is not None: dominant_fq_int, dominant_wn_int = intrinsic( 360. * dominant_fq, dominant_wn)