def hemi_merge(self, meth='single', weight=None):
    """
    Merge hemisphere data.

    Parameters
    ----------
    meth : 'single' or 'both'
        'single': keep ROIs that appear in a single hemisphere;
        'both': only keep ROIs that appear in both hemispheres.
    weight : np.array, n_subj x n_roi
        Weight for each ROI.

    Returns
    -------
    None
    """
    if self.type == 'scalar':
        self.roi_name = [self.roi_name[i] for i in np.arange(0, len(self.roi_name), 2)]
        odd_f = np.arange(0, len(self.feat_name), 2)
        self.feat_name = [self.feat_name[i] for i in odd_f]

        if weight is None:
            weight = np.ones(self.meas.shape)
            weight[np.isnan(self.meas)] = 0
        else:
            weight = np.repeat(weight, self.meas.shape[1] // weight.shape[1], axis=1)

        if meth == 'single':
            for f in odd_f:
                meas = self.meas[:, f:f+2]
                bool_nan = np.isnan(meas)
                index = np.logical_xor(bool_nan[:, 0], bool_nan[:, 1])
                value = np.where(np.isnan(meas[index, 0]), meas[index, 1], meas[index, 0])
                meas[index, :] = np.repeat(value[..., np.newaxis], 2, axis=1)
        elif meth == 'both':
            pass

        odd_meas = self.meas[:, odd_f] * weight[:, odd_f]
        even_meas = self.meas[:, odd_f+1] * weight[:, odd_f+1]
        self.meas = (odd_meas + even_meas) / (weight[:, odd_f] + weight[:, odd_f+1])
    else:
        self.roi_name = [self.roi_name[i] for i in np.arange(0, len(self.roi_name), 2)]
        n_subj, n_feat = self.meas.shape
        meas = np.reshape(self.meas, (n_subj, -1, 3))
        odd_f = np.arange(0, meas.shape[1], 2)

        f_index = []
        for i in np.arange(0, meas.shape[1], 2):
            for j in [0, 1, 2]:
                f_index.append(i*3 + j)
        self.feat_name = [self.feat_name[i] for i in f_index]

        if meth == 'single':
            for f in odd_f:
                f_meas = meas[:, f:f+2, :]
                bool_nan = np.isnan(np.prod(f_meas, axis=2))
                index = np.logical_xor(bool_nan[:, 0], bool_nan[:, 1])
                value = np.where(np.isnan(f_meas[index, 0, :]), f_meas[index, 1, :], f_meas[index, 0, :])
                meas[index, f:f+2, :] = np.repeat(value[:, np.newaxis, :], 2, axis=1)
            meas[:, odd_f+1, 0] = -meas[:, odd_f+1, 0]
        elif meth == 'both':
            meas[:, odd_f+1, 0] = -meas[:, odd_f+1, 0]

        self.meas = np.reshape((meas[:, odd_f, :] + meas[:, odd_f+1, :]) / 2, (n_subj, -1))
def pre_compute_threshes_uci(features, label, threshes):
    '''
    Pre-compute, for every feature and every candidate threshold, the
    per-sample error indicators of the corresponding decision stump.

    :param features: list of per-sample feature vectors
    :param label: list of labels in {-1, +1}
    :param threshes: per-feature (is_discrete, threshold_list) pairs
    :return: nested list of per-threshold error indicator arrays
    '''
    threshes_cheatsheet = []
    n, dim = np.shape(features)
    label_plus_one = np.array(label) + 1
    n_ones = np.ones((1, n))[0]
    for i in range(dim):
        cur_f = np.array([x[i] for x in features])
        c_cs = []
        if threshes[i][0]:
            # discrete feature
            for t in threshes[i][1]:
                cur_r = cur_f - t
                cur_r = np.logical_xor(cur_r, n_ones)
                cur_r = np.logical_xor(cur_r, label_plus_one)
                c_cs.append(cur_r)
        else:
            # continuous feature
            for t in threshes[i][1]:
                cur_r = np.sign(cur_f - t) + 1
                cur_r = np.logical_xor(cur_r, label_plus_one)
                c_cs.append(cur_r)
        threshes_cheatsheet.append(c_cs)
    return threshes_cheatsheet
def calc_link_dis(base_side, side1, side2):
    # distance from each point to a link (segment), given the three side
    # lengths of the triangle formed by the point and the segment endpoints
    ans = np.zeros_like(base_side, dtype=float)
    mask = np.ones_like(base_side, dtype=bool)

    # point lies on the link
    mask_on_line = np.logical_and(base_side == side1 + side2, mask)
    mask = np.logical_xor(mask, mask_on_line)
    ans[mask_on_line] = 0

    # the adjacent points on the link are overlapped
    mask_point = np.logical_and(base_side < 1e-10, mask)
    mask = np.logical_xor(mask, mask_point)
    ans[mask_point] = side1[mask_point]

    side1_sqr = side1 * side1
    side2_sqr = side2 * side2
    base_side_sqr = base_side * base_side

    # obtuse case 1
    mask_obtuse1 = np.logical_and(side1_sqr > base_side_sqr + side2_sqr, mask)
    mask = np.logical_xor(mask, mask_obtuse1)
    ans[mask_obtuse1] = side2[mask_obtuse1]

    # obtuse case 2
    mask_obtuse2 = np.logical_and(side2_sqr > base_side_sqr + side1_sqr, mask)
    mask = np.logical_xor(mask, mask_obtuse2)
    ans[mask_obtuse2] = side1[mask_obtuse2]

    # compute height by Heron's formula
    half_p = (base_side[mask] + side1[mask] + side2[mask]) * 0.5  # half perimeter
    area = np.sqrt(half_p * (half_p - side1[mask]) * (half_p - side2[mask]) *
                   (half_p - base_side[mask]))
    ans[mask] = 2 * area / base_side[mask]
    return ans
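# Hedged usage sketch for calc_link_dis above (names here are illustrative,
# not from the source): the inputs are the segment length and the two
# point-to-endpoint distances for each query point.
import numpy as np

seg_a, seg_b = np.array([0.0, 0.0]), np.array([4.0, 0.0])
pts = np.array([[2.0, 3.0], [5.0, 0.0], [2.0, 0.0]])
base = np.full(len(pts), np.linalg.norm(seg_b - seg_a))
s1 = np.linalg.norm(pts - seg_a, axis=1)
s2 = np.linalg.norm(pts - seg_b, axis=1)
print(calc_link_dis(base, s1, s2))  # expected: [3. 1. 0.]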
def export():  # type: () -> None
    node = onnx.helper.make_node(
        'Xor',
        inputs=['x', 'y'],
        outputs=['xor'],
    )

    # 2d
    x = (np.random.randn(3, 4) > 0).astype(bool)
    y = (np.random.randn(3, 4) > 0).astype(bool)
    z = np.logical_xor(x, y)
    expect(node, inputs=[x, y], outputs=[z], name='test_xor2d')

    # 3d
    x = (np.random.randn(3, 4, 5) > 0).astype(bool)
    y = (np.random.randn(3, 4, 5) > 0).astype(bool)
    z = np.logical_xor(x, y)
    expect(node, inputs=[x, y], outputs=[z], name='test_xor3d')

    # 4d
    x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
    y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
    z = np.logical_xor(x, y)
    expect(node, inputs=[x, y], outputs=[z], name='test_xor4d')
def get_land_mask(self, points):
    '''Get a landmask treating lakes, and ponds in islands in lakes,
    as water

    :param points: List of lat, lon pairs
    :type points: :class:`numpy.ndarray` of shape Nx2
    :return: Boolean land mask
    :rtype: :class:`numpy.ndarray` of shape N
    '''
    lats = points[:, 0]
    lons = points[:, 1]
    west, east, south, north = (lons.min(), lons.max(),
                                lats.min(), lats.max())

    relevant_polygons = self.get_polygons_within(west, east, south, north)
    relevant_polygons.sort()

    mask = num.zeros(points.shape[0], dtype=bool)
    for p in relevant_polygons:
        if (p.is_land() or p.is_antarctic_grounding_line() or
                p.is_island_in_lake()):
            mask += p.contains_points(points)
        elif p.is_lake() or p.is_pond_in_island_in_lake():
            water = p.contains_points(points)
            num.logical_xor(mask, water, out=mask)

    return mask
def score(filename):
    """
    Score individual image files for the genetic algorithm.
    The idea is to derive predictive factors for the langmuir performance
    (i.e., max power) based on the connectivity, phase fractions, domain sizes,
    etc. The scoring function should be based on multivariate fits from a
    database of existing simulations. To ensure good results, use robust
    regression techniques and cross-validate the best-fit.

    :param filename: image file name
    :type filename: str
    :return: score (ideally as an estimated maximum power in W/(m^2))
    :rtype: float
    """
    # this works around a weird bug in scipy.misc.imread with 1-bit images:
    # open them with PIL as 8-bit greyscale "L" and then convert to an ndarray
    # (np.asarray stands in for the removed scipy.misc.fromimage)
    pil_img = Image.open(filename)
    image = np.asarray(pil_img.convert("L"))

    width, height = image.shape
    if width != 256 or height != 256:
        print("Size Error:", filename)

    # isize = analyze.interface_size(image)
    ads1, std1 = analyze.average_domain_size(image)

    # we now need to invert the image to get the second domain size
    inverted = (image < image.mean())
    ads2, std2 = analyze.average_domain_size(inverted)

    # overall average domain size
    ads = (ads1 + ads2) / 2.0

    # transfer distances / connectivity
    td1, connect1, td2, connect2 = analyze.transfer_distance(image)

    spots = np.logical_xor(image,
                           ndimage.binary_erosion(image, structure=np.ones((2, 2))))
    erosion = np.count_nonzero(spots)

    spots = np.logical_xor(image,
                           ndimage.binary_dilation(image, structure=np.ones((2, 2))))
    dilation = np.count_nonzero(spots)

    # fraction of phase one
    nonzero = np.count_nonzero(image)
    fraction = float(nonzero) / float(image.size)
    # scores zero at 0, 1 and maximum at 0.5
    ps = fraction * (1.0 - fraction)

    # from simulations with multivariate nonlinear regression
    return (-1.98566e8) + (-1650.14)/ads + (-680.92)*math.pow(ads, 0.25) + \
        1.56236e7*math.tanh(14.5*(connect1 + 0.4)) + \
        1.82945e8*math.tanh(14.5*(connect2 + 0.4)) + \
        2231.32*connect1*connect2 + \
        (-4.72813)*td1 + (-4.86025)*td2 + \
        3.79109e7*ps**8 + \
        0.0540293*dilation + 0.0700451*erosion
def render_points_as_image(self, points, bounds, resolution):
    """Inputs:
    Polygon data: a list of coordinates of points that define the corners
        of a polygon
    Bounds: the top right hand corner of the square inside which all of the
        points in the polygon data will fit (x, x) (these are technically
        coordinates, but they should be the same for the sake of squares)
    Resolution: the resolution of the output image; a single number, all
        output images are square
    Output: a black and white image (stored as a matrix of Booleans)."""
    output_image = np.zeros((resolution, resolution), dtype=bool)
    step_size = bounds[1] * 2.0 / resolution
    # Tack the first point onto the end, to make looping through
    # adjacent pairs of points easier
    points = np.append(points, [points[0]], axis=0)
    # Make sure all points are positive
    points = points + bounds[1]
    # Scale the points so rounding them to whole numbers will place
    # them within the output resolution
    points = points / step_size
    # Round the points to prevent future rounding errors; cast to int so
    # they can be used as array indices
    points = np.floor(points).astype(int)
    for i in range(len(points) - 1):
        # For each pair of points
        p1 = points[i]
        p2 = points[i + 1]
        # Calculate the slope
        slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
        # Then for each step (of 1) in the y-direction from p1 to p2
        for y_step in range(int(np.abs(p2[1] - p1[1]))):
            if slope:
                if p2[1] > p1[1]:
                    # Find which x value corresponds to the new y value
                    # (using the slope)
                    new_y = int(p1[1] + y_step)
                    new_x = int(p1[0] + y_step / slope)
                else:
                    new_y = int(p1[1] - y_step)
                    new_x = int(p1[0] - y_step / slope)
                # Then invert every pixel to the left of the new point.
                # This very nicely fills in the shape, regardless of
                # concavity/convexity.
                output_image[-new_y][0:new_x] = np.logical_xor(True, output_image[-new_y][0:new_x])
    for point in points[:-1]:
        # The above algorithm consistently leaves a couple corners with lines
        # not inverted correctly; this loop fixes that with only a small
        # increase in runtime
        if output_image[-point[1]][0]:
            output_image[-point[1]][0:point[0]] = np.logical_xor(True, output_image[-point[1]][0:point[0]])
    return output_image
def chk_c(arr):
    # running XOR (parity) over all elements
    xr = 0
    for sw in range(arr.size):
        xr = np.logical_xor(xr, arr[sw])
    # flag each element whose XOR with the overall parity is True
    u_vect = np.zeros(arr.size)
    for it in range(arr.size):
        if np.logical_xor(xr, arr[it]):
            u_vect[it] = 1
    return u_vect
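# Hedged worked example for chk_c above (illustrative input): the running
# parity of [1, 0, 1] is 0, so the elements whose XOR with that parity is
# True (the ones) are flagged.
import numpy as np

print(chk_c(np.array([1, 0, 1])))  # expected: [1. 0. 1.]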
def intersect(a, b, c, d):
    # segments a-b and c-d intersect iff c and d lie on opposite sides of
    # line a-b, and a and b lie on opposite sides of line c-d
    a = np.array(a)
    b = np.array(b)
    c = np.array(c)
    d = np.array(d)

    def ccw(a, b, c):
        cross = (a[0, :] - b[0, :]) * (c[1, :] - b[1, :])
        cross -= (a[1, :] - b[1, :]) * (c[0, :] - b[0, :])
        return cross > 0

    c1 = np.logical_xor(ccw(a, b, c), ccw(a, b, d))
    c2 = np.logical_xor(ccw(c, d, a), ccw(c, d, b))
    return np.logical_and(c1, c2)
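# Hedged usage sketch for intersect above: each argument is a 2 x n array of
# point coordinates (row 0 = x, row 1 = y), one column per segment pair.
# The unit diagonal and the anti-diagonal cross, so the result is True.
import numpy as np

a, b = np.array([[0.0], [0.0]]), np.array([[1.0], [1.0]])
c, d = np.array([[0.0], [1.0]]), np.array([[1.0], [0.0]])
print(intersect(a, b, c, d))  # expected: [ True]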
def decision_stupms(D):
    # build all decision stumps for dataset D (last column holds the labels);
    # each entry maps (feature, threshold) -> per-sample error indicators
    stumps = {}
    m, d = D.shape
    X = D[:, :d-1]
    labels = D[:, d-1]
    for feat in range(d - 1):
        thresholds = np.unique(X[:, feat])
        before_min = np.min(thresholds) - 1
        stumps[(feat, before_min)] = np.logical_xor(X[:, feat] > before_min, labels)
        for thres in thresholds:
            stumps[(feat, thres)] = np.logical_xor(X[:, feat] > thres, labels)
    return stumps
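# Hedged usage sketch for the stump builder above (toy data, illustrative):
# two one-feature samples with 0/1 labels in the last column; the entry for
# threshold 0.5 marks which samples the stump "x > 0.5" gets wrong.
import numpy as np

D = np.array([[0.5, 0.0], [1.5, 1.0]])
stumps = decision_stupms(D)
print(stumps[(0, 0.5)])  # expected: [False False], i.e. no errors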
def shape_symmetry(image, center, major_axis, attrs={}, debug=False):
    # pad to make image center coincide with symmetry center
    lesion_mask, _ = pad_for_rotation(image[..., 3], center)

    rotated = rotate(lesion_mask, 90 - major_axis.angle)
    flipped = rotated[:, ::-1]
    diff = np.logical_xor(rotated, flipped)
    pixels_diff = diff.sum() / 2.
    major_ratio = pixels_diff / rotated.sum()

    if debug:
        print("==== Shape Symmetry ====\n"
              "--- Major Axis ---\n"
              "num of pixels   : %d\n"
              "shape sym ratio : %.3f\n" % (pixels_diff, major_ratio))
        plt.subplot(231)
        plt.imshow(rotated)
        plt.subplot(232)
        plt.imshow(flipped)
        plt.subplot(233)
        plt.imshow(diff)

    rotated = rotate(lesion_mask, 180 - major_axis.angle)
    flipped = rotated[:, ::-1]
    diff = np.logical_xor(rotated, flipped)
    pixels_diff = diff.sum() / 2.
    minor_ratio = pixels_diff / rotated.sum()

    if debug:
        print("--- Minor Axis ---\n"
              "num of pixels   : %d\n"
              "shape sym ratio : %.3f\n" % (pixels_diff, minor_ratio))
        plt.subplot(234)
        plt.imshow(rotated)
        plt.subplot(235)
        plt.imshow(flipped)
        plt.subplot(236)
        plt.imshow(diff)
        plt.show()

    attrs.update([
        ('Shape Asymmetry Major Ratio', major_ratio),
        ('Shape Asymmetry Minor Ratio', minor_ratio),
        ('--Shape Asymmetry Score',
         (major_ratio > 0.13) * 1 + (minor_ratio > 0.15) * 1),
    ])
def black_tophat(image, selem=None, out=None): """Return black top hat of an image. The black top hat of an image is defined as its morphological closing minus the original image. This operation returns the dark spots of the image that are smaller than the structuring element. Note that dark spots in the original image are bright spots after the black top hat. Parameters ---------- image : ndarray Image array. selem : ndarray, optional The neighborhood expressed as a 2-D array of 1's and 0's. If None, use cross-shaped structuring element (connectivity=1). out : ndarray, optional The array to store the result of the morphology. If None is passed, a new array will be allocated. Returns ------- out : array, same shape and type as `image` The result of the morphological black top hat. Examples -------- >>> # Change dark peak to bright peak and subtract background >>> import numpy as np >>> from skimage.morphology import square >>> dark_on_grey = np.array([[7, 6, 6, 6, 7], ... [6, 5, 4, 5, 6], ... [6, 4, 0, 4, 6], ... [6, 5, 4, 5, 6], ... [7, 6, 6, 6, 7]], dtype=np.uint8) >>> black_tophat(dark_on_grey, square(3)) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 5, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=uint8) """ if out is image: original = image.copy() else: original = image out = closing(image, selem, out=out) if np.issubdtype(out.dtype, np.bool_): np.logical_xor(out, original, out=out) else: out -= original return out
def circleNum(imageMatrix):
    """
    number of circles of a digit image
    :param imageMatrix: digit image
    :return: number of circles
    """
    # (`reachable` and `argmax` are helpers defined elsewhere in this module)
    M, N = imageMatrix.shape
    visited = np.zeros((M, N), dtype=int)
    stack = [(0, 0)]
    visited[0][0] = 1
    circle = 0
    while True:
        while len(stack) != 0:
            # do DFS to find the connected component
            current = stack[-1]
            available = reachable(current, imageMatrix, visited)
            if len(available) == 0:
                stack.pop()
            else:
                chosen = available[0]
                visited[chosen[0]][chosen[1]] = 1
                stack.append(chosen)
        temp = np.logical_xor(visited, imageMatrix)
        if np.logical_not(temp.all()):
            # there are components unvisited
            circle += 1
            # do DFS in one of the unvisited components
            i, j = argmax(np.logical_not(temp))
            stack.append((i, j))
            visited[i][j] = 1
        else:
            # all components visited
            return circle
def compute_positional_agreement(score_breakdown):
    # requires `from functools import reduce` on Python 3
    score_breakdown = np.array(score_breakdown) > 0
    N, L = score_breakdown.shape
    # 1 - xor gives a per-position-pair agreement indicator for each breakdown
    agreement = reduce(lambda x, y: x + y,
                       [1 - np.logical_xor(score.reshape(L, 1), score)
                        for score in score_breakdown])
    agreement = agreement / float(N)
    return agreement
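# Hedged sketch of what compute_positional_agreement returns (toy input,
# illustrative): entry (i, j) is the fraction of score breakdowns in which
# positions i and j agree on being positive or not.
import numpy as np
from functools import reduce  # needed by the function on Python 3

print(compute_positional_agreement(np.array([[1, 0], [1, 1]])))
# expected: [[1.  0.5]
#            [0.5 1. ]]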
def _extrapolate_out_mask(data, mask, iterations=1):
    """ Extrapolate values outside of the mask.
    """
    if iterations > 1:
        data, mask = _extrapolate_out_mask(data, mask,
                                           iterations=iterations - 1)
    new_mask = ndimage.binary_dilation(mask)
    larger_mask = np.zeros(np.array(mask.shape) + 2, dtype=bool)
    larger_mask[1:-1, 1:-1, 1:-1] = mask
    # Use nans as missing value: ugly
    masked_data = np.zeros(larger_mask.shape + data.shape[3:])
    masked_data[1:-1, 1:-1, 1:-1] = data.copy()
    masked_data[np.logical_not(larger_mask)] = np.nan
    outer_shell = larger_mask.copy()
    outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask)
    outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell)
    extrapolation = list()
    for i, j, k in [(1, 0, 0), (-1, 0, 0),
                    (0, 1, 0), (0, -1, 0),
                    (0, 0, 1), (0, 0, -1)]:
        this_x = outer_shell_x + i
        this_y = outer_shell_y + j
        this_z = outer_shell_z + k
        extrapolation.append(masked_data[this_x, this_y, this_z])

    extrapolation = np.array(extrapolation)
    extrapolation = (np.nansum(extrapolation, axis=0)
                     / np.sum(np.isfinite(extrapolation), axis=0))
    extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0
    new_data = np.zeros_like(masked_data)
    new_data[outer_shell] = extrapolation
    new_data[larger_mask] = masked_data[larger_mask]
    return new_data[1:-1, 1:-1, 1:-1], new_mask
def calcOverCorrected(df):
    # Expect dataframes with fields tilt, startLeft, upDown, respLeftRight.
    # Actually this can only give a legitimate answer when tilt is 0, because
    # only that is an ambiguous stimulus.
    # Canonical case is startLeft, upDown, tilt 0:
    #     1
    # A B
    #     2
    # If you undercorrect, ~respLeftRight
    #     1
    # B A    ~startLeft
    #     2
    # If you undercorrect, respLeftRight
    #     2
    # B A    ~startLeft, ~upDown
    #     1
    # If you undercorrect, respLeftRight. upDown does not affect it.
    if df.ndim == 1:
        anyObjectsInThere = df[['upDown', 'startLeft']].dtype == 'object'  # dtype for series
    elif df.ndim == 2:
        anyObjectsInThere = (df[['upDown', 'startLeft']].dtypes == 'object').any()  # dtypes for dataframe
    if anyObjectsInThere:
        print('ERROR: calcOverCorrected expects relevant columns to not be objects '
              'but instead something interpretable as boolean')
    startLeft = np.array(df['startLeft'])  # make it a numpy array so we can use its elementwise logical operators
    upDown = np.array(df['upDown'])
    respLeftRight = np.array(df['respLeftRight'])
    overCorrected = respLeftRight  # np.logical_not( respLeftRight ) #elementwise not
    # For the canonical case, backslash means overcorrect, fwdslash means
    # undercorrect. Any departure from the canonical case inverts the answer.
    # Use XOR (^) to invert conditionally on another boolean.
    startLeft_not_canonical = np.logical_not(startLeft)  # startLeft = True is the canonical case, so flip otherwise
    overCorrected = np.logical_xor(overCorrected, startLeft_not_canonical)
    return overCorrected
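# Hedged usage sketch for calcOverCorrected (column values are illustrative):
# only the second row departs from the canonical startLeft case, so its
# response is flipped.
import numpy as np
import pandas as pd

trials = pd.DataFrame({'tilt': [0, 0],
                       'startLeft': [True, False],
                       'upDown': [True, True],
                       'respLeftRight': [True, True]})
print(calcOverCorrected(trials))  # expected: [ True False]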
def _select_coords(self, coords, prm):
    """
    Choose between two intersection points on a quadric surface.
    This implementation extends QuadricGM's behaviour by not choosing
    intersections outside the rectangular aperture.

    Arguments:
    coords - a 2 by 3 by n array whose each column is the global coordinates
        of one intersection point of a ray with the sphere.
    prm - the corresponding parametric location on the ray where the
        intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    select = QuadricGM._select_coords(self, coords, prm)

    # assumed no additional parameters to coords
    coords = N.concatenate((coords, N.ones((2, 1, coords.shape[2]))), axis=1)

    local = N.sum(N.linalg.inv(self._working_frame)[None, :2, :, None] *
                  coords[:, None, :, :], axis=2)
    abs_x = abs(local[:, 0, :])
    abs_y = abs(local[:, 1, :])
    outside = abs_x > self._w
    outside |= abs_y > self._h
    inside = (~outside) & (prm > 1e-6)

    select[~N.logical_or(*inside)] = N.nan
    one_hit = N.logical_xor(*inside)
    select[one_hit] = N.nonzero(inside.T[one_hit, :])[1]

    return select
def _select_coords(self, coords, prm):
    """
    Refinement of QuadricGM._select_coords; we want to choose the correct
    intersection point for a set of rays and our *truncated* quadric cone
    surface.

    Arguments:
    coords - a 2 by 3 by n array whose each column is the global coordinates
        of one intersection point of a ray with the surface.
    prm - a 2 by n array (CHECK THIS) giving the parametric location on the
        ray where the intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    select = N.empty(prm.shape[1])
    select.fill(N.nan)

    # Project the hit coordinates in a local frame on the z axis.
    height = N.sum(N.linalg.inv(self._working_frame)[None, 2, :, None] *
                   N.concatenate((coords, N.ones((2, 1, coords.shape[-1]))), axis=1), axis=1)

    # Check if the local_z-projected hit coords are within the actual height
    # of the frustum and if the parameter is positive so that the ray goes ahead.
    inside = (self.z1 <= height) & (height <= self.z2)
    positive = prm > 1e-10
    hitting = inside & positive

    # Choose between the two intersections offered by the surface.
    select[N.logical_and(*hitting)] = 1
    one_hitting = N.logical_xor(*hitting)
    select[one_hitting] = N.nonzero(hitting.T[one_hitting, :])[1]

    return select
def intervalls(self, t, d, condition, frames=False):
    selection = condition(d)
    # if the condition evaluates to true for the last frame
    # (selection[-1] == True and selection[0] == False), the following
    # roll-xor combination will lead to switch_frames[0] == True
    switch_frames = numpy.logical_xor(numpy.roll(selection, 1), selection)
    switch_frames[0] = selection[0]
    switch_frames[-1] = False  # always drop unfinished intervalls

    # detect where the condition changes from true to false; the roll will
    # directly mark the first and last frame where the condition is true
    start_end = switch_frames.nonzero()[0]  # make the returned 0-dimensional tuple an array

    # if the condition is true up to the end, we need to drop the last
    # transition to condition = true, since we cannot return a closed interval
    if start_end.shape[0] % 2 == 1:
        start_end = numpy.reshape(start_end[:-1], [start_end.size // 2, 2])  # reshape the array to contain start-end pairs
    else:
        start_end = numpy.reshape(start_end, [start_end.size // 2, 2])  # reshape the array to contain start-end pairs

    # always drop intervalls already started at t=0
    if selection[0]:
        start_end = start_end[1:]

    if frames:
        return start_end
    else:
        # return the intervalls where the condition is true
        return numpy.array([t[start_end[:, 0]], t[start_end[:, 1]]]).transpose()
def _select_coords(self, coords, prm):
    """
    Refinement of QuadricGM._select_coords; we want to choose the correct
    intersection point for a set of rays and our *truncated* quadric cone
    surface.

    Arguments:
    coords - a 2 by 3 by n array whose each column is the global coordinates
        of one intersection point of a ray with the surface geometry.
    prm - a 2 by n array (CHECK THIS) giving the parametric location on the
        ray where the intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    select = N.empty(prm.shape[1])
    select.fill(N.nan)

    height = N.sum(N.linalg.inv(self._working_frame)[None, 2, :, None] *
                   N.concatenate((coords, N.ones((2, 1, coords.shape[-1]))), axis=1), axis=1)
    inside = (height >= 0) & (height <= self.h)
    positive = prm > 1e-10

    hitting = inside & positive
    select[N.logical_and(*hitting)] = 1
    one_hitting = N.logical_xor(*hitting)
    select[one_hitting] = N.nonzero(hitting.T[one_hitting, :])[1]

    return select
def _shift2boolean(self,
                   q_mesh_shift,
                   is_gamma_center=False,
                   tolerance=1e-5):
    """
    Tolerance is used to judge zero/half grid shift.
    This value is not necessary to be changed usually.
    """
    if q_mesh_shift is None:
        shift = np.zeros(3, dtype='double')
    else:
        shift = np.array(q_mesh_shift, dtype='double')

    diffby2 = np.abs(shift * 2 - np.rint(shift * 2))

    if (diffby2 < 0.01).all():  # zero or half shift
        if is_gamma_center:
            is_shift = [0, 0, 0]
        else:  # Monkhorst-Pack
            diff = np.abs(shift - np.rint(shift))
            is_shift = list(np.logical_xor((diff > 0.1),
                                           (self._mesh % 2 == 0)) * 1)
    else:
        is_shift = None

    return is_shift
def encodeMask(M):
    """
    Encode binary mask M using run-length encoding.
    :param   M (bool 2D array)  : binary mask to encode
    :return: R (object RLE)     : run-length encoding of binary mask
    """
    [h, w] = M.shape
    M = M.flatten(order='F')
    N = len(M)
    counts_list = []
    pos = 0
    # counts
    counts_list.append(1)
    diffs = np.logical_xor(M[0:N-1], M[1:N])
    for diff in diffs:
        if diff:
            pos += 1
            counts_list.append(1)
        else:
            counts_list[pos] += 1
    # if the array starts with 1, prepend a 0 count for the leading zeros
    if M[0] == 1:
        counts_list = [0] + counts_list
    return {'size': [h, w],
            'counts': counts_list,
            }
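# Hedged worked example for encodeMask (illustrative): the mask is flattened
# column-major (order='F'), so runs are counted down the columns, and a mask
# starting with 1 gets a leading 0 count.
import numpy as np

mask = np.array([[1, 0],
                 [1, 1]], dtype=bool)
print(encodeMask(mask))  # expected: {'size': [2, 2], 'counts': [0, 2, 1, 1]}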
def compFit(self, s):
    # appears to count satisfied clauses: a clause (row of self.data, three
    # signed variable indices) is satisfied when at least one literal matches
    # the assignment s
    total = 0
    for i in range(self.data.shape[0]):
        neg = self.data[i] > 0
        total += (np.logical_not(np.logical_xor(int(s[abs(int(self.data[i, 0])) - 1]), neg[0])) or
                  np.logical_not(np.logical_xor(int(s[abs(int(self.data[i, 1])) - 1]), neg[1])) or
                  np.logical_not(np.logical_xor(int(s[abs(int(self.data[i, 2])) - 1]), neg[2])))
    return total
def _support_recovery_norm(self, X_test, relative=False):
    """ accuracy related error pseudo-norm

    Parameters
    ----------
    X_test : positive-definite, symmetric numpy.ndarray of shape (p, p)
        the target precision matrix

    relative : boolean
        whether the error is given as a percentage or as an absolute
        number of counts

    Returns
    -------
    ell0 pseudo-norm between X_test and the estimator
    """
    if relative:
        p = X_test.shape[0]
        c = p * (p - 1)
    else:
        c = 2.
    return np.sum(np.logical_xor(
        np.abs(self.auxiliary_prec_) > machine_eps(0),
        np.abs(X_test) > machine_eps(0))) / c
def _select_coords(self, coords, prm):
    """
    Choose between two intersection points on a quadric surface.
    This implementation extends QuadricGM's behaviour by not choosing
    intersections outside the circular aperture.

    Arguments:
    coords - a 2 by 3 by n array whose each column is the global coordinates
        of one intersection point of a ray with the sphere.
    prm - the corresponding parametric location on the ray where the
        intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    select = N.empty(prm.shape[1])
    select.fill(N.nan)

    positive = prm > 1e-6
    coords = N.concatenate((coords, N.ones((2, 1, coords.shape[2]))), axis=1)
    local_z = N.sum(N.linalg.inv(self._working_frame)[None, 2, :, None] * coords, axis=1)
    under_cut = (local_z <= self._h) & (local_z >= 0)

    hitting = under_cut & positive
    select[N.logical_and(*hitting)] = 1
    one_hitting = N.logical_xor(*hitting)
    select[one_hitting] = N.nonzero(hitting.T[one_hitting, :])[1]

    return select
def _select_coords(self, coords, prm):
    """
    Choose between two intersection points on a quadric surface.
    This is a default implementation that takes the first positive-parameter
    intersection point: the first intersection not behind the ray's vertex
    (positive prm).

    Arguments:
    coords - a 2x3 array whose each row is the global coordinates of one
        intersection point of a ray with the sphere.
    prm - the corresponding parametric location on the ray where the
        intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    is_positive = prm > 1e-9

    select = N.empty(prm.shape[1])
    select.fill(N.nan)

    # If both are positive, use the smaller one
    select[N.logical_and(*is_positive)] = 1

    # If either one is negative, use the positive one
    one_pos = N.logical_xor(*is_positive)
    select[one_pos] = N.nonzero(is_positive.T[one_pos, :])[1]

    return select
def _select_coords(self, coords, prm):
    """
    Choose between two intersection points on a quadric surface.
    This implementation extends QuadricGM's behaviour by not choosing
    intersections higher or lower than half the cylinder height.

    Arguments:
    coords - a 2 by 3 by n array whose each column is the global coordinates
        of one intersection point of a ray with the sphere.
    prm - the corresponding parametric location on the ray where the
        intersection occurs.

    Returns:
    The index of the selected intersection, or None if neither will do.
    """
    select = N.empty(prm.shape[1])
    select.fill(N.nan)

    height = N.sum(N.linalg.inv(self._working_frame)[None, 2, :, None] *
                   N.concatenate((coords, N.ones((2, 1, coords.shape[-1]))), axis=1), axis=1)
    inside = (abs(height) <= self._half_h)
    positive = prm > 1e-10

    hitting = inside & positive
    select[N.logical_and(*hitting)] = 1
    one_hitting = N.logical_xor(*hitting)
    select[one_hitting] = N.nonzero(hitting.T[one_hitting, :])[1]

    return select
def confuse(soll, ist):
    assert soll.shape == ist.shape
    df = pd.DataFrame(list(zip(soll, ist, np.logical_not(np.logical_xor(soll, ist)))),
                      columns=['soll', 'ist', 'TF'])
    df['PN'] = df['soll'].apply(lambda x: 'Pos' if x == 1 else 'Neg')
    df['confuse'] = df[['TF', 'PN']].apply(lambda row: "{}{}".format(row['TF'], row['PN']), axis=1)
    confusion_matrix = np.zeros([3, 3])
    confusion_matrix[0, 0] = df.confuse.value_counts().TrueNeg
    confusion_matrix[1, 1] = df.confuse.value_counts().TruePos
    try:
        confusion_matrix[0, 1] = df.confuse.value_counts().FalsePos
    except Exception:
        print("have no False Positives")
    try:
        confusion_matrix[1, 0] = df.confuse.value_counts().FalseNeg
    except Exception:
        print("have no False Negatives")
    confusion_matrix[0, 2] = confusion_matrix[0, 0] + confusion_matrix[0, 1]
    confusion_matrix[1, 2] = confusion_matrix[1, 0] + confusion_matrix[1, 1]
    confusion_matrix[2, 0] = confusion_matrix[0, 0] + confusion_matrix[1, 0]
    confusion_matrix[2, 1] = confusion_matrix[0, 1] + confusion_matrix[1, 1]
    confusion_matrix[2, 2] = confusion_matrix[0, 0] + confusion_matrix[0, 1] + \
        confusion_matrix[1, 0] + confusion_matrix[1, 1]
    accuracy = float(confusion_matrix[0, 0] + confusion_matrix[1, 1]) / confusion_matrix[2, 2]
    misclass_rate = float(confusion_matrix[1, 0] + confusion_matrix[0, 1]) / confusion_matrix[2, 2]
    assert (accuracy + misclass_rate) == 1
    recall = float(confusion_matrix[1, 1]) / confusion_matrix[1, 2]
    specificity = float(confusion_matrix[0, 0]) / confusion_matrix[0, 2]
    print("accuracy={}, misclassification={}, recall={}, specificity={}".format(
        accuracy, misclass_rate, recall, specificity))
    return accuracy, misclass_rate, recall, specificity
def contiguous_regions(condition):
    """Find contiguous ``True`` regions of the boolean array ``condition``.

    Return a 2D array where the first column is the start index of the region
    and the second column is the end index, found on [so-contiguous]_.

    Parameters
    ----------
    condition : bool array

    Returns
    -------
    idx : ``[[i0_0, i0_1], [i1_0, i1_1], ...]``
        A list of integer couples, with the start and end of each ``True``
        block in the original array

    Notes
    -----
    .. [so-contiguous]
        http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
    """
    # Find the indices of changes in "condition"
    diff = np.logical_xor(condition[1:], condition[:-1])
    idx, = diff.nonzero()
    # We need to start things after the change in "condition". Therefore,
    # we'll shift the index by 1 to the right.
    idx += 1
    if condition[0]:
        # If the start of condition is True prepend a 0
        idx = np.r_[0, idx]
    if condition[-1]:
        # If the end of condition is True, append the length of the array
        idx = np.r_[idx, condition.size]
    # Reshape the result into two columns
    idx.shape = (-1, 2)
    return idx
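# Hedged usage sketch for contiguous_regions (illustrative input): two True
# runs, each reported as a half-open [start, end) index pair.
import numpy as np

print(contiguous_regions(np.array([False, True, True, False, True])))
# expected: [[1 3]
#            [4 5]]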
def calcAnomaly(self, actual, predicted):
    """
    Calculates the anomaly of two SDRs

    Uses the equation presented on the wiki:
    https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo

    To put this in terms of the temporal pooler:
        A is the actual input array at a given timestep
        P is the predicted array that was produced from the previous timestep(s)
        [A - (A && P)] / [A]
    Rephrasing as questions:
        What bits are on in A that are not on in P?
        How does that compare to the total on bits in A?

    Outputs 0 if there's no difference between P and A.
    Outputs 1 if P and A are totally distinct.

    Not a perfect metric - it doesn't credit proximity.
    Next step: combine with a metric for a spatial pooler.
    """
    combined = numpy.logical_and(actual, predicted)
    delta = numpy.logical_xor(actual, combined)
    delta_score = sum(delta)
    actual_score = float(sum(actual))
    return delta_score / actual_score
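# Hedged worked example of the anomaly score above (standalone, illustrative):
# one of the three active bits in `actual` is unpredicted, so the score is 1/3.
import numpy

actual = numpy.array([1, 1, 0, 1])
predicted = numpy.array([1, 0, 0, 1])
combined = numpy.logical_and(actual, predicted)
delta = numpy.logical_xor(actual, combined)
print(sum(delta) / float(sum(actual)))  # expected: 0.333...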
# raise SystemExit

d_parallax = 0.1
# mean_parallax = 0.3
for mean_parallax in [0.58]:  # [0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.05, 1.15]:
    idx_use = np.logical_and(idx_use_coord,
                             np.abs(gaia_data_all['parallax'] - mean_parallax) <= d_parallax)
    gaia_data = gaia_data_all[idx_use]

    gaia_ra_dec = coord.ICRS(ra=gaia_data['ra']*un.deg,
                             dec=gaia_data['dec']*un.deg,
                             distance=1e3/gaia_data['parallax']*un.pc)
    gaia_gal = gaia_ra_dec.transform_to(coord.Galactocentric())

    idx_ob2 = np.in1d(gaia_data['source_id'], cyg_ob2['source_id'])
    idx_ves263 = gaia_data['source_id'] == ves_263_source
    # remove VES 263 from the OB2 member selection if present
    idx_ob2 = np.logical_xor(idx_ob2, idx_ves263)

    ves_263 = gaia_data[gaia_data['source_id'] == ves_263_source]
    print('VES:', ves_263['ra'], ves_263['dec'], ves_263['parallax'], ves_263['parallax_error'])

    # plt.scatter(gaia_gal.x, gaia_gal.y, lw=0, s=6, alpha=0.2, c='black')
    # plt.scatter(gaia_gal.x[idx_ob2], gaia_gal.y[idx_ob2], lw=0, s=12, alpha=1., c='red')
    # plt.scatter(gaia_gal.x[idx_ves263], gaia_gal.y[idx_ves263], lw=0, s=50, alpha=1., c='blue')
    # plt.show()
    # plt.close()

    idx_inparams = np.logical_and(gaia_data['pmra'] >= -0.5, gaia_data['pmra'] <= 0.5)
    idx_inparams = np.logical_and(idx_inparams, gaia_data['pmdec'] >= -4)
    idx_inparams = np.logical_and(idx_inparams, gaia_data['pmdec'] <= -2.5)
    idx_inparams = np.logical_and(idx_inparams, gaia_data['parallax'] >= 0.65)
    idx_inparams = np.logical_and(idx_inparams, gaia_data['parallax'] <= 0.85)
def onp_logical_xor(x1, x2):
    return onp.logical_xor(x1, x2)
def extractFile(self, filename):
    image = imread(filename, 1)

    # apply threshold in order to make the image binary
    bw = image < 120

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared, neighbors=8)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    print(label_image.max())
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(bw, cmap='jet')

    # Setting up variables
    letters = list()
    order = list()
    # if the area of the object/letter is less than 255 remove it from the array
    invalidAreaObjects = 255
    # store a list of all merged rectangle regions
    mergedRectangles = np.array([])
    # array to store non-duplicate rectangles
    uniqueArray = np.array([])
    # array which stores all non-letters (e.g. scribbles on page)
    nonElements = np.array([])
    # the maximum distance between bboxes (cannot be any bigger or all
    # letter rectangles (bbox) would merge)
    bbox_xBoundary = 40
    bbox_yBoundary = 140

    # Loop which finds all rectangles for a given image and appends them to an array
    for region in regionprops(label_image):
        minr, minc, maxr, maxc = region.bbox
        # skip small images
        if region.area > 40:
            # draw rectangle around segmented objects
            selectedRectangle = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                                   fill=False, edgecolor='red', linewidth=2)
            # add new rectangle to array
            mergedRectangles = np.append(mergedRectangles, selectedRectangle)

    # This is where all the logic happens when it comes to merging
    # overlapping/close rectangles around a letter
    for i in range(0, mergedRectangles.size):
        # Select a rectangle from the array to compare
        selectedRectangle = mergedRectangles[i]
        for j in range(0, mergedRectangles.size):
            # go through every other rectangle and run logic gate checks
            previousRectangle = mergedRectangles[j]
            # Check to see whether the two rectangles overlap
            overlap = (selectedRectangle.get_bbox()).fully_overlaps(previousRectangle.get_bbox()) or \
                (selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x1 < bbox_xBoundary and
                 selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x1 > -bbox_xBoundary) and \
                (previousRectangle.get_bbox().y0 < selectedRectangle.get_bbox().y0 <
                 previousRectangle.get_bbox().y1 < selectedRectangle.get_bbox().y1)
            # The next two checks are to see whether the rectangles are within
            # a certain distance of each other, i.e. less than the boundaries.
            checkOne = (selectedRectangle.get_bbox().y0 - previousRectangle.get_bbox().y0 < bbox_yBoundary and
                        selectedRectangle.get_bbox().y0 - previousRectangle.get_bbox().y0 > -bbox_yBoundary) and \
                       (selectedRectangle.get_bbox().x0 - previousRectangle.get_bbox().x0 < bbox_xBoundary and
                        selectedRectangle.get_bbox().x0 - previousRectangle.get_bbox().x0 > -bbox_xBoundary) or \
                       (selectedRectangle.get_bbox().y1 - previousRectangle.get_bbox().y1 < bbox_yBoundary and
                        selectedRectangle.get_bbox().y1 - previousRectangle.get_bbox().y1 > -bbox_yBoundary) and \
                       (selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x1 < bbox_xBoundary and
                        selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x1 > -bbox_xBoundary)
            checkTwo = (selectedRectangle.get_bbox().y0 - previousRectangle.get_bbox().y1 < bbox_yBoundary and
                        selectedRectangle.get_bbox().y0 - previousRectangle.get_bbox().y1 > -bbox_yBoundary) and \
                       (selectedRectangle.get_bbox().x0 - previousRectangle.get_bbox().x1 < bbox_xBoundary and
                        selectedRectangle.get_bbox().x0 - previousRectangle.get_bbox().x1 > -bbox_xBoundary) or \
                       (selectedRectangle.get_bbox().y1 - previousRectangle.get_bbox().y0 < bbox_yBoundary and
                        selectedRectangle.get_bbox().y1 - previousRectangle.get_bbox().y0 > -bbox_yBoundary) and \
                       (selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x0 < bbox_xBoundary and
                        selectedRectangle.get_bbox().x1 - previousRectangle.get_bbox().x0 > -bbox_xBoundary)
            # If statement to check logic gates
            if overlap or checkOne or checkTwo:
                print("Merging... May take a while")
                if previousRectangle.get_bbox().x0 > selectedRectangle.get_bbox().x0:
                    minc = selectedRectangle.get_bbox().x0
                else:
                    minc = previousRectangle.get_bbox().x0
                if previousRectangle.get_bbox().x1 > selectedRectangle.get_bbox().x1:
                    maxc = previousRectangle.get_bbox().x1
                else:
                    maxc = selectedRectangle.get_bbox().x1
                if previousRectangle.get_bbox().y0 > selectedRectangle.get_bbox().y0:
                    minr = selectedRectangle.get_bbox().y0
                else:
                    minr = previousRectangle.get_bbox().y0
                if previousRectangle.get_bbox().y1 > selectedRectangle.get_bbox().y1:
                    maxr = previousRectangle.get_bbox().y1
                else:
                    maxr = selectedRectangle.get_bbox().y1
                # create and store a new rectangle which consists of both previous rectangles
                newRectangle = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                                  fill=False, edgecolor='red', linewidth=2)
                # store new rectangle
                mergedRectangles[j] = newRectangle

    print("Finished Merging removing non-letters")
    # This checks for objects whose areas are smaller than a given amount
    for i in range(0, mergedRectangles.size):
        # if the object is smaller than the given area store it in a new array
        if mergedRectangles[i].get_width() * mergedRectangles[i].get_height() < invalidAreaObjects:
            nonElements = np.append(nonElements, mergedRectangles[i])
    # This line removes all the nonElements from the main array
    mergedRectangles = np.setdiff1d(mergedRectangles, nonElements)

    print("Finished Merging removing duplicates")
    # Storing only unique rectangles in a new array (removing duplicates)
    for i in range(0, mergedRectangles.size):
        # check if value is unique or not
        noDuplicate = False
        for j in reversed(range(0, i)):
            # If the two rectangles are equal move onto the next one
            currentRectangle = mergedRectangles[j].get_bbox()
            comparisonRect = mergedRectangles[i].get_bbox()
            if str(currentRectangle) == str(comparisonRect):
                noDuplicate = True
                break
        if noDuplicate == False:
            uniqueArray = np.append(uniqueArray, mergedRectangles[i])

    # Going through the sorted array and adding the rectangles to the image
    # (checking system)
    for i in range(0, uniqueArray.size):
        ax.add_patch(uniqueArray[i])
        # getting the bbox of that rectangle and storing it in the list
        # `order` for character extraction
        bbox = (int(uniqueArray[i].get_bbox().y0),
                int(uniqueArray[i].get_bbox().x0),
                int(uniqueArray[i].get_bbox().y1),
                int(uniqueArray[i].get_bbox().x1))
        order.append(bbox)

    lines = list()
    first_in_line = ''
    counter = 0

    # I need to sort by y value as the bboxes are scattered randomly within
    # the array, for example (800, 1500, 700).
    # This will not function correctly when using your extract letters code.
    # By ordering the values correctly I can simply import your code to
    # extract the letters.
    order.sort(key=lambda tup: tup[0])

    # worst case scenario there can be 1 character per line
    for x in range(len(order)):
        lines.append([])

    for character in order:
        if first_in_line == '':
            first_in_line = character
            lines[counter].append(character)
        elif abs(character[0] - first_in_line[0]) < (first_in_line[2] - first_in_line[0]):
            lines[counter].append(character)
        elif abs(character[0] - first_in_line[0]) > (first_in_line[2] - first_in_line[0]):
            first_in_line = character
            counter += 1
            lines[counter].append(character)

    for x in range(len(lines)):
        lines[x].sort(key=lambda tup: tup[1])

    final = list()
    prev_tr = 0
    prev_line_br = 0

    for i in range(len(lines)):
        for j in range(len(lines[i])):
            tl_2 = lines[i][j][1]
            bl_2 = lines[i][j][0]
            if tl_2 > prev_tr and bl_2 > prev_line_br:
                tl, tr, bl, br = lines[i][j]
                letter_raw = bw[tl:bl, tr:br]
                letter_norm = resize(letter_raw, (20, 20))
                final.append(letter_norm)
            prev_tr = lines[i][j][3]
            if j == (len(lines[i]) - 1):
                prev_line_br = lines[i][j][2]
                prev_tr = 0
                tl_2 = 0

    print('Characters recognized: ' + str(len(final)))
    return final

def __init__(self):
    print("Extracting characters...")
plt.ylabel('weight coefficient', fontsize=12)
plt.xlabel('C', fontsize=12)
plt.legend(loc=0, fontsize=12)
plt.xscale('log')
plt.tight_layout()
plt.show()

from sklearn.svm import SVC

svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(x_train, y_train)
y_pred = svm.predict(x_test)
print('y_pred', y_pred)

# build a two-class XOR dataset that is not linearly separable
np.random.seed(0)
x_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(x_xor[:, 0] > 0, x_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)

plt.figure(figsize=(6, 4))
plt.scatter(x_xor[y_xor == 1, 0], x_xor[y_xor == 1, 1], c='b', marker='x', label='1')
plt.scatter(x_xor[y_xor == -1, 0], x_xor[y_xor == -1, 1], c='r', marker='s', label='-1')
plt.ylim(-3, 0)
plt.legend(loc=0)
def eval(
        self,
        front: Optional[Union[str, Dict[str, complex], np.ndarray, OperatorBase]] = None
) -> Union[OperatorBase, float, complex]:
    if front is None:
        return self.to_matrix_op()

    # pylint: disable=import-outside-toplevel,cyclic-import
    from ..state_fns.state_fn import StateFn
    from ..state_fns.dict_state_fn import DictStateFn
    from ..state_fns.circuit_state_fn import CircuitStateFn
    from ..list_ops.list_op import ListOp
    from .circuit_op import CircuitOp

    new_front = None

    # For now, always do this. If it's not performant, we can be more granular.
    if not isinstance(front, OperatorBase):
        front = StateFn(front, is_measurement=False)

    if isinstance(front, ListOp) and front.distributive:
        new_front = front.combo_fn([
            self.eval(front.coeff * front_elem)  # type: ignore
            for front_elem in front.oplist
        ])

    else:
        if self.num_qubits != front.num_qubits:
            raise ValueError(
                'eval does not support operands with differing numbers of qubits, '
                '{} and {}, respectively.'.format(self.num_qubits, front.num_qubits))

        if isinstance(front, DictStateFn):
            new_dict = {}  # type: Dict
            corrected_x_bits = self.primitive.x[::-1]  # type: ignore
            corrected_z_bits = self.primitive.z[::-1]  # type: ignore

            for bstr, v in front.primitive.items():
                bitstr = np.asarray(list(bstr)).astype(int).astype(bool)
                new_b_str = np.logical_xor(bitstr, corrected_x_bits)
                new_str = ''.join(map(str, 1 * new_b_str))
                z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits))
                y_factor = np.product(
                    np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j))
                new_dict[new_str] = (v * z_factor * y_factor) + new_dict.get(new_str, 0)
            new_front = StateFn(new_dict, coeff=self.coeff * front.coeff)

        elif isinstance(front, StateFn) and front.is_measurement:
            raise ValueError('Operator composed with a measurement is undefined.')

        # Composable types with PauliOp
        elif isinstance(front, (PauliOp, CircuitOp, CircuitStateFn)):
            new_front = self.compose(front)

        # Covers VectorStateFn and OperatorStateFn
        elif isinstance(front, OperatorBase):
            new_front = self.to_matrix_op().eval(front.to_matrix_op())  # type: ignore

    return new_front
def ugaEvolve(popSize, bitstringLength, probMutation, fitnessFn): """ A Python generator that evolves a population of bitstrings on the specified fitness function using uniform crossover, and a per bit probability of mutation given by probMutation. Selection is as follows: if at least one bitstring evaluates to True under the fitnessFn: Pick 2*popSize parents as close as possible to evenly from amongst the bistrings that evaluate to True under the fitnessFn else: Pick 2*popSize parents evenly from amongst the bitstrings in the existing population (each bitstring gets picked twice) Parameters ---------- popSize : int The size of the population bitstringLength : int The length of each bitstring in the population probMutation : float The probability that a given bit will be mutated in a single generation fitnessFn : Python function The fitness function to be used """ # initialize a population of bitstrings drawn uniformly at random pop = rand(popSize, bitstringLength) < 0.5 recombinationMasksRepo = rand(popSize * 10, bitstringLength) < 0.5 mutationMasksRepo = rand(popSize * 10, bitstringLength) < probMutation while True: # evaluate fitness of each bitstring in the population fitnessVals = fitnessFn(pop) # calculate the oneFrequency of all bitstringLength attributes oneFreqs = pop.sum(axis=0, dtype=float) / popSize yield dict(oneFreqs=oneFreqs, fitnessVals=fitnessVals) # select parents nonZeroFitnessValIndices = fitnessVals.nonzero()[0] if len(nonZeroFitnessValIndices): parentIndices = nonZeroFitnessValIndices[arange( 2 * popSize, dtype=int) % len(nonZeroFitnessValIndices)] shuffle(parentIndices) else: parentIndices = arange(2 * popSize) % popSize shuffle(parentIndices) # recombine the parents using uniform crossover to generate # one offspring per parent pair recombinationMasks = recombinationMasksRepo[ randint(popSize * 10, size=popSize), :] newPop = pop[parentIndices[:popSize], :] newPop[recombinationMasks] = pop[ parentIndices[popSize:], :][recombinationMasks] # mutate the offspring mutationMasks = mutationMasksRepo[randint(popSize * 10, size=popSize), :] pop = logical_xor(newPop, mutationMasks)
np.subtract( # pylint: disable=g-long-lambda logits, reduce_logsumexp( logits, -1 if axis is None else axis, keepdims=True)))) logical_and = utils.copy_docstring( tf.math.logical_and, lambda x, y, name=None: np.logical_and(x, y)) logical_not = utils.copy_docstring(tf.math.logical_not, lambda x, name=None: np.logical_not(x)) logical_or = utils.copy_docstring(tf.math.logical_or, lambda x, y, name=None: np.logical_or(x, y)) logical_xor = utils.copy_docstring( tf.math.logical_xor, lambda x, y, name=None: np.logical_xor(x, y)) maximum = utils.copy_docstring(tf.math.maximum, lambda x, y, name=None: np.maximum(x, y)) minimum = utils.copy_docstring(tf.math.minimum, lambda x, y, name=None: np.minimum(x, y)) multiply = utils.copy_docstring(tf.math.multiply, lambda x, y, name=None: np.multiply(x, y)) multiply_no_nan = utils.copy_docstring( tf.math.multiply_no_nan, lambda x, y, name=None: np.where( # pylint: disable=g-long-lambda np.broadcast_to(np.equal(y, 0.), np.array(x).shape), np.zeros_like(np.multiply(x, y)),
def main(): usage = 'usage: %prog [options] <scores_file>' parser = OptionParser(usage) parser.add_option('-d', dest='n_components', default=None, type='int', help='PCA n_components [Default: %default]') parser.add_option( '-e', dest='num_estimators', default=100, type='int', help='Number of random forest estimators [Default: %default]') parser.add_option('-g', dest='genome', default='ce11', help='PhyloP and FASTA genome [Default: %default]') parser.add_option('-i', dest='iterations', default=1, type='int', help='Cross-validation iterations [Default: %default]') parser.add_option('-o', dest='out_dir', default='regr_out') parser.add_option( '-p', dest='parallel_threads', default=1, type='int', help= 'Parallel threads passed to scikit-learn n_jobs [Default: %default]') parser.add_option('-r', dest='random_seed', default=44, type='int') parser.add_option('--stat', dest='sad_stat', default='sum', help='HDF5 key stat to consider. [Default: %default]') (options, args) = parser.parse_args() if len(args) != 1: parser.error('Must provide ISM scores and PhyloP bigwig file.') else: scores_file = args[0] np.random.seed(options.random_seed) options.genome = options.genome.lower() if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################ # read ISM scores with h5py.File(scores_file, 'r') as h5o: score_chrs = [chrm.decode('UTF-8') for chrm in h5o['chr']] score_starts = h5o['start'][:] score_ends = h5o['end'][:] score_strands = [strand.decode('UTF-8') for strand in h5o['strand']] score_seqs = h5o['seqs'][:] nt_scores = h5o[options.sad_stat][:].astype('float16') num_seqs, mut_len, _, num_targets = nt_scores.shape # reference transform nt_scores_ref = np.reshape(nt_scores[score_seqs], (num_seqs, mut_len, num_targets)) # min/max transform nt_scores_min = nt_scores.min(axis=-2) nt_scores_max = nt_scores.max(axis=-2) pos_mask = (nt_scores_ref > 0) nt_scores_refm = nt_scores_ref.copy() nt_scores_refm[pos_mask] -= nt_scores_min[pos_mask] nt_scores_refm[~pos_mask] -= nt_scores_max[~pos_mask] ################################################################ # read phylop bigwig annotations genome_path = os.environ[options.genome.upper()] fasta_file = '%s/assembly/%s.fa' % (genome_path, options.genome) if options.genome == 'ce11': phylop_file = '%s/phylop/ce11.phyloP26way.bw' % genome_path else: print('Genome PhyloP not found', file=sys.stderr) exit(1) seqs_phylop = [] seqs_phylop_dna1 = [] seqs_phylop_mask = np.ones(num_seqs, dtype='bool') fasta_open = pysam.FastaFile(fasta_file) phylop_open = pyBigWig.open(phylop_file, 'r') for si in range(num_seqs): phylop_chr = score_chrs[si] if not phylop_chr.startswith('chr'): phylop_chr = 'chr%s' % phylop_chr # read values try: seq_phylop = phylop_open.values(phylop_chr, score_starts[si], score_ends[si], numpy=True).astype('float16') # read DNA seq_phylop_dna = fasta_open.fetch(score_chrs[si], score_starts[si], score_ends[si]) seq_phylop_dna1 = dna_io.dna_1hot(seq_phylop_dna) # reverse complement if score_strands[si] == '-': seq_phylop = seq_phylop[::-1] seq_phylop_dna1 = dna_io.hot1_rc(seq_phylop_dna1) # save seqs_phylop.append(seq_phylop) seqs_phylop_dna1.append(seq_phylop_dna1) except RuntimeError: print('Ignoring %s:%d-%d; phylop not found.' 
% \ (phylop_chr, score_starts[si], score_ends[si]), file=sys.stderr) seqs_phylop_mask[si] = False # filter for valid sequences nt_scores = nt_scores[seqs_phylop_mask] nt_scores_ref = nt_scores_ref[seqs_phylop_mask] nt_scores_refm = nt_scores_refm[seqs_phylop_mask] score_seqs = score_seqs[seqs_phylop_mask] num_seqs = len(score_seqs) # transform PhyloP seqs_phylop = np.array(seqs_phylop, dtype='float32') seqs_phylop = np.nan_to_num(seqs_phylop) seqs_phylop = np.clip(seqs_phylop, -1.5, 5) # verify DNA seqs_phylop_dna1 = np.array(seqs_phylop_dna1) for si in range(num_seqs): seq_diff = np.logical_xor(score_seqs[si], seqs_phylop_dna1[si]) nts_diff = seq_diff.sum() // 2 if nts_diff != 0: pdb.set_trace() ################################################################ # regression # add positions seqs_pos = np.arange(mut_len) seqs_pos = np.tile(seqs_pos, num_seqs) seqs_pos = np.reshape(seqs_pos, (num_seqs, -1, 1)) # flatten everything # seqs_phylop_flat = seqs_phylop.flatten() # seqs_pos_flat = seqs_pos.flatten() # nt_scores_refm_flat = nt_scores_refm.reshape((-1,num_targets)) # num_pos = nt_scores_refm_flat.shape[0] # form matrix # X_scores = nt_scores_refm_flat # if options.n_components is not None: # options.n_components = min(options.n_components, num_targets) # X_scores = PCA(options.n_components).fit_transform(nt_scores_refm_flat) # X_pos = seqs_pos.reshape(num_pos,1) # X = np.concatenate([X_scores,X_pos], axis=1) X = np.concatenate([nt_scores_refm, seqs_pos], axis=-1) X = X.astype('float32') # regressor r2s, pcors = randfor_cv(X, seqs_phylop, iterations=options.iterations, n_estimators=options.num_estimators, random_state=options.random_seed, n_jobs=options.parallel_threads) # save np.save('%s/r2.npy' % options.out_dir, r2s) np.save('%s/pcor.npy' % options.out_dir, pcors) # print stats iterations = len(r2s) stats_out = open('%s/stats.txt' % options.out_dir, 'w') print('R2 %.4f (%.4f)' % (r2s.mean(), r2s.std() / np.sqrt(iterations)), file=stats_out) print('pR %.4f (%.4f)' % (pcors.mean(), pcors.std() / np.sqrt(iterations)), file=stats_out) stats_out.close()
def ber_pratico(sinal_1, sinal_2):
    # empirical bit error rate: the fraction of positions where the two bit
    # streams differ
    bits_errados = np.sum(np.logical_xor(sinal_1, sinal_2))
    ber = bits_errados / len(sinal_1)
    return ber
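# Hedged usage sketch for ber_pratico (illustrative bit streams): one of the
# five received bits is flipped, so the empirical BER is 0.2.
import numpy as np

tx = np.array([0, 1, 1, 0, 1])
rx = np.array([0, 1, 0, 0, 1])
print(ber_pratico(tx, rx))  # expected: 0.2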
def symmetric(x, y): return np.logical_xor(x, y)
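# Truth-table check for symmetric above: XOR is True exactly when the two
# inputs differ (values illustrative).
import numpy as np

print(symmetric(np.array([True, True, False, False]),
                np.array([True, False, True, False])))
# expected: [False  True  True False]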
random_state=1) clf3 = GaussianNB() clf4 = SVC() # Loading Plotting Utilities import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from mlxtend.plotting import plot_decision_regions import numpy as np xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(300, 2) y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab)
scores, pred, pred_std, loss = np.empty((7)), np.empty((7, 7)), np.empty((7, 7)), np.empty((7, 7)) correct_trend = np.empty((7, 7), dtype=np.int8) for d in range(0, 7): val_idx = np.where(y[:,0] == split_date + d) X_val, y_val = X[val_idx], y[val_idx, 1:].reshape(-1, 7) # Calculate validation score scores[d], pred_all = net.test(X_val, y_val) # Predict future prices in a week pred_all = pred_all.detach().cpu().numpy() pred_std[d] = pred_all.std(axis=0) pred[d] = pred_all.mean(axis=0) # Calculate errors in prediction loss[d] = pred[d] - y_val[0] # Calculate # of positive trends correct_trend[d] = np.logical_xor(pred[d] > 1, y_val[0] < 1) print("Walk-forward Validation at {}({}) Err (mean_score/max/min/correct_trend): {:.6f} {:.6f} {:.6f} {}" .format(print_hist(hist_processed[split_date]), split_date, scores.mean(), loss.max(), loss.min(), list(correct_trend.sum(axis=0)))) print("\tErrors (first days): {}".format(" ".join(["{:.4f}".format(l) for l in loss[:,0]]))) print("\tSD (first days): {}".format(" ".join(["{:.4f}".format(l) for l in pred_std[:,0]]))) print("\tAbsolute errors (avg by day): {}".format(" ".join(["{:.4f}".format(l) for l in abs(loss).mean(axis=0)]))) ## Save model fname_model, fname_weights, fname_weights_h5 = "saved/model_json_week.pkl", "saved/model_weights_week.pkl", 'saved/model_weights_week.h5' print("Persist model to saved/ ...") model_json = model.to_json() model_weights = model.get_weights() pickle.dump(model_json, open(fname_model, 'wb')) pickle.dump(model_weights, open(fname_weights, 'wb')) model.save(fname_weights_h5)
def compose(self, other, qargs=None, front=False, inplace=False): """Return the composition of Paulis. Args: a ({cls}): an operator object. b ({cls}): an operator object. qargs (list or None): Optional, qubits to apply dot product on (default: None). inplace (bool): If True update in-place (default: False). Returns: {cls}: The operator a.compose(b) Raises: QiskitError: if number of qubits of other does not match qargs. """.format(cls=type(self).__name__) # Validation if qargs is None and other.num_qubits != self.num_qubits: raise QiskitError( f"other {type(self).__name__} must be on the same number of qubits." ) if qargs and other.num_qubits != len(qargs): raise QiskitError( f"Number of qubits of the other {type(self).__name__} does not match qargs." ) if other._num_paulis not in [1, self._num_paulis]: raise QiskitError("Incompatible BasePaulis. Second list must " "either have 1 or the same number of Paulis.") # Compute phase shift if qargs is not None: x1, z1 = self._x[:, qargs], self._z[:, qargs] else: x1, z1 = self._x, self._z x2, z2 = other._x, other._z # Get phase shift phase = self._phase + other._phase if front: phase += 2 * np.sum(np.logical_and(x1, z2), axis=1) else: phase += 2 * np.sum(np.logical_and(z1, x2), axis=1) # Update Pauli x = np.logical_xor(x1, x2) z = np.logical_xor(z1, z2) if qargs is None: if not inplace: return BasePauli(z, x, phase) # Inplace update self._x = x self._z = z self._phase = phase return self # Qargs update ret = self if inplace else self.copy() ret._x[:, qargs] = x ret._z[:, qargs] = z ret._phase = np.mod(phase, 4) return ret
def xor_stream_random():
    # endless stream of random (input pair, XOR output) training samples
    while True:
        i = N.random.randint(0, 2, 2)
        o = N.int_(N.logical_xor(i[0], i[1]))
        yield (i, o)
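# Hedged usage sketch for the generator above (assumes `import numpy as N`
# as in the snippet): each draw pairs two random bits with their XOR.
gen = xor_stream_random()
inputs, target = next(gen)
assert target == N.logical_xor(inputs[0], inputs[1])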
def add_residual_ratio_plot( self, axis: AxesType, ratio_type: str, data_bin_count: np.ndarray, mc_bin_count: np.ndarray, mc_error_sq: np.ndarray, markers_with_width: bool = True, systematics_are_included: bool = False, marker_color: str = plot_style.KITColors.kit_black, include_outlier_info: bool = False, plot_outlier_indicators: bool = False) -> None: if ratio_type.lower() == "normal": axis.set_ylabel(r"$\frac{\mathrm{Data - MC}}{\mathrm{Data}}$") elif ratio_type.lower() == "vs_uncert": if systematics_are_included: axis.set_ylabel( r"$\frac{\mathrm{Data - MC}}{\sigma_\mathrm{stat + sys}^\mathrm{Data - MC}}$" ) else: axis.set_ylabel( r"$\frac{\mathrm{Data - MC}}{\sigma_\mathrm{stat}^\mathrm{Data - MC}}$" ) else: raise ValueError( f"The provided ratio_type '{ratio_type}' is not valid!\n" f"The ratio_type must be one of {DataMCHistogramPlot.valid_ratio_types}!" ) try: uh_data = unp.uarray(data_bin_count, np.sqrt(data_bin_count)) uh_mc = unp.uarray(mc_bin_count, np.sqrt(mc_error_sq)) if ratio_type.lower() == "normal": divisor = copy.deepcopy(uh_data) elif ratio_type.lower() == "vs_uncert": divisor = unp.uarray(unp.std_devs(uh_data - uh_mc), 0.) else: divisor = None divisor[divisor == 0] = ufloat(0.01, 0.1) ratio = (uh_data - uh_mc) / divisor ratio[(uh_data == 0.) & (uh_mc == 0.)] = ufloat(0., 0.) if ratio_type.lower() == "normal": ratio[np.logical_xor((uh_data == 0.), (uh_mc == 0.))] = ufloat(-99, 0.) max_val = 1. axis.set_ylim(bottom=-1. * max_val, top=1. * max_val) elif ratio_type.lower() == "vs_uncert": max_val_mask = (uh_data != 0.) & (uh_mc != 0.) & ( (uh_data - uh_mc) != 0) max_val = np.around(max( abs( np.min( unp.nominal_values(ratio[max_val_mask]) - unp.std_devs(ratio[max_val_mask]))), abs( np.max( unp.nominal_values(ratio[max_val_mask]) + unp.std_devs(ratio[max_val_mask])))), decimals=1) assert isinstance(max_val, float), (type(max_val), max_val) axis.set_ylim(bottom=-1. * max_val, top=max_val) else: max_val = None axis.axhline(y=0, color=plot_style.KITColors.dark_grey, alpha=0.8) axis.errorbar(self.bin_mids, unp.nominal_values(ratio), yerr=unp.std_devs(ratio), xerr=self.bin_widths / 2 if markers_with_width else None, ls="", marker=".", color=marker_color) if not include_outlier_info: return for bin_mid, r_val, mc_val, data_val in zip( self.bin_mids, ratio, uh_mc, uh_data): if mc_val == 0. and ( (data_val != 0. and ratio_type.lower() != "vs_uncert") or (abs(r_val) > max_val and ratio_type.lower() == "vs_uncert")): axis.text(x=bin_mid, y=+0.1 * max_val, s="No MC", fontsize=5, rotation=90, ha="center", va="bottom") axis.text(x=bin_mid, y=+0.1 * max_val, s=f"#Data={int(unp.nominal_values(data_val))}", fontsize=5, rotation=90, ha="center", va="bottom") elif data_val == 0. and ( (mc_val != 0. 
and ratio_type.lower() != "vs_uncert") or (abs(r_val) > max_val and ratio_type.lower() == "vs_uncert")): axis.text(x=bin_mid, y=+0.1 * max_val, s=f"#MC={unp.nominal_values(mc_val):.0f}", fontsize=5, rotation=90, ha="center", va="bottom") axis.text(x=bin_mid, y=-0.1 * max_val, s="No Data", fontsize=5, rotation=90, ha="center", va="top") elif r_val > 1.0 and plot_outlier_indicators: axis.text(x=bin_mid, y=+0.08 * max_val, s=f"{unp.nominal_values(r_val):3.2f}" + r"$\rightarrow$", fontsize=5, rotation=90, ha="right", va="bottom") elif r_val < -1.0 and plot_outlier_indicators: axis.text(x=bin_mid, y=-0.08 * max_val, s=r"$\leftarrow$" + f"{unp.nominal_values(r_val):3.2f}", fontsize=5, rotation=90, ha="right", va="top") else: pass except ZeroDivisionError: axis.text(x=self.bin_mids[int(np.ceil(len(self.bin_mids) / 2.))], y=0.1, s="DataMCHistogramPlot: ZeroDivisionError occurred!", fontsize=8, ha="center", va="bottom") axis.axhline(y=0, color=plot_style.KITColors.dark_grey, alpha=0.8)
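# A minimal sketch (not part of the plot class) of the two ratio definitions
# used above, written with the uncertainties package; bin counts are made up.
import numpy as np
from uncertainties import unumpy as unp

data = unp.uarray([10., 20., 5.], np.sqrt([10., 20., 5.]))
mc = unp.uarray([12., 18., 4.], [2., 3., 1.])

residual = data - mc
normal_ratio = unp.nominal_values(residual) / unp.nominal_values(data)
pull = unp.nominal_values(residual) / unp.std_devs(residual)  # "vs_uncert" style
print(normal_ratio, pull)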
def test_half_ufuncs(self): """Test the various ufuncs""" a = np.array([0, 1, 2, 4, 2], dtype=float16) b = np.array([-2, 5, 1, 4, 3], dtype=float16) c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) assert_equal(np.equal(a, b), [False, False, False, True, False]) assert_equal(np.not_equal(a, b), [True, True, True, False, True]) assert_equal(np.less(a, b), [False, True, False, False, True]) assert_equal(np.less_equal(a, b), [False, True, False, True, True]) assert_equal(np.greater(a, b), [True, False, True, False, False]) assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) assert_equal(np.logical_and(a, b), [False, True, True, True, True]) assert_equal(np.logical_or(a, b), [True, True, True, True, True]) assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) assert_equal(np.logical_not(a), [True, False, False, False, False]) assert_equal(np.isnan(c), [False, False, False, True, False]) assert_equal(np.isinf(c), [False, False, True, False, False]) assert_equal(np.isfinite(c), [True, True, False, False, True]) assert_equal(np.signbit(b), [True, False, False, False, False]) assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) x = np.maximum(b, c) assert_(np.isnan(x[3])) x[3] = 0 assert_equal(x, [0, 5, 1, 0, 6]) assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) x = np.minimum(b, c) assert_(np.isnan(x[3])) x[3] = 0 assert_equal(x, [-2, -1, -np.inf, 0, 3]) assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) assert_equal(np.square(b), [4, 25, 1, 16, 9]) assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) assert_equal(np.conjugate(b), b) assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) assert_equal(np.negative(b), [2, -5, -1, -4, -3]) assert_equal(np.positive(b), b) assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def xor_(a: Bool = True, b: Bool = False) -> Bool: return np.logical_xor(a, b)
def new_constraint_to_old(con, x0):
    """
    Converts new-style constraint objects to old-style constraint
    dictionaries.
    """
    if isinstance(con, NonlinearConstraint):
        if (con.finite_diff_jac_sparsity is not None or
                con.finite_diff_rel_step is not None or
                not isinstance(con.hess, BFGS) or  # misses user specified BFGS
                con.keep_feasible):
            warn("Constraint options `finite_diff_jac_sparsity`, "
                 "`finite_diff_rel_step`, `keep_feasible`, and `hess` "
                 "are ignored by this method.", OptimizeWarning)

        fun = con.fun
        if callable(con.jac):
            jac = con.jac
        else:
            jac = None

    else:  # LinearConstraint
        if con.keep_feasible:
            warn("Constraint option `keep_feasible` is ignored by this "
                 "method.", OptimizeWarning)

        A = con.A
        if issparse(A):
            A = A.todense()
        fun = lambda x: np.dot(A, x)
        jac = lambda x: A

    # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
    # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
    pcon = PreparedConstraint(con, x0)
    lb, ub = pcon.bounds

    i_eq = lb == ub
    i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
    i_bound_above = np.logical_xor(ub != np.inf, i_eq)
    i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)

    if np.any(i_unbounded):
        warn("At least one constraint is unbounded above and below. Such "
             "constraints are ignored.", OptimizeWarning)

    ceq = []
    if np.any(i_eq):
        def f_eq(x):
            y = np.array(fun(x)).flatten()
            return y[i_eq] - lb[i_eq]
        ceq = [{"type": "eq", "fun": f_eq}]

        if jac is not None:
            def j_eq(x):
                dy = jac(x)
                if issparse(dy):
                    dy = dy.todense()
                dy = np.atleast_2d(dy)
                return dy[i_eq, :]
            ceq[0]["jac"] = j_eq

    cineq = []
    n_bound_below = np.sum(i_bound_below)
    n_bound_above = np.sum(i_bound_above)
    if n_bound_below + n_bound_above:
        def f_ineq(x):
            y = np.zeros(n_bound_below + n_bound_above)
            y_all = np.array(fun(x)).flatten()
            y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
            y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
            return y
        cineq = [{"type": "ineq", "fun": f_ineq}]

        if jac is not None:
            def j_ineq(x):
                dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
                dy_all = jac(x)
                if issparse(dy_all):
                    dy_all = dy_all.todense()
                dy_all = np.atleast_2d(dy_all)
                dy[:n_bound_below, :] = dy_all[i_bound_below]
                dy[n_bound_below:, :] = -dy_all[i_bound_above]
                return dy
            cineq[0]["jac"] = j_ineq

    old_constraints = ceq + cineq

    if len(old_constraints) > 1:
        warn("Equality and inequality constraints are specified in the same "
             "element of the constraint list. For efficient use with this "
             "method, equality and inequality constraints should be "
             "specified in separate elements of the constraint list. ",
             OptimizeWarning)

    return old_constraints
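# A minimal sketch of the bound bookkeeping above: classify each constraint
# row as equality, bounded below, bounded above, or unbounded. The XOR with
# i_eq removes equality rows from the one-sided categories. Bounds made up.
import numpy as np

lb = np.array([0., -np.inf, 1., -np.inf])
ub = np.array([0., 5., np.inf, np.inf])

i_eq = lb == ub
i_bound_below = np.logical_xor(lb != -np.inf, i_eq)  # finite lb, not equality
i_bound_above = np.logical_xor(ub != np.inf, i_eq)   # finite ub, not equality
i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
print(i_eq, i_bound_below, i_bound_above, i_unbounded)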
def make_l5i(prn): xb_offset = l5i_init[prn] n = code_length xb_shift = xb[np.mod(np.arange(xb_offset, xb_offset + n), 8191)] return np.logical_xor(xa, xb_shift)
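# Equivalence sketch: when the generated length equals len(xb), the modular
# indexing above is just a circular shift, so it matches np.roll. The arrays
# and offset here are made-up stand-ins for the module-level xa/xb registers.
import numpy as np

xa = np.random.randint(0, 2, 8191).astype(bool)
xb = np.random.randint(0, 2, 8191).astype(bool)
offset = 266

xb_shift = xb[np.mod(np.arange(offset, offset + 8191), 8191)]
assert np.array_equal(xb_shift, np.roll(xb, -offset))
code = np.logical_xor(xa, xb_shift)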
def draw(self, da, cr):
    r"""Redraw the widget."""
    geometry = [self.get_allocated_width(), self.get_allocated_height()]

    if self.geometry is None:
        adjust_default_sizes(self.g, geometry, self.vprops, self.eprops)
        self.fit_to_window(ink=False)
        self.regenerate_surface()
        self.geometry = geometry

    cr.save()
    cr.set_matrix(self.smatrix)
    c1 = self.pos_to_device((0, 0), surface=True, cr=cr)
    c2 = self.pos_to_device((0, self.base_geometry[1]), surface=True, cr=cr)
    c3 = self.pos_to_device((self.base_geometry[0], 0), surface=True, cr=cr)
    c4 = self.pos_to_device(self.base_geometry, surface=True, cr=cr)
    c = [c1, c2, c3, c4]
    ul = [min([x[0] for x in c]), min([x[1] for x in c])]
    lr = [max([x[0] for x in c]), max([x[1] for x in c])]
    cr.restore()

    if ((ul[0] > 0 or lr[0] < geometry[0] or
         ul[1] > 0 or lr[1] < geometry[1]) or self.lazy_regenerate):
        self.regenerate_surface(reset=True)
    elif self.regenerate_generator is not None:
        self.regenerate_surface()

    if self.background is None:
        # draw checkerboard
        self.background = cairo.ImageSurface(cairo.FORMAT_ARGB32, 14, 14)
        bcr = cairo.Context(self.background)
        bcr.rectangle(0, 0, 7, 7)
        bcr.set_source_rgb(102. / 256, 102. / 256, 102. / 256)
        bcr.fill()
        bcr.rectangle(7, 0, 7, 7)
        bcr.set_source_rgb(153. / 256, 153. / 256, 153. / 256)
        bcr.fill()
        bcr.rectangle(0, 7, 7, 7)
        bcr.set_source_rgb(153. / 256, 153. / 256, 153. / 256)
        bcr.fill()
        bcr.rectangle(7, 7, 7, 7)
        bcr.set_source_rgb(102. / 256, 102. / 256, 102. / 256)
        bcr.fill()
        del bcr
        self.background = cairo.SurfacePattern(self.background)
        self.background.set_extend(cairo.EXTEND_REPEAT)

    cr.set_source(self.background)
    cr.paint()

    cr.save()
    cr.set_matrix(self.smatrix)
    cr.set_source_surface(self.base)
    cr.paint()
    cr.restore()

    if self.picked is not None and self.picked is not False:
        # draw immediate neighbourhood
        if self.selected.fa.sum() == 1:
            vprops = dict(**self.vprops)
            vprops["halo"] = self.highlight
            vprops["halo_color"] = (0.9372549019607843, 0.1607843137254902,
                                    0.1607843137254902, .9)
            vprops["halo_size"] = 1.3
            if self.highlight_color is not None:
                vprops["halo_color"] = self.highlight_color
            eprops = {}
            eprops["color"] = (0.9372549019607843, 0.1607843137254902,
                               0.1607843137254902, .9)
            if "control_points" in self.eprops:
                eprops["control_points"] = self.eprops["control_points"]
            if self.highlight_color is not None:
                eprops["color"] = self.highlight_color

            self.highlight.fa = self.selected.fa
            infect_vertex_property(GraphView(self.g, directed=False),
                                   self.highlight, [True])
            self.highlight.fa = numpy.logical_xor(self.selected.fa,
                                                  self.highlight.fa)
            hsrc = edge_endpoint_property(self.g, self.selected, "source")
            htgt = edge_endpoint_property(self.g, self.selected, "target")
            self.sel_edge_filt.fa = numpy.logical_or(hsrc.fa, htgt.fa)
            u = GraphView(self.g,
                          vfilt=numpy.logical_or(self.highlight.fa,
                                                 self.selected.fa),
                          efilt=self.sel_edge_filt)
            eprops["pen_width"] = self.eprops.get("pen_width",
                                                  _edefaults["pen_width"])
            if isinstance(eprops["pen_width"], PropertyMap):
                pw = eprops["pen_width"]
                pw = u.own_property(pw.copy())
                pw.fa *= 1.1
                eprops["pen_width"] = pw
            else:
                eprops["pen_width"] *= 1.1
            cr.save()
            cr.set_matrix(self.tmatrix * self.smatrix)
            cairo_draw(u, self.pos, cr, vprops, eprops, self.vorder,
                       self.eorder, self.nodesfirst)
            cr.restore()

        # draw selected edges
        vprops = dict(**self.vprops)
        vprops["halo"] = True
        eprops = {}
        u = GraphView(self.g, vfilt=self.selected, efilt=self.sel_edge_filt)
        cr.save()
        cr.set_matrix(self.tmatrix * self.smatrix)
        cairo_draw(u, self.pos, cr, vprops, eprops, self.vorder,
                   self.eorder, self.nodesfirst)
        cr.restore()

    if self.srect is not None:
cr.move_to(self.srect[0], self.srect[1]) cr.line_to(self.srect[0], self.srect[3]) cr.line_to(self.srect[2], self.srect[3]) cr.line_to(self.srect[2], self.srect[1]) cr.line_to(self.srect[0], self.srect[1]) cr.close_path() cr.set_source_rgba(0, 0, 1, 0.3) cr.fill() if self.regenerate_generator is not None: icon = self.render_icon(Gtk.STOCK_EXECUTE, Gtk.IconSize.BUTTON) Gdk.cairo_set_source_pixbuf(cr, icon, 10, 10) cr.paint() if (self.picked is not None and self.picked is not False and not isinstance(self.picked, PropertyMap)): if isinstance(self.display_prop, PropertyMap): txt = str(self.display_prop[self.picked]) else: txt = ", ".join([str(x[self.picked]) for x in self.display_prop]) geometry = [self.get_allocated_width(), self.get_allocated_height()] pos = [10, geometry[1] - 10] cr.set_font_size(self.display_prop_size) ext = cr.text_extents(txt) pad = 8 cr.rectangle(pos[0] - pad / 2, pos[1] - ext[3] - pad / 2, ext[2] + pad, ext[3] + pad) cr.set_source_rgba(1, 1, 1, 1.0) cr.fill() cr.move_to(pos[0], pos[1]) cr.set_source_rgba(0, 0, 0, 1.0) cr.show_text(txt) if self.regenerate_generator is not None: self.queue_draw() return False
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Oct 31 10:52:09 2018 @author: khanhdeux """ import numpy as np import matplotlib.pyplot as plt from lib import plot_decision_regions, get_iris_data from sklearn.svm import SVC np.random.seed(1) X_xor = np.random.randn(200, 2) y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0) y_xor = np.where(y_xor, 1, -1) plt.scatter(X_xor[y_xor == 1, 0], X_xor[y_xor == 1, 1], c='b', marker='x', label='1') plt.scatter(X_xor[y_xor == -1, 0], X_xor[y_xor == -1, 1], c='r', marker='s', label='-1') plt.xlim([-3, 3]) plt.ylim([-3, 3]) plt.legend(loc='best')
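# A plausible continuation of the script above (an assumption, not the
# original source): fit an RBF-kernel SVC on the XOR data and draw its
# decision regions. The plot_decision_regions signature (X, y, classifier=...)
# is assumed for the local lib helper; the hyperparameters are illustrative.
svm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.show()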
def run_samples_lasso(N, B, alpha, theta1, theta2, s1, s2): import myKernels.RandomWalk as rw test_info = pd.DataFrame() k = theta1.shape[0] for sample in tqdm.tqdm(range(N)): Gs1 = [] Gs2 = [] error_1 = [] error_2 = [] n = 50 for i in range(50): x1 = np.random.multivariate_normal(mean=np.zeros(k), cov=theta1, size=100) A1 = np.corrcoef(x1.T) if alpha == 0: np.fill_diagonal(A1, 0) A1[np.abs(A1) < 1e-5] = 0 else: gl = graphical_lasso(A1, alpha=alpha, max_iter=1000) A1 = gl[0] A1[np.abs(A1) < 1e-5] = 0 np.fill_diagonal(A1, 0) Gs1.append(nx.from_numpy_matrix(A1)) error_1.append( np.sum( np.logical_xor( np.abs(np.triu(A1, 1)) > 0, np.abs(np.triu(theta1, 1)) > 0))) x2 = np.random.multivariate_normal(mean=np.zeros(k), cov=theta2, size=100) A2 = np.corrcoef(x2.T) if alpha == 0: np.fill_diagonal(A2, 0) A2[np.abs(A2) < 1e-5] = 0 else: gl = graphical_lasso(A2, alpha=alpha, max_iter=1000) A2 = gl[0] A2[np.abs(A2) < 1e-5] = 0 np.fill_diagonal(A2, 0) Gs2.append(nx.from_numpy_matrix(A2)) error_2.append( np.sum( np.logical_xor( np.abs(np.triu(A2, 1)) > 0, np.abs(np.triu(theta2, 1)) > 0))) Gs = Gs1 + Gs2 try: #rw_kernel = rw.RandomWalk(Gs, c = 0.0001, normalize=0) #K = rw_kernel.fit_ARKU_plus(r = 6, normalize_adj=False, edge_attr= None, verbose=False) graph_list = gk.graph_from_networkx(Gs) kernel = [{"name": "SP", "with_labels": 0}] init_kernel = gk.GraphKernel(kernel=kernel, normalize=0) K = init_kernel.fit_transform(graph_list) except: continue MMD_functions = [mg.MMD_b, mg.MMD_u] kernel_hypothesis = mg.BoostrapMethods(MMD_functions) function_arguments = [dict(n=n, m=n), dict(n=n, m=n)] kernel_hypothesis.Bootstrap(K, function_arguments, B=B) #print(f'p_value {kernel_hypothesis.p_values}') #print(f"MMD_u {kernel_hypothesis.sample_test_statistic['MMD_u']}") test_info = pd.concat( (test_info, pd.DataFrame( { 'p_val': kernel_hypothesis.p_values['MMD_u'], 'sample': sample, 'mean_error_1': np.mean(error_1), 'mean_error_2': np.mean(error_2), 'alpha': alpha, 's1': s1, 's2': s2 }, index=[0])), ignore_index=True) return test_info
def _test_tensor_scalar_logical_xor(test_case, shape, scalar, dtype, device): np_input = np.random.randint(3, size=shape) input = flow.tensor(np_input, dtype=dtype, device=flow.device(device)) of_out = input.logical_xor(scalar) np_out = np.logical_xor(np_input, scalar) test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
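# NumPy-only sketch of the behavior the oneflow test above verifies:
# np.logical_xor broadcasts a Python scalar against an integer array,
# returning True where exactly one operand is truthy.
import numpy as np

np_input = np.random.randint(3, size=(2, 3))
print(np.logical_xor(np_input, 1))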
def merge_atlases(atlases, voxel_reference=None, tsv=None, names=None):
    """
    This code merges the schaefer atlases together, so I have access to the
    cortical, subcortical, and cerebellar components

    Parameters
    ----------
    atlases : list
        list of atlases to merge together (first one being the base)
    voxel_reference : str
        the nifti file with the voxel dimensions wanted for output
    tsv : str or None
        tsv file that has the columns "regions" and "index"
    names : list or None
        list of names to append to the Network Mappings to know where the
        regions are coming from (e.g. subcortical or cerebellar)
    """
    if len(atlases) == 1:
        warnings.warn("only one atlas, returning original atlas")
        return atlases[0]

    # load and resample the base atlas if necessary
    base_atlas_img = nib.load(atlases[0])
    if voxel_reference:
        base_atlas_img = resample_from_to(base_atlas_img,
                                          nib.load(voxel_reference),
                                          order=0, mode='nearest')

    # generate names from filenames if not passed in
    # (only makes a difference if tsv is used)
    if not names:
        names = [os.path.basename(atlas).split('.')[0]
                 for atlas in atlases[1:]]

    # to collect all the tsv information at the end of atlas iteration
    if tsv:
        lut = pd.read_csv(tsv, sep='\t')
        df_collector = [lut]

    # the atlas should be integers (one region: one number)
    # (np.asanyarray(img.dataobj) replaces the long-deprecated img.get_data())
    base_atlas_data = np.asanyarray(base_atlas_img.dataobj).astype(int)
    # tracking the highest index so numbers do not overlap in the final atlas
    max_idx = base_atlas_data.max()
    # keeping track of the final atlas structure
    built_atlas_data = base_atlas_data

    # iterate over the names and atlases
    for name, atlas in zip(names, atlases[1:]):
        # had to squeeze in case image is faux 4d (e.g. (x,y,z,1))
        atlas_img = nib.funcs.squeeze_image(nib.load(atlas))

        # the order is 0 splines so that nearest neighbors keeps integers
        # (I don't want any fancy processing for edges to bleed values)
        if not np.array_equal(atlas_img.affine, base_atlas_img.affine):
            atlas_img = resample_from_to(atlas_img, base_atlas_img,
                                         order=0, mode='nearest')

        # atlases should be treated as integers (not floats)
        atlas_data = np.asanyarray(atlas_img.dataobj).astype(int)

        # make boolean masks to uniquely identify atlas voxels
        atlas_mask = atlas_data > 0
        built_atlas_mask = built_atlas_data > 0

        # clip off any voxels that are already a part of the built atlas
        atlas_mask_uniq = np.logical_xor(atlas_mask,
                                         (atlas_mask & built_atlas_mask))

        # see what schaefer networks are missing in the atlas
        atlas_idxs = set(np.unique(atlas_data[atlas_mask_uniq]))
        network_idxs = set(NETWORK_MAPPING.keys())
        missing_idxs = network_idxs - atlas_idxs

        # print out the missing networks
        missing_networks = [NETWORK_MAPPING[net] for net in missing_idxs]
        warnings.warn('Missing from atlas {file}: '.format(file=atlas) +
                      ' '.join(missing_networks))

        # add the max value from the current built
        # atlas so there is no overlap
        atlas_data[atlas_mask_uniq] += max_idx

        # add the current atlas data to the built atlas
        built_atlas_data[atlas_mask_uniq] += atlas_data[atlas_mask_uniq]

        # add new entries to the tsv
        if tsv:
            entries = [('-'.join([name, net]), num + max_idx)
                       for num, net in NETWORK_MAPPING.items()
                       if num in atlas_idxs]
            df_collector.append(
                pd.DataFrame.from_records(entries,
                                          columns=['regions', 'index']))

        # get the new max of the built atlas
        max_idx = built_atlas_data.max()

    # after all dataframes are processed, merge them all together
    # and write out a file.
    if tsv:
        out_lut = pd.concat(df_collector, ignore_index=True)
        out_lut.to_csv('lut.tsv', sep='\t', index=False)

    # write out the built atlas with all the pieces.
out = 'mergedAtlas.nii.gz' base_atlas_img.__class__(built_atlas_data, base_atlas_img.affine, base_atlas_img.header).to_filename(out)
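# Standalone sketch of the mask trick above: XOR-ing a mask with its
# intersection keeps only the voxels unique to the new atlas, i.e.
# logical_xor(a, a & b) == a & ~b. The toy masks are made up.
import numpy as np

atlas_mask = np.array([True, True, False, True])
built_mask = np.array([True, False, False, False])
uniq = np.logical_xor(atlas_mask, atlas_mask & built_mask)
assert np.array_equal(uniq, atlas_mask & ~built_mask)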
def visualizeMatchingResult(self, code1, code2, mask1, mask2, shift, im,
                            pupil_xyr, iris_xyr):
    resMask = np.zeros((self.ISO_RES[1], self.ISO_RES[0]))

    # calculate heat map between code1 and code2 at the matching shift
    xorCodes = np.logical_xor(
        code1, np.roll(code2, self.max_shift - shift, axis=1))
    andMasks = np.logical_and(
        mask1, np.roll(mask2, self.max_shift - shift, axis=1))

    heatMap = 1 - xorCodes.astype(int)
    heatMap = np.pad(np.mean(heatMap, axis=2),
                     pad_width=((8, 8), (0, 0)),
                     mode='constant', constant_values=0)
    andMasks = np.pad(andMasks,
                      pad_width=((8, 8), (0, 0)),
                      mode='constant', constant_values=0)
    heatMap = heatMap * andMasks

    if 'single' in self.vis_mode:
        heatMap = (heatMap >= self.visMinAgreedBits / 100).astype(np.uint8)

    heatMap = np.roll(heatMap, int(self.polar_width / 2), axis=1)

    # map the polar heat map back onto the cartesian iris annulus
    for j in range(self.ISO_RES[0]):
        for i in range(self.ISO_RES[1]):
            xi = j - iris_xyr[0]
            yi = i - iris_xyr[1]
            ri = iris_xyr[2]
            xp = j - pupil_xyr[0]
            yp = i - pupil_xyr[1]
            rp = pupil_xyr[2]
            if xi**2 + yi**2 < ri**2 and xp**2 + yp**2 > rp**2:
                rr, tt = self.polar(xi, yi)
                tt = np.clip(
                    np.round(self.polar_width * ((180 + tt) / 360)).astype(int),
                    0, self.polar_width - 1)
                rr = np.clip(
                    np.round(self.polar_height * (rr - rp) / (ri - rp)).astype(int),
                    0, self.polar_height - 1)
                resMask[i, j] = heatMap[rr, tt]  # *** TODO correct mapping for shifted p/i centers

    heatMap = 255 * cv2.morphologyEx(resMask, cv2.MORPH_OPEN, kernel=self.se)
    mask_blur = cv2.filter2D(heatMap, -1, self.sk)

    if 'single' in self.vis_mode:
        mask_blur = (48 * mask_blur / np.max(mask_blur)).astype(int)
        imVis = np.stack((np.array(im),) * 3, axis=-1)
        imVis[:, :, 1] = np.clip(imVis[:, :, 1] + mask_blur, 0, 255)
    elif 'heat_map' in self.vis_mode:
        mask_blur = (255 * mask_blur / np.max(mask_blur)).astype(int)
        heatMap = np.uint8(np.stack((np.array(mask_blur),) * 3, axis=-1))
        heatMap = cv2.applyColorMap(heatMap, cv2.COLORMAP_JET)
        cl_im = self.clahe.apply(np.array(im))
        imVis = np.stack((cl_im,) * 3, axis=-1)
        imVis = cv2.addWeighted(heatMap, 0.1, np.array(imVis), 0.9, 32)
    else:
        raise Exception("Unknown visualization mode")

    return imVis
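# Fractional Hamming distance sketch: the standard iris-code score whose
# bitwise disagreement the heat map above visualizes. Codes and masks here
# are random stand-ins, so the score hovers around 0.5.
import numpy as np

c1 = np.random.rand(48, 512) > 0.5
c2 = np.random.rand(48, 512) > 0.5
m = np.ones((48, 512), dtype=bool)

score = np.sum(np.logical_xor(c1, c2) & m) / np.sum(m)
print(score)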
#-*- coding: utf-8 -*-
'''
@author: PY131, created on 17.4.29
this is a test of an RBF network on XOR input/output samples
'''

'''
preparation of data
'''
import numpy as np

# train set
X_trn = np.random.randint(0, 2, (100, 2))
y_trn = np.logical_xor(X_trn[:, 0], X_trn[:, 1])
# test set
X_tst = np.random.randint(0, 2, (100, 2))
y_tst = np.logical_xor(X_tst[:, 0], X_tst[:, 1])

'''
implementation of RBF network
'''
from RBF_BP import *

# generate the centers (4 centers with 2 dimensions) based on XOR data
centers = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

# construct the network
rbf_nn = RBP_network()  # initialize an RBF-BP network instance
rbf_nn.CreateNN(4, centers, learningrate=0.05)  # build the network structure

# parameter training
e = []
for i in range(10):
def get_trans(sgn0, sgn1): return np.sum(np.logical_xor(sgn0, sgn1))
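# Usage sketch for get_trans above: count sign transitions by comparing a
# boolean sequence against a one-sample shift of itself. The data are made up.
import numpy as np

sgn = np.array([0, 0, 1, 1, 0, 1], dtype=bool)
print(get_trans(sgn[:-1], sgn[1:]))  # 3 transitions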
'm', linewidth=2, label='Pressure Moment Z') ax4.set_xlabel('Time (s)', fontsize=16) ax4.set_ylabel(r'Moment ($N\cdot m$)', fontsize=16) ax4.legend(loc='lower left', fontsize=16) ax4.tick_params(labelsize=14) plt.savefig('./gen%i/ind%i/VALg%ii%i.png' % (gen, ind, gen, ind), bbox_inches='tight', dpi=100) # Get timestp40 = int(np.argwhere(forces[:, 0] > 40)[0]) matFX = np.invert(forces[timestp40:, 1] > forces[-1, 1]) logicFX = np.logical_xor(matFX[0:-2], matFX[1:-1]) if len(np.argwhere(logicFX)) % 2 == 1: fx = int(np.argwhere(logicFX)[1]) else: fx = int(np.argwhere(logicFX)[0]) matFY = np.invert(forces[timestp40:, 2] > forces[-1, 2]) logicFY = np.logical_xor(matFY[0:-2], matFY[1:-1]) if np.sum(logicFY) == 0: fy = timestp40 else: if len(np.argwhere(logicFY)) % 2 == 1: fy = int(np.argwhere(logicFY)[1]) else: fy = int(np.argwhere(logicFY)[0])
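# Standalone sketch of the crossing detector above: XOR of adjacent samples
# of a boolean series marks the indices where it flips, i.e. where the
# signal crosses the chosen settling threshold. The sample data are made up.
import numpy as np

sig = np.array([0.0, 0.4, 1.2, 0.9, 1.1, 1.0])
below = np.invert(sig > sig[-1])
flips = np.logical_xor(below[0:-1], below[1:])
print(np.argwhere(flips).ravel())  # indices just before each crossing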
# %% Problem #3 print("-----------") print("Problem #3:") print("-----------") # Part a XG = np.zeros(15) XG_ones = np.zeros(15) G = np.ones(4) for ii in range(15): newbit_G = int(np.logical_xor(G[3], G[0])) XG[ii] = G[3] if G[3] == 1: XG_ones[ii] = 1 else: XG_ones[ii] = -1 G[1:4] = G[0:3] G[0] = newbit_G #indexes = list(range(-6,1)) #indexes = indexes + list(range(-14,-6)) indexes = range(0, 15) auto_a = np.zeros(15) ind = 0
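# A plausible completion (an assumption, not the original assignment code):
# circular autocorrelation of the length-15 +/-1 sequence generated above.
# For a maximal-length LFSR sequence it should be 15 at lag 0 and -1 at
# every other lag.
for lag in indexes:
    auto_a[ind] = np.sum(XG_ones * np.roll(XG_ones, lag))
    ind += 1
print(auto_a)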
def check_plan(self, print_results=False) -> bool:
    grounding = Grounding()
    grounding.ground_problem(self.domain, self.problem)
    state = grounding.get_initial_state_spike()

    # check if the plan is executable
    for action in self.action_list:
        pos, neg = grounding.get_action_condition_spike(action)
        # unmet positive conditions: pos AND NOT state
        result = np.logical_xor(pos, np.logical_and(pos, state))
        if np.any(result):
            if print_results:
                print("Action " + str(action.formula) + " is not applicable.")
                for index in np.nonzero(result)[0]:
                    print("Positive condition is not met: " +
                          str(grounding.get_proposition_from_id(index)))
            return False
        # unmet negative conditions: neg AND state
        result = np.logical_xor(neg, np.logical_and(neg, np.logical_not(state)))
        if np.any(result):
            if print_results:
                print("Action " + str(action.formula) + " is not applicable.")
                for index in np.nonzero(result)[0]:
                    print("Negative condition is not met: " +
                          str(grounding.get_proposition_from_id(index)))
            return False
        # apply action effects: remove deletes, then add adds
        adds, dels = grounding.get_action_effect_spike(action)
        np.logical_xor(np.logical_and(state, dels), state, out=state)
        np.logical_or(state, adds, out=state)

    if print_results:
        print("Plan is executable.")

    # check if goal is satisfied
    positive_conditions = np.zeros(grounding.proposition_count, dtype=bool)
    negative_conditions = np.zeros(grounding.proposition_count, dtype=bool)
    grounding.get_simple_conditions(self.problem.goal, positive_conditions,
                                    negative_conditions)

    # positive goals
    goal_achieved = True
    result = np.logical_xor(positive_conditions,
                            np.logical_and(positive_conditions, state))
    if np.any(result):
        if print_results:
            print("Goal is not satisfied.")
            for index in np.nonzero(result)[0]:
                print("Positive goal is not met: " +
                      str(grounding.get_proposition_from_id(index)))
        goal_achieved = False

    # negative goals
    result = np.logical_xor(negative_conditions,
                            np.logical_and(negative_conditions,
                                           np.logical_not(state)))
    if np.any(result):
        if print_results:
            print("Goal is not satisfied.")
            for index in np.nonzero(result)[0]:
                print("Negative goal is not met: " +
                      str(grounding.get_proposition_from_id(index)))
        goal_achieved = False

    if goal_achieved and print_results:
        print("Goal is satisfied.")

    return goal_achieved
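# Standalone sketch of the applicability test above: with boolean state
# vectors, logical_xor(pos, pos & state) flags exactly the positive
# conditions that the current state fails to satisfy. Vectors are made up.
import numpy as np

state = np.array([True, False, True, False])
pos = np.array([True, True, False, False])
unmet = np.logical_xor(pos, np.logical_and(pos, state))
assert np.array_equal(unmet, pos & ~state)
print(np.nonzero(unmet)[0])  # -> [1]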