def auc_score(self, ground_truth, predictions, **kwargs):
    """
    Calculate the AUC score for this particular trial.
    This will also calculate the F scores and ROC curves.

    Args:
        ground_truth: vector of class labels
        predictions: vector of predicted class labels

    Returns:
        AUC score for this trial
    """
    # calculate f scores
    thresholded = threshold(predictions[:, 1], threshmin=0.5)
    thresholded = threshold(thresholded, threshmax=0.5, newval=1.0).astype(int)
    fhalf_score = metrics.fbeta_score(ground_truth.astype(int), thresholded, beta=0.5)
    f2_score = metrics.fbeta_score(ground_truth.astype(int), thresholded, beta=2)
    f1_score = metrics.fbeta_score(ground_truth.astype(int), thresholded, beta=1)

    # calculate ROC curve and AUC
    fpr, tpr, _ = metrics.roc_curve(ground_truth, predictions[:, 1])
    area = metrics.auc(fpr, tpr)

    self.fhalf_scores_.append(fhalf_score)
    self.f2scores_.append(f2_score)
    self.f1scores_.append(f1_score)
    self.rates_.append((fpr, tpr))
    self.aucs_.append(area)
    return area
def thresh(im, thresh):
    """Threshold image with threshold value thresh."""
    tlow = threshold(im, threshmin=thresh, newval=0)
    thigh = threshold(tlow, threshmax=thresh, newval=255)
    return thigh.astype(np.uint8)
def otsu(small_img):
    # filter black and gray
    img_R = small_img[:, :, 0]
    img_G = small_img[:, :, 1]
    img_B = small_img[:, :, 2]
    black = (img_R < 10) & (img_G < 10) & (img_B < 10)
    small_img[:, :, :3][black] = [255, 255, 255]
    grey = (img_R >= 220) & (img_G >= 220) & (img_B >= 220)
    small_img[:, :, :3][grey] = [255, 255, 255]
    gray = cv2.cvtColor(small_img, cv2.COLOR_RGB2HSV)
    mask = (gray[:, :, 0] < 0.06) & (gray[:, :, 1] < 0.06) & (gray[:, :, 2] < 70)
    small_img[:, :, :3][mask] = [255, 255, 255]
    return small_img

    # NOTE: everything below the return above is unreachable in the original.
    # changing to gray scale
    img_gray = cv2.cvtColor(small_img, cv2.COLOR_RGB2GRAY)
    # clahe
    clahe = cv2.createCLAHE()
    img_cl = clahe.apply(img_gray)
    # otsu algorithm
    try:
        thresh = threshold_otsu(img_cl)
        threshold(small_img[:, :, :3], thresh, [255, 255, 255])
    except:
        pass
    return small_img
def render_image(self, dir):
    input_file = h5py.File(self.file, 'r')
    wires = input_file['image/wires']
    n = 1
    scale = 100
    thresh = 25
    self.logger.info("""Producing {} images with scale {} and threshold {}""".format(n, scale, thresh))
    try:
        image = wires[0]
        self.logger.info("Image: min: {}, max: {}".format(np.min(image), np.max(image)))
        buff = np.ndarray(shape=(image.shape[1], image.shape[2], image.shape[0]), dtype=np.float64)
        for i in range(3):
            buff[:, :, i] = image[i, :, :]
        buff = buff * scale
        buff = threshold(buff, threshmin=thresh) + threshold(buff, threshmax=-thresh)
        self.logger.info("Buffer: min: {}, max: {}".format(np.min(buff), np.max(buff)))
        output_file = os.path.join(dir, 'wires.png')
        imsave(output_file, buff)
    except Exception as e:
        self.logger.warning("problem creating image")
        self.logger.warning(e)
def create_gist_parameters(self, selected_path, video_name):
    org_images = images.read_video_as_list_by_path(selected_path, video_name)
    background_model = cv2.BackgroundSubtractorMOG2(len(org_images), varThreshold=266, bShadowDetection=True)
    for frame in org_images:
        background_model.apply(frame, len(org_images))  # given frame, learning-rate
    th = 150
    fgmask_set = []
    dp_mask = []
    for frame in org_images:
        # create foreground mask which is a gray-scale (0~255) image
        forward = background_model.apply(frame)
        tmp = cv2.cvtColor(forward, cv2.COLOR_GRAY2BGR)  # convert to color
        dp_mask.append(tmp)
        # convert gray-scale foreground mask to binary image
        a = stats.threshold(forward, threshmin=th, threshmax=255, newval=0)
        a = stats.threshold(a, threshmin=0, threshmax=th, newval=1)
        fgmask_set.append(a)
    return fgmask_set, org_images, dp_mask
def recv_cmd(self):
    data, address = self.sck.recvfrom(296)
    uData = self.rec_fmt.unpack(data)
    tempImage = np.asarray(uData[1:65])
    tSamp = uData[70]
    tempImageMat = np.reshape(tempImage, (16, 4))
    tempRow = tempImageMat.sum(axis=1) / 4
    tempRowSq = tempRow ** 2
    # Select a threshold as a percentage above the RMS value
    tempRms = np.sqrt(tempRowSq.sum() / tempRow.size)
    threshold = tempRms * 1.06
    # Binarize the signal (0 = no reading, 1 = person detected)
    defaultVals = stats.threshold(tempRow, threshmin=threshold, newval=0)
    defaultVals = stats.threshold(defaultVals, threshmax=threshold, newval=1)
    # print(defaultVals)
    span = 3.2  # Width of sensed floor
    values = np.dot([i for i, x in enumerate(defaultVals) if x == 1], span / 15)
    self.pos_values.append((tSamp, defaultVals))
    if len(self.pos_values) > 100:
        self.pos_values.pop(0)
def channel_enhance(self, img, channel, level=1):
    if channel == 'B':
        blue_channel = img[:, :, 0]
        # blue_channel = (blue_channel - 128) * (level) + 128
        blue_channel = blue_channel * level
        blue_channel = stats.threshold(blue_channel, threshmax=255, newval=255)
        img[:, :, 0] = blue_channel
    elif channel == 'G':
        green_channel = img[:, :, 1]
        # green_channel = (green_channel - 128) * (level) + 128
        green_channel = green_channel * level
        green_channel = stats.threshold(green_channel, threshmax=255, newval=255)
        img[:, :, 1] = green_channel
    elif channel == 'R':
        red_channel = img[:, :, 2]
        # red_channel = (red_channel - 128) * (level) + 128
        red_channel = red_channel * level
        red_channel = stats.threshold(red_channel, threshmax=255, newval=255)
        img[:, :, 2] = red_channel
    img = img.astype(np.uint8)
    return img
def brightness_contrast(self, img, alpha=1.0, beta=0):
    img_contrast = img * alpha
    img_bright = img_contrast + beta
    # img_bright = img_bright.astype(int)
    img_bright = stats.threshold(img_bright, threshmax=255, newval=255)
    img_bright = stats.threshold(img_bright, threshmin=0, newval=0)
    img_bright = img_bright.astype(np.uint8)
    return img_bright
def test_basic(self):
    a = [-1, 2, 3, 4, 5, -1, -2]
    assert_array_equal(stats.threshold(a), a)
    assert_array_equal(stats.threshold(a, 3, None, 0),
                       [0, 0, 3, 4, 5, 0, 0])
    assert_array_equal(stats.threshold(a, None, 3, 0),
                       [-1, 2, 3, 0, 0, -1, -2])
    assert_array_equal(stats.threshold(a, 2, 4, 0),
                       [0, 2, 3, 4, 0, 0, 0])
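# scipy.stats.threshold, exercised throughout these examples, was deprecated in
# SciPy 0.17 and removed in 0.19, so none of the snippets here run on current
# SciPy as written. Below is a minimal NumPy stand-in matching the behaviour
# asserted by test_basic above; it is a sketch for reference, not code from any
# of the excerpted projects.
import numpy as np

def threshold(a, threshmin=None, threshmax=None, newval=0):
    """Copy of `a` with values below threshmin or above threshmax set to newval."""
    a = np.asarray(a).copy()
    mask = np.zeros(a.shape, dtype=bool)
    if threshmin is not None:
        mask |= a < threshmin
    if threshmax is not None:
        mask |= a > threshmax
    a[mask] = newval
    return a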
def threshold_values(vector):
    """
    Returns a new vector with all values < 0.5 set to 0, and all
    values >= 0.5 set to 1.
    """
    tmp_vec = stats.threshold(vector, None, 0.5, 1)
    tmp_vec = stats.threshold(tmp_vec, 0.5, None, 0)
    return tmp_vec
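# A plain comparison does the same job without scipy.stats.threshold; a minimal
# sketch (the helper name threshold_values_np is hypothetical, and values
# exactly equal to 0.5 map to 1 here, matching the docstring rather than the
# edge-case behaviour of the two threshold() calls above).
import numpy as np

def threshold_values_np(vector):
    """Binarize: values >= 0.5 become 1.0, values < 0.5 become 0.0."""
    return np.where(np.asarray(vector, dtype=float) >= 0.5, 1.0, 0.0)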
def main():
    connection = pg.connect("dbname = rem user = wireless password = wireless")
    df = psql.read_sql("select occ, noise_floor, timetag from spectruminfo order by timetag DESC LIMIT 1000", connection)
    tempocc = df['occ'].values
    tempnf = df['noise_floor'].values
    occ = np.zeros((df.shape[0], 16))
    nf = np.zeros((df.shape[0], 16))
    for i in range(0, len(occ) - 1):
        occ[i, :] = np.copy(np.array(tempocc[i]))
        nf[i, :] = np.copy(np.array(tempnf[i]))
    fitness = np.zeros((16, 1))
    plt.subplot(411)
    for i in range(195, 210):
        plt.plot(occ[i, :])
    plt.subplot(412)
    plt.plot(occ[:, 6])
    plt.subplot(413)
    plt.plot(occ[:, 13])
    plt.subplot(414)
    plt.plot(occ[:, 12])
    for i in range(0, 16):
        thr = np.mean(nf[:, i])
        print 10.0 / np.fabs(thr)
        # print np.mean(occ[:,i])
        occ[:, i] = stats.threshold(occ[:, i], threshmax=10.0 / np.fabs(thr), newval=1)
        occ[:, i] = stats.threshold(occ[:, i], threshmin=0.9, newval=0)
    plt.subplot(413)
    plt.plot(occ[:, 13])
    plt.subplot(412)
    plt.plot(occ[:, 8])
    plt.subplot(414)
    plt.plot(occ[:, 1])
    print bd.enumerate_bursts(occ[:, 8], 'burstLabel')
    # print zero_runs(occ[:,8])
    # plt.hist(np.histogram(occ[:,0]), bins=[0, 1])
    plt.show()
def _minmaxThreshold(simMat, targetL):
    nNodes = simMat.shape[0]
    targetL *= 0.75  # heuristic adjustment to get roughly the right number of (undirected) links
    # maxVals = simMat.copy()
    maxVals = simMat.max(axis=1)
    # print("MaxVals: " + str(maxVals))
    maxVals[maxVals == 0.0] = 1.0  # set max of empty rows to max possible sim value
    threshold = float(min(maxVals))
    thrSimMat = sps.threshold(simMat, threshmin=threshold, threshmax=None, newval=0)
    nLinks = np.count_nonzero(thrSimMat)
    print("Thresholded to %d links" % nLinks)
    if nLinks < targetL:
        # thresholding cut off too many links, use full matrix
        print("Using full matrix")
        thrSimMat = sps.threshold(simMat, threshmin=-1, threshmax=None, newval=0)
        nLinks = np.count_nonzero(thrSimMat)
    if nLinks > targetL:
        # thresholding cut off too few links
        newSimMat = np.matrix(np.zeros(simMat.shape))
        frac = float(targetL) / nLinks
        print("Keep frac = %f of the links" % frac)
        # keep frac links in each row
        nLinksRow = [np.count_nonzero(row) for row in thrSimMat]
        for i in range(nNodes):
            rowSim = thrSimMat[i].ravel()
            linksTarget = int(math.floor(frac * nLinksRow[i])) + 1
            idx = nNodes - linksTarget
            threshold = np.partition(rowSim, idx)[idx]
            rowLinks = sps.threshold(rowSim, threshmin=threshold, threshmax=None, newval=0)
            nRowLinks = np.count_nonzero(rowLinks)
            if nRowLinks > 1.1 * linksTarget:
                # got too many, select at random, set unselected to zero
                linkIdx = np.nonzero(rowLinks)[0]  # get indices of nonzero row elements
                linkVals = rowLinks[linkIdx]       # link weights
                topIdx = np.argsort(linkVals)[len(linkVals) - linksTarget:]  # get highest similarity links
                linkIdx = linkIdx[topIdx]
                # linkIdx = rnd.choice(linkIdx, linksTarget, replace=False, p=linkVals/np.sum(linkVals))  # (weighted) random draw of the ones to keep
                linkVals = rowLinks[linkIdx]  # save the selected links
                rowLinks = np.zeros(nNodes)   # new links, all zero
                rowLinks[linkIdx] = linkVals  # assign the ones that were saved
            newSimMat[i] = rowLinks
            # print("Row %i contains %f links, should have %d. Threshold = %f" % (i, np.count_nonzero(newSimMat[i]), frac * nLinksRow[i], threshold))
        simMat = newSimMat
    else:
        simMat = thrSimMat
    return simMat
def hue_saturation(self, img_rgb, alpha=1, beta=1):
    img_hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
    hue = img_hsv[:, :, 0]
    saturation = img_hsv[:, :, 1]
    hue = stats.threshold(hue * alpha, threshmax=179, newval=179)
    saturation = stats.threshold(saturation * beta, threshmax=255, newval=255)
    img_hsv[:, :, 0] = hue
    img_hsv[:, :, 1] = saturation
    img_transformed = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
    return img_transformed
def silence_ratio(signal, arg_dict):
    t = arg_dict['silenceThreshold']
    if np.isclose(np.max(signal), 0):
        signal = threshold(signal, threshmin=t, newval=-100)
        silent = np.where(signal == -100)[0]
        return len(silent) / len(signal)
    else:
        # Normalization needed to max = 1
        signal = signal / np.max(signal)
        signal = threshold(signal, threshmin=t, newval=-100)
        # print(signal)
        silent = np.where(signal == -100)[0]
        return len(silent) / len(signal)
def train(self):
    D = self.A_true.shape[1]
    for t in range(self.epo):
        self.show_error()
        for i in range(D):
            start = time.time()
            Yi = self.Y - self.A * self.Z + self.A[:, i] * self.Z[i, :]
            fi = self.A[:, i].copy()
            gi = self.Z[i, :].copy().transpose()
            self.A[:, i] = 1.0 / (norm(gi) * norm(gi)) * threshold(Yi * gi, threshmin=0)
            self.Z[i, :] = (1.0 / (norm(fi) * norm(fi)) * threshold(Yi.transpose() * fi, threshmin=0)).transpose()
            end = time.time()
            self.time = self.time + end - start
def detect_hand(color, depth):
    # throw out all values that are too small or too large
    thresh = threshold(depth, threshmin=thresh_lo, threshmax=thresh_hi, newval=0)
    # make remaining values 255
    thresh = threshold(thresh, threshmax=1, newval=255)
    thresh = thresh.astype(np.uint8)
    kernel = np.ones((5, 5), np.uint8)
    thresh = cv2.erode(thresh, kernel, iterations=1)
    kernel = np.ones((7, 7), np.uint8)
    thresh_dilation = cv2.dilate(thresh, kernel, iterations=1)
    _, contours, _ = cv2.findContours(thresh_dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rois = []  # regions of interest. each one is a tuple: (contour, area, center)
    print 'len(contours): ', len(contours)
    for contour in contours:
        r = cv2.minAreaRect(contour)
        (r_cent, (w, h), r_angle) = r
        if cv2.contourArea(contour) > min_contour and min([x for [[x, y]] in contour]) > 50:
            M = cv2.moments(contour)
            rois.append((contour, M['m00'], (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))))
    if len(rois) > 0:
        hand = max(rois, key=lambda roi: roi[1])[0]  # get the biggest roi as the hand
    else:
        return None, None, 0
    # find the first local max enclosed circle greater than min_palm_circle
    palm_cent = None
    palm_radius = 0
    miny = min([y for [(x, y)] in hand])
    maxy = max([y for [(x, y)] in hand])
    (vx, vy, x0, y0) = cv2.fitLine(hand, cv2.DIST_L2, 0, 0.01, 0.01)
    for y in range(int(miny), int(maxy), 10):
        x = float((y - y0) * vx / vy + x0)
        radius = cv2.pointPolygonTest(hand, (x, y), True)
        if radius > min_palm_circle and radius > palm_radius:
            palm_cent = (int(x), int(y))
            palm_radius = radius
        if palm_cent is not None and radius < 0.9 * palm_radius:
            # hand = filter_hsv(color, depth, hand, palm_cent, palm_radius)
            break  # we reached a local min
    return hand, palm_cent, palm_radius
def find_tstar(phi0, discretization=200):
    """ finds tstar and t_opt (at discretization) for a given phi0 """
    # define a periodic spline for the
    start_time = pos_start_root + phi0 * 23.7 / (2 * np.pi)
    prc_pos = stats.threshold(-pmodel.pPRC_interp(times + start_time)[:, 15], threshmin=0)
    prc_neg = stats.threshold(-pmodel.pPRC_interp(times + start_time)[:, 15], threshmax=0)
    prc = -pmodel.pPRC_interp(times + start_time)[:, 15]

    prc_pos_spl = ut.PeriodicSpline(times, prc_pos, period=pmodel.T)
    prc_neg_spl = ut.PeriodicSpline(times, prc_neg, period=pmodel.T)

    def dphitot_dt(phis, t):
        # integrates everything
        [phi_osc_pos, phi_osc_neg, phi_shift_pos, phi_shift_neg] = phis
        dphi_osc_pos_dt = (2 * np.pi) / pmodel.T + \
            umax * prc_pos_spl(phi_osc_pos * pmodel.T / (2 * np.pi))
        dphi_osc_neg_dt = (2 * np.pi) / pmodel.T + \
            umax * prc_neg_spl(phi_osc_neg * pmodel.T / (2 * np.pi))
        dphi_shft_pos_dt = umax * prc_pos_spl(phi_osc_pos * pmodel.T / (2 * np.pi))
        dphi_shft_neg_dt = umax * prc_neg_spl(phi_osc_neg * pmodel.T / (2 * np.pi))
        return (dphi_osc_pos_dt, dphi_osc_neg_dt, dphi_shft_pos_dt, dphi_shft_neg_dt)

    int_times = np.linspace(0, 3 * pmodel.T, 10001)
    delta_phis_total = integrate.odeint(dphitot_dt, [0, 0, 0, 0], int_times, hmax=0.001)

    # get the delta phis
    delta_phi_fs = np.linspace(0, 2 * np.pi, discretization)
    cross_loc = np.min(np.where(delta_phis_total[:, 2] - delta_phis_total[:, 3] >= 2 * np.pi))
    t_star = int_times[cross_loc]
    phi_star = delta_phis_total[cross_loc, 3] % (2 * np.pi)

    t_opts = []
    for phif in delta_phi_fs:
        if phif > phi_star:
            time_to_reach = np.min(int_times[np.where(delta_phis_total[:, 3] + 2 * np.pi <= phif)])
        else:
            time_to_reach = np.min(int_times[np.where(delta_phis_total[:, 2] >= phif)])
        t_opts.append(time_to_reach)
    return delta_phi_fs, np.asarray(t_opts), t_star, phi_star
def thresholdSimilarityMatrix(simMat, targetLS, fixed=False, directed=False):
    nNodes = simMat.shape[0]
    targetL = min(targetLS * nNodes, nNodes * (nNodes - 1) / 2)
    nNonZero = np.count_nonzero(simMat)
    print("[SimToNetwork.thresholdSimilarityMatrix] targetL nNonZero " + str(targetL) + ' ' + str(nNonZero))
    if nNonZero > 0:  # sim matrix must have at least one non-zero entry
        targetL *= 2
        if fixed or nNonZero <= targetL:
            # keep links with sim above some value
            idx = nNodes * nNodes - (min(targetL, nNonZero) - 1) - 1
            threshold = simMat.flatten()
            threshold.partition(idx)
            print(threshold.shape)
            threshold = float(threshold[idx])
            print("partitioned threshold " + str(threshold))
            simMat = np.matrix(sps.threshold(simMat, threshold, None, 0))
        else:
            # threshold by min max sim so all nodes have at least one link,
            # then threshold preserving relative number of links
            simMat = _minmaxThreshold(simMat, targetL)
            # simMat = _locallyAdaptiveThreshold(simMat, targetL)
        if not directed:
            # combine upper and lower triangles into final upper triangular similarity matrix
            upper = np.triu(simMat, 1)       # upper triangle
            lower = np.tril(simMat, -1).T    # transposed lower triangle
            simMat = np.maximum(upper, lower)  # take max
        print("[SimToNetwork.thresholdSimilarityMatrix] newSimMat nNonZero " + str(np.count_nonzero(simMat)))
    return simMat
def threshold(array, threshold=5):
    """Set all pixels below threshold to zero.

    Parameters
    ----------
    array : `~numpy.ndarray`
        Input array
    threshold : float, optional
        Minimum threshold

    Returns
    -------
    data : `~numpy.ndarray`
        Copy of input array with pixels below threshold set to zero.
    """
    # TODO: np.clip is simpler, no?
    from scipy import stats

    # NaNs are set to 1 by thresholding, which is not
    # what we want for detection, so we replace them with 0 here.
    data = np.nan_to_num(array)

    data = stats.threshold(data, threshold, None, 0)
    # Note that scipy.stats.threshold doesn't binarize,
    # it only sets values below the threshold to 0,
    # which is not what we want here.
    return data.astype(np.bool).astype(np.uint8)
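# With scipy.stats.threshold gone from current SciPy (and np.bool removed from
# recent NumPy), the same binary mask can be built directly with a comparison.
# A minimal sketch for a positive threshold, not part of the helper above; the
# name threshold_np is hypothetical:
import numpy as np

def threshold_np(array, threshold=5):
    """Return a uint8 mask that is 1 where array >= threshold and 0 elsewhere."""
    data = np.nan_to_num(array)  # NaNs become 0, so they never pass the cut
    return (data >= threshold).astype(np.uint8)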
def convertdigits_to_ampere(digits):
    ampere = digits
    ampere *= 0.04132
    ampere += 0.3
    ampere = threshold(ampere, 0.6)
    return ampere
def compute_mean_of_non_zero(li_val, thres):
    t1 = threshold(li_val, thres)
    # t1 = li_val > bin_thres
    t2 = np.count_nonzero(t1)
    t3 = sum(t1)
    mean = t3 / t2
    return mean
def skeletonize(self, image):
    image = grey_closing(image, footprint=circle(8), mode='constant', cval=0.0)
    image = add_zero_mat(image)
    prev_binary_image = np.zeros_like(image)

    image_bit_depth = (image.dtype.itemsize * 8) / 2
    print "image_bit_depth: " + str(image_bit_depth)

    # image_thresholds = range(2**image_bit_depth, -1, -16)
    image_thresholds = [2**x for x in range(image_bit_depth, 3, -1)] + range(15, 0, -1)
    print "image_thresholds: " + str(image_thresholds)

    for curr_threshold in image_thresholds:
        print "curr_threshold: " + str(curr_threshold)

        curr_thresh_image = threshold(image, curr_threshold)
        curr_binary_image = curr_thresh_image.astype(np.bool).astype(np.int)
        imsave(skeleton_images_path + "binary_" + str(curr_threshold) + ".png", curr_binary_image)

        curr_sum_image = (prev_binary_image + curr_binary_image)
        curr_skeleton_image = self.thin_pixels(curr_sum_image)
        imsave(skeleton_images_path + "skeleton_" + str(curr_threshold) + ".png", curr_skeleton_image)
        print "curr_skeleton max: " + str(curr_skeleton_image.max())

        prev_binary_image = curr_skeleton_image

    return remove_zero_mat(prev_binary_image)
def getFrame(frameNo, ID):
    """
    @param frameNo int identifying the number of the frame in one file desired
    @param ID int identifying the binary file currently being accessed,
        suffix of the filename of the binary file
    @return im uint16 array containing the intensity data of one frame with the
        background subtracted off and intensity above the threshold

    This function returns the desired frame with the intensity data above the
    threshold and the background data subtracted off.
    """
    im = np.zeros(size)
    if ID == 0:
        IDstr = ''
    else:
        IDstr = str(ID)
    filename = directory + filePrefix + IDstr
    # reads the frame from the binary file into a string to be converted
    if not os.path.isfile(filename):
        sys.stdout.write("Cannot find the file: {0}. Check if it is in the right directory, or the filePrefix is correct.".format(filename))
        sys.exit()
    f = open(filename, 'rb')
    offset = header + (frameNo - 1) * frameSize
    f.seek(offset)
    im_data_hex = f.read(frameSize)
    im = convertBin(im_data_hex)
    f.close()
    im = stats.threshold(im, threshmin=threshold, newval=0)
    return im
def err(parameter_shifts, future_ext_phases, mpc_time):
    # assert that args are good
    assert len(parameter_shifts) == len(future_ext_phases), \
        "length mismatch between u, phi_ext"

    osc_phases = np.zeros(len(parameter_shifts) + 1)
    osc_phases[0] = mpc_phi
    for i, pshift in enumerate(parameter_shifts):
        # next oscillator phase = curr phase + norm prog + integr pPRC
        def dphidt(phi, t0):
            return 2 * np.pi / (pmodel.T) + pshift * prcs(pmodel._phi_to_t(phi))[:, control_vectors[0]]
        osc_phases[i + 1] = integrate.odeint(dphidt, osc_phases[i], single_step_ts)[-1]

    # calc difference
    p1 = osc_phases[1:]
    p2 = future_ext_phases
    differences = np.asarray([(p1 - p2) % (2 * np.pi),
                              (p2 - p1) % (2 * np.pi)]).min(0)
    differences = stats.threshold(differences, threshmin=0.1)

    # quadratic cost in time
    weights = (np.arange(len(differences)) + 1)
    return np.sum(weights * differences**2) + 0.001 * np.sum(parameter_shifts**2)
def save_results(batch_label, batch_infer, batch_diff_infer, batch_name, result_list):
    for i in xrange(len(batch_name)):
        image_name = batch_name[i].split(" ")[0].replace(".resnet_hypercolumn", ".jpg")
        image_base_name = convert_image_name(image_name)
        image = iuf.load_image(image_name)
        image = iuf.resize_image(image, (FLAGS.label_row, FLAGS.label_col))
        label_norm = iuf.repeat_image(iuf.norm_image(batch_label[i]))

        num_car_label = np.sum(batch_label[i])
        num_car_infer = np.sum(batch_infer[i]) + batch_diff_infer[i][0]

        batch_infer[i] = ss.threshold(batch_infer[i], threshmin=0.0, newval=0)
        infer_norm = iuf.repeat_image(iuf.norm_image(batch_infer[i]))

        stack_image = np.hstack((image, label_norm, infer_norm))
        # iuf.show_image(stack_image, normalize=False)
        iuf.save_image(stack_image, FLAGS.result_dir + "/" + image_base_name.replace(".jpg", "resdeconv_result.jpg"))
        batch_infer[i].tofile(FLAGS.result_dir + "/" + image_base_name.replace(".jpg", ".npy"))

        print("label: %.2f, infer: %.2f" % (num_car_label, num_car_infer))
        result_list.append(image_name + " " + str(num_car_label) + " " + str(num_car_infer))
def image_to_sketch(img):
    """
    :param img: An image represented as a numpy array with shape
        (height, width, 3) or (batch, height, width, 3)
    :return: A sketch of the image with shape (height, width) or
        (batch, height, width)
    """
    # We must apply a lower threshold. Otherwise the sketch image will be filled
    # with non-zero values that may provide hints to the cnn trained. (It is
    # unlikely to occur in human provided sketches that we have many pixels with
    # brightness lower than 32.)
    SKETCH_LOWEST_BRIGHTNESS = 32

    if len(img.shape) == 4:
        img_diff_dilation_gray = np.array([image_to_sketch(img[i, ...]) for i in range(img.shape[0])])
        return img_diff_dilation_gray
    elif len(img.shape) == 3:
        assert img.dtype == np.float32  # Otherwise the conversion does not work properly.
        kernel = np.ones((5, 5), np.uint8)
        img_dilation = cv2.dilate(img, kernel, iterations=1)
        img_diff_dilation = np.abs(np.subtract(img, img_dilation))
        img_diff_dilation_gray = cv2.cvtColor(img_diff_dilation, cv2.COLOR_RGB2GRAY)
        img_diff_dilation_gray_thresholded = threshold(img_diff_dilation_gray, SKETCH_LOWEST_BRIGHTNESS)
        return img_diff_dilation_gray_thresholded
    else:
        print('Image has to be either of shape (height, width, num_features) or (batch_size, height, width, num_features)')
        raise AssertionError
def create_mask(foreground, background, blur_sigma, thresh, opacity):
    """
    Given two RGB images, foreground and background, give a mask of the areas
    where foreground differs from the background by more than thresh. Apply
    blur_sigma amount of blurring, and set the opacity of the nonzero parts of
    the mask to opacity.
    """
    blurred_fg = gaussian_filter(foreground, blur_sigma).astype(int)
    blurred_bg = gaussian_filter(background, blur_sigma).astype(int)
    diff = np.sum(np.abs(blurred_fg - blurred_bg), axis=2)
    diff = threshold(diff, threshmin=thresh, newval=0)
    diff = threshold(diff, threshmax=thresh + 1, newval=opacity)
    diff = gaussian_filter(diff, blur_sigma).astype(np.uint8)
    return diff
def plot_W(W, threshmin=None, threshmax=None):
    W_filtered = threshold(np.abs(W), threshmin=threshmin, threshmax=threshmax)
    row_indices, column_indices = np.indices(W_filtered.shape)
    plt.scatter(column_indices.flatten(), row_indices.flatten(),
                s=W_filtered.flatten(), color='black', marker='s')
def prepareDistributionImage(image):
    valmax = np.amax(image)
    if valmax > 0:
        scale = valmax / 255.0
    else:
        scale = 1
    img = stats.threshold(image, np.median(image))
    return img / scale
def spread_spectrum(img):
    img = stats.threshold(img, threshmin=12, newval=0)

    # see http://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    return img
def compare_rows(a, row):
    """Take numpy array a and compare to row row.

    Return new numpy array of same size as a containing 0's and 1's:
    0's indicate element of a was greater than corresponding element of row,
    1's indicate element of a was less than corresponding element of row.
    """
    import numpy as np
    from scipy import stats
    (number_rows, number_cols) = np.shape(a)
    b = np.zeros_like(a)
    for i in range(number_rows):
        b[i, :] = a[i, :] - row[:]
    b = stats.threshold(b, threshmin=0, newval=-1)
    b = stats.threshold(b, threshmax=0, newval=0)
    b = -1 * b
    return b
def remove_background_percent(frame, thresh=.5, average=None):
    if average is None:
        max = np.max(frame)
    else:
        max = n_largest(frame, average)
    max = int(np.mean(max))
    threshold = max - (thresh * max)
    frame = sp.threshold(frame, threshold, None, 0)
    return frame
def rgb2gray_and_binary(raw):
    """
    converts color img to binary image (foreground 1, background 0)

    :param raw: image list.
    :return: binary image list
    """
    fg_list = []
    if len(raw[0].shape) == 2:
        for i in range(len(raw)):
            fg_list.append(stats.threshold(raw[i], threshmin=0, threshmax=0, newval=1))
    else:
        for i in range(len(raw)):
            tmp = cvtColor(raw[i], COLOR_RGB2GRAY)
            fg_list.append(stats.threshold(tmp, threshmin=0, threshmax=0, newval=1))
    return fg_list
def plot_2d_max(image, layer, th=-300):
    # p = image.transpose(2, 1, 0)
    # p = p[:, :, ::-1]
    p = image.transpose(0, 1, 2)
    p = threshold(p, threshmax=th)
    print p.shape
    plt.imshow(p[layer], cmap=plt.cm.gray)
    plt.show()
def proj_unit_disk(w, t=None):
    """
    we receive a vector [v1, v2, ..., vp] and project [v1, v2, ..., vp-1]
    on the positive unit disk.
    """
    v = threshold(w, threshmin=0, newval=0)
    if np.linalg.norm(v)**2 <= 1:
        return v
    else:
        return v / np.linalg.norm(v)
def map_channels(i_x):
    i, x = i_x
    x = (x * 255).astype(np.uint8)
    if x.max() > 0.35 * 255:
        threshold = np.fabs(x.max() - x.max() * .65)
    else:
        threshold = 255
    threshImage = stats.threshold(x, threshmin=threshold)
    threshImage[threshImage > 0] = i
    return threshImage
def processPixels(self, pixels):
    pixels_np = np.array(pixels)

    # Difference from baseline
    pixels_np = self.baseline - pixels_np

    # normalize
    p_max = np.max(pixels_np) * 1.0
    p_min = np.min(pixels_np) * 1.0
    pixels_np = (pixels_np - p_min) / (p_max - p_min)

    # std > 0.1 -> no coins
    # print("std:", np.std(pixels_np))

    # low pass filter
    pixels_np = stats.threshold(pixels_np, threshmin=0.70, newval=0.0)
    # set all else to max (255)
    pixels_np = stats.threshold(pixels_np, threshmax=0.1, newval=255.0)

    return pixels_np.tolist()
def remove_background_farthest(frame, thresh, average=None):
    if average is None:
        max = np.min(frame)
    else:
        max = n_smallest(frame, average)
    max = int(np.mean(max))
    if max == 0:
        max = np.mean(frame)
    threshold = max + (thresh * max)
    frame = sp.threshold(frame, threshold, None, 0)
    return frame
def xyzs(self, drift_vel=None, clock=None, pads=None, peaks_only=False):
    """Find the scatter points of the event in space.

    If a drift velocity and write clock frequency are provided, then the
    result gives the z dimension in meters. Otherwise, the z dimension is
    measured in time buckets.

    Parameters
    ----------
    drift_vel : number or array-like
        The drift velocity in the detector, in cm/us. This can be the scalar
        magnitude or a vector. A vectorial drift velocity can be used to
        correct for the Lorentz angle.
    clock : int or float, optional
        The write clock rate, in MHz
    pads : ndarray, optional
        An array of pad vertices. If provided, these pads will be used
        instead of the default pad plane.
    peaks_only : bool, optional
        If True, only the peak of each activation curve will be used.

    Returns
    -------
    xyzs : numpy.ndarray
        A 4D array of points including (x, y, tb or z, activation)

    See also
    --------
    pytpc.simulation.drift_velocity_vector
    """
    if peaks_only:
        traces_copy = numpy.copy(self.traces)
        for i, tr in enumerate(traces_copy):
            traces_copy[i]['data'][:] = threshold(tr['data'], threshmin=tr['data'].max(), newval=0)
        nz = traces_copy['data'].nonzero()
    else:
        nz = self.traces['data'].nonzero()

    if pads is None:
        pads = generate_pad_plane()

    pcenters = pads.mean(1)

    xys = numpy.array([pcenters[self.traces[i]['pad']] for i in nz[0]])
    zs = nz[1].reshape(nz[1].shape[0], 1)
    cs = self.traces['data'][nz].reshape(nz[0].shape[0], 1)

    xyzs = numpy.hstack((xys, zs, cs))

    if drift_vel is not None and clock is not None:
        xyzs = calibrate(xyzs, drift_vel, clock)

    return xyzs
def pack_thresh(R, Th=0.3, PosOnly=0):
    '''
    A function to transform the correlation matrix to the upper diagonal only,
    and threshold with a given parameter. The resulting file compresses very
    compactly.

    Input parameters:
          R:        A full correlation matrix of size V x V.
          Th:       A threshold to eliminate small correlation values.
                    For positive elements, R > Th.
                    For negative elements, R < -Th (if PosOnly=0).
                    The default value of Th is 0.3.
          PosOnly:  A flag to indicate whether the thresholded correlation
                    matrix should only include positive values.
                        PosOnly=0:  Both positive and negative correlations
                                    are retained.
                        PosOnly=1:  Only positive correlations are retained.
                        PosOnly=-1: Only negative correlations are retained
                                    (with R < -Th).

    Returns:
          thuR_csr: The thresholded sparse correlation matrix.
    '''
    # upper triangle only
    uR = np.triu(R, 1)
    # thresholding
    if PosOnly == 0:
        thuRpos = stats.threshold(uR, threshmin=Th, newval=0)
        thuRneg = stats.threshold(uR, threshmax=-Th, newval=0)
        thuR = thuRpos + thuRneg
    elif PosOnly == 1:
        thuR = stats.threshold(uR, threshmin=Th, newval=0)
    elif PosOnly == -1:
        thuR = stats.threshold(uR, threshmax=-Th, newval=0)
    # sparse matrix
    thuR_csr = sparse.csr_matrix(thuR)
    return thuR_csr
def threshold_frame(frame, nstd=None):
    # Change the parameter to one's liking. Currently the low threshold value
    # is 3 std more than the mean.
    mean = int(frame.mean())
    std = int(frame.std())
    if nstd is None:
        nstd = 3
    low = max(0, mean + (nstd * std))
    high = int(frame.max())
    logging.debug("Thresholding at %s + %s * %s" % (mean, nstd, std))
    logging.debug("|- low, high = %s, %s" % (low, high))
    frame = stat.threshold(frame, low, high, newval=0)
    return to_grayscale(frame)
def thresh_com(x, y):
    """Threshold the results at the half-range level and find their centre of mass.

    :param x: The array of the last set of positions, sorted in ascending order.
    :param y: The brightness measurements corresponding to x.
    :return: The centre of mass in position and the thresholded brightness array."""
    thresh_level = np.mean([np.min(y), np.max(y)])
    thresh = stat.threshold(y, threshmin=thresh_level)
    normalisation = integrate.simps(thresh, x)
    numerator = integrate.simps(x * thresh, x)
    return numerator / normalisation, thresh
def collabStandardK(self, X, Xflip, K, lenoflist):
    # K = 10
    Y = dist.squareform(dist.pdist(Xflip, "jaccard"))
    # find the closest K rows
    Y = np.nan_to_num(Y)  # house keeping for na's
    U = np.argsort(Y)[:, :K]
    # average them
    out = np.mean(Xflip[U], 1)
    # threshold back to binary
    out = stats.threshold(out, threshmax=0.5, newval=1)
    out = stats.threshold(out, threshmin=0.49, newval=0)
    # plt.matshow(out, cmap='Greys_r')
    print("\nComputed RMS Errors from Collaborative filtering")
    print self.errors(X, out)
    print("\n\n")
    plt.title("CF - users")
    figstring = "outputs/CF_plot_" + str(lenoflist)
    plt.savefig(figstring)
    # plt.show()
    cnt = 0
    for i in X:
        for j in i:
            if (j == 1):
                cnt = cnt + 1
    print("Count of 1's in X: " + str(cnt))
    cnt = 0
    for i in out:
        for j in i:
            if (j == 1):
                cnt = cnt + 1
    print("Count of 1's in result: " + str(cnt))
def _findLocalMax(self):
    """Find the centers of particles by thresholding and dilating."""
    dilationKernel = im.makeCircularKernel(self._dilationRadius)
    maxed = []
    for image in self._morphed:
        # set pixels below morph thresh to 0
        threshed = stats.threshold(image, self._morphThreshold, newval=0.0)
        dilated = cv2.dilate(threshed, dilationKernel)
        # expThreshold is so named because the original algorithm
        # originally exponentiated and then thresholded, which is the same
        # as flipping the sign and exponentiating the threshold.
        binary = (dilated - threshed) >= self._expThreshold
        maxed.append(binary)
    self._maxed = maxed
def _findLocalMax(self):
    """Find the centers of particles by thresholding and dilating."""
    dilationKernel = im.makeCircularKernel(self._dilationRadius)
    self._maxed = im.createImageArray(self, "morphMax", dtype=np.bool,
                                      shape=self._morphed[0].shape,
                                      expectedrows=len(self._morphed))
    for image in self._morphed:
        # set pixels below morph thresh to 0
        threshed = stats.threshold(image, self._morphThreshold, newval=0.0)
        dilated = cv2.dilate(threshed, dilationKernel)
        # expThreshold is so named because the original algorithm
        # originally exponentiated and then thresholded, which is the same
        # as flipping the sign and exponentiating the threshold.
        binary = (dilated - threshed) >= self._expThreshold
        self._maxed.append([binary])
    self._maxed.flush()
def HRinst(dataset, secperunit=60, peak_threshold=98, filt=True):
    """
    Takes the input data of the time and voltage to convert it into an array
    with time and instantaneous heart rate.

    :param dataset: (tuple) Two elements, each a 1xN ndarray for time and
        voltage values respectively
    :param secperunit: (int or double) Conversion from unit of time ndarray
        to seconds
    :param peak_threshold: (double) percentage of maximum peak to set
        thresholding
    :param filt: (boolean) true if user wants to filter data, false if not
    :returns: (ndarray) 2 columns. First column with time in s, second column
        with heart rate in BPM. Each element in the ndarray is a float.
    """
    import numpy as np
    from scipy import stats

    time = dataset[:][0]
    voltage = dataset[:][1]
    if filt:
        voltage = data_filter(voltage)
    thresholded = stats.threshold(voltage, np.percentile(voltage, peak_threshold))
    hrinst = np.zeros(len(thresholded))
    is_increasing = np.roll(thresholded, 1) <= thresholded
    will_decrease = np.roll(thresholded, -1) < thresholded
    is_maximum = is_increasing * will_decrease
    peakInd = np.asarray(np.where(is_maximum))
    for i, val in enumerate(thresholded):
        peaks = peakInd[peakInd < i]
        if i > peakInd[0][1]:
            hrinst[i] = secperunit / (time[int(peaks[-1])] - time[int(peaks[-2])])
        else:
            hrinst[i] = 0
    hrinst[-1] = hrinst[-2]
    return hrinst
def isolate_depths(frame, expand=1.0, length=256, blur=None):
    """Isolate depths of a greyscale depth image

    frame: frame to analyse
    expand: amount to expand the range of the isolation
    length: possible values of pixels
    blur: amount to blur, None for none
    """
    if blur is not None:
        frame = cv2_medianBlur(frame, blur, frame)
    expand -= (expand - 1) / 2
    count = __get_bins(length)
    ranges = __get_depths_range(count).astype(float)
    ranges[:, 1] *= expand
    ranges[:, 0] /= expand
    ranges = ranges.astype(uint8)
    iso = [threshold(frame, start, stop - 1) for start, stop in ranges]
    if len(iso) == 0:
        return zeros_like(frame)
    return np_sum(iso, 0).astype(uint8)
def detectFeatures(grayImage):
    print 'Detecting features for image...'
    gaussImage = ndimage.filters.gaussian_filter(grayImage, 5)
    gradY, gradX = np.gradient(gaussImage)
    gradXX = gradX * gradX
    gradYY = gradY * gradY
    gradXY = gradX * gradY
    gauXX = ndimage.filters.gaussian_filter(gradXX, 5)
    gauYY = ndimage.filters.gaussian_filter(gradYY, 5)
    gauXY = ndimage.filters.gaussian_filter(gradXY, 5)
    # Harris corner response: det(M) - k * trace(M)^2 of the structure tensor
    Det = gauXX * gauYY - gauXY * gauXY
    Trace = gauXX + gauYY
    R = Det - k * Trace * Trace
    ThresR = stats.threshold(R, thresholdR)
    ThresRdilate = ndimage.grey_dilation(ThresR, footprint=[[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    ThresRpeak = ThresR > ThresRdilate
    features = ThresRpeak.nonzero()
    features = zip(features[1], features[0])  # (column, row), i.e. (x, y)
    print 'Detected', len(features), 'features'
    return features
def toImage(image_data):
    """
    @param image_data uint16 array containing intensities from binary files

    This function converts the intensity data from all files into high-res
    and low-res images.
    """

    """
    Plots the intensity data using matplotlib; faster but high resolution
    is lost
    """
    # Removes values below a threshold to make the background of the ring
    # images black
    image_data.shape = size
    plt.imshow(np.minimum(stats.threshold(image_data, threshmin=threshold, newval=0),
                          255 + 0 * image_data))
    # Specifies the color map; can be modified if you want!
    plt.hot()
    plt.grid()
    # Saves image to output directory
    plt.savefig(imDirectory + output + "-lo.png")

    """
    Plots the intensity map by converting intensities to RGB values; slower
    but has higher resolution
    """
    # Determines max intensity for the gradient
    maxI = np.max(image_data)
    # creates an mxnx3 array of zeros of type uint8; this array will store
    # the RGB values that will be converted to an image
    rgbArr = np.zeros((size[0], size[0], 3), dtype='uint8')
    sys.stdout.write("Converting to Image\n")
    for i in range(size[0]):
        for j in range(size[0]):
            # Converts intensity to pixel
            rgbArr[i, j] = toRGB(image_data[i][j], maxI)
    image = Image.fromarray(rgbArr, 'RGB')
    # Saves image to output directory provided
    image.save(imDirectory + output + ".png")