def top_five_freq(file_path):
    """Denoise a wav file and return its dominant peak frequencies (750-2000 Hz)."""
    denoise(file_path)
    file_path = 'denoise_file.wav'
    f = wave.open(file_path, 'rb')
    num = file_path[-5]
    params = f.getparams()
    nchannels, samplewidth, framerate, nframes = params[:4]
    str_data = f.readframes(nframes)
    f.close()
    wave_data = np.frombuffer(str_data, dtype=np.short)
    if nchannels == 2:
        wave_data = wave_data.reshape(-1, 2)
    else:
        wave_data = wave_data.reshape(-1, 1)
    wave_data = wave_data.T
    time = np.arange(0, nframes) * (1.0 / framerate)
    df = framerate / (nframes - 1)
    freq = [df * n for n in range(0, nframes)]
    transformed = np.fft.fft(wave_data[0])
    # Keep only the 250-4000 Hz band of the spectrum.
    d = int(len(transformed) / 2)
    while freq[d] > 4000 or freq[d] < 250:
        d -= 10
    freq = freq[:d]
    transformed = np.abs(transformed[:d])
    # Find the largest local maximum in each 700-bin window.
    local_partmax = []
    local_max = []
    for i in range(1, len(transformed), 700):
        for j in range(i, i + 700):
            if j >= len(transformed) - 1:
                continue
            if transformed[j] > transformed[j - 1] and transformed[j] > transformed[j + 1]:
                local_partmax.append(transformed[j])
        if local_partmax:
            local_max.append(max(local_partmax))
        local_partmax = []
    local_max = sorted(local_max)
    # Keep the strongest peaks whose frequency lies between 750 Hz and 2000 Hz.
    max_freq_array = []
    for i in range(1, 5):
        loc1 = np.where(transformed == local_max[-i])
        if 750 < freq[loc1[0][0]] < 2000:
            max_freq_array.append(freq[loc1[0][0]])
    return max_freq_array

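# A minimal usage sketch (not part of the original): 'sample_3.wav' is a hypothetical
# input path; it assumes denoise() writes 'denoise_file.wav' into the working directory,
# as in the function above.
if __name__ == '__main__':
    peaks = top_five_freq('sample_3.wav')
    print('Dominant peak frequencies between 750 Hz and 2000 Hz:', peaks)
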
def denoise_image(self):
    model = self.select_method_btn.currentText()
    self.noising_image.toImage().save('./tmp/tmp.jpg')
    t = time.time()
    denoise(model, './tmp/tmp.jpg')
    self.time = time.time() - t
    pxmap = QImage('./tmp/tmp.jpg')
    pxmap = pxmap.convertToFormat(QImage.Format_Grayscale8)
    self.denoising_img_pix = QPixmap.fromImage(pxmap)
    self.denoising_img_lbl.setPixmap(
        self.denoising_img_pix.scaled(512, 512, QtCore.Qt.KeepAspectRatio))
    self.show_stat_btn.setEnabled(True)
    self.save_btn.setEnabled(True)
    self.show_denoising_img_btn.setEnabled(True)

def estimate_covariance(input_images):
    # Number of images and pixels
    n = len(input_images)
    p = np.size(input_images[0])
    low = 0.75
    hi = 1.5  # what is this value
    noise_var = 1
    # Estimate the whitening filter.
    # For uncolored noise set W to be the identity.
    W = dn.estimate_whitening_filter(input_images)
    # Fourier transform each image and multiply by a random alpha.
    Ys = [random.uniform(low, hi) * hp.fft(x) for x in input_images]
    # "Whitened images"
    WYs = [W * x for x in Ys]  # Check if this is matrix mul or element-wise
    wys = [np.vectorize(x) for x in WYs]
    # Read in the point spread function and Fourier transform it.
    point_spread = ip.read_microscopy_spread()
    As = [hp.fft(x) for x in point_spread]
    mu, sigw = dn.denoise(wys, As)
    sig = np.linalg.inv(W) * sigw * np.linalg.inv(np.transpose(W))

    def H(i):
        A = sig * np.transpose(As[i]) * np.transpose(W)
        B = W * As[i] * sig * np.transpose(As[i]) * np.transpose(W)
        C = np.linalg.inv(B + noise_var * np.identity(p))
        return A * C

    Hs = [H(i) for i in range(0, n)]
    return Ys, mu, As, W, Hs

def main():
    print("Automatic Image Blur Detection/Removal and Improving Image Resolution")
    print("\n Converting to .png . . . .")
    imgTypeConv()
    print("\n Preprocessing in progress. <Converting Image to grayscale of size 256 x 256>")
    PreprocessImg()
    print("\n Check the destination folder for Resized Images")
    print("\n Calculating and Printing Brisque Score . . . .")
    brisqueCalc()
    print("\n Performing Denoising. . . . ")
    denoise()
    print("\n Detecting whether Image is Blurry or Not . . . .")
    blurDetector()
    print("\n Check the destination folder for Blur or Not . . . .")

def getimg(code):
    os.chdir(savedir)
    savelist = []
    times = []
    counter = 0
    starttime = timer()
    with requests.Session() as s:
        s.headers = headers
        for i in range(0, 36):
            start = timer()
            ticked = False
            filename = "%s %s.png" % (code, suffixnum[i])
            if not os.path.isfile(filename):
                resp = s.post(url, data=data)
                # r = requests.post(url, data=data, headers=headers)
                pic = resp.content
                if 12000 < len(pic) < 20000:
                    savelist.append((filename, pic))
                else:
                    # if len(pic) == 0:
                    #     win32api.MessageBox(0, 'replace session id', 'error :(', 0x00001000)
                    #     raise ValueError("Nothing returned, check session id?" % len(pic))
                    # Should this happen? If it cuts off halfway it should keep going?
                    #     break  # for loop: line 36
                    # else:
                    #     write contents into an error log: try bytes, then text.
                    #     errorlogname = time.time()
                    #     try:
                    #         with open() ...
                    win32api.MessageBox(0, 'didn\'t work', 'error :(', 0x00001000)
                    raise ValueError("File size is out of range: %s" % len(pic))
                counter += 1
                ticked = True
            end = timer()
            times.append(end - start)
            if not counter % 6 and counter * ticked:
                # prints every 6 if counter has ticked and > 0
                print("saved %s codes" % counter)
    endtime = timer()
    print('done (%s)' % code)
    # print('Total: %s. Min: %s Max: %s Avg: %s' % (endtime - starttime, min(times), max(times), sum(times) / len(times)))
    print('Total: {:.5f}. Avg: {:.5f}\nMin: {:.5f} Max: {:.5f}'.format(
        endtime - starttime, sum(times) / len(times), min(times), max(times)))
    win32api.MessageBox(0, 'do the captcha you sausage', 'pics saved', 0x00001000)

    import denoise
    for pic in savelist:
        filename = pic[0]
        picture = Image.open(io.BytesIO(pic[1]))
        picture = picture.convert("RGB")
        result = denoise.denoise(pic=picture, filename=filename)
        result = result.crop((0, 0, 150, 55))
        result.save(filename)
    if len(savelist) > 0:
        print('converted & saved')

def getCheckcode():
    """Use 'checkcode.jpg' and return the string it contains."""
    img = Image.open(checkcode_file)
    img.load()
    img = denoise.denoise(img)
    tmp = pytesseract.image_to_string(img, config="-psm 7 digits")
    ret = tmp
    number = "0123456789"
    for c in tmp:
        if c not in number:
            ret = ret.replace(c, '')
    return ret

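# A hedged usage sketch (not in the original): checkcode_file is assumed to already
# point at a freshly saved 'checkcode.jpg'; the return value contains only the digits
# Tesseract recognised.
if __name__ == '__main__':
    print('recognised check code:', getCheckcode())
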
clf = Ridge(alpha=alpha)
clf.fit(ChestPhantom, sinogram)
y = clf.predict(ChestPhantom)
img_recon = iradon(y, theta)
a = rrmse(img_recon, ChestPhantom)
print(a)
if a < val:
    param = alpha
    val = a
print(param)

# Part e
denoise(img_recon, ChestPhantom, alpha=0.0875, optimize_mode=False,
        prior='quadratic',
        save_results_dir=os.path.join(save_results_dir, 'quadratic', 'img_recon'))
print('RRMSE at 1.2 times optimum alpha=',
      round(
          denoise(img_recon, ChestPhantom, alpha=0.0875 * 1.2, optimize_mode=True,
                  prior='quadratic',
                  save_results_dir=os.path.join(save_results_dir, 'quadratic', 'img_recon')),
          5))
print('RRMSE at 0.8 times optimum alpha=',
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma3/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma4/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma5/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma6/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma7/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma8/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma9/MEI.bmp"))
tas.append(hm.hu_moments("/Users/abdurrahman/desktop/goruntu/tasatma10/MEI.bmp"))

cam = cv2.VideoCapture(0)
# Warm up the camera by reading a few frames.
i = 0
while i < 5:
    ret, frame = cam.read()
    i = i + 1
frame = denoise(frame)
frame = cv2.resize(frame, (0, 0), fx=1 / 3, fy=1 / 3)
if ret is True:
    backSubtractor = bs.BackGroundSubtractor(frame)
    run = True
else:
    run = False

counter = 1
boolMEI = False
while run:
    ret, frame = cam.read()
    if ret is True:
        frame = denoise(frame)
denoised_img = plt.imread('../data/mri_image_noiseless.png')
low_noise_img = plt.imread('../data/mri_image_noise_level_low.png')
med_noise_img = plt.imread('../data/mri_image_noise_level_medium.png')
high_noise_img = plt.imread('../data/mri_image_noise_level_high.png')

print('QUADRATIC PRIOR')
print('\n---------------------------------------------------------------------------------')
print('Denoising Low Noise Level Image...')
denoise(low_noise_img, denoised_img, alpha=0.0875, optimize_mode=False,
        prior='quadratic',
        save_results_dir=os.path.join(save_results_dir, 'quadratic', 'low_noise_level'))
print('RRMSE at 1.2 times optimum alpha=',
      round(
          denoise(low_noise_img, denoised_img, alpha=0.0875 * 1.2, optimize_mode=True,
                  prior='quadratic',
                  save_results_dir=os.path.join(save_results_dir, 'quadratic', 'low_noise_level')),
          5))
print('RRMSE at 0.8 times optimum alpha=',
image = low
if image_type == "med":
    image = med
if image_type == "high":
    image = high

rrmse_final_min = 100
beta_opt = 0
gamma_opt = 0
for i in range(10):
    for gamma in np.linspace(gamma_lower_bound, gamma_upper_bound, 10):
        for beta in np.linspace(beta_lower_bound, beta_upper_bound, 10):
            rrmse_final = denoise(image, denoised_img, beta=beta, gamma=gamma,
                                  optimize_mode=True, prior=function_type)
            if rrmse_final < rrmse_final_min:
                rrmse_final_min = rrmse_final
                beta_opt = beta
                gamma_opt = gamma
    # Shrink the search window around the current optimum before the next pass.
    gamma_lower_bound = max(gamma_opt - gamma_opt / 2, 0.00001)
    gamma_upper_bound = min(gamma_opt + gamma_opt / 2, 1.0)
    beta_upper_bound = min(beta_opt + beta_opt / 2, 1.0)
    beta_lower_bound = max(beta_opt - beta_opt / 2, 0.00001)
print(beta_opt)
print(gamma_opt)
print(image_type)

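# A sketch (not part of the original script) that factors the shrinking-interval search
# above into a reusable helper. It assumes the same denoise(..., optimize_mode=True, ...)
# call that returns an RRMSE value, and numpy imported as np.
def refine_beta_gamma(image, reference, prior, rounds=10, steps=10):
    b_lo, b_hi = 0.00001, 1.0
    g_lo, g_hi = 0.00001, 1.0
    best = (float('inf'), 0.0, 0.0)  # (rrmse, beta, gamma)
    for _ in range(rounds):
        for gamma in np.linspace(g_lo, g_hi, steps):
            for beta in np.linspace(b_lo, b_hi, steps):
                r = denoise(image, reference, beta=beta, gamma=gamma,
                            optimize_mode=True, prior=prior)
                if r < best[0]:
                    best = (r, beta, gamma)
        # Shrink the search window to +/- 50% around the current optimum.
        _, beta_opt, gamma_opt = best
        g_lo, g_hi = max(gamma_opt / 2, 0.00001), min(gamma_opt * 1.5, 1.0)
        b_lo, b_hi = max(beta_opt / 2, 0.00001), min(beta_opt * 1.5, 1.0)
    return best
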
def run():
    while True:
        trial = pull_pending()
        if trial is None:
            break

        logger.info("Starting - dataset: %s - feature: %s - clf: %s" %
                    (trial['Dataset'], trial['Feature'], trial['Classifier']))

        assert trial['Dataset'] in ['gtsrb', 'cifar10', 'stl10', 'mnist', 'feret']
        assert trial['Classifier'] in ['KNN', 'RFC', 'SVM', 'LDA']
        assert trial['Feature'] in ['sift', 'surf', 'hog', 'none']
        assert trial['Noise_Type'] in list(noise_params.keys()) + ['none', 'random']
        assert trial['Train_Noise'] in ['yes', 'no']

        scale = False
        if trial['Dataset'].startswith('feret'):
            (X_train_clean, y_train), (X_test_clean, y_test) = feret.load_data()
            scale = 0.25
        else:
            ds = eval(trial['Dataset'])
            (X_train_clean, y_train), (X_test_clean, y_test) = ds.load_training_data(), ds.load_test_data()

        noise_type, noise_level, train_noise = trial['Noise_Type'], trial['Noise_Level'], trial['Train_Noise']
        params = eval(trial['Parameters'])
        feature_params = {}
        denoise_params = None
        if 'feature_params' in params:
            feature_params = params['feature_params']
        if 'denoise_params' in params:
            denoise_params = params['denoise_params']

        if noise_type != 'none' and noise_level != 'none':
            if noise_type == 'random':
                noise_types = [np.random.choice(['sp', 'gauss', 'quantization']) for _ in X_test_clean]
                noise_levels = [np.random.choice(get_noise_params(n_type)) for n_type in noise_types]
            else:
                noise_types = [noise_type for _ in X_test_clean]
                if noise_level == 'random':
                    noise_range = get_noise_params(noise_type)
                    noise_levels = [np.random.choice(noise_range) for _ in X_test_clean]
                else:
                    noise_levels = [noise_level for _ in X_test_clean]

            X_test = []
            for img, noise_type, noise_level in zip(X_test_clean, noise_types, noise_levels):
                noisy = apply_noise(img, noise_type, noise_level)
                if denoise_params:
                    denoised = denoise(noisy, denoise_params[0], denoise_params[1])
                    # Retry denoising while the result contains NaN values.
                    if np.count_nonzero(np.isnan(denoised)) > 0:
                        i = 0
                        while np.count_nonzero(np.isnan(denoised)) > 0:
                            if i >= 1000:
                                logger.error(
                                    'Failed to denoise image with method: %s, %s. Noise type: %s, %s' %
                                    (denoise_params[0], denoise_params[1], noise_type, noise_level))
                                raise ValueError
                            denoised = denoise(noisy, denoise_params[0], denoise_params[1])
                            i += 1
                    X_test.append(denoised)
                else:
                    X_test.append(noisy)
            X_test = np.array(X_test)

            if train_noise == 'yes':
                if noise_type == 'random':
                    noise_types = [np.random.choice(['sp', 'gauss', 'quantization']) for _ in X_train_clean]
                    noise_levels = [np.random.choice(get_noise_params(n_type)) for n_type in noise_types]
                    X_train = np.array([
                        apply_noise(img, noise_type, noise_level)
                        for img, noise_type, noise_level in zip(X_train_clean, noise_types, noise_levels)
                    ])
                else:
                    if noise_level == 'random':
                        noise_range = get_noise_params(noise_type)
                        noise_levels = [np.random.choice(noise_range) for _ in X_train_clean]
                        X_train = np.array([
                            apply_noise(img, noise_type, noise_level)
                            for img, noise_level in zip(X_train_clean, noise_levels)
                        ])
                    else:
                        X_train = np.array([
                            apply_noise(img, noise_type, noise_level) for img in X_train_clean
                        ])
            else:
                X_train = X_train_clean
        else:
            X_train, X_test = X_train_clean, X_test_clean

        feature = trial['Feature']
        if feature == 'hog':
            if np.count_nonzero(np.isnan(X_train)) > 0 or np.count_nonzero(np.isnan(X_test)) > 0:
                print('NaN values found in dataset prior to feature extraction')
            X_train = get_hog(X_train, **feature_params)
            X_test = get_hog(X_test, **feature_params)
            if np.count_nonzero(np.isnan(X_train)) > 0 or np.count_nonzero(np.isnan(X_test)) > 0:
                print('NaN values found in hog descriptors')
        elif feature == 'none':
            X_train = get_pix(X_train, scale=scale)
            X_test = get_pix(X_test, scale=scale)

        assert len(X_train) == len(y_train), (len(X_train), len(y_train))
        assert len(X_test) == len(y_test), (len(X_test), len(y_test))

        clf_params = params['clf_params']
        clf = eval(trial['Classifier'])(**clf_params)
        clf.fit(X_train, y_train)
        predictions = clf.predict(X_test)
        score = metrics.accuracy_score(y_test, predictions)
        logger.info(
            "Finished - dataset: %s - feature: %s - clf: %s noise: (%s, %s) - score: %s" %
            (trial['Dataset'], trial['Feature'], trial['Classifier'],
             trial['Noise_Type'], trial['Noise_Level'], score))
        submit_result(trial, score)

    dest='do_segment',
    action='store_true')
args = parser.parse_args()

# generate filenames
raw_data_filename, spectrogram_filename = generate_filenames(args)
print('Save to {} & {}!'.format(raw_data_filename, spectrogram_filename))

figsize = (int(args.size_x), int(args.size_y))

if re.search('.*.bin', args.filename, re.IGNORECASE):  # EKG
    peak_indices, segment_indices = None, None
    ekg_raw, sampling_rates = get_ekg(args.filename)
    if args.do_denoise:
        ekg_raw = denoise.denoise(ekg_raw, 1000, number_channels=8)  # NOTE: fixed channel number
    if args.do_segment:
        import ecgseg
        ekg_signal = ekg_raw if args.do_denoise else denoise.denoise(ekg_raw, 1000, number_channels=8)
        peak_indices, segment_indices = ecgseg.predict('./2000-0.75.h5', ekg_signal)

    ekg_spectrograms = generate_spectrogram(ekg_raw, sampling_rates)
    save_fig(raw_data_filename, ekg_raw, grid=True,
             peak_indices=peak_indices, segment_indices=segment_indices, figsize=figsize)
    save_spectrogram_fig(spectrogram_filename, ekg_spectrograms, figsize=figsize)

elif re.search('.*.raw', args.filename, re.IGNORECASE):  # Heart Sound
    if args.do_segment:
        print('''--segment option is ignored, since it's specified for EKGs.''')
    start_s = convert_time_to_sec(args.start_time) if args.start_time else 0
    end_s = convert_time_to_sec(args.end_time) if args.end_time else np.inf

import numpy as np
import wave
from denoise import denoise
from matplotlib import pyplot as plt

denoise()
file_path = 'denoise_file.wav'
f = wave.open(file_path, 'rb')
num = file_path[-5]
params = f.getparams()
nchannels, samplewidth, framerate, nframes = params[:4]
str_data = f.readframes(nframes)
f.close()
wave_data = np.frombuffer(str_data, dtype=np.short)
if nchannels == 2:
    wave_data = wave_data.reshape(-1, 2)
else:
    wave_data = wave_data.reshape(-1, 1)
wave_data = wave_data.T
time = np.arange(0, nframes) * (1.0 / framerate)

plt.subplot(211)
plt.plot(time, wave_data[0], 'r-')
plt.xlabel('Time/s')
plt.ylabel('Amplitude')
plt.title('Num ' + num + ' time/amplitude')

df = framerate / (nframes - 1)
freq = [df * n for n in range(0, nframes)]
transformed = np.fft.fft(wave_data[0])

def grid(args):
    src, dest, init = args['src'], args['grid'], args['dst']
    grid = dest + ".m.npy"
    dest = dest + ".avi"
    cap = cv2.VideoCapture(src)
    ret, frame = cap.read()
    frame_h, frame_w, _ = frame.shape
    Pts = np.genfromtxt(init, delimiter=",")
    courtPts, framePts = Pts[:, :2], Pts[:, -2:]
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(dest, fourcc, 20.0, (frame_w, frame_h))
    tpl = template()
    num = 0
    closing = denoise(frame)
    M = cv2.getPerspectiveTransform(np.float32(courtPts), np.float32(framePts))
    Ms = [M]
    cRot = cv2.warpPerspective(tpl, M, (frame_w, frame_h))
    raw, blank, cRotc, blank2 = frame.copy(), cRot.copy(), frame.copy(), cRot.copy()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        closing = denoise(frame)
        # cv2.imshow('raw', frame)
        dstIdx = np.vstack(np.nonzero(closing)[::-1]).T
        # import ipdb; ipdb.set_trace()
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(dstIdx)
        converge = deque([], maxlen=5)
        cnt = True
        num_c = 0
        while cnt:
            cv2.warpPerspective(tpl, M, (frame_w, frame_h), dst=cRot)
            oKeys, nKeys = neighbors(cRot, dstIdx, nbrs, d=10)
            dm, ret = cv2.findHomography(oKeys, nKeys)
            M = np.dot(dm, M)
            cv2.bitwise_and(cRot, closing, dst=blank)
            score = np.sum(blank) / float(np.sum(cRot))
            converge.append(score)
            csum = np.sum(np.diff(np.array(converge)))
            if num_c > 500 or (csum < 0 and len(converge) > 1):
                cnt = False
            num_c += 1
        Ms.append(M)
        cv2.cvtColor(cRot, cv2.COLOR_GRAY2BGR, dst=cRotc)
        cRotc[:, :, 0] = 0
        cRotc[:, :, 2] = 0
        cv2.bitwise_or(cRotc, frame, dst=raw)
        cv2.putText(raw, "# %s %s" % (num, num_c), (30, 30), font, 1, (255, 255, 255), 1)
        cv2.putText(raw, "S %.2f %%" % (100 * score), (30, 70), font, 1, (255, 255, 255), 1)
        cv2.putText(raw, "D %.2f %%" % (100 * csum), (30, 110), font, 1, (255, 255, 255), 1)
        out.write(raw)
        # cRot = cv2.bitwise_not(cRot)
        # closing = cv2.bitwise_not(closing)
        cv2.imshow('frame', raw)
        k = cv2.waitKey(20) & 0xFF
        if k == ord('c'):
            break
        elif k == ord('q'):
            cnt = False
            cap.release()
            break
        while False:
            cv2.imshow('frame', raw)
            cv2.imshow('crot', cRot)
            cv2.imshow('closing', closing)
            k = cv2.waitKey(20) & 0xFF
            if k == ord('c'):
                break
            elif k == ord('q'):
                cnt = False
                cap.release()
                break
        num += 1

    out.release()
    cap.release()
    cv2.destroyAllWindows()
    if grid:
        np.save(grid, np.array(Ms))

# for gamma in np.linspace(0.001, 1, 40):
#     rrmse_final = denoise(noisy_img[:,:,2], denoised_img[:,:,2], alpha=beta, gamma=gamma,
#                           optimize_mode=True, prior='discontinuity_adaptive_huber')
#     if rrmse_final < rrmse_final_min:
#         rrmse_final_min = rrmse_final
#         beta_opt = beta
#         gamma_opt = gamma
# rrmse_init_channel = rrmse(denoised_img[:,:,2], noisy_img[:,:,2])
# rrmse_init_total = rrmse(denoised_img, noisy_img)
# print('rrmse_min = {}, beta_opt = {}, gamma_opt = {}'.format(rrmse_final_min, beta_opt, gamma_opt))

channel_0 = denoise(noisy_img[:, :, 0], denoised_img[:, :, 0], alpha=1, gamma=1,
                    optimize_mode=False, prior='discontinuity_adaptive_huber',
                    color_mode=True)
channel_1 = denoise(noisy_img[:, :, 1], denoised_img[:, :, 1], alpha=0.8974, gamma=0.02662,
                    optimize_mode=False, prior='discontinuity_adaptive_huber',
                    color_mode=True)
channel_2 = denoise(noisy_img[:, :, 2], denoised_img[:, :, 2], alpha=0.8718,
def sub_mapping(frame, tpl, num, courtPts=[], rawPts=[], players=[], matchedKey=[]):
    frame_h, frame_w, _ = frame.shape
    edgeRaw = cv2.Canny(frame, 100, 200)
    oldM = np.zeros((3, 3), dtype=np.float32)
    cRot = np.zeros_like(frame)
    cPlayers = []
    score = 0
    cv2.setMouseCallback('court', clickCourt, param=(tpl, num, courtPts, rawPts))
    cv2.setMouseCallback('dst', clickRaw, param=(frame, num, courtPts, rawPts, players))
    court, raw = tpl.copy(), frame.copy()
    dns = denoise(frame)
    dns = cv2.cvtColor(dns, cv2.COLOR_GRAY2BGR)
    nbKeys = []
    if len(matchedKey) > 0:
        X = np.array(matchedKey)
        dnsGray = dns[:, :, 0] + dns[:, :, 1] + dns[:, :, 2]
        dnsIdx = np.vstack(np.nonzero(dnsGray)[::-1]).T
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(dnsIdx)
        distances, indices = nbrs.kneighbors(X)
        nbKeys = dnsIdx[indices].reshape((-1, 2))
        dM = cv2.estimateRigidTransform(np.float32(X), np.float32(nbKeys), False)
        dM = np.vstack((dM, np.array([0, 0, 1])))
        cv2.warpPerspective(tpl, dM, (frame_w, frame_h), dst=cRot)
    dnsCopy = dns.copy()
    newKeys = []
    while 1:
        cv2.copyMakeBorder(tpl, 0, 0, 0, 0, 0, dst=court)
        cv2.copyMakeBorder(frame, 0, 0, 0, 0, 0, dst=raw)
        for (x, y) in POI:
            cv2.circle(court, (x, y), 2, (0, 0, 255), -1)
        # for (x, y) in HOOPS:
        #     cv2.circle(court, (x, y), 2, (0, 0, 255), -1)
        for idx, (x, y) in enumerate(courtPts):
            cv2.circle(court, (x, y), 5, COLORS[idx], 2)
        for idx, player in enumerate(players):
            cv2.ellipse(raw, player, (25, 15), 0, 0, 360, COLORS[idx], 2)
            cv2.circle(raw, player, 3, COLORS[idx], -1)
            cv2.line(raw, player, (player[0] + 25, player[1]), COLORS[idx], 2)
        for x, y in matchedKey:
            cv2.circle(dnsCopy, (x, y), 4, (0, 255, 0), 1)
        for x, y in nbKeys:
            cv2.circle(dnsCopy, (x, y), 6, (255, 0, 0), 1)
        if len(courtPts) == 4:
            M = cv2.getPerspectiveTransform(np.float32(courtPts), np.float32(rawPts))
            N = cv2.getPerspectiveTransform(np.float32(rawPts), np.float32(courtPts))
            if np.linalg.norm(M - oldM) > 0.01:
                print "call warpPerspective"
                cv2.warpPerspective(tpl, M, (frame_w, frame_h), dst=cRot)
                POIs = cv2.perspectiveTransform(np.float32(POI).reshape(-1, 1, 2), M).reshape((-1, 2)).astype(int)
                cv2.bitwise_and(cRot, dns, dst=dnsCopy)
                idx = (POIs[:, 1] < 720) * (POIs[:, 0] < 1280) * (POIs[:, 1] > 0) * (POIs[:, 0] > 0)
                newKeys = []
                for x, y in POIs[idx]:
                    # import pdb; pdb.set_trace()
                    if np.sum(dnsCopy[y, x]) > 0:
                        cv2.circle(dnsCopy, (x, y), 4, (0, 0, 255), 2)
                        newKeys.append((x, y))
                    else:
                        cv2.circle(dnsCopy, (x, y), 4, (0, 0, 32), 2)
                score = np.sum(dnsCopy) / float(np.sum(cRot))
            oldM = M
            cv2.addWeighted(cRot, 0.3, dns, 0.7, 0, dst=raw)
            if len(players) > 0:
                cPlayers = cv2.perspectiveTransform(np.float32(players).reshape(-1, 1, 2), N)
                cPlayers = cPlayers.reshape(-1, 2).astype(int)
        for idx, (x, y) in enumerate(rawPts):
            cv2.circle(raw, (x, y), 6, COLORS[idx], 3)
        for idx, player in enumerate(cPlayers):
            cv2.circle(court, tuple(player), 5, COLORS[idx], -1)
        cv2.putText(raw, "# %s" % num, (30, 30), font, 1, (255, 255, 255), 1)
        cv2.putText(raw, "S %.2f %%" % (100 * score), (30, 70), font, 1, (255, 255, 255), 1)
        cv2.putText(dnsCopy, "# %s" % num, (30, 30), font, 1, (255, 255, 255), 1)
        cv2.putText(dnsCopy, "S %.2f %%" % (100 * score), (30, 70), font, 1, (255, 255, 255), 1)
        cv2.imshow('dst', raw)
        cv2.imshow('court', court)
        k = cv2.waitKey(20) & 0xFF
        if k == ord('c'):
            print courtPts, rawPts
            return courtPts, rawPts
        elif k == ord('q'):
            return

function_type = sys.argv[1]
denoised_img = cv2.cvtColor(plt.imread('../data/histology_noiseless.png'), cv2.COLOR_BGR2HSV)
noisy_img = cv2.cvtColor(plt.imread('../data/histology_noisy.png'), cv2.COLOR_BGR2HSV)

for j in range(3):
    gamma_upper_bound = 1
    gamma_lower_bound = 0.00001
    alpha_upper_bound = 1
    alpha_lower_bound = 0.00001
    rrmse_final_min = 100
    alpha_opt = 0
    gamma_opt = 0
    for i in range(10):
        for gamma in np.linspace(gamma_lower_bound, gamma_upper_bound, 10):
            for alpha in np.linspace(alpha_lower_bound, alpha_upper_bound, 10):
                rrmse_final = denoise(noisy_img[:, :, j], denoised_img[:, :, j],
                                      alpha=alpha, gamma=gamma,
                                      optimize_mode=True, prior=function_type)
                if rrmse_final < rrmse_final_min:
                    rrmse_final_min = rrmse_final
                    alpha_opt = alpha
                    gamma_opt = gamma
        # Shrink the search window around the current optimum before the next pass.
        gamma_lower_bound = max(gamma_opt - gamma_opt / 2, 0.00001)
        gamma_upper_bound = min(gamma_opt + gamma_opt / 2, 1.0)
        alpha_upper_bound = min(alpha_opt + alpha_opt / 2, 1.0)
        alpha_lower_bound = max(alpha_opt - alpha_opt / 2, 0.00001)
    print(alpha_opt)
    print(gamma_opt)
    import pdb; pdb.set_trace()