def get_best_miner_and_algorithm(self, device):
    hashrates = calibration.Calibration().get_hashrates(device)
    if hashrates == None:
        return [None, None, None, None]

    best_hashrates = calibration.Calibration().get_best_algorithm_benchmarks(hashrates)

    if self.algorithm not in best_hashrates.keys():
        return [None, None, None, None]

    payrates = self.mbtc_per_day(best_hashrates)
    payrates[None] = pools.Pools().deduct_power_from_payrates(device, payrates[None])

    best_miner = device.get_best_miner_for_algorithm(self.algorithm, self.supported_miners)
    if best_miner == None:
        return [None, None, None, None]

    return [best_miner, self.algorithm, None, payrates[None][self.algorithm]]
def get_best_miner_and_algorithm(self, device):
    hashrates = calibration.Calibration().get_hashrates(device)
    if hashrates == None:
        return [None, None, None, None]

    best_hashrates = calibration.Calibration().get_best_algorithm_benchmarks(hashrates)

    payrates = self.mbtc_per_day(best_hashrates)
    for region in payrates.keys():
        payrates[region] = pools.Pools().deduct_power_from_payrates(device, payrates[region])

    best_rate = 0
    best_algo = None
    best_region = None
    best_miner = None

    if Config().get('pools.nicehash.primary_region') == 'usa':
        regions = ["usa", "eu"]
    else:
        regions = ["eu", "usa"]

    for region in regions:
        for algo in payrates[region].keys():
            if payrates[region][algo] > best_rate:
                best_rate = payrates[region][algo]
                best_algo = algo
                best_region = region

    if best_algo == 'equihash':
        supported_miners = ['excavator', 'ewbf']
    else:
        supported_miners = self.supported_miners

    best_miner = device.get_best_miner_for_algorithm(best_algo, supported_miners)

    if best_miner == None:
        best_rate = 0
        best_algo = None
        best_region = None

    if payrates['eu'] == {} and payrates['usa'] == {}:
        device.log('warning', 'no nicehash payrate information - defaulting to %s' %
                   (Config().get('pools.nicehash.default_algorithm')))
        best_algo = Config().get('pools.nicehash.default_algorithm')
        best_region = Config().get('pools.nicehash.primary_region')

    return [best_miner, best_algo, best_region, best_rate]
def __init__(self):
    self.calibrator = cal.Calibration()
    self.suns, self.dates = unzip(self.calibrator.u.get_sun())
    self.center = 0
    self.curve = np.array([])
    self.light_curve()
    self.background_light = self.generate_light_noise()
def __init__(self, runs=100, datacount=10000, noSave=False):
    self.datacount = datacount
    for testType in TEST_TYPES:
        print('Calibrating Independence Test: ', testType)
        cal = calibration.Calibration(testType)
        self.origThreshold = eval(testType + '.DEPENDENCE_THRESHOLD')
        self.currThreshold = self.origThreshold
        prevDirection = 0
        #scale = .1
        flipFlops = 2
        zeroCount = 0
        for i in range(runs):
            self.testCount = 0
            self.err1Count = 0  # Type 1 Error. Should be dependent, but was found independent
            self.err2Count = 0  # Type 2 Error. Should be independent, but was found dependent
            for filePath in FILES:
                self.calibrateOneCITest(testType, filePath)
            errors = self.err1Count + self.err2Count
            direction = self.err2Count - self.err1Count
            print('Score = ', (1 - (errors / self.testCount)) * 100, '% ',
                  self.err1Count, self.err2Count, direction)
            if errors == 0:
                print('No Errors -- Perfect Calibration')
                zeroCount += 1
                if zeroCount > ERROR_FREE_RUNS:
                    break
            else:
                zeroCount = 0
            if flipFlops > 16:
                break
            if (prevDirection > 0 and direction < 0) or (prevDirection < 0 and direction > 0):
                flipFlops += 1
                #print('flipFlops = ', flipFlops, prevDirection, direction)
            if direction < 0:
                adjustment = -self.origThreshold * 2**-(flipFlops)
            elif direction > 0:
                adjustment = self.origThreshold * 2**-(flipFlops)
            else:
                print('Errors are balanced')
                adjustment = 0.0
            if adjustment != 0:
                self.currThreshold += adjustment
                print('Adjusting threshold by ', adjustment, 'to', self.currThreshold)
                exec(testType + '.DEPENDENCE_THRESHOLD = ' + str(self.currThreshold))
                cal.set('DEPENDENCE_THRESHOLD', str(self.currThreshold))
                #cal.save()
            prevDirection = direction
        if not noSave:
            cal.save()
        print('Finished Calibration for ', testType)
    return
def new(self):
    self.testReceptionPage.destroy()
    self.configurationPage.destroy()
    self.calibrationPage.destroy()
    self.autrePage.destroy()
    self.testReceptionPage = tR.TestReception(fenetre_principale=self)
    self.testReceptionPage.pack()
    self.configurationPage = conf.Configuration(fenetre_principale=self)
    self.calibrationPage = calib.Calibration(fenetre_principale=self)
    self.autrePage = autr.Autres(fenetre_principale=self)
def get_best_miner_and_algorithm(self, device):
    hashrates = calibration.Calibration().get_hashrates(device)
    if hashrates == None:
        return [None, None, None, None]

    best_hashrates = calibration.Calibration().get_best_algorithm_benchmarks(hashrates)

    payrates = self.mbtc_per_day(best_hashrates)
    payrates[None] = pools.Pools().deduct_power_from_payrates(device, payrates[None])

    best_rate = 0
    best_algo = None
    best_miner = None

    for algo in payrates[None]:
        if payrates[None][algo] > best_rate:
            best_rate = payrates[None][algo]
            best_algo = algo

    best_miner = device.get_best_miner_for_algorithm(best_algo, self.supported_miners)

    if best_miner == None:
        best_rate = 0
        best_algo = None
        best_region = None

    if payrates[None] == {}:
        device.log('warning', 'no miningpoolhub payrate information - defaulting to %s' %
                   (Config().get('pools.miningpoolhub.default_algorithm')))
        best_algo = Config().get('pools.miningpoolhub.default_algorithm')
        best_region = Config().get('pools.miningpoolhub.primary_region')

    return [best_miner, best_algo, None, best_rate]
def __init__(self):
    Tk.__init__(self)
    self.title('Smoke Detector Maintenance')
    self.resizable(0, 0)
    #self.wm_attributes('-type','splash')
    self.configure(bg=bgColor)
    self.geometry("1170x870")
    self.protocol("WM_DELETE_WINDOW", self.quit)
    x = (self.winfo_screenwidth() - self.winfo_reqwidth()) / 50
    y = (self.winfo_screenheight() - self.winfo_reqheight()) / 50
    self.geometry("+%d+%d" % (x, y))
    ####
    self.menu()
    ###
    self.testReceptionPage = tR.TestReception(fenetre_principale=self)
    self.testReceptionPage.pack()
    self.configurationPage = conf.Configuration(fenetre_principale=self)
    self.calibrationPage = calib.Calibration(fenetre_principale=self)
    self.autrePage = autr.Autres(fenetre_principale=self)
def _createMasters(self, zero='yes', dark='yes', flat='yes'):
    """Calls the calibration module to create master calibration images.

    zero, dark, and flat default to 'yes', but can be set to 'no' if any of
    those masters are not desired.
    """
    # Convert the 'yes'/'no' flags to booleans explicitly instead of rebinding
    # True/False as the original did, which is invalid in Python 3 (and the
    # string 'no' is truthy anyway).
    zero = (zero == 'yes')
    dark = (dark == 'yes')
    flat = (flat == 'yes')
    while (1):
        if not (zero or dark or flat):
            break
        masters = calibration.Calibration(self.reduction_dir)
        reduction_tools.makeDir(self.reduction_dir + 'Masters/')
        if zero:
            masters.makeZero()
        if dark:
            masters.makeDark()
        if flat:
            masters.makeFlat()
        break
def refresh(self):
    for pool_name in self.pools.keys():
        if not self.pools[pool_name].refresh_data():
            Log().add('warning', 'failed to refresh data for pool: %s' % (pool_name))

    if Config().get('stats.enable'):
        best_hashrates = calibration.Calibration().get_best_hashrates()

        payrates = {}

        for pool_name in self.pools.keys():
            pool_rates = self.pools[pool_name].mbtc_per_day(best_hashrates)
            for region in pool_rates.keys():
                for algorithm in pool_rates[region].keys():
                    if not algorithm in payrates.keys() or pool_rates[region][algorithm] > payrates[algorithm]:
                        payrates[algorithm] = pool_rates[region][algorithm]

        if payrates != self.payrates:
            Stats().log_algorithms(payrates)

    self.update_exchange_rate()
def test_gt_flow():
    import calibration

    plt.close('all')

    cal = calibration.Calibration("indoor_flying")
    gtf = Flow(cal)

    p0 = np.array([0., 0., 0.])
    q0 = quaternion(1.0, 0.0, 0.0, 0.0)
    depth = 10. * np.ones((cal.left_map.shape[0], cal.left_map.shape[1]))

    V, Omega = gtf.compute_velocity(p0, q0, p0, q0, 0.1)
    x, y = gtf.compute_flow_single_frame(V, Omega, depth, 0.1)
    fig = plt.figure()
    gtf.visualize_flow(x, y, fig)

    p1 = np.array([0., 0.25, 0.5])
    q1 = quaternion(1.0, 0.0, 0.0, 0.0)
    V, Omega = gtf.compute_velocity(p0, q0, p1, q1, 0.1)
    print V, Omega
    x, y = gtf.compute_flow_single_frame(V, Omega, depth, 0.1)
    fig = plt.figure()
    gtf.visualize_flow(x, y, fig)

    p1 = np.array([0., -0.25, 0.5])
    q1 = quaternion(1.0, 0.0, 0.0, 0.0)
    V, Omega = gtf.compute_velocity(p0, q0, p1, q1, 0.1)
    print V, Omega
    x, y = gtf.compute_flow_single_frame(V, Omega, depth, 0.1)
    fig = plt.figure()
    gtf.visualize_flow(x, y, fig)
def newCalibration(self):
    self.calNamelist = []
    self.calFileobj = []
    self.calCameraID = []
    self.fullPath = []
    self.calFileobj = QFileDialog.getOpenFileNames(self, "Video files", self.path,
                                                   filter="Text Files (*.mp4)")
    for i in range(len(self.calFileobj)):
        self.pathThrow, self.calFilename = os.path.split(os.path.abspath(self.calFileobj[i]))
        self.fullPath.append(os.path.abspath(self.calFileobj[i]))
        slicestr = self.calFilename[0:6]
        self.calCameraID = self.calFilename[0:2]
        self.calNamelist.append(slicestr)
        self.cal_LW.addItem(self.calNamelist[i])
    self.toAppend_calList = [calibration.Calibration(self.calNamelist[i], self.path, self.fullPath[i], self.cal_TE)
                             for i in range(len(self.calNamelist))]
    if len(self.master_calList) == 0:
        self.master_calList = self.toAppend_calList
    else:
        self.master_calList = self.master_calList + self.toAppend_calList
N = np.arange(1, N_zern + 1)
s = 20
colors = cm.Reds(np.linspace(0.5, 1.0, N_crop))
SNRs = [1e4, 1e3, 250, 100]
fig, axes = plt.subplots(2, 2)
for j in range(4):
    snr = SNRs[j]
    ax = axes.flatten()[j]
    stds_crop = np.zeros((N_crop, N_zern))
    for k, crop_pix in enumerate(crop_pixels):

        # modify the crop_pix attribute to get a proper input shape
        PSF_zernike.crop_pix = crop_pix
        calib_zern = calibration.Calibration(PSF_model=PSF_zernike)
        calib_zern.create_cnn_model(layer_filters, kernel_size, name='NOM_ZERN', activation='relu')

        # crop the PSF images
        train_PSF_crop = crop_images(train_PSF, crop_pix=crop_pix)
        test_PSF_crop = crop_images(test_PSF, crop_pix=crop_pix)

        # train and test the models on the crop images
        losses = calib_zern.train_calibration_model(train_PSF_crop, train_coef, test_PSF_crop, test_coef,
def main(dataset_folder, temporal_folder, experiment):
    """Run the script.

    Args:
        dataset_folder : specifies the output folder.
        experiment : tuple (<experiment name>, <sequence number>) that specifies
                     which experiment to download. We use sequences 1, 2, 3, 4
                     of the indoor_flying experiment.
        temporal_folder : specifies the folder where the original dataset will
                          be placed.
    """
    if not _is_experiment_correctly_defined(experiment):
        raise ValueError('"experiments" are not correctly defined.')
    dataset_folder = os.path.abspath(dataset_folder)
    temporal_folder = os.path.abspath(temporal_folder)
    _make_if_does_not_exist(temporal_folder)
    _make_if_does_not_exist(dataset_folder)
    downloader.TMP_FOLDER = temporal_folder
    experiment_name, experiment_number = experiment
    paths = dataset_constants.experiment_paths(experiment_name, experiment_number,
                                               dataset_folder)
    dataset_constants.create_folders(paths)
    calibration_data = calibration.Calibration(experiment_name)
    data_path = downloader.get_data(experiment_name, experiment_number)[0]
    data_bag = bag_indexer.get_bag_indexer(data_path)
    gt_bag_path = downloader.get_ground_truth(experiment_name, experiment_number)[0]
    gt_bag = bag_indexer.get_bag_indexer(gt_bag_path)
    depth_topic_reader = gt_bag.get_topic_reader(TOPICS['depth'])
    focal_length_x_baseline = calibration_data.intrinsic_extrinsic['cam1'][
        'projection_matrix'][0][3]
    synchronization_timestamps = []
    for index, depth_message in enumerate(depth_topic_reader):
        depth_image, timestamp = _load_image_message(depth_message)
        disparity_image = _depth2disparity(depth_image, focal_length_x_baseline)
        disparity_path = paths['disparity_file'] % index
        cv2.imwrite(disparity_path, disparity_image)
        synchronization_timestamps.append(timestamp)
    np.savetxt(paths['timestamps_file'],
               np.array(synchronization_timestamps),
               fmt='%f',
               header="timestamp")
    distorted_to_rectified = {
        'cam0': calibration_data.left_map,
        'cam1': calibration_data.right_map
    }
    for camera in ['cam0', 'cam1']:
        rectified_to_distorted_x, rectified_to_distorted_y = \
            _rectification_map(calibration_data.intrinsic_extrinsic[camera])
        image_size = calibration_data.intrinsic_extrinsic[camera]['resolution']
        events_topic_reader = data_bag.get_topic_reader(TOPICS[camera]['events'])
        images_topic_reader = data_bag.get_topic_reader(TOPICS[camera]['image'])
        event_bags_timestamps = _get_bags_timestamps(events_topic_reader)
        image_bags_timestamps = _get_bags_timestamps(images_topic_reader)
        for synchronization_index, synchronization_timestamp in enumerate(
                synchronization_timestamps):
            synchronized_events = _get_synchronized_events(
                synchronization_timestamp, event_bags_timestamps,
                events_topic_reader)
            rectified_synchronized_events = _rectify_events(
                synchronized_events, distorted_to_rectified[camera], image_size)
            events_path = paths[camera]['event_file'] % synchronization_index
            np.save(events_path, np.array(rectified_synchronized_events))
            synchronized_image = _get_synchronized_image(
                synchronization_timestamp, image_bags_timestamps,
                images_topic_reader)
            rectified_synchronized_image = cv2.remap(synchronized_image,
                                                     rectified_to_distorted_x,
                                                     rectified_to_distorted_y,
                                                     cv2.INTER_LINEAR)
            image_path = paths[camera]['image_file'] % synchronization_index
            cv2.imwrite(image_path, rectified_synchronized_image)
def setup_class(self):
    self.c = calibration.Calibration()
directory = os.path.join(os.getcwd(), 'Multiwave')
np.save(os.path.join(directory, 'train_PSF'), train_PSF)
np.save(os.path.join(directory, 'train_coef'), train_coef)
np.save(os.path.join(directory, 'test_PSF'), test_PSF)
np.save(os.path.join(directory, 'test_coef'), test_coef)

train_PSF = np.load(os.path.join(directory, 'train_PSF.npy'))
train_coef = np.load(os.path.join(directory, 'train_coef.npy'))
test_PSF = np.load(os.path.join(directory, 'test_PSF.npy'))
test_coef = np.load(os.path.join(directory, 'test_coef.npy'))

utils.show_PSF_multiwave(train_PSF)
plt.show()

# Train the Calibration Model with the complete Wavelength Datacube
calib = calibration.Calibration(PSF_model=PSFs)
calib.create_cnn_model(layer_filers, kernel_size, name='CALIBR', activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR], readout_copies=readout_copies)

RMS_evolution, residual = calib.calibrate_iterations(test_PSF, test_coef, wavelength=WAVE, N_iter=N_iter,
                                                     readout_noise=True, RMS_readout=1. / SNR)

MU, STD = np.mean(RMS_evolution[-1][-1]), np.std(RMS_evolution[-1][-1])

calib.plot_RMS_evolution(RMS_evolution)
plt.show()

### ============================================================================================================ ###
#                               Does the number of channels we use matter?
# Let's train several models with different number of wavelength channels
# Implements the independence test of Garcia and Gonzales-Lopez (2009)
# Test is based on longest increasing sequence and longest decreasing sequence
import numpy as np
import math
from scipy.stats.distributions import norm
import random

from debug import *

maxTries = 20
maxData = 1000000

import calibration
cal = calibration.Calibration('IndLn')
DEPENDENCE_THRESHOLD = float(cal.get('DEPENDENCE_THRESHOLD', '.5'))


def isIndependent(X, Y):
    result = False
    score = scoreDependence(X, Y)
    #print('score = ', score, len(X))
    if score < DEPENDENCE_THRESHOLD:
        result = True
    return result


def scoreDependence(X, Y):
    XY = np.array([X[:maxData], Y[:maxData]]).T
    # Sort the array by its X value and create an index Xi
    Xi = XY[:, 0].argsort()
    # Rearrange the sample pairs by Xi
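The header comment of the snippet above names the Garcia and Gonzales-Lopez test, which scores dependence from the longest increasing and longest decreasing subsequences of the Y values once the pairs have been sorted by X, but the excerpt cuts off before that step. A minimal sketch of the standard O(n log n) patience-sorting LIS length is shown below; the helper name lis_length is hypothetical and not part of the original module.

# Hypothetical helper, not part of the snippet above: longest strictly
# increasing subsequence length via patience sorting.
import bisect

def lis_length(seq):
    """Return the length of the longest strictly increasing subsequence of seq."""
    tails = []  # tails[k] = smallest possible tail of an increasing subsequence of length k + 1
    for value in seq:
        pos = bisect.bisect_left(tails, value)
        if pos == len(tails):
            tails.append(value)   # extends the longest subsequence seen so far
        else:
            tails[pos] = value    # found a smaller tail for length pos + 1
    return len(tails)

# The longest decreasing subsequence is the LIS of the negated sequence:
# lds = lis_length([-v for v in seq])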
# This is the basis of Fig 10.34 of AR4, and in (c) they have pretty much levelled off by year 3000
# Bern2D-CC 0.488; CLIMBER-2 0.458; CLIMBER-3a 0.200; MIT 0.214; MoBidiC 0.626; UCL 0.386
alpha_te = np.array([0.488, 0.458, 0.200, 0.214, 0.626, 0.386])
sl_contributor = cf.thermal_expansion
te_params = {}
# te_condensed = np.array([])
for obs_te in cs.thermexp_observations:
    print obs_te
    observation_period = cs.observation_period["thermexp"][obs_te]
    temp_anomaly_year = cs.temp_anomaly_year["thermexp"][obs_te]
    sl_observation = cs.thermexp_observations[obs_te]
    calib = calibration.Calibration(gmt, "thermexp", sl_observation, alpha_te,
                                    sl_contributor, observation_period,
                                    temp_anomaly_year)
    calib.calibrate()
    te_params[obs_te] = calib

outfile = calibdatadir + "thermexp.pkl"
pickle.dump({"params": te_params}, open(outfile, "wb"), protocol=2)


##### Glaciers and ice caps #####

if "gic" in calibrate_these:

    sl_contributor = cf.glaciers_and_icecaps
    gic_modelno = np.arange(len(sl.gic_equi_functions))
    gic_anth_params = {}
N_zern = zernike_matrix.shape[-1]

zernike_matrices = [zernike_matrix, pupil_mask_zernike, flat_zernike]
PSF_zernike = psf.PointSpreadFunction(matrices=zernike_matrices, N_pix=N_PIX,
                                      crop_pix=pix, diversity_coef=np.zeros(zernike_matrix.shape[-1]))

defocus_zernike = np.zeros(zernike_matrix.shape[-1])
defocus_zernike[1] = diversity / (2 * np.pi)
PSF_zernike.define_diversity(defocus_zernike)

# C
train_PSF, train_coef, test_PSF, test_coef = calibration.generate_dataset(PSF_zernike, N_train, N_test,
                                                                          coef_strength, rescale)

# Train the Calibration Model on images with the nominal defocus
epochs = 10
calib_zern = calibration.Calibration(PSF_model=PSF_zernike)
calib_zern.create_cnn_model(layer_filters, kernel_size, name='NOM_ZERN', activation='relu')
losses = calib_zern.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                            N_loops=1, epochs_loop=epochs, verbose=1, batch_size_keras=32,
                                            plot_val_loss=False, readout_noise=False,
                                            RMS_readout=[1. / SNR], readout_copies=0)

# Now we test the performance on an anamorphic error
ratio = 1.10
zernike_matrix_anam, pupil_mask_zernike_anam, flat_zernike_anam = psf.zernike_matrix(N_levels=N_levels,
                                                                                     rho_aper=RHO_APER,
                                                                                     rho_obsc=RHO_OBSC,
                                                                                     N_PIX=N_PIX,
                                                                                     radial_oversize=1.0,
                                                                                     anamorphic_ratio=ratio)
zernike_matrices_anam = [zernike_matrix_anam, pupil_mask_zernike_anam, flat_zernike_anam]
PSF_zernike_anam = psf.PointSpreadFunction(matrices=zernike_matrices_anam, N_pix=N_PIX,
                                           crop_pix=pix, diversity_coef=np.zeros(zernike_matrix.shape[-1]))
def experiment_flow(experiment_name, experiment_num, save_movie=True, save_numpy=True,
                    start_ind=None, stop_ind=None):
    if experiment_name == "motorcycle":
        print "The motorcycle doesn't have lidar and we can't compute flow without it"
        return

    import time
    import calibration
    cal = calibration.Calibration(experiment_name)
    import ground_truth
    gt = ground_truth.GroundTruth(experiment_name, experiment_num)

    flow = Flow(cal)

    P0 = None

    nframes = len(gt.left_cam_readers['/davis/left/depth_image_raw'])
    if stop_ind is not None:
        stop_ind = min(nframes, stop_ind)
    else:
        stop_ind = nframes

    if start_ind is not None:
        start_ind = max(0, start_ind)
    else:
        start_ind = 0

    nframes = stop_ind - start_ind

    depth_image, _ = gt.left_cam_readers['/davis/left/depth_image_raw'](0)
    flow_shape = (nframes, depth_image.shape[0], depth_image.shape[1])
    x_flow_dist = np.zeros(flow_shape, dtype=np.float)
    y_flow_dist = np.zeros(flow_shape, dtype=np.float)
    timestamps = np.zeros((nframes, ), dtype=np.float)
    Vs = np.zeros((nframes, 3), dtype=np.float)
    Omegas = np.zeros((nframes, 3), dtype=np.float)
    dTs = np.zeros((nframes, ), dtype=np.float)
    ps = np.zeros((nframes, 3), dtype=np.float)
    qs = np.zeros((nframes, 4), dtype=np.float)

    sOmega = np.zeros((3, ))
    sV = np.zeros((3, ))

    print "Extracting velocity"
    for frame_num in range(nframes):
        P1 = gt.left_cam_readers['/davis/left/odometry'][frame_num + start_ind].message
        if P0 is not None:
            V, Omega, dt = flow.compute_velocity_from_msg(P0, P1)
            Vs[frame_num, :] = V
            Omegas[frame_num, :] = Omega
            dTs[frame_num] = dt

        timestamps[frame_num] = P1.header.stamp.to_sec()
        tmp_p, tmp_q, _ = flow.p_q_t_from_msg(P1)
        ps[frame_num, :] = tmp_p
        # Store the full quaternion; the original wrote all four components
        # into column 0, which looks like an indexing bug.
        qs[frame_num, 0] = tmp_q.w
        qs[frame_num, 1] = tmp_q.x
        qs[frame_num, 2] = tmp_q.y
        qs[frame_num, 3] = tmp_q.z

        P0 = P1

    filter_size = 10
    smoothed_Vs = Vs
    smoothed_Omegas = Omegas

    print "Computing flow"
    for frame_num in range(nframes):
        depth_image = gt.left_cam_readers['/davis/left/depth_image_raw'][frame_num + start_ind]
        depth_image.acquire()

        if frame_num - filter_size < 0:
            V = np.mean(Vs[0:frame_num + filter_size + 1, :], axis=0)
            Omega = np.mean(Omegas[0:frame_num + filter_size + 1, :], axis=0)
        elif frame_num + filter_size >= nframes:
            V = np.mean(Vs[frame_num - filter_size:nframes, :], axis=0)
            Omega = np.mean(Omegas[frame_num - filter_size:nframes, :], axis=0)
        else:
            V = np.mean(Vs[frame_num - filter_size:frame_num + filter_size + 1, :], axis=0)
            Omega = np.mean(Omegas[frame_num - filter_size:frame_num + filter_size + 1, :], axis=0)
        dt = dTs[frame_num]

        smoothed_Vs[frame_num, :] = V
        smoothed_Omegas[frame_num, :] = Omega

        flow_x_dist, flow_y_dist = flow.compute_flow_single_frame(V, Omega, depth_image.img, dt)
        x_flow_dist[frame_num, :, :] = flow_x_dist
        y_flow_dist[frame_num, :, :] = flow_y_dist

        depth_image.release()

    import downloader
    import os
    base_name = os.path.join(downloader.get_tmp(), experiment_name,
                             experiment_name + str(experiment_num))

    if save_numpy:
        print "Saving numpy"
        numpy_name = base_name + "_gt_flow_dist.npz"
        np.savez(numpy_name, timestamps=timestamps, x_flow_dist=x_flow_dist, y_flow_dist=y_flow_dist)
        numpy_name = base_name + "_odom.npz"
        np.savez(numpy_name, timestamps=timestamps, lin_vel=smoothed_Vs, ang_vel=smoothed_Omegas,
                 pos=ps, quat=qs)

    if save_movie:
        print "Saving movie"
        import matplotlib.animation as animation
        plt.close('all')
        fig = plt.figure()

        first_img = flow.colorize_image(x_flow_dist[0], y_flow_dist[0])
        im = plt.imshow(first_img, animated=True)

        def updatefig(frame_num, *args):
            im.set_data(flow.colorize_image(x_flow_dist[frame_num],
                                            y_flow_dist[frame_num]))
            return im,

        ani = animation.FuncAnimation(fig, updatefig, frames=len(x_flow_dist))
        movie_path = base_name + "_gt_flow.mp4"
        ani.save(movie_path)
        plt.show()

    return x_flow_dist, y_flow_dist, timestamps, Vs, Omegas
                                        diversity_coef=diversity_actuators)
plt.show()

# ================================================================================================================ #
#                                 Machine Learning | Single Calibration Model
# ================================================================================================================ #

# Let us begin with a baseline design. One calibration model with No Dropout or anything fancy

# Generate training and test datasets (clean PSF images)
train_PSF, train_coef, test_PSF, test_coef = calibration.generate_dataset(PSF_actuators, N_train, N_test,
                                                                          coef_strength, rescale)

# Single Calibration Model || Baseline design
calib = calibration.Calibration(PSF_model=PSF_actuators)
calib.create_cnn_model(layer_filters, kernel_size, name='SINGLE_MODEL', activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR],
                                        PSF_zernike, coef_strength, rescale)

train_PSF, train_coef, test_PSF, test_coef = calibration.generate_dataset(PSF_zernike, N_train, N_test,
                                                                          new_scale * coef_strength,
                                                                          new_scale * rescale)

np.save(os.path.join(directory, 'train_PSF_%d' % zern_level), train_PSF)
np.save(os.path.join(directory, 'train_coef_%d' % zern_level), train_coef)
np.save(os.path.join(directory, 'test_PSF_%d' % zern_level), test_PSF)
np.save(os.path.join(directory, 'test_coef_%d' % zern_level), test_coef)

# Train a calibration model on the PSF images
calib = calibration.Calibration(PSF_model=PSF_zernike)
calib.create_cnn_model(layer_filters, kernel_size, name='ZERN_%d' % zern_level, activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR],
                                        N_PIX=N_PIX)
actuator_matrices = [actuator_matrix, pupil_mask, flat_actuator]

ones_diversity = np.random.uniform(-1, 1, size=N_act)
diversity_actuators = diversity * ones_diversity

# Create the PSF model using the Actuator Model for the wavefront
PSF_actuators = psf.PointSpreadFunction(matrices=actuator_matrices, N_pix=N_PIX,
                                        crop_pix=pix, diversity_coef=diversity_actuators)

train_PSF, train_coef, test_PSF, test_coef = calibration.generate_dataset(PSF_actuators, N_train, N_test,
                                                                          coef_strength, rescale)

calib = calibration.Calibration(PSF_model=PSF_actuators)
calib.create_cnn_model(layer_filers, kernel_size, name='CALIBR', activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR],
import numpy as np
import math

import calibration
cal = calibration.Calibration('CondIndGauss')
DEPENDENCE_THRESHOLD = float(cal.get('DEPENDENCE_THRESHOLD', '.5'))


def isIndependent(X, Y, Z):
    result = False  # no cond ind
    # Test for conditional ind
    result = scoreDependence(X, Y, Z) < DEPENDENCE_THRESHOLD
    #print ('CondIndGauss: isIndependent ', Z, '_||_', Y, '|', Z, '=', result)
    return result


def scoreDependence(X, Y, Z):
    # Test for conditional ind
    corrData = [X, Y, Z]
    corrCoefs = np.corrcoef(corrData)
    Pxy = corrCoefs[0, 1]
    Pxz = corrCoefs[0, 2]
    Pyz = corrCoefs[1, 2]
    Pxy_z = (Pxy - Pxz * Pyz) / ((1 - Pxz**2)**.5 * (1 - Pyz**2)**.5)
    fz = .5 * math.log((1 + Pxy_z) / (1 - Pxy_z))  # Fischer's Z(Pxy_z)
    dep = math.fabs(fz)
    #print('dep = ', dep)
    return dep
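For context, a minimal usage sketch of the partial-correlation test above on synthetic data where X and Y are linked only through Z; the data and variable names are hypothetical, and it assumes the isIndependent and scoreDependence functions defined above are in scope.

# Hypothetical usage sketch of the conditional-independence test above.
import numpy as np

rng = np.random.RandomState(0)
Z = rng.normal(size=5000)
X = Z + 0.5 * rng.normal(size=5000)  # X depends on Z
Y = Z + 0.5 * rng.normal(size=5000)  # Y depends on Z, but not directly on X

# Conditioning on Z should leave little residual correlation between X and Y,
# so Fisher's z score stays small and the test reports independence.
print(scoreDependence(X, Y, Z))  # close to 0
print(isIndependent(X, Y, Z))    # True with the default 0.5 threshold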
def calibrer():
    calibrage = calibration.Calibration(self)
    calibrage.start()
NUM_PICTURES = 10
# Sample period for image capture: display one frame every IMG_SAMPLING_PERIOD seconds.
# imshow over SSH is slow and delays frame processing, therefore we display a
# frame only every IMG_SAMPLING_PERIOD seconds
IMG_SAMPLING_PERIOD = 4.0
# Chessboard dimensions
CHESSBOARD = (7, 5)

DEFAULT_OUT_FILE = 'calibration.json'
OPTIONS = '[--file <calibration_file>] [--test]'

cam = cv2.VideoCapture(0)
cal = calibration.Calibration()

"""
Process input arguments.
Options:
  --test: test only mode. Reads calibration file and displays normal and undistorted frames
  --file <calibration_file>: calibration file
"""
def process_argv(argv):
    calibration_file = DEFAULT_OUT_FILE
    test = False
    try:
        opts, args = getopt.getopt(argv[1:], "hf:t", ["file=", "test"])
    except getopt.GetoptError:
        print('{0} {1}'.format(argv[0], OPTIONS))
        sys.exit(2)
PSF_actuators.define_diversity(diversity_defocus)

plt.figure()
plt.imshow(PSF_actuators.diversity_phase, cmap='RdBu')
plt.colorbar()
plt.title(r'Diversity Map | Defocus [rad]')
plt.show()

# Generate a training set for that nominal defocus
train_PSF, train_coef, test_PSF, test_coef = calibration.generate_dataset(PSF_actuators, N_train, N_test,
                                                                          coef_strength, rescale)

utils.plot_images(train_PSF[5000:])
plt.show()

# Train the Calibration Model on images with the nominal defocus
calib = calibration.Calibration(PSF_model=PSF_actuators)
calib.create_cnn_model(layer_filers, kernel_size, name='CALIBR', activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR], readout_copies=readout_copies)

### Sometimes the training fails (for no apparent reason), probably because of random weight initialization??
# If that happens, simply copy and paste the model definition and training bits, and try again

RMS_evolution, residual = calib.calibrate_iterations(test_PSF, test_coef, wavelength=WAVE, N_iter=N_iter,
                                                     readout_noise=True, RMS_readout=1. / SNR)

calib.plot_RMS_evolution(RMS_evolution)
plt.show()

# ================================================================================================================ #
def get_calib(self, idx):
    calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)
    assert os.path.exists(calib_file)
    return calibration.Calibration(calib_file)
train_PSF = np.load(os.path.join(directory, 'train_PSF.npy'))
train_coef = np.load(os.path.join(directory, 'train_coef.npy'))
test_PSF = np.load(os.path.join(directory, 'test_PSF.npy'))
test_coef = np.load(os.path.join(directory, 'test_coef.npy'))

# Downsample the arrays to show the effect of using a coarser scale
down_train_PSF = PSF_actuators.downsample_datacube(train_PSF)
down_test_PSF = PSF_actuators.downsample_datacube(test_PSF)

utils.plot_images(train_PSF, 1)
utils.plot_images(down_train_PSF, 1)
# plt.show()

# Let's see if there's a difference in performance between Scales

# Fine Scale (5 mas) model
calib = calibration.Calibration(PSF_model=PSF_actuators)
calib.create_cnn_model(layer_filters, kernel_size, name='SINGLE_FINE', activation='relu')
losses = calib.train_calibration_model(train_PSF, train_coef, test_PSF, test_coef,
                                        N_loops, epochs_loop, verbose=1, batch_size_keras=32,
                                        plot_val_loss=False, readout_noise=True,
                                        RMS_readout=[1. / SNR],
# ax3 = axes[i_beta][2]
# img3 = ax3.imshow(train_PSF_readout[0, :, :, 1], cmap='plasma')
# # img3.set_clim([0, 1])
# cbar3 = plt.colorbar(img3, ax=ax3)
#
# for ax in axes[i_beta]:
#     ax.xaxis.set_visible(False)
#     ax.yaxis.set_visible(False)
#
# if i_beta == 0:
#     ax2.set_title(r'In-focus PSF')
#     ax3.set_title(r'Defocus PSF')

calib_actu = calibration.Calibration(PSF_model=PSF_actuators)
calib_actu.create_cnn_model(layer_filters, kernel_size, name='NOM_ACTU', activation='relu')
losses = calib_actu.train_calibration_model(train_PSF_readout, train_coef, test_PSF_readout, test_coef,
                                            N_loops=1, epochs_loop=epochs, verbose=1, batch_size_keras=32,
                                            plot_val_loss=False, readout_noise=False,
                                            RMS_readout=[1. / SNR],
import calibration as Cls
import CalibrationSettings as CalibSets
import scr.FigureSupport as Fig

# create a calibration object
calibration = Cls.Calibration()

# sample the posterior of the mortality probability
calibration.sample_posterior()

# create the histogram of the resampled mortality probabilities
Fig.graph_histogram(
    data=calibration.get_mortality_resamples(),
    title='Histogram of Resampled Mortality Probabilities',
    x_label='Mortality Probability',
    y_label='Counts',
    x_range=[CalibSets.POST_L, CalibSets.POST_U])

# Estimate of mortality probability and the posterior interval
print('Estimate of mortality probability ({:.{prec}%} credible interval):'.format(1 - CalibSets.ALPHA, prec=0),
      calibration.get_mortality_estimate_credible_interval(CalibSets.ALPHA, 4))