def process_binned(self, E1, E2, Estep, killzone_mask=None):
    """
    Process spectrum binning

    Parameters:
      E1 - low edge of lowest bin
      E2 - high edge of highest bin
      Estep - bin width
      killzone_mask - optional mask of regions to ignore entirely

    See Calibration.process for prerequisites and results
    """
    calib = calibrate.load(self.calibration_file)

    # zero out killzones of calibration matrix
    # (this causes those pixels to be ignored)
    if killzone_mask is not None:
        calib.calibration_matrix[killzone_mask] = 0

    exposure = Exposure()
    exposure.load_multi(self.exposure_files)
    exposure.apply_filters(self.incident_energy, self.filters)

    spectrum = binned_emission_spectrum(calib.calibration_matrix,
                                        exposure,
                                        E1,
                                        E2,
                                        Estep,
                                        self.I0)
    self._set_spectrum(spectrum)
def process(self, emission_energies=None, skip_columns=[], killzone_mask=None):
    """
    Process Emission Spectrum

    Parameters:
      emission_energies - list of points in emission energy grid
                          if None, a uniform 0.1 eV grid covering range of
                          calibration energies is used
      skip_columns - columns (for vertical disp. dir.) or rows (for horizontal)
                     to skip entirely
      killzone_mask - mask of regions to skip in processing

    Prerequisites:
      self.calibration_file must be set to calibration filename
      self.exposure_files must be a list of exposure filenames
      self.filters must be a list of mx.filter.Filter descendents to apply
                   to integrated exposures (may be empty list)
      self.solid_angle_map may be a map of solid angles subtended by each pixel

    Results:
      self.spectrum is set to processed spectrum
      self.emission, self.intensity, self.uncertainty, self.raw_counts and
      self.num_pixels are set to corresponding columns of spectrum
    """
    calibration = calibrate.Calibration()
    calibration.load(self.calibration_file)

    exposure = Exposure()
    exposure.load_multi(self.exposure_files)

    for f in self.filters:
        f.filter(exposure.pixels, self.incident_energy)

    # generate emission energies from range of calibration matrix
    if emission_energies is None:
        Emin, Emax = calibration.energy_range()
        emission_energies = np.arange(Emin, Emax, .1)

    spectrum = process_spectrum(calibration.calibration_matrix,
                                exposure,
                                emission_energies,
                                self.I0,
                                calibration.dispersive_direction,
                                calibration.xtals,
                                self.solid_angle_map,
                                skip_columns=skip_columns,
                                killzone_mask=killzone_mask)

    self._set_spectrum(spectrum)
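# A minimal usage sketch for the process() method above, assuming it belongs to
# a minixs emission-spectrum class (EmissionSpectrum, as used in the diagnose()
# snippet later in this collection). The file names, incident energy and I0
# value are placeholders, not values from the original code.
import minixs as mx

xes = EmissionSpectrum()                        # class shown in diagnose() below
xes.calibration_file = 'example.calib'          # produced by a prior calibration run
xes.exposure_files = ['xes_0001.tif', 'xes_0002.tif']
xes.incident_energy = 7059.0                    # eV, hypothetical
xes.I0 = 1.0
xes.filters = [mx.filter.HighFilter(1000)]      # filter type shown in diagnose() docstring
xes.solid_angle_map = None                      # optional, per the docstring
xes.process()                                   # fills xes.spectrum, xes.emission, ...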
def test_set_exposure(self):
    """
    Test the function to set and change exposure
    """
    exposure = Exposure(min_t=1 / 500, max_t=1 / 125)
    (f, t, iso, bias) = exposure.set_exposure(4.0, 1 / 100, 100, 0)
    self.assertEqual(f, 4.0)
    self.assertEqual(t, 1 / 100)
    (f, t, iso, bias) = exposure.set_exposure(f=5.6)
    self.assertAlmostEqual(t, 1 / 50, 1)
    (f, t, iso, bias) = exposure.set_exposure(t=1 / 200)
    self.assertAlmostEqual(f, 2.8, 1)
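# The test above implies reciprocity behaviour: when one exposure parameter is
# changed on its own, set_exposure() compensates with another so the overall
# exposure value stays constant (4.0 @ 1/100 -> 5.6 @ 1/50 -> 2.8 @ 1/200).
# Below is a minimal sketch of that idea, assuming EV is defined as
# log2(N^2 / t) and ignoring ISO and bias; it is an illustration, not the
# module's actual implementation.
import math

def compensate_shutter(f_old, t_old, f_new):
    """Return the shutter time that keeps EV constant after an aperture change."""
    ev = math.log2(f_old ** 2 / t_old)         # EV implied by the old settings
    return f_new ** 2 / 2 ** ev                # solve log2(f_new^2 / t) = ev for t

def compensate_aperture(f_old, t_old, t_new):
    """Return the f-number that keeps EV constant after a shutter change."""
    ev = math.log2(f_old ** 2 / t_old)
    return math.sqrt(t_new * 2 ** ev)          # solve log2(f^2 / t_new) = ev for f

# compensate_shutter(4.0, 1/100, 5.6)   -> ~1/51 (one stop slower)
# compensate_aperture(5.6, 1/51, 1/200) -> ~2.8  (two stops wider)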
def __init__(self):
    self.brightness = Brightness()
    self.gamma = Gamma()
    self.gain = Gain()
    self.exposure = Exposure()
    self.exposureauto = ExposureAuto()
    self.saturation = Saturation()
    self.hue = Hue()
    self.whitebalancered = WhiteBalanceRed()
    self.whitebalanceblue = WhiteBalanceBlue()

    self.Parameters = {
        'Brightness': self.brightness.Brightness,
        'Gamma': self.gamma.Gamma,
        'Gain': self.gain.Gain,
        'Exposure': self.exposure.Exposure,
        'ExposureAuto': self.exposureauto.ExposureAuto,
        'Saturation': self.saturation.Saturation,
        'Hue': self.hue.Hue,
        'WhiteBalanceRed': self.whitebalancered.WhiteBalanceRed,
        'WhiteBalanceBlue': self.whitebalanceblue.WhiteBalanceBlue
    }
def test_x_to_fstop(self):
    """
    Test the f stop conversion function
    """
    exposure = Exposure(min_f=2, max_f=4)
    self.assertEqual(exposure.x_to_fstop(0), '2')
    self.assertEqual(exposure.x_to_fstop(0.5), '2.8')
    self.assertEqual(exposure.x_to_fstop(1.0), '4')
    exposure = Exposure(min_f=2, max_f=5.6)
    self.assertEqual(exposure.x_to_fstop(0), '2')
    self.assertEqual(exposure.x_to_fstop(0.33), '2.8')
    self.assertEqual(exposure.x_to_fstop(0.67), '4')
    self.assertEqual(exposure.x_to_fstop(1.0), '5.6')
def test_fstop_to_x(self):
    """
    Test the f-stop to (0, 1) conversion function
    """
    # Can only test to one place since stop values are not accurate
    exposure = Exposure(min_f=2, max_f=4)
    self.assertEqual(exposure.fstop_to_x(2), 0.0)
    self.assertEqual(exposure.fstop_to_x(4), 1.0)
    self.assertAlmostEqual(exposure.fstop_to_x(2.8), 0.5, 1)
    exposure = Exposure(min_f=2, max_f=5.6)
    self.assertAlmostEqual(exposure.fstop_to_x(2.8), 1 / 3, 1)
    self.assertAlmostEqual(exposure.fstop_to_x(4), 2 / 3, 1)
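# The f-stop and shutter tests in this collection are consistent with a simple
# geometric (per-stop) interpolation between the configured limits: x in [0, 1]
# maps to f = min_f * (max_f / min_f) ** x, and the inverse divides logs of the
# ratios. A minimal sketch of that mapping, assuming this is roughly how the
# Exposure class behaves internally; rounding to the nearest standard stop
# label ('2.8', '1/250', ...) is left out.
import math

def x_to_f(x, min_f=2.0, max_f=4.0):
    """Map x in [0, 1] to an f-number, spacing values evenly in stops."""
    return min_f * (max_f / min_f) ** x

def f_to_x(f, min_f=2.0, max_f=4.0):
    """Inverse of x_to_f: fraction of the aperture range covered by f."""
    return math.log(f / min_f) / math.log(max_f / min_f)

# x_to_f(0.5)         -> 2.83 (displayed as '2.8')
# f_to_x(2.8, 2, 5.6) -> ~0.33, matching test_fstop_to_x above
# The same formula applied to (min_t, max_t) reproduces the shutter-speed tests.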
def add_depth_and_momentum2csv(sww_base_name, exposure_file_in,
                               exposure_file_out=None,
                               overwrite=False, verbose=True,
                               use_cache=True):
    """
    Calculate the maximum depth and momentum in an sww file, for locations
    specified in a csv exposure file.

    These calculations are done over all the sww files with the
    sww_base_name in the specified directory.
    """
    csv = Exposure(exposure_file_in)
    geospatial = csv.get_location()
    max_depths, max_momentums = calc_max_depth_and_momentum(sww_base_name,
                                                            geospatial,
                                                            verbose=verbose,
                                                            use_cache=use_cache)
    csv.set_column("MAX INUNDATION DEPTH (m)", max_depths, overwrite=overwrite)
    csv.set_column("MOMENTUM (m^2/s) ", max_momentums, overwrite=overwrite)
    csv.save(exposure_file_out)
def calibrate(self, fit_type=FIT_QUARTIC, progress=ProgressIndicator()):
    """
    Calculate calibration matrix

    Prerequisites:
      self.exposure_files must be a list of exposure files
      self.energies must be a list of corresponding energies
      self.xtals must be a list of regions exposed by each analyzer crystal
        each entry must be of the form [[x1,y1],[x2,y2]]
      self.filters may contain a list of mx.filter.Filter descendents to
        apply to the exposures

    Results:
      self.calibration_matrix contains the calibration matrix
      self.lin_res contains average linear residuals of fit (one for each xtal)
      self.rms_res contains rms residuals of fit (one for each xtal)
      self.fit_points contains all detected peak values as array with
        columns (x,y,energy)
      self.fits contains a list of fit parameters (one for each xtal)
    """
    # load exposure files
    progress.push_step("Load Exposures", 0.1)
    exposures = [Exposure(f) for f in self.exposure_files]
    progress.pop_step()

    # apply filters
    progress.push_step("Apply Filters", 0.3)
    i = 0
    n = len(exposures)
    for exposure, energy in izip(exposures, self.energies):
        progress.update("Filter exposure %d" % (i+1,), i/float(n))
        i += 1
        for f in self.filters:
            f.filter(exposure.pixels, energy)
    progress.pop_step()

    # calibrate
    progress.push_step("Calibrate", 0.6)
    self.calibration_matrix, diagnostics = calibrate(exposures,
                                                     self.energies,
                                                     self.xtals,
                                                     self.dispersive_direction,
                                                     fit_type,
                                                     return_diagnostics=True,
                                                     progress=progress)
    progress.pop_step()

    # store diagnostic info
    self.lin_res, self.rms_res, self.fit_points, self.fits = diagnostics
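# A hedged driver sketch for the calibrate() method above, assuming it is a
# method of the mx.calibrate.Calibration class seen in the diagnose() snippet
# below. The file names, energies and crystal region are placeholders, not
# values from the original code.
import minixs as mx

c = mx.calibrate.Calibration()
c.exposure_files = ['elastic_7040.tif', 'elastic_7045.tif', 'elastic_7050.tif']
c.energies = [7040.0, 7045.0, 7050.0]        # one incident energy per exposure (eV)
c.xtals = [[[10, 20], [120, 180]]]           # [[x1, y1], [x2, y2]] region per crystal
c.filters = [mx.filter.HighFilter(1000)]     # filter type shown in diagnose() docstring
# c.dispersive_direction must also be set to the package's dispersive-direction
# constant before calling calibrate(); the exact name is not shown in this collection.
c.calibrate()                                # fills c.calibration_matrix, c.lin_res, ...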
def main():
    print('Initializing NetworkTables for peg')
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress('10.40.96.2')
    NetworkTables.initialize()

    print('Creating peg video capture')
    global pegCapture
    pegCapture = cv2.VideoCapture("/dev/gear_camera")
    pegCapture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    pegCapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    pegCapture.set(cv2.CAP_PROP_FPS, 15)

    print('Creating peg pipeline')
    pegPipeline = PegPipeline()

    try:
        print("Starting peg server")
        server = ThreadedHTTPServer(('0.0.0.0', 5800), CamHandler)
        print('Running peg pipeline')
        table = NetworkTables.getTable('/vision/')
        while True:
            pegOk, pegFrame = pegCapture.read()
            if pegOk:
                print("ok")
                if table.getNumber('gear_camera_mode') == 0:
                    print('Switching peg to GRIP mode')
                    if Exposure.getExposure("/dev/gear_camera") != 10:
                        Exposure.setExposure(10, "/dev/gear_camera")
                    while table.getNumber('gear_camera_mode') == 0:
                        pegPipeline.process(pegFrame)
                        extra_processing(pegPipeline)
                else:
                    print('Switching peg to stream mode')
                    if Exposure.getExposure("/dev/gear_camera") != 100:
                        Exposure.setExposure(100, "/dev/gear_camera")
                    while table.getNumber('gear_camera_mode') == 1:
                        server.handle_request()
    except KeyboardInterrupt:
        pegCapture.release()
        server.socket.close()
def inundation_damage(sww_base_name, exposure_files_in,
                      exposure_file_out_marker=None,
                      ground_floor_height=0.3,
                      overwrite=False, verbose=True,
                      use_cache=True):
    """
    This is the main function for calculating tsunami damage due to
    inundation.  It gets the location of structures from the exposure file
    and gets the inundation of these structures from the sww file.

    It then calculates the damage loss.

    Note, structures outside of the sww file get the minimum inundation
    (-ground_floor_height).

    These calculations are done over all the sww files with the
    sww_base_name in the specified directory.

    exposure_files_in - a file or a list of files to input from
    exposure_file_out_marker - this string will be added to the input file
                               name to get the output file name
    """
    if isinstance(exposure_files_in, basestring):
        exposure_files_in = [exposure_files_in]

    for exposure_file_in in exposure_files_in:
        csv = Exposure(exposure_file_in,
                       title_check_list=[SHORE_DIST_LABEL, WALL_TYPE_LABEL,
                                         STR_VALUE_LABEL, CONT_VALUE_LABEL])
        geospatial = csv.get_location()
        geospatial = ensure_absolute(geospatial)
        max_depths, max_momentums = calc_max_depth_and_momentum(
            sww_base_name,
            geospatial,
            ground_floor_height=ground_floor_height,
            verbose=verbose,
            use_cache=use_cache)
        edm = EventDamageModel(max_depths,
                               csv.get_column(SHORE_DIST_LABEL),
                               csv.get_column(WALL_TYPE_LABEL),
                               csv.get_column(STR_VALUE_LABEL),
                               csv.get_column(CONT_VALUE_LABEL))
        results_dic = edm.calc_damage_and_costs(verbose_csv=True,
                                                verbose=verbose)
        for title, value in results_dic.iteritems():
            csv.set_column(title, value, overwrite=overwrite)

        # Save info back to csv file
        if exposure_file_out_marker is None:
            exposure_file_out = exposure_file_in
        else:
            # Split off the extension, handling file names that contain more than one '.'
            split_name = exposure_file_in.split('.')
            exposure_file_out = '.'.join(split_name[:-1]) + \
                exposure_file_out_marker + '.' + split_name[-1]
        csv.save(exposure_file_out)
        if verbose:
            log.critical('Augmented building file written to %s'
                         % exposure_file_out)
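# A hedged usage example for inundation_damage() above. The sww base name, CSV
# path and output marker are placeholders; the exposure CSV is expected to
# contain the columns named by the SHORE_DIST/WALL_TYPE/STR_VALUE/CONT_VALUE
# label constants used in the function.
inundation_damage('cairns_slide',                  # matches cairns_slide*.sww files
                  'cairns_buildings.csv',          # hypothetical exposure CSV
                  exposure_file_out_marker='_damage',
                  ground_floor_height=0.3,
                  overwrite=True,
                  verbose=True)
# Writes the augmented building data to cairns_buildings_damage.csv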
scenarios = [
    "1950-2099_RCP_4.5_avg.csv",
    "1950-2099_RCP_4.5_min.csv",
    "1950-2099_RCP_4.5_max.csv",
    "1950-2099_RCP_8.5_avg.csv",
    "1950-2099_RCP_8.5_min.csv",
    "1950-2099_RCP_8.5_max.csv"
]
cdf_prefix = ['best', 'mid', 'worst']
cdf_postfix = ['electronics', 'motor', 'pvc', 'iron']
pop_count = {'electronics': 4, 'motor': 4, 'pvc': 480, 'iron': 190}

increxp_prefix = "/users/austinmichne/documents/cleanperses/data/exposure/1950-2099_incr_exp/"
cumexp_prefix = "/users/austinmichne/documents/cleanperses/data/exposure/1950-2099_cum_exp/"
cdf_path = "/users/austinmichne/documents/cleanperses/data/cdf/"
failure_path = "/users/austinmichne/documents/cleanperses/data/failure/"

for scenario in scenarios:
    # load the incremental and cumulative exposure series for this scenario
    exposure = Exposure()
    exposure.load_csv(increxp_prefix + scenario, cumexp_prefix + scenario)

    for pre in cdf_prefix:
        for component_type in cdf_postfix:
            cdf = CumulativeDistFailure(
                f'{pre}_{component_type}',
                f'{cdf_path}{pre}_case_{component_type}.csv')
            subpop = Subpopulation('example', exposure, cdf,
                                   pop_count[component_type])
            subpop.get_failures()
            # print(subpop.failures)
            # print(len(subpop.failures))
            # print([x[0] for x in subpop.failures].count(subpop.failures[0][0]))

    rcp = scenario.split(sep='_', maxsplit=1)[1].rsplit('.', maxsplit=1)[0]
def test_x_to_shutter(self):
    """
    Test the shutter speed conversion function
    """
    exposure = Exposure(min_t=1 / 500, max_t=1 / 125)
    self.assertEqual(exposure.x_to_shutter(0), '1/500')
    self.assertEqual(exposure.x_to_shutter(0.5), '1/250')
    self.assertEqual(exposure.x_to_shutter(1.0), '1/125')
    exposure = Exposure(min_t=1 / 1000, max_t=1 / 125)
    self.assertEqual(exposure.x_to_shutter(0), '1/1000')
    self.assertEqual(exposure.x_to_shutter(0.33), '1/500')
    self.assertEqual(exposure.x_to_shutter(0.67), '1/250')
    self.assertEqual(exposure.x_to_shutter(1.0), '1/125')
    exposure = Exposure()
    self.assertEqual(exposure.x_to_shutter(0), '1/8000')
    self.assertEqual(exposure.x_to_shutter(1.0), '60')
def test_shutter_to_x(self):
    """
    Test the shutter speed to (0, 1) conversion function
    """
    exposure = Exposure(min_t=1 / 500, max_t=1 / 125)
    self.assertEqual(exposure.shutter_to_x(1 / 500), 0.0)
    self.assertEqual(exposure.shutter_to_x(1 / 125), 1.0)
    self.assertAlmostEqual(exposure.shutter_to_x(1 / 250), 0.5, 6)
    exposure = Exposure(min_t=1 / 1000, max_t=1 / 125)
    self.assertAlmostEqual(exposure.shutter_to_x(1 / 500), 1 / 3, 6)
    self.assertAlmostEqual(exposure.shutter_to_x(1 / 250), 2 / 3, 6)
    exposure = Exposure()
    self.assertEqual(exposure.shutter_to_x(1 / 8000), 0.0)
    self.assertEqual(exposure.shutter_to_x(60), 1.0)
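# A small sketch of how a shutter time might be rendered as the strings the
# shutter tests above expect ('1/500' for times under a second, '60' for whole
# seconds). This mirrors the test expectations only; it is not taken from the
# module's actual code.
def shutter_label(t):
    """Format a shutter time in seconds as a conventional camera label."""
    if t >= 1:
        return '{0:g}'.format(t)               # e.g. 60 -> '60'
    return '1/{0:g}'.format(round(1 / t))      # e.g. 0.002 -> '1/500'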
#! /usr/bin/env python3
# ToughExposure
'''An exposure suggestor and calculator'''

from collections import deque

import ui
import clipboard
from console import hud_alert

from exposure import Exposure

exp = Exposure(2.0, 32.0)
exp.set_exposure(5.6, 1 / 100, 100, 0)

# Initialize the list of last clicked switches with two items
last_adjusted = deque([None, None], maxlen=5)


def slider_toggle(sender):
    """
    The user has clicked a switch.  Set the last two switches to have been
    clicked to on and enable the corresponding sliders.  Disable the other
    sliders.

    :param sender: The switch object which was clicked
    :return:
    """
    v = sender.superview
    name = sender.name
    aperture_slider = v['aperture_slider']
""" Ctrl-Z FRC Team 4096 FIRST Robotics Competition Code for use on Raspberry Pi coprocessor [email protected] """ import sys from exposure import Exposure """ Sets the exposure of /dev/video0 to the first arg passed or 10 if no args are passed If 2 args are passed it sets the exposure of arg 2 to arg 1 """ if len(sys.argv) < 3: print("Exposure is {0}".format(Exposure.getExposure())) Exposure.setExposure(10 if len(sys.argv) < 2 else sys.argv[1]) print("Exposure is now {0}".format(Exposure.getExposure())) else: print("Exposure is {0}".format(Exposure.getExposure())) Exposure.setExposure(10 if len(sys.argv) < 2 else sys.argv[1]) print("Exposure is now {0}".format(Exposure.getExposure()))
def diagnose(self, return_spectra=False, filters=None, progress=None):
    """
    Process all calibration exposures and fit to gaussians, returning
    parameters of fit

    Example:
      >>> import minixs as mx
      >>> from matplotlib import pyplot as pt
      >>> c = mx.calibrate.Calibration('example.calib')
      >>> d, spectra = c.diagnose(return_spectra=True,
      ...                         filters=[mx.filter.HighFilter(1000)])
      >>> d[:,3] # sigma for gaussian fits
      array([ 0.4258905 ,  0.54773887,  0.58000567,  0.57056559,  0.56539868,
              0.58693027,  0.60704443,  0.61898894,  0.62726828,  0.63519546,
              0.65309853,  0.66317984,  0.67826396,  0.69466781,  0.75039033,
              0.78887514,  0.84248593,  0.8974527 ])
      >>> s = spectra[5]
      >>> pt.plot(s.emission, s.intensity, 'o')
      >>> pt.plot(s.emission, mx.gauss.gauss_model(d[5,1:], s.emission))
      >>> pt.show()

    Parameters
    ----------
    return_spectra: whether to return processed calibration spectra

    Returns
    -------
    (diagnostics, [processed_spectra])

    diagnostics: an array with one row for each calibration exposure
                 the columns are:
                   incident beam energy
                   amplitude
                   E0
                   sigma

    the best Gaussian fit to the data is given by:
      exp(-(E-E0)**2/(2*sigma**2))

    if `return_spectra` is True, then a list of XES spectra will be
    returned (one for each calibration exposure)
    """
    emin, emax = self.energy_range()
    emission_energies = np.arange(emin, emax, .2)

    diagnostics = np.zeros((len(self.energies), 4))
    if return_spectra:
        spectra = []

    for i in range(len(self.energies)):
        if progress:
            msg = "Processing calibration exposure %d / %d" % (i, len(self.energies))
            prog = i / float(len(self.energies))
            progress.update(msg, prog)

        energy = self.energies[i]
        exposure = Exposure(self.exposure_files[i])
        if filters is not None:
            exposure.apply_filters(energy, filters)

        s = process_spectrum(self.calibration_matrix,
                             exposure,
                             emission_energies,
                             1,
                             self.dispersive_direction,
                             self.xtals)

        x = s[:, 0]
        y = s[:, 1]

        fit, ier = gauss_leastsq((x, y), (y.max(), energy, 1.0))
        if not (0 < ier < 5):
            continue

        diagnostics[i, 0] = energy
        diagnostics[i, 1:] = fit

        if return_spectra:
            xes = EmissionSpectrum()
            xes.incident_energy = energy
            xes.exposure_files = [exposure.filename]
            xes._set_spectrum(s)
            spectra.append(xes)

    diagnostics = diagnostics[np.where(diagnostics[:, 0] != 0)]

    if return_spectra:
        return (diagnostics, spectra)
    else:
        return diagnostics