def __init__(self, ver='1.1.5'):
    self.params = {}        # the parameters to feed to proc_frame
    self.cine_fname = None  # file name
    self.cine_ = None       # the cine object
    self.bck_img = None     # background image for normalization
    try:
        # hard code the mongodb
        self.db = db.LFmongodb()
    except Exception:
        # could not reach the database, carry on without it
        print('could not connect to the DB, continuing without it')
        self.db = None
    self.ver = ver
def __init__(self, parent=None):
    QtCore.QObject.__init__(self, parent)
    self.process_backend = None
    self.mbe = None
    self.next_curve = None
    self.db = None
    try:
        self.db = ldb.LFmongodb()
    except Exception as e:
        print(e)
        print('no database for you!')
def process_split_RM(k, cache_path, RM_params, i_disk_dict, section_per_sec=2):
    """
    Processes fringes -> region_map by segment and saves the results to disk.

    Parameters
    ----------
    k : cine_hash
        The movie to work on
    cache_path : str
        Base path to write the result hdf files under (a directory named
        with today's date is created below it)
    RM_params : dict
        Parameters to pass to Region_map.from_backend
    i_disk_dict : dict
        Mapping between disk number -> disk path
    section_per_sec : int, optional
        The number of segments per second
    """
    db = ldb.LFmongodb(i_disk_dict=i_disk_dict)
    v = db.get_movie_md(k)
    # make the hdf backend object
    hbe = lb.hdf_backend_factory(k, i_disk_dict=i_disk_dict)
    # set up steps of (1 / section_per_sec) second chunks
    N_step = S_step = hbe.frame_rate // section_per_sec

    out_path_template = leidenfrost.FilePath(
        cache_path, datetime.date.today().isoformat(), '')
    # make sure the path exists
    lffh.ensure_path_exists(out_path_template.format)

    for j in range(hbe.first_frame, hbe.last_frame - S_step, N_step):
        # make output name
        out_path = out_path_template._replace(
            fname='RM_{}_{:06}-{:06}_{}.h5'.format(
                k, j, j + N_step, v['fpath']['fname'][:-5]))
        # compute the RM
        _rm = lf.Region_map.from_backend(hbe,
                                         f_slice=slice(j, j + S_step),
                                         **RM_params)
        # write it out to disk
        _rm.write_to_hdf(out_path)
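# Usage sketch (not part of the original module): a hypothetical driver for
# process_split_RM.  The hash, cache path, and disk mapping below are
# placeholders, and rm_params is left empty because the valid keywords are
# whatever Region_map.from_backend accepts.
def _example_split_RM():
    disk_map = {0: u'/media/tcaswell/leidenfrost_a'}  # placeholder mapping
    rm_params = {}  # keyword arguments forwarded to Region_map.from_backend
    process_split_RM('<cine-hash>', '/tmp/rm_cache', rm_params, disk_map,
                     section_per_sec=2)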
def update_average_cache(base_path):
    cine_fnames = []
    for dirpath, dirnames, fnames in os.walk(base_path + '/' + 'leidenfrost'):
        cine_fnames.extend([FilePath(base_path, dirpath[len(base_path) + 1:], f)
                            for f in fnames if 'cine' in f])
    db = ldb.LFmongodb()
    for cn in cine_fnames:
        if 'cine' not in cn[-1]:
            continue
        cine_hash = cine.Cine(cn.format).hash
        bck_img = db.get_background_img(cine_hash)
        if bck_img is None:
            bck_img = gen_bck_img(cn.format)
            db.store_background_img(cine_hash, bck_img)
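# Usage sketch (not part of the original module): update_average_cache only
# needs the root that contains the 'leidenfrost' directory of cine files;
# the path below is a placeholder.
def _example_update_average_cache():
    update_average_cache('/media/tcaswell/leidenfrost_a')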
def proc_cine_fname(cine_fname, ch, hdf_fname_template,
                    max_circ_change_frac=None):
    logger = logging.getLogger('proc_cine_frame_' + str(os.getpid()))
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    db = ldb.LFmongodb()
    config_dict_lst = db.get_unproced_configs(ch)

    for config_dict in config_dict_lst:
        print(cine_fname)
        h5_fname = hdf_fname_template._replace(
            fname=cine_fname.fname.replace('cine', 'h5'))

        lh = logging.FileHandler(
            hdf_fname_template._replace(
                fname=cine_fname.fname.replace('cine', 'log')).format)
        lh.setFormatter(formatter)
        logger.addHandler(lh)

        seed_curve = li.SplineCurve.from_pickle_dict(
            config_dict['curves']['0'])
        params = config_dict['config']
        start_frame = params.pop('start_frame', 0)

        if not os.path.isfile(h5_fname.format):
            stack = lfbe.ProcessBackend.from_args(cine_fname, **params)
            stack.gen_stub_h5(h5_fname.format, seed_curve)
            hfb = lfbe.HdfBackend(h5_fname,
                                  cine_base_path=cine_fname.base_path,
                                  mode='rw')
            file_out = hfb.file
            logger.info('created file')
        else:
            hfb = lfbe.HdfBackend(h5_fname,
                                  cine_base_path=cine_fname.base_path,
                                  mode='rw')
            logger.info('opened file')
            file_out = hfb.file
            # make sure that we continue with the same parameters
            # params = dict((k, hfb.proc_prams[k]) for k in params)
            # need to find a better way to record this
            stack = lfbe.ProcessBackend.from_args(cine_fname,
                                                  ver=hfb.ver,
                                                  **params)
        db.store_proc(ch, config_dict['_id'], h5_fname)

        old_handler = signal.signal(signal.SIGALRM, timeout_handler)
        # TODO: move the logging set up to the top
        try:
            for j in range(start_frame, len(stack)):
                # set a 45 s alarm; if the frame does not finish in time,
                # kill it
                if hfb.contains_frame(j):
                    logger.warn('deleting existing frame {0}'.format(j))
                    hfb._del_frame(j)
                signal.alarm(45)
                start = time.time()
                mbe, new_seed_curve = stack.process_frame(j, seed_curve)
                if max_circ_change_frac is not None:
                    # we are limiting how much the circumference can change
                    # between frames
                    old_circ = seed_curve.circ
                    new_circ = new_seed_curve.circ
                    logger.info('curve circ diff: {}'.format(
                        np.abs(old_circ - new_circ) / old_circ))
                    # if it changes little enough, adopt the new seed curve
                    if (np.abs(old_circ - new_circ) / old_circ <
                            max_circ_change_frac):
                        seed_curve = new_seed_curve
                    else:
                        logger.warn('reusing seed curve')
                        # if we get here we are re-using the curve from an
                        # earlier frame for frame n; the run is already going
                        # off the rails, and hopefully we get enough fringes
                        # back to start being good again.
                else:
                    seed_curve = new_seed_curve
                signal.alarm(0)  # clear the alarm
                elapsed = time.time() - start
                logger.info('completed frame %d in %fs', j, elapsed)
                mbe.write_to_hdf(file_out)
                del mbe
                file_out.flush()
                gc.collect()
        except TimeoutException:
            logger.warn('timed out')
        except Exception as e:
            logger.warn(str(e))
        except:
            logger.warn('raised exception not derived from Exception')
        finally:
            # make sure that no matter what the output file gets cleaned up
            file_out.close()
            # reset the alarm
            signal.alarm(0)
            # restore the signal handler
            signal.signal(signal.SIGALRM, old_handler)
            logger.removeHandler(lh)
    return None
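# Usage sketch (not part of the original module): proc_cine_fname takes
# FilePath tuples for both the cine file and the hdf output template.  The
# paths, hash, and the 0.05 threshold below are placeholders.
def _example_proc_cine_fname():
    cine_fname = FilePath('/media/tcaswell/leidenfrost_a',
                          'leidenfrost/2013-07-01', 'example.cine')
    hdf_template = FilePath('/media/tcaswell/leidenfrost_a',
                            'processed/2013-07-01', '')
    proc_cine_fname(cine_fname, '<cine-hash>', hdf_template,
                    max_circ_change_frac=0.05)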
def proc_cine_to_h5(ch, hdf_fname_template, params, seed_curve):
    """
    Processes a cine path -> h5

    Parameters
    ----------
    ch : str
        Hash of the cine_fname
    hdf_fname_template : FilePath
        Template for where to put the output file + log files
    params : dict
        Parameters to use to process the cine file
    seed_curve : SplineCurve
        The initial seed curve to start processing the first frame with
    """
    i_disk_dict = {0: u'/media/tcaswell/leidenfrost_a',
                   1: u'/media/tcaswell/leidenfrost_c'}
    # make data base communication object
    db = ldb.LFmongodb(i_disk_dict=i_disk_dict)

    # set up logging stuff
    logger = logging.getLogger('proc_cine_frame_' + str(os.getpid()))
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    # make a copy so we don't export side effects
    params = copy.copy(params)

    # convert disk numbers -> paths
    hdf_fname_template = leidenfrost.convert_base_path(hdf_fname_template,
                                                       i_disk_dict)

    cine_md = db.get_movie_md(ch)
    cine_fname = leidenfrost.FilePath.from_db_dict(cine_md['fpath'],
                                                   i_disk_dict)

    # sort out output file names
    h5_fname = hdf_fname_template._replace(
        fname=cine_fname.fname.replace('cine', 'h5'))

    # get _id from DB
    _id, h5_fname = db.start_proc(ch, params, seed_curve, h5_fname)

    lh = logging.FileHandler(
        hdf_fname_template._replace(
            fname=h5_fname.fname.replace('h5', 'log')).format)

    start_frame = params.pop('start_frame', 0)
    end_frame = params.pop('end_frame', -1)
    if end_frame > 0 and end_frame < start_frame:
        raise Exception("invalid start and end frames")

    max_circ_change_frac = params.pop('max_circ_change', None)

    if os.path.isfile(h5_fname.format):
        print('panic!')
        logger.error("file already exists")
        db.remove_proc(_id)
        return

    stack = lb.ProcessBackend.from_args(cine_fname, **params)
    stack.gen_stub_h5(h5_fname.format, seed_curve, start_frame)

    hfb = lb.HdfBackend(h5_fname,
                        cine_base_path=cine_fname.base_path,
                        mode='rw')
    file_out = hfb.file
    logger.info('created file')

    if end_frame == -1:
        end_frame = len(stack)

    old_handler = signal.signal(signal.SIGALRM, _timeout_handler)
    try:
        lh.setFormatter(formatter)
        logger.addHandler(lh)
        for j in range(start_frame, end_frame):
            # set a 45 s alarm; if the frame does not finish in time, kill it
            if hfb.contains_frame(j):
                logger.warn('deleting existing frame {0}'.format(j))
                hfb._del_frame(j)
            signal.alarm(45)
            start = time.time()
            mbe, new_seed_curve = stack.process_frame(j, seed_curve)
            if max_circ_change_frac is not None:
                # we are limiting how much the circumference can change
                # between frames
                old_circ = seed_curve.circ
                new_circ = new_seed_curve.circ
                # if it changes little enough, adopt the new seed curve
                if (np.abs(old_circ - new_circ) / old_circ <
                        max_circ_change_frac):
                    seed_curve = new_seed_curve
            else:
                seed_curve = new_seed_curve
            signal.alarm(0)  # clear the alarm
            elapsed = time.time() - start
            logger.info('completed frame %d in %fs', j, elapsed)
            mbe.write_to_hdf(file_out)
            if j % 500 == 0:
                file_out.flush()
            del mbe
            # gc.collect()
    except TimeoutException:
        # handle the time out error
        logger.warn('timed out')
        # tell the DB we timed out
        db.timeout_proc(_id)
    except Exception as e:
        # handle all exceptions we should get
        logger.warn(str(e))
        db.error_proc(_id)
    except:
        # handle everything else
        logger.warn('raised exception not derived from Exception')
        db.error_proc(_id)
    else:
        # if we ran through the full movie, mark it done (yay)
        db.finish_proc(_id)
    finally:
        # make sure that no matter what the output file gets cleaned up
        file_out.close()
        # reset the alarm
        signal.alarm(0)
        # restore the signal handler
        signal.signal(signal.SIGALRM, old_handler)
        logger.removeHandler(lh)

    return None
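# Usage sketch (not part of the original module): the hash, template, and
# 0.05 threshold are placeholders.  'start_frame', 'end_frame', and
# 'max_circ_change' are the keys popped from params above; any remaining
# entries are forwarded to ProcessBackend.from_args.  The template's base
# path is assumed to be one leidenfrost.convert_base_path can map with the
# hard-coded disk dictionary, and seed_curve would typically come from
# SplineCurve.from_pickle_dict as in proc_cine_fname.
def _example_proc_cine_to_h5(seed_curve):
    hdf_template = leidenfrost.FilePath(u'/media/tcaswell/leidenfrost_a',
                                        'processed', '')
    params = {'start_frame': 0, 'end_frame': -1, 'max_circ_change': 0.05}
    proc_cine_to_h5('<cine-hash>', hdf_template, params, seed_curve)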
def __init__(self, fname_list, cine_base_path, cine_hash,
             i_disk_dict=None, cache_path=None):
    """
    Parameters
    ----------
    fname_list : list of tuples
        (FilePath, frame_in, frame_out) data about the files to be opened
    cine_base_path : string
        base path (chroot style) of the cine files
    cine_hash : cine_hash
        hash of the cine file all of these h5 files derive from
    i_disk_dict : dict or None
        dictionary to convert disk number -> path
    cache_path : str or None
        if not None, base path checked first for locally cached copies of
        the h5 files
    """
    # hard code the mongodb
    self.db = db.LFmongodb(i_disk_dict=i_disk_dict)
    self._cinehash = cine_hash
    self._h5_backends = []
    cine_md = self.db.get_movie_md(self._cinehash)
    self.frame_rate = cine_md['frame_rate']
    self.calibration_value = cine_md['cal_val']
    self.calibration_unit = cine_md['cal_unit']
    self.cine_len = cine_md['frames']
    tmp_flags = np.zeros(self.cine_len, dtype='bool')
    for fn, frame_in, frame_out in fname_list:
        # if all of the frames have been covered, don't process any further
        if np.all(tmp_flags):
            break
        if cache_path is not None:
            tmp_fn = fn._replace(base_path=cache_path)
            if os.path.isfile(tmp_fn.format):
                fn = tmp_fn
        try:
            print("trying to open", fn.format)
            tmp_be = HdfBackend(fn, cine_base_path,
                                i_disk_dict=i_disk_dict)
        except IOError:
            print(fn.format)
            continue
        tmp_flags[frame_in:frame_out] = True
        if tmp_be.cine.hash != self._cinehash:
            print("this list is inconsistent, dropping")
            print(fn.format)
            continue
        if (frame_in < tmp_be.first_frame or
                frame_out > tmp_be.last_frame + 1):
            print(('frame in ({}) and frame out ({}) inconsistent with '
                   'first ({}) and last ({}) frames').format(
                       frame_in, frame_out,
                       tmp_be.first_frame, tmp_be.last_frame))
        self._h5_backends.append((tmp_be, frame_in, frame_out))
    # these are all really cine properties and all are from the same cine
    # so we can just look at the first one.
    # TODO: replace this with a db call
    # sort out first and last frame
    first_frames, last_frames = list(zip(*[(in_f, out_f)
                                           for hbe, in_f, out_f
                                           in self._h5_backends]))
    if np.all(tmp_flags):
        self.first_frame = 0
        self.last_frame = self.cine_len
    else:
        # this assumes that the data is contiguous
        self.first_frame = min(first_frames)
        self.last_frame = max(last_frames)
def __init__(self, fname, cine_base_path=None, mode='r',
             i_disk_dict=None, *args, **kwargs):
    """
    Parameters
    ----------
    fname : `leidenfrost.FilePath`
        Fully qualified path to the hdf file to open
    cine_base_path : str or `None`
        If not `None`, base path to find the raw cine files
    mode : {'r', 'rw'}
        Open the hdf file read-only ('r') or read/write ('rw')
    i_disk_dict : dict or `None`
        If not `None`, mapping between disk number -> disk path
    """
    self._iter_cur_item = -1
    self.file = None
    if mode == 'rw':
        self.file = h5py.File(fname.format, 'r+')
        self.writeable = True
    else:
        self.file = h5py.File(fname.format, 'r')
        self.writeable = False
    self.num_frames = len([k for k in self.file.keys() if 'frame' in k])
    self._prams = HdfBEPram(False, True)
    self.proc_prams = dict(self.file.attrs)
    if cine_base_path is not None:
        self.cine_fname = FilePath(cine_base_path,
                                   self.file.attrs['cine_path'],
                                   self.file.attrs['cine_fname'])
        self.cine = cine.Cine('/'.join(self.cine_fname))
    else:
        self.cine_fname = None
        self.cine = None
    try:
        # hard code the mongodb
        self.db = db.LFmongodb(i_disk_dict=i_disk_dict)
    except Exception:
        # could not reach the database, carry on without it
        print('could not connect to the DB, continuing without it')
        self.db = None

    self._procid = None
    if self.db is not None:
        self._procid = self.db.get_proc_id(fname)

    self.bck_img = None
    if self.db is not None and self.cine is not None:
        self.bck_img = self.db.get_background_img(self.cine.hash)
    # if that fails too, compute it from the cine
    if self.bck_img is None and self.cine is not None:
        self.gen_back_img()
        # if we have a database, shove in the data
        if self.db is not None and self.bck_img is not None:
            self.db.store_background_img(self.cine.hash, self.bck_img)

    if 'ver' not in self.file.attrs or self.file.attrs['ver'] < b'0.1.5':
        self._frame_str = 'frame_{:05d}'
    else:
        self._frame_str = 'frame_{:07d}'

    self._cal_val = None
    self._cal_val_unit = None
    self._first_frame = None
    self._last_frame = None
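# Usage sketch (not part of the original module, assumed to live at module
# level outside the class): this __init__ appears to belong to the HdfBackend
# class used above, so open an existing result file read-only; the paths are
# placeholders.
def _example_open_hdf_backend():
    h5_fname = FilePath('/media/tcaswell/leidenfrost_a',
                        'processed/2013-07-01', 'example.h5')
    hfb = HdfBackend(h5_fname,
                     cine_base_path='/media/tcaswell/leidenfrost_a',
                     mode='r')
    print(hfb.num_frames)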