def load_frames(reader, cfg, show_progress=False):
    """Read every frame from *reader* and store it as a thresholded sparse matrix.

    Values at or below ``cfg.fit_grains.threshold`` are dropped by the COO
    builder.  Returns a two-element list ``[frames, [omega_start, omega_step]]``
    (omegas in radians) that downstream code treats as a pseudo-reader.
    """
    # TODO: this should be updated to read only the frames requested in cfg
    # either the images start, step, stop, or based on omega start, step, stop
    t0 = time.time()
    total = reader.getNFrames()
    logger.info(
        "reading %d frames of data, storing values > %.1f",
        total, cfg.fit_grains.threshold
        )

    # progress bar is optional; keep a sentinel so later checks are uniform
    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar(">"), " ", ETA(), " ", ReverseBar("<")],
            maxval=total
            ).start()

    builder = CooMatrixBuilder()
    frames = []
    for idx in range(total):
        raw = reader.read()
        frames.append(builder.build_matrix(raw, cfg.fit_grains.threshold))
        if pbar is not None:
            pbar.update(idx)
    frames = np.array(frames)

    omegas = [
        np.radians(cfg.image_series.omega.start),
        np.radians(cfg.image_series.omega.step),
        ]

    if pbar is not None:
        pbar.finish()
    logger.info("read %d frames in %g seconds", total, time.time() - t0)
    return [frames, omegas]
def get_frames(reader, cfg, show_progress=False):
    """Read every frame from *reader*, zero out sub-threshold pixels, and
    return ``[frames, [omega_start, omega_step]]`` (omegas in radians).

    Each frame is stored as a ``coo_matrix`` after values at or below
    ``cfg.fit_grains.threshold`` have been set to zero in place.
    """
    # TODO: this should be updated to read only the frames requested in cfg
    # either the images start, step, stop, or based on omega start, step, stop
    t0 = time.time()
    total = reader.getNFrames()
    logger.info("reading %d frames of data", total)

    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=total
            ).start()

    frames = []
    for idx in range(total):
        raw = reader.read()
        # suppress everything at or below the threshold before sparsifying
        raw[raw <= cfg.fit_grains.threshold] = 0
        frames.append(coo_matrix(raw))
        if pbar is not None:
            pbar.update(idx)
    frames = np.array(frames)

    omegas = [
        np.radians(cfg.image_series.omega.start),
        np.radians(cfg.image_series.omega.step),
        ]

    if pbar is not None:
        pbar.finish()
    logger.info('read %d frames in %g seconds', total, time.time() - t0)
    return [frames, omegas]
def fit_grains(cfg, force=False, clean=False, show_progress=False,
               ids_to_refine=None):
    """Run grain fitting over the job queue, serially or in parallel.

    Parameters
    ----------
    cfg : configuration object; ``cfg.multiprocessing`` caps the worker count.
    force, clean : forwarded to ``get_data``.
    show_progress : when True, render a console progress bar.
    ids_to_refine : optional subset of grain ids, forwarded to
        ``get_job_queue``.

    Results are written via ``write_grains_file``; nothing is returned.
    """
    # load the data
    reader, pkwargs = get_data(cfg, show_progress, force, clean)
    job_queue, njobs = get_job_queue(cfg, ids_to_refine)

    # log this before starting progress bar; never spawn more workers than jobs
    ncpus = cfg.multiprocessing
    ncpus = ncpus if ncpus < njobs else njobs
    logger.info(
        'running pullspots with %d of %d processors', ncpus, mp.cpu_count()
        )
    if ncpus == 1:
        logger.info('multiprocessing disabled')

    start = time.time()
    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=njobs
            ).start()

    # finally start processing data
    if ncpus == 1:
        # no multiprocessing
        results = []
        w = FitGrainsWorker(
            job_queue, results, reader, copy.deepcopy(pkwargs),
            progressbar=pbar
            )
        w.run()
        # FIX: n_res was undefined on this branch, so the final logging call
        # raised NameError; derive it from the collected results instead.
        n_res = len(results)
    else:
        # multiprocessing
        manager = mp.Manager()
        results = manager.list()
        for i in range(ncpus):
            # lets make a deep copy of the pkwargs, just in case:
            w = FitGrainsWorkerMP(job_queue, results, reader,
                                  copy.deepcopy(pkwargs))
            w.daemon = True
            w.start()
        # poll until every job has reported a result
        while True:
            n_res = len(results)
            if show_progress:
                pbar.update(n_res)
            if n_res == njobs:
                break
            time.sleep(0.1)
    job_queue.join()

    write_grains_file(cfg, results)

    if show_progress:
        pbar.finish()

    elapsed = time.time() - start
    logger.info('processed %d grains in %g minutes', n_res, elapsed/60)
def load_frames(reader, cfg, show_progress=False):
    """Read frames from *reader* into thresholded sparse COO matrices.

    Frame count comes from ``cfg.image_series.images.stop`` when set,
    otherwise from the reader itself.  Returns a two-element list
    ``[frames, [omega_start, omega_step]]`` (omegas in radians) used
    downstream as a pseudo-reader.
    """
    # TODO: this should be updated to read only the frames requested in cfg
    # either the images start, step, stop, or based on omega start, step, stop
    t0 = time.time()

    # an explicit stop in the config overrides the reader's frame count
    stop = cfg.image_series.images.stop
    total = stop if stop is not None else reader.getNFrames()

    logger.info(
        "reading %d frames of data, storing values > %.1f",
        total, cfg.fit_grains.threshold
        )

    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=total
            ).start()

    builder = CooMatrixBuilder()
    frames = []
    for idx in range(total):
        raw = reader.read()
        frames.append(builder.build_matrix(raw, cfg.fit_grains.threshold))
        if pbar is not None:
            pbar.update(idx)
    frames = np.array(frames)

    omegas = [
        np.radians(cfg.image_series.omega.start),
        np.radians(cfg.image_series.omega.step),
        ]

    if pbar is not None:
        pbar.finish()
    logger.info('read %d frames in %g seconds', total, time.time() - t0)
    return [frames, omegas]
def fit_grains(cfg, force=False, show_progress=False):
    """Fit grains by fanning jobs out to ``cfg.multiprocessing`` workers.

    Spawns daemon worker processes that drain the job queue, polls a shared
    results list until all jobs are done, then writes the grains file.
    """
    # load the data
    reader, pkwargs = get_data(cfg, show_progress)
    job_queue = get_job_queue(cfg)
    njobs = job_queue.qsize()

    ncpus = cfg.multiprocessing
    logger.info('running pullspots with %d processors', ncpus)

    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=njobs
            ).start()

    # shared list collects results from all worker processes
    manager = mp.Manager()
    results = manager.list()

    # finally start processing data
    start = time.time()
    for _ in range(ncpus):
        # deep-copy pkwargs so workers never share mutable state
        worker = FitGrainsWorker(job_queue, results, reader,
                                 copy.deepcopy(pkwargs))
        worker.daemon = True
        worker.start()

    # poll until every job has reported a result
    while True:
        n_res = len(results)
        if pbar is not None:
            pbar.update(n_res)
        if n_res == njobs:
            break
        time.sleep(0.1)
    job_queue.join()

    write_grains_file(cfg, results)

    if pbar is not None:
        pbar.finish()
    logger.info(
        'processed %d grains in %g minutes', n_res, (time.time() - start)/60
        )
def fit_grains(cfg, force=False, clean=False, show_progress=False,
               ids_to_refine=None):
    """Run grain fitting over the job queue, serially or in parallel.

    Parameters
    ----------
    cfg : configuration object; ``cfg.multiprocessing`` caps the worker count.
    force, clean : forwarded to ``get_data``.
    show_progress : when True, render a console progress bar.
    ids_to_refine : optional subset of grain ids, forwarded to
        ``get_job_queue``.

    Results are written via ``write_grains_file``; nothing is returned.
    """
    # load the data
    reader, pkwargs = get_data(cfg, show_progress, force, clean)
    job_queue, njobs = get_job_queue(cfg, ids_to_refine)

    # log this before starting progress bar; never spawn more workers than jobs
    ncpus = cfg.multiprocessing
    ncpus = ncpus if ncpus < njobs else njobs
    logger.info('running pullspots with %d of %d processors',
                ncpus, mp.cpu_count())
    if ncpus == 1:
        logger.info('multiprocessing disabled')

    start = time.time()
    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=njobs).start()

    # finally start processing data
    if ncpus == 1:
        # no multiprocessing
        results = []
        w = FitGrainsWorker(job_queue, results, reader,
                            copy.deepcopy(pkwargs), progressbar=pbar)
        w.run()
        # FIX: n_res was undefined on this branch, so the final logging call
        # raised NameError; derive it from the collected results instead.
        n_res = len(results)
    else:
        # multiprocessing
        manager = mp.Manager()
        results = manager.list()
        for i in range(ncpus):
            # lets make a deep copy of the pkwargs, just in case:
            w = FitGrainsWorkerMP(job_queue, results, reader,
                                  copy.deepcopy(pkwargs))
            w.daemon = True
            w.start()
        # poll until every job has reported a result
        while True:
            n_res = len(results)
            if show_progress:
                pbar.update(n_res)
            if n_res == njobs:
                break
            time.sleep(0.1)
    job_queue.join()

    write_grains_file(cfg, results)

    if show_progress:
        pbar.finish()

    elapsed = time.time() - start
    logger.info('processed %d grains in %g minutes', n_res, elapsed / 60)
def fit_grains(cfg, force=False, clean=False, show_progress=False,
               ids_to_refine=None):
    """Run grain fitting over the job queue, serially or in parallel.

    Parameters
    ----------
    cfg : configuration object; ``cfg.multiprocessing`` caps the worker
        count, and ``cfg.fit_grains.fit_only`` / ``cfg.fit_grains.refit``
        tune the fitting behavior echoed below.
    force, clean : forwarded to ``get_data``.
    show_progress : when True, render a console progress bar.
    ids_to_refine : optional subset of grain ids, forwarded to
        ``get_job_queue``.

    Results are written via ``write_grains_file``; nothing is returned.
    """
    # load the data
    imgser_dict, instr, pkwargs = get_data(cfg, show_progress, force, clean)
    job_queue, njobs = get_job_queue(cfg, ids_to_refine)

    # log this before starting progress bar; never spawn more workers than jobs
    ncpus = cfg.multiprocessing
    ncpus = ncpus if ncpus < njobs else njobs
    logger.info(
        'will use %d of %d processors', ncpus, mp.cpu_count()
        )
    if ncpus == 1:
        logger.info('multiprocessing disabled')

    # echo some of the fitting options
    if cfg.fit_grains.fit_only:
        logger.info('\t**fitting only; will not pull spots')
    if cfg.fit_grains.refit is not None:
        msg = 'will perform refit excluding spots > ' + \
              '%.2f pixels and ' % cfg.fit_grains.refit[0] + \
              '%.2f frames from expected values' % cfg.fit_grains.refit[1]
        logger.info(msg)

    start = time.time()
    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=njobs
            ).start()

    # finally start processing data
    if ncpus == 1:
        # no multiprocessing
        results = []
        w = FitGrainsWorker(
            job_queue, results, imgser_dict, instr, copy.deepcopy(pkwargs),
            progressbar=pbar
            )
        w.run()
        # FIX: n_res was undefined on this branch, so the final logging call
        # raised NameError; derive it from the collected results instead.
        n_res = len(results)
    else:
        # multiprocessing
        manager = mp.Manager()
        results = manager.list()
        for i in range(ncpus):
            # lets make a deep copy of the pkwargs, just in case:
            w = FitGrainsWorkerMP(job_queue, results, imgser_dict, instr,
                                  copy.deepcopy(pkwargs))
            w.daemon = True
            w.start()
        # poll until every job has reported a result
        while True:
            n_res = len(results)
            if show_progress:
                pbar.update(n_res)
            if n_res == njobs:
                break
            time.sleep(0.1)  # ??? check this?
    job_queue.join()

    # call to grain tabel output
    write_grains_file(cfg, results)

    if show_progress:
        pbar.finish()

    elapsed = time.time() - start
    logger.info('processed %d grains in %g minutes', n_res, elapsed/60)
def fit_grains(cfg, force=False, clean=False, show_progress=False,
               ids_to_refine=None):
    """Run grain fitting over the job queue, serially or in parallel.

    Parameters
    ----------
    cfg : configuration object; ``cfg.multiprocessing`` caps the worker
        count, and ``cfg.fit_grains.fit_only`` / ``cfg.fit_grains.refit``
        tune the fitting behavior echoed below.
    force, clean : forwarded to ``get_data``.
    show_progress : when True, render a console progress bar.
    ids_to_refine : optional subset of grain ids, forwarded to
        ``get_job_queue``.

    Results are written via ``write_grains_file``; nothing is returned.
    """
    # load the data
    reader, pkwargs = get_data(cfg, show_progress, force, clean)
    job_queue, njobs = get_job_queue(cfg, ids_to_refine)

    # log this before starting progress bar; never spawn more workers than jobs
    ncpus = cfg.multiprocessing
    ncpus = ncpus if ncpus < njobs else njobs
    logger.info('will use %d of %d processors', ncpus, mp.cpu_count())
    if ncpus == 1:
        logger.info('multiprocessing disabled')

    # echo some of the fitting options
    if cfg.fit_grains.fit_only:
        logger.info('\t**fitting only; will not pull spots')
    if cfg.fit_grains.refit is not None:
        msg = 'will perform refit excluding spots > ' + \
              '%.2f pixels and ' %cfg.fit_grains.refit[0] + \
              '%.2f frames from expected values' %cfg.fit_grains.refit[1]
        logger.info(msg)

    start = time.time()
    pbar = None
    if show_progress:
        pbar = ProgressBar(
            widgets=[Bar('>'), ' ', ETA(), ' ', ReverseBar('<')],
            maxval=njobs).start()

    # finally start processing data
    if ncpus == 1:
        # no multiprocessing
        results = []
        w = FitGrainsWorker(job_queue, results, reader,
                            copy.deepcopy(pkwargs), progressbar=pbar)
        w.run()
        # FIX: n_res was undefined on this branch, so the final logging call
        # raised NameError; derive it from the collected results instead.
        n_res = len(results)
    else:
        # multiprocessing
        manager = mp.Manager()
        results = manager.list()
        for i in range(ncpus):
            # lets make a deep copy of the pkwargs, just in case:
            w = FitGrainsWorkerMP(job_queue, results, reader,
                                  copy.deepcopy(pkwargs))
            w.daemon = True
            w.start()
        # poll until every job has reported a result
        while True:
            n_res = len(results)
            if show_progress:
                pbar.update(n_res)
            if n_res == njobs:
                break
            time.sleep(0.1)
    job_queue.join()

    write_grains_file(cfg, results)

    if show_progress:
        pbar.finish()

    elapsed = time.time() - start
    logger.info('processed %d grains in %g minutes', n_res, elapsed / 60)