def process_montage(self, montage):
    """Bin, blend and preview a montage, then push the result to the session DB.

    On any failure the montage is recorded in ``self.failed``.  The CPU
    reservation is released in a ``finally`` block so it is returned even if
    the error-handling path itself raises.
    """
    try:
        src = Path(montage.path)
        dst = Path(self.session.processing_directory) / montage.base_name
        if montage.is_montage:
            # Multi-piece montage: bin, extract piece coordinates, blend the
            # pieces into one image, then render a PNG preview of the blend.
            binned = self.bin_montage_stack(
                src, montage.section, dst.with_suffix('.binned'))
            coords = self.extract_piece_coords_from_montage_stack(
                binned, dst.with_suffix('.coords'))
            blended = self.blend_montage_stack(
                binned, coords, dst.with_suffix('.blended'))
            preview = self.preview_montage(
                blended, dst.with_suffix('.preview.png'))
        else:
            # Single image: preview the requested section directly.
            preview = self.preview_montage(
                src, dst.with_suffix('.preview.png'), section=montage.section)
        montage.preview = str(preview)
        montage.push(self.session.db)
    except Exception as e:
        LOG.exception(
            f'Error processing {montage.base_name} in {self.session.long_name}: {e}'
        )
        self.failed.append(montage.base_name)
    finally:
        # Guarantee the reserved CPUs are returned on every code path.
        ResourceManager.release_cpus(self.required_cpus)
def process_data(self, acquisition_data, motion_correction_data):
    """Run DoG picking over a range of particle sizes and store the results.

    Picks particles in the aligned (preferably dose-weighted) image at 20
    logarithmically spaced detector sizes, writes the picks to a JSON file,
    and pushes a ``DogpickerData`` record to the session database.  Outcome
    is tracked in ``self.finished`` / ``self.failed``; the CPU reservation
    is released exactly once in ``finally``.
    """
    # Prefer the dose-weighted image when motion correction produced one.
    if motion_correction_data.dose_weighted_image_file is not None:
        aligned_image_file = motion_correction_data.dose_weighted_image_file
    else:
        aligned_image_file = motion_correction_data.aligned_image_file
    output_file_base = os.path.join(self.session.processing_directory,
                                    acquisition_data.base_name)
    output_file = '{}_dogpicker.json'.format(output_file_base)
    try:
        image = imaging.load(aligned_image_file)[0]
        mint = None
        maxt = None
        debug = None
        meanmax = None
        # Smallest size considered: 1/50 of the smaller image dimension;
        # scan one decade upward from there in 20 logarithmic steps.
        sizemin = int(np.min(motion_correction_data.dimensions) / 50)
        sizes = np.logspace(np.log10(sizemin), np.log10(sizemin * 10), num=20)
        idogpicker_data = {}
        for size in sizes:
            keys = list(self.detect(image, size, mint, maxt, debug, meanmax))
            LOG.debug("%i -> %i", size, len(keys))
            idogpicker_data[int(size + 0.5)] = keys
        with open(output_file, 'w') as fp:
            json.dump(pretty_floats(idogpicker_data), fp)
        data_model = DogpickerData(acquisition_data.base_name)
        data_model.time = time.time()
        data_model.dogpicker_file = output_file
        data_model.push(self.session.db)
    except Exception:
        # LOG.exception keeps the traceback; LOG.error(e) discarded it.
        LOG.exception("Dogpicker failed")
        self.failed.append(acquisition_data.base_name)
    else:
        self.finished.append(acquisition_data.base_name)
    finally:
        # Single release point replaces the duplicated release/return pair.
        ResourceManager.release_cpus(self.required_cpus)
def process_frames(self, stacks):
    """Parse each frame stack, persist it and its user data, and record the
    outcome per stack; the CPU reservation is released once at the end."""
    for stack in stacks:
        try:
            self.parse_stack(stack)
            stack.push(self.session.db)
            UserData(stack.base_name).push(self.session.db)
            self.update_session(stack)
        except Exception as e:
            logger.exception(f'Error processing {stack.base_name} in {self.session.long_name}: {e}')
            self.failed.append(stack.image_path)
        else:
            # Only reached when every step above succeeded for this stack.
            self.finished.append(stack.image_path)
    ResourceManager.release_cpus(self.required_cpus)
def run(self):
    """One scheduler tick: periodically sync with the DB, then, if a montage
    is queued and CPUs are available, launch a processing thread.

    Fixes a latent NameError in the original: if ``self.queue.get()`` itself
    raised, the except handler referenced the unbound name ``montage``.
    """
    if self.time_since_last_tracking is None or time.time(
    ) - self.time_since_last_tracking >= MontageProcessor.tracking_interval:
        self.sync_with_db()
        self.time_since_last_tracking = time.time()
    if self.queue.empty():
        return
    if ResourceManager.request_cpus(MontageProcessor.required_cpus):
        montage = None
        try:
            montage = self.queue.get()
            process_thread = Thread(target=self.process_montage,
                                    args=[montage])
            process_thread.start()
        except Exception as e:
            ResourceManager.release_cpus(MontageProcessor.required_cpus)
            # Re-queue only if an item was actually dequeued.
            if montage is not None:
                self.queue.put(montage)
            LOG.exception(e)
def run(self):
    """One scheduler tick: refresh tracked data periodically, then, if work
    is queued and CPUs are available, launch a dogpicker thread.

    The original used a bare ``except:`` (which also swallows
    KeyboardInterrupt/SystemExit) and silently dropped the popped item on
    failure; this narrows the handler, re-queues the item, and logs.
    """
    if self.time_since_last_tracking is None or time.time() - self.time_since_last_tracking >= DogpickerProcessor.tracking_interval:
        self.update_tracked_data()
        self.time_since_last_tracking = time.time()
    if len(self.queued) == 0:
        return
    if ResourceManager.request_cpus(DogpickerProcessor.required_cpus):
        motion_correction_data = None
        try:
            motion_correction_data = self.queued.pop()
            acquisition_data = AcquisitionData(motion_correction_data.base_name)
            acquisition_data.fetch(self.session.db)
            process_thread = Thread(
                target=self.process_data,
                args=(acquisition_data, motion_correction_data)
            )
            process_thread.start()
        except Exception:
            ResourceManager.release_cpus(DogpickerProcessor.required_cpus)
            # Put the item back so a transient failure doesn't lose it.
            if motion_correction_data is not None:
                self.queued.append(motion_correction_data)
            LOG.exception('Failed to launch dogpicker processing thread')
def run(self):
    """One scheduler tick: refresh tracked data periodically, then, if work
    is queued and GPUs are available, launch a motion-correction thread.

    The original used a bare ``except:`` and silently dropped the popped
    acquisition on failure; this narrows the handler, re-queues the item,
    and logs — consistent with the other processors' ``run`` methods.
    """
    if self.time_since_last_tracking is None or time.time(
    ) - self.time_since_last_tracking >= Motioncor2Processor.tracking_interval:
        LOG.debug("Starting tracking")
        self.update_tracked_data()
        LOG.debug("Finished tracking")
        self.time_since_last_tracking = time.time()
    if len(self.queued) == 0:
        return
    gpu_id_list = ResourceManager.request_gpus(
        Motioncor2Processor.required_gpus)
    if gpu_id_list is not None:
        acquisition_data_model = None
        try:
            acquisition_data_model = self.queued.pop()
            process_thread = Thread(target=self.process_data,
                                    args=(acquisition_data_model,
                                          gpu_id_list))
            process_thread.start()
        except Exception:
            ResourceManager.release_gpus(gpu_id_list)
            # Put the item back so a transient failure doesn't lose it.
            if acquisition_data_model is not None:
                self.queued.append(acquisition_data_model)
            LOG.exception('Failed to launch motion-correction thread')
def run(self):
    """One scheduler tick: track new frame files periodically, then hand any
    ready stacks to a worker thread.

    Adds an early return when no stacks are ready — the original still
    reserved CPUs and spawned a thread that immediately did nothing and
    released them again.
    """
    if self.time_since_last_tracking is None or time.time() - self.time_since_last_tracking >= FramesFileProcessor.frame_tracking_interval:
        logger.info("Starting tracking")
        self.track_frames()
        logger.info("Finished tracking")
        self.time_since_last_tracking = time.time()
    stacks = self.get_valid_stacks_from_queue()
    logger.debug(f'{len(stacks)} in queue for {self.session.name}')
    stacks = self.filter_for_most_recent_stacks(stacks)
    if not stacks:
        # Nothing to do: don't reserve CPUs or spawn a no-op worker.
        return
    if ResourceManager.request_cpus(FramesFileProcessor.required_cpus):
        for stack in stacks:
            self.queued.remove(stack)
        process_thread = Thread(
            target=self.process_frames,
            args=(stacks,)  # single argument: the list of stacks
        )
        process_thread.start()
def process_data(self, acquisition_data, motion_correction_data):
    """Estimate the CTF of an aligned image with CTFFIND and store the result.

    Builds the interactive CTFFIND answer script (the program reads its
    parameters from stdin, hence the heredoc), runs it, then pushes a
    ``CtfData`` record.  Parsing the EPA/ctffind logs and the DB push are
    each best-effort: a failure is printed but does not abort the rest.
    """
    # Prefer the dose-weighted image when motion correction produced one.
    if motion_correction_data.dose_weighted_image_file is not None:
        aligned_image_file = motion_correction_data.dose_weighted_image_file
    else:
        aligned_image_file = motion_correction_data.aligned_image_file
    output_file_base = os.path.join(self.session.processing_directory,
                                    acquisition_data.base_name)
    output_file = '{}_ctffind.ctf'.format(output_file_base)
    # Resolution window derived from the pixel size, clamped to sane bounds:
    # finest at Nyquist (2 * pixel size) but never finer than 4 A; coarsest
    # at 10 * pixel size but never coarser than 20 A.
    max_resolution = math.floor(motion_correction_data.pixel_size * 2)
    max_resolution = max(max_resolution, 4)
    min_resolution = math.ceil(motion_correction_data.pixel_size * 10)
    min_resolution = max(min_resolution, 20)
    # Ctffind requires a HEREDOC. Yikes.
    command_list = [
        f'{get_config().ctffind_full_path} << EOF > /dev/null',
        aligned_image_file,
        output_file,
        '{}'.format(motion_correction_data.pixel_size),  # pixelsize
        '{}'.format(acquisition_data.voltage),  # acceleration voltage
        '2.70',  # Cs
        '0.1',  # amplitude contrast
        '512',  # size of amplitude spectrum to compute
        f'{min_resolution}',  # min resolution
        f'{max_resolution}',  # max resolution
        '5000',  # min defocus
        '50000',  # max defocus
        '500',  # defocus search step
        'no',  # is astig known
        'yes',  # slower, more exhaustive search
        'yes',  # use a restraint on astig
        '200.0',  # expected (tolerated) astig
        'no',  # find additional phase shift
        'no',  # set expert options
        'EOF'
    ]
    subprocess.call('\n'.join(command_list), shell=True)
    data_model = CtfData(acquisition_data.base_name)
    data_model.time = time.time()
    data_model.ctf_image_file = output_file
    data_model.ctf_image_preview_file = self.create_preview(data_model.ctf_image_file)
    data_model.ctf_log_file = '{}_ctffind.txt'.format(output_file_base)
    data_model.ctf_epa_log_file = '{}_ctffind_avrot.txt'.format(output_file_base)
    data_model.command_list = command_list
    try:
        data_model = self.update_model_from_EPA_log(data_model)
    except Exception as e:
        print("Failed to update ctf data from EPA log {}".format(data_model.ctf_epa_log_file))
        print(e)
    try:
        data_model = self.update_model_from_ctffind_log(data_model)
    except Exception as e:
        print("Failed to update ctf data from ctffind log {}".format(data_model.ctf_log_file))
        print(e)
    try:
        data_model.push(self.session.db)
    except Exception as e:
        print("Failed to upload data to database")
        print(e)
    self.finished.append(data_model.base_name)
    ResourceManager.release_cpus(self.required_cpus)
def process_data(self, acquisition_data_model, gpu_id_list):
    """Motion-correct an acquisition with MotionCor2 and push the result.

    Prepares the gain reference, builds and runs the MotionCor2 command,
    then populates shifts and image metadata before pushing a
    ``MotionCorrectionData`` record.  Each failing stage logs, records the
    item in ``self.failed`` and returns.  The GPU reservation is released in
    a single ``finally`` block (the original duplicated the release call at
    five separate exit points).
    """
    try:
        try:
            gain_file = self.prepare_gain_reference(
                self.session.processing_directory,
                acquisition_data_model.gain_reference_file,
                acquisition_data_model)
        except Exception as e:
            LOG.exception(
                f'Error preparing gain reference for {acquisition_data_model.base_name} in {self.session.long_name}: {e}'
            )
            self.failed.append(acquisition_data_model.base_name)
            return
        output_file_base = '{}/{}'.format(self.session.processing_directory,
                                          acquisition_data_model.base_name)
        output_file = '{}_mc.mrc'.format(output_file_base)
        output_file_dose_weighted = '{}_mc_DW.mrc'.format(output_file_base)
        output_log_file = '{}_mc.log'.format(output_file_base)
        bin_amount = int(Motioncor2Processor.target_binning /
                         acquisition_data_model.binning)
        input_flag = '-InTiff' if acquisition_data_model.file_format == '.tif' else '-InMrc'
        dose_per_pixel = acquisition_data_model.frame_dose * (
            acquisition_data_model.pixel_size**2)
        # Try to automatically choose grouping. Should have 0.4e/pix/frame,
        # but make sure not too much grouping.
        group_amount = math.ceil(0.4 / dose_per_pixel)
        if group_amount > (acquisition_data_model.frame_count / 3):
            group_amount = math.floor(acquisition_data_model.frame_count / 3)
        if group_amount > 7:
            group_amount = 7
        command_list = [
            f'{get_config().motioncor2_full_path}',
            f'{input_flag} {acquisition_data_model.image_path}',
            f'-OutMrc {output_file}',
            f'-Group {group_amount}',
            f'-Kv {acquisition_data_model.voltage}',
            f'-gain {gain_file}',
            f'-PixSize {acquisition_data_model.pixel_size}',
            f'-FmDose {acquisition_data_model.frame_dose}',
            f'-FtBin {bin_amount}' if bin_amount != 1 else '',
            '-Iter 10',
            '-Tol 0.5',
            '-Gpu {}'.format(','.join([str(gpu_id) for gpu_id in gpu_id_list])),
            f'> {output_log_file}'
        ]
        subprocess.call(' '.join(command_list), shell=True)
        data_model = MotionCorrectionData(acquisition_data_model.base_name)
        data_model.time = time.time()
        data_model.non_weighted_image_file = output_file
        data_model.log_file = output_log_file
        data_model.binning = Motioncor2Processor.target_binning
        data_model.grouped_by = group_amount
        data_model.command_list = command_list
        # MotionCor2 only writes the _DW file when dose weighting ran.
        if os.path.exists(output_file_dose_weighted):
            data_model.dose_weighted_image_file = output_file_dose_weighted
            data_model.corrected_image_file = output_file_dose_weighted
        else:
            data_model.corrected_image_file = output_file
        data_model.preview_file = self.create_preview(
            data_model.corrected_image_file)
        try:
            data_model = self.populate_shifts_from_log(data_model,
                                                       output_log_file)
        except Exception as e:
            LOG.exception(
                f'Error reading shifts of {data_model.base_name} in {self.session.long_name}: {e}'
            )
            self.failed.append(data_model.base_name)
            return
        try:
            data_model = self.populate_image_metadata_from_mrc(
                data_model, output_file)
        except Exception as e:
            LOG.exception(
                f'Error populating image metadata of {data_model.base_name} in {self.session.long_name}: {e}'
            )
            self.failed.append(data_model.base_name)
            return
        try:
            data_model.push(self.session.db)
        except Exception as e:
            LOG.exception(
                f'Error pushing results to db {data_model.base_name} in {self.session.long_name}: {e}'
            )
            self.failed.append(data_model.base_name)
            return
        self.finished.append(data_model.base_name)
    finally:
        # Guarantee the GPUs are returned on every path, including returns
        # from the error branches above.
        ResourceManager.release_gpus(gpu_id_list)