def setup(self, job, redirect_logging=True):

    job = RPC_Job_Model_Factory.serializer.load_serialized_object(job)[0]
    paths_object = paths.Paths()
    self._scanning_job.id = job.id
    self._scanning_job.computer = AppConfig().computer_human_name
    self._setup_directory()

    if redirect_logging:
        file_path = os.path.join(
            self._project_directory,
            paths_object.scan_log_file_pattern.format(
                self._scanning_job.project_name))
        self._logger.info(
            "{0} is setting up; logging will be directed to file {1}".format(
                job, file_path))
        self._logger.set_output_target(
            file_path, catch_stdout=True, catch_stderr=True)
        self._logger.surpress_prints = False

    self._logger.info("Doing setup")

    self._scanning_effector_data.current_image_path_pattern = os.path.join(
        self._project_directory,
        paths_object.experiment_scan_image_pattern)

    self._scanner = sane.SaneBase(
        scan_mode=self._scanning_job.mode,
        model=self._scanning_job.scanner_hardware)

    self._scanning_effector_data.compile_project_model = (
        compile_project_factory.CompileProjectFactory.create(
            compile_action=(
                COMPILE_ACTION.Initiate
                if self._scanning_job.number_of_scans > 1
                else COMPILE_ACTION.InitiateAndSpawnAnalysis),
            path=paths_object.get_project_settings_path_from_scan_model(
                self._scanning_job),
            fixture_type=FIXTURE.Global,
            fixture_name=self._scanning_job.fixture))

    self._scanning_effector_data.compile_project_model.images = []

    scan_project_file_path = os.path.join(
        self._project_directory,
        paths_object.scan_project_file_pattern.format(
            self._scanning_job.project_name))

    if ScanningModelFactory.serializer.dump(
            self._scanning_job, scan_project_file_path):
        self._logger.info(
            "Saved project settings to '{0}'".format(scan_project_file_path))
    else:
        self._logger.error(
            "Could not save project settings to '{0}'".format(
                scan_project_file_path))

    self._allow_start = True
def __one_init__(self, jobs):
    """
    :type jobs: scanomatic.server.jobs.Jobs
    """
    self._paths = paths.Paths()
    self._logger = logger.Logger("Job Queue")
    self._next_priority = rpc_job_models.JOB_TYPE.Scan
    self._queue = list(
        RPC_Job_Model_Factory.serializer.load(self._paths.rpc_queue))
    self._scanner_manager = ScannerPowerManager()
    self._jobs = jobs
    decorators.register_type_lock(self)
def __one_init__(self): self._logger = logger.Logger("Jobs Handler") self._paths = paths.Paths() self._scanner_manager = scanner_manager.ScannerPowerManager() self._jobs = {} """:type : dict[scanomatic.models.rpc_job_models.RPCJobModel, scanomatic.server.rpcjob.RpcJob] """ self._load_from_file() self._forcingStop = False self._statuses = []
def __init__(self, job):

    self._paths = paths.Paths()

    super(PhenotypeExtractionEffector, self).__init__(
        job, logger_name="Phenotype Extractor '{0}'".format(job.id))

    self._feature_job = job.content_model
    self._job_label = self._feature_job.analysis_directory
    self._progress = 0
    self._times = None
    self._data = None
    self._analysis_base_path = None
    self._phenotyper = None
def setup(self, job):

    if self._started:
        self._logger.warning("Can't setup when started")
        return False

    job = RPC_Job_Model_Factory.serializer.load_serialized_object(job)[0]
    self._feature_job = job.content_model
    self._job.content_model = self._feature_job

    if feature_factory.FeaturesFactory.validate(
            self._feature_job) is not True:
        self._logger.warning("Can't setup, instructions don't validate")
        return False

    self._logger.set_output_target(
        os.path.join(
            self._feature_job.analysis_directory,
            paths.Paths().phenotypes_extraction_log),
        catch_stdout=True, catch_stderr=True)
    self._logger.surpress_prints = False

    self._logger.info("Loading image data from '{0}'".format(
        self._feature_job.analysis_directory))

    times, data = image_data.ImageData.read_image_data_and_time(
        self._feature_job.analysis_directory)

    if times is None or data is None or 0 in map(len, (times, data)):
        self._logger.error(
            "Could not filter image times to match data, or there was no "
            "data. Is this the right directory? It should be an analysis "
            "directory.")
        self.add_message(
            "There is no image data in the given directory or "
            "the image data is corrupt")
        self._running = False
        self._stopping = True
        return False

    self._times = times
    self._data = data
    self._analysis_base_path = (
        image_data.ImageData.directory_path_to_data_path_tuple(
            self._feature_job.analysis_directory)[0])
    self._allow_start = True
def __one_init__(self): self._logger = logger.Logger("Scanner Manager") self._conf = app_config.Config() self._paths = paths.Paths() self._fixtures = fixtures.Fixtures() self._orphan_usbs = set() self._scanners = self._initiate_scanners() self._pm = None self._scanner_queue = [] self._reported_sane_missing = STATE.Unknown Thread(target=self._load_pm).start() decorators.register_type_lock(self)
def __init__(self, image_identifier, pinning, analysis_model):

    self._paths = paths.Paths()

    self._identifier = _create_grid_array_identifier(image_identifier)
    self._analysis_model = analysis_model
    self._pinning_matrix = pinning

    self._guess_grid_cell_size = None
    self._grid_cell_size = None
    self._grid_cells = {}
    """:type: dict[tuple, scanomatic.image_analysis.grid_cell.GridCell]"""
    self._grid = None
    self._grid_cell_corners = None

    self._features = AnalysisFeaturesFactory.create(
        index=self._identifier[-1], shape=tuple(pinning), data=set())
    self._first_analysis = True
def patch_image_file_names_by_interval(path, interval=20.0):
    """
    :param path: Directory containing the images.
    :type path: str
    :param interval: Interval between images, in minutes.
    :type interval: float
    :return: None
    """
    pattern = re.compile(r"(.*)_\d{4}\.tiff")
    sanity_threshold = 3

    source_pattern = "{0}_{1}.tiff"
    target_pattern = paths.Paths().experiment_scan_image_pattern

    images = tuple(
        os.path.basename(i) for i in glob.glob(os.path.join(path, '*.tiff')))

    if not images:
        _logger.error("Directory does not contain any images")
        return

    base_name = ""
    included_images = 0

    for i in images:
        match = pattern.match(i)
        if match:
            included_images += 1
            if not base_name:
                base_name = match.groups()[0]
            elif match.groups()[0] != base_name:
                _logger.error(
                    "Conflicting image names, unsure if '{0}' or '{1}' "
                    "is the project name".format(
                        base_name, match.groups()[0]))
                return
        else:
            _logger.info(
                "Skipping file '{0}' since it doesn't seem to belong "
                "in the project".format(i))

    _logger.info("Will process {0} images".format(included_images))

    image_index = 0
    processed_images = 0
    index_length = 4

    while processed_images < included_images:

        source = os.path.join(
            path,
            source_pattern.format(
                base_name, str(image_index).zfill(index_length)))

        if os.path.isfile(source):
            os.rename(
                source,
                os.path.join(
                    path,
                    target_pattern.format(
                        base_name,
                        str(image_index).zfill(index_length),
                        image_index * 60.0 * interval)))
            processed_images += 1
        else:
            _logger.warning(
                "Missing file with index {0} ({1})".format(
                    image_index, source))

        image_index += 1
        if image_index > included_images * sanity_threshold:
            _logger.error(
                "Aborting because something seems to be amiss. "
                "Currently attempting to process image {0} for a project "
                "which should only contain {1} images. So far only found "
                "{2} images...".format(
                    image_index, included_images, processed_images))
            return

    _logger.info(
        "Successfully renamed {0} images in project {1} using a {2} minute "
        "interval".format(processed_images, base_name, interval))
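
# A minimal usage sketch (the project directory below is a hypothetical
# example). Scans are assumed to have been taken 20 minutes apart; files are
# renamed in place to the experiment image pattern, which encodes the scan
# time in seconds (image_index * 60.0 * interval).
if __name__ == "__main__":
    patch_image_file_names_by_interval(
        "/home/user/projects/my_project", interval=20.0)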
import ConfigParser

import numpy as np

#
# INTERNAL DEPENDENCIES
#

import scanomatic.io.paths as paths

#
# GLOBALS
#

_GRAYSCALE_PATH = paths.Paths().analysis_graycsales

_GRAYSCALE_CONFIGS = ConfigParser.ConfigParser()
_KEY_DEFUALT = 'default'
_KEY_TARGETS = 'targets'

GRAYSCALE_SCALABLE = (
    'width', 'min_width', 'lower_than_half_width',
    'higher_than_half_width', 'length')

GRAYSCALE_CONFIG_KEYS = (
    (_KEY_DEFUALT, _KEY_TARGETS, 'sections') + GRAYSCALE_SCALABLE)

_GRAYSCALE_VALUE_TYPES = {
    'default': bool,
    'targets': eval,
    'sections': int,
    'width': float,
    'min_width': float,
class ImageData(object):

    _LOGGER = logger.Logger("Static Image Data Class")
    _PATHS = paths.Paths()

    @staticmethod
    def write_image(analysis_model, image_model, features):
        """
        :type image_model:
            scanomatic.models.compile_project_model.CompileImageAnalysisModel
        """
        return ImageData._write_image(
            analysis_model.output_directory, image_model.image.index,
            features, analysis_model.image_data_output_item,
            analysis_model.image_data_output_measure)

    @staticmethod
    def _write_image(path, image_index, features, output_item, output_value):

        path = os.path.join(*ImageData.directory_path_to_data_path_tuple(
            path, image_index=image_index))

        if features is None:
            ImageData._LOGGER.warning(
                "Image {0} had no data".format(image_index))
            return

        number_of_plates = features.shape[0]
        plates = [None] * number_of_plates
        ImageData._LOGGER.info(
            "Writing features for {0} plates ({1})".format(
                number_of_plates, features.shape))

        for plate_features in features.data:

            if plate_features is None:
                continue

            plate = np.zeros(plate_features.shape) * np.nan
            ImageData._LOGGER.info(
                "Writing plate features for plate index {0}".format(
                    plate_features.index))
            plates[plate_features.index] = plate

            for cell_features in plate_features.data:

                if output_item in cell_features.data:

                    compartment_features = cell_features.data[output_item]

                    if output_value in compartment_features.data:
                        try:
                            plate[cell_features.index[::-1]] = \
                                compartment_features.data[output_value]
                        except IndexError:
                            ImageData._LOGGER.critical(
                                "Shape mismatch between plate {0} and "
                                "colony position {1}".format(
                                    plate_features.shape,
                                    cell_features.index))
                            return False
                    else:
                        ImageData._LOGGER.info(
                            "Missing data for colony position {0}, "
                            "plate {1}".format(
                                cell_features.index, plate_features.index))
                else:
                    ImageData._LOGGER.info(
                        "Missing compartment for colony position {0}, "
                        "plate {1}".format(
                            cell_features.index, plate_features.index))

        ImageData._LOGGER.info(
            "Saved Image Data '{0}' with {1} plates".format(
                path, len(plates)))
        np.save(path, plates)
        return True

    @staticmethod
    def iter_write_image_from_xml(path, xml_object, output_item,
                                  output_value):

        scans = xml_object.get_scan_times().size
        plates = max(xml_object.get_data().keys()) + 1
        data = xml_object.get_data()

        for scan_id in range(scans):
            features = [None] * plates
            for plate_id in range(plates):
                features[plate_id] = data[plate_id][:, :, scan_id]

            ImageData._write_image(
                path, scan_id, features, output_item=output_item,
                output_value=output_value)

    @staticmethod
    def write_times(analysis_model, image_model, overwrite):
        """
        :type image_model:
            scanomatic.models.compile_project_model.CompileImageAnalysisModel
        """
        if not overwrite:
            current_data = ImageData.read_times(
                analysis_model.output_directory)
        else:
            current_data = np.array([], dtype=np.float)

        if not (image_model.image.index < current_data.size):
            current_data = np.r_[
                current_data,
                [None] * (1 + image_model.image.index -
                          current_data.size)].astype(np.float)

        current_data[image_model.image.index] = \
            image_model.image.time_stamp / _SECONDS_PER_HOUR

        np.save(
            os.path.join(*ImageData.directory_path_to_data_path_tuple(
                analysis_model.output_directory, times=True)),
            current_data)

    @staticmethod
    def write_times_from_xml(path, xml_object):
        np.save(
            os.path.join(*ImageData.directory_path_to_data_path_tuple(
                path, times=True)),
            xml_object.get_scan_times())

    @staticmethod
    def read_times(path):
        path = os.path.join(
            *ImageData.directory_path_to_data_path_tuple(path, times=True))
        ImageData._LOGGER.info("Reading times from {0}".format(path))
        if os.path.isfile(path):
            return np.load(path)
        else:
            ImageData._LOGGER.warning("Times data file not found")
            return np.array([], dtype=np.float)

    @staticmethod
    def read_image(path):
        if os.path.isfile(path):
            return np.load(path)
        else:
            return None

    @staticmethod
    def directory_path_to_data_path_tuple(directory_path,
                                          image_index="*", times=False):

        if (os.path.isdir(directory_path)
                and not directory_path.endswith(os.path.sep)):
            directory_path += os.path.sep

        path_dir = os.path.dirname(directory_path)

        if times:
            path_basename = ImageData._PATHS.image_analysis_time_series
        else:
            path_basename = ImageData._PATHS.image_analysis_img_data

        return path_dir, path_basename.format(image_index)

    @staticmethod
    def iter_image_paths(path_pattern):
        return (p for p in glob.iglob(os.path.join(
            *ImageData.directory_path_to_data_path_tuple(path_pattern))))

    @staticmethod
    def iter_read_images(path):
        """A generator for reading image data given a directory path.

        Args:
            path (str): The path to the directory where the image data is

        Returns:
            Generator of image data

        Simple Usage::

            ``D = np.array(list(ImageData.iter_read_images(".")))``

        Note::

            This method will not return image data as expected by downstream
            feature extraction.

        Note::

            This method does **not** return the data in time order.
        """
        for p in ImageData.iter_image_paths(path):
            yield ImageData.read_image(p)

    @staticmethod
    def convert_per_time_to_per_plate(data):
        """Conversion method for data per time (scan) as generated by the
        image analysis to data per plate as used by feature extraction,
        quality control and user output.

        Args:
            data (iterable): A numpy array or similar holding data per
                time/scan. The elements in data must be numpy arrays.

        Returns:
            numpy array. The data restructured to be sorted by plates.
        """
        if not hasattr(data, "shape"):
            data = np.array(data)

        try:
            new_data = [
                (None if data[0][plate_index] is None else [])
                for plate_index in range(
                    max(scan.shape[0] for scan in data
                        if isinstance(scan, np.ndarray)))]
        except ValueError:
            ImageData._LOGGER.error("There is no data")
            return None

        for scan in data:
            for plate_id, plate in enumerate(scan):
                if plate is None:
                    continue
                new_data[plate_id].append(plate)

        for plate_id, plate in enumerate(new_data):
            if plate is None:
                continue
            p = np.array(plate)
            new_data[plate_id] = np.lib.stride_tricks.as_strided(
                p, (p.shape[1], p.shape[2], p.shape[0]),
                (p.strides[1], p.strides[2], p.strides[0]))

        return np.array(new_data)

    @staticmethod
    def read_image_data_and_time(path):
        """Reads all image data files in a directory and reports the
        indices used and the data restructured per plate.

        Args:
            path (string): The path to the directory with the files.

        Returns:
            tuple (numpy array of time points, numpy array of data)
        """
        times = ImageData.read_times(path)

        data = []
        time_indices = []
        for p in ImageData.iter_image_paths(path):

            try:
                time_indices.append(int(re.findall(r"\d+", p)[-1]))
                data.append(np.load(p))
            except IndexError:
                ImageData._LOGGER.warning(
                    "File '{0}' has no index number in it, need that!".format(
                        p))

        try:
            times = np.array(times[time_indices])
        except IndexError:
            ImageData._LOGGER.error(
                "Could not filter image times to match data")
            return None, None

        sort_list = np.array(time_indices).argsort()
        return times[sort_list], ImageData.convert_per_time_to_per_plate(
            np.array(data)[sort_list])
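
# A minimal usage sketch (the analysis directory below is a hypothetical
# example). Times come back in hours, sorted, and matched against the data,
# which is returned per plate as arrays shaped (rows, columns, scans):
if __name__ == "__main__":
    times, plates = ImageData.read_image_data_and_time(
        "/home/user/projects/my_project/analysis")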
import os
import glob

import scanomatic.io.logger as logger
import scanomatic.io.paths as paths
from scanomatic.models.factories.scanning_factory import ScanningModelFactory

_logger = logger.Logger("Projects util")
_paths = paths.Paths()


def rename_project(new_name, old_name=None, update_folder_name=True,
                   update_image_names=True, update_scan_instructions=True):

    if update_image_names:
        rename_project_images(new_name, old_name=old_name)
    if update_scan_instructions:
        rename_scan_instructions(new_name, old_name=old_name)
    if update_folder_name:
        rename_project_folder(new_name)


def _get_basepath(name):
    return os.path.dirname(os.path.abspath(name))
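
# A minimal usage sketch, assuming new_name is the path of the renamed
# project and old_name the previous project name (an assumption based on the
# helper signatures; rename_project_images, rename_scan_instructions and
# rename_project_folder are defined elsewhere in this module):
if __name__ == "__main__":
    rename_project(
        "/home/user/projects/new_project_name", old_name="old_project_name")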