def clip_centr_df(pw, peaks_cobjects, mz_min, mz_max):
    def clip_centr_df_chunk(peaks_i, peaks_cobject, storage):
        print(f'Clipping centroids dataframe chunk {peaks_i}')
        centroids_df_chunk = deserialise(
            storage.get_cloudobject(peaks_cobject,
                                    stream=True)).sort_values('mz')
        centroids_df_chunk = centroids_df_chunk[centroids_df_chunk.mz > 0]

        ds_mz_range_unique_formulas = centroids_df_chunk[
            (mz_min < centroids_df_chunk.mz)
            & (centroids_df_chunk.mz < mz_max)].index.unique()
        centr_df_chunk = centroids_df_chunk[centroids_df_chunk.index.isin(
            ds_mz_range_unique_formulas)].reset_index()
        clip_centr_chunk_cobject = storage.put_cloudobject(
            serialise(centr_df_chunk))

        return clip_centr_chunk_cobject, centr_df_chunk.shape[0]

    memory_capacity_mb = 512
    futures = pw.map(clip_centr_df_chunk,
                     list(enumerate(peaks_cobjects)),
                     runtime_memory=memory_capacity_mb)
    clip_centr_chunks_cobjects, centr_n = list(zip(*pw.get_result(futures)))
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(futures))

    clip_centr_chunks_cobjects = list(clip_centr_chunks_cobjects)
    centr_n = sum(centr_n)
    logger.info(f'Prepared {centr_n} centroids')
    return clip_centr_chunks_cobjects, centr_n
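# A minimal usage sketch (hypothetical values; `pw` is assumed to be a
# lithops.FunctionExecutor and `peaks_cobjects` the list returned by
# calculate_centroids):
#
#   clip_cobjects, centr_n = clip_centr_df(pw, peaks_cobjects,
#                                          mz_min=80.0, mz_max=1000.0)
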
def define_centr_segments(pw, clip_centr_chunks_cobjects, centr_n, ds_segm_n,
                          ds_segm_size_mb):
    logger.info('Defining centroids segments bounds')

    def get_first_peak_mz(cobject, id, storage):
        print(
            f'Extracting first peak mz values from clipped centroids dataframe {id}'
        )
        centr_df = read_cloud_object_with_retry(storage, cobject, deserialise)
        first_peak_df = centr_df[centr_df.peak_i == 0]
        return first_peak_df.mz.values

    memory_capacity_mb = 512
    futures = pw.map(get_first_peak_mz,
                     clip_centr_chunks_cobjects,
                     runtime_memory=memory_capacity_mb)
    first_peak_df_mz = np.concatenate(pw.get_result(futures))
    PipelineStats.append_func(futures, memory_mb=memory_capacity_mb)

    ds_size_mb = ds_segm_n * ds_segm_size_mb
    data_per_centr_segm_mb = 50
    peaks_per_centr_segm = 1e4
    centr_segm_n = int(
        max(ds_size_mb // data_per_centr_segm_mb,
            centr_n // peaks_per_centr_segm, 32))

    segm_bounds_q = [i / centr_segm_n for i in range(centr_segm_n)]
    centr_segm_lower_bounds = np.quantile(first_peak_df_mz, segm_bounds_q)

    logger.info(
        f'Generated {len(centr_segm_lower_bounds)} centroids bounds: {centr_segm_lower_bounds[0]}...{centr_segm_lower_bounds[-1]}'
    )
    return centr_segm_lower_bounds
    def segment_ds(self, use_cache=True, debug_validate=False):
        cache_key = ':ds/segment_ds.cache'

        if self.hybrid_impl:
            if use_cache and self.cacher.exists(cache_key):
                result = self.cacher.load(cache_key)
                logger.info(f'Loaded {len(result[2])} dataset segments from cache')
            else:
                sort_memory = 2**32
                fs = self.lithops_vm_executor.call_async(
                    load_and_split_ds_vm,
                    (self.imzml_cobject, self.ibd_cobject, self.ds_segm_size_mb, sort_memory),
                )
                result = self.lithops_vm_executor.get_result(fs)

                logger.info(f'Segmented dataset chunks into {len(result[2])} segments')
                self.cacher.save(result, cache_key)
            (self.imzml_reader, self.ds_segments_bounds, self.ds_segms_cobjects,
             self.ds_segms_len, ds_segm_stats) = result
            for func_name, exec_time in ds_segm_stats:
                if func_name == 'upload_segments':
                    cobjs_n = len(self.ds_segms_cobjects)
                else:
                    cobjs_n = 0
                PipelineStats.append_vm(func_name, exec_time, cloud_objects_n=cobjs_n)
        else:
            if use_cache and self.cacher.exists(cache_key):
                self.ds_segments_bounds, self.ds_segms_cobjects, self.ds_segms_len = \
                    self.cacher.load(cache_key)
                logger.info(f'Loaded {len(self.ds_segms_cobjects)} dataset segments from cache')
            else:
                sample_sp_n = 1000
                self.ds_segments_bounds = define_ds_segments(
                    self.lithops_executor,
                    self.ibd_cobject,
                    self.imzml_reader_cobject,
                    self.ds_segm_size_mb,
                    sample_sp_n,
                )
                self.ds_segms_cobjects, self.ds_segms_len = segment_spectra(
                    self.lithops_executor,
                    self.ds_chunks_cobjects,
                    self.ds_segments_bounds,
                    self.ds_segm_size_mb,
                    self.imzml_reader.mzPrecision,
                )
                logger.info(f'Segmented dataset chunks into {len(self.ds_segms_cobjects)} segments')
                self.cacher.save((self.ds_segments_bounds, self.ds_segms_cobjects, self.ds_segms_len), cache_key)

        self.ds_segm_n = len(self.ds_segms_cobjects)
        self.is_intensive_dataset = self.ds_segm_n * self.ds_segm_size_mb > 5000

        if debug_validate:
            validate_ds_segments(
                self.lithops_executor, self.imzml_reader, self.ds_segments_bounds,
                self.ds_segms_cobjects, self.ds_segms_len, self.hybrid_impl,
            )
    def annotate(self, use_cache=True):
        cache_key = ':ds/:db/annotate.cache'

        if use_cache and self.cacher.exists(cache_key):
            self.formula_metrics_df, self.images_cloud_objs = self.cacher.load(cache_key)
            logger.info(f'Loaded {self.formula_metrics_df.shape[0]} metrics from cache')
        else:
            logger.info('Annotating...')
            if self.hybrid_impl:
                memory_capacity_mb = 2048 if self.is_intensive_dataset else 1024
            else:
                memory_capacity_mb = 4096 if self.is_intensive_dataset else 2048
            process_centr_segment = create_process_segment(self.ds_segms_cobjects,
                                                           self.ds_segments_bounds, self.ds_segms_len, self.imzml_reader,
                                                           self.image_gen_config, memory_capacity_mb, self.ds_segm_size_mb,
                                                           self.hybrid_impl)

            futures = self.lithops_executor.map(
                process_centr_segment,
                [co for co in self.db_segms_cobjects],
                runtime_memory=memory_capacity_mb,
            )
            formula_metrics_list, images_cloud_objs = zip(*self.lithops_executor.get_result(futures))
            self.formula_metrics_df = pd.concat(formula_metrics_list)
            self.images_cloud_objs = list(chain(*images_cloud_objs))
            PipelineStats.append_func(futures, memory_mb=memory_capacity_mb, cloud_objects_n=len(self.images_cloud_objs))
            logger.info(f'Metrics calculated: {self.formula_metrics_df.shape[0]}')
            self.cacher.save((self.formula_metrics_df, self.images_cloud_objs), cache_key)
def define_ds_segments(pw, ibd_cobject, imzml_reader_cobject, ds_segm_size_mb,
                       sample_n):
    def get_segm_bounds(storage):
        imzml_reader = read_cloud_object_with_retry(storage,
                                                    imzml_reader_cobject,
                                                    deserialise)
        sp_n = len(imzml_reader.coordinates)
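        # NB: np.random.choice samples with replacement by default, so the
        # sample may contain duplicate spectra; that is acceptable for
        # estimating mz quantiles.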
        sample_sp_inds = np.random.choice(np.arange(sp_n), min(sp_n, sample_n))
        print(f'Sampling {len(sample_sp_inds)} spectra')
        spectra_sample = list(
            get_spectra(storage, ibd_cobject, imzml_reader, sample_sp_inds))

        spectra_mzs = np.concatenate(
            [mzs for sp_id, mzs, ints in spectra_sample])
        print(f'Got {len(spectra_mzs)} mzs')

        total_size = 3 * spectra_mzs.nbytes * sp_n / len(sample_sp_inds)

        segm_n = int(np.ceil(total_size / (ds_segm_size_mb * 2**20)))

        segm_bounds_q = [i / segm_n for i in range(segm_n + 1)]
        segm_lower_bounds = [
            np.quantile(spectra_mzs, q) for q in segm_bounds_q
        ]
        return np.array(
            list(zip(segm_lower_bounds[:-1], segm_lower_bounds[1:])))

    logger.info('Defining dataset segments bounds')
    memory_capacity_mb = 1024
    future = pw.call_async(get_segm_bounds, [],
                           runtime_memory=memory_capacity_mb)
    ds_segments = pw.get_result(future)
    PipelineStats.append_func(future, memory_mb=memory_capacity_mb)
    return ds_segments
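# Standalone sketch of the quantile-based bounds computed in get_segm_bounds
# (synthetic mz values, not part of the pipeline):
#
#   import numpy as np
#   mzs = np.sort(np.random.uniform(100, 1000, 10_000))
#   segm_n = 4
#   edges = [np.quantile(mzs, i / segm_n) for i in range(segm_n + 1)]
#   bounds = np.array(list(zip(edges[:-1], edges[1:])))  # shape (segm_n, 2)
#   # Each [lower, upper] row covers ~the same number of peaks, so segments
#   # end up roughly equal in size regardless of how mz values are distributed.
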
    def build_database(self, use_cache=True, debug_validate=False):
        if self.hybrid_impl:
            cache_key = ':ds/:db/build_database.cache'
            if use_cache and self.cacher.exists(cache_key):
                self.formula_cobjects, self.db_data_cobjects = self.cacher.load(cache_key)
                logger.info(f'Loaded {len(self.formula_cobjects)} formula segments and'
                            f' {len(self.db_data_cobjects)} db_data objects from cache')
            else:
                futures = self.lithops_vm_executor.call_async(
                    build_database_local,
                    (self.db_config, self.ds_config, self.mols_dbs_cobjects)
                )
                self.formula_cobjects, self.db_data_cobjects, build_db_exec_time = self.lithops_vm_executor.get_result(futures)
                PipelineStats.append_vm('build_database', build_db_exec_time,
                                        cloud_objects_n=len(self.formula_cobjects))
                logger.info(f'Built {len(self.formula_cobjects)} formula segments and'
                            f' {len(self.db_data_cobjects)} db_data objects')
                self.cacher.save((self.formula_cobjects, self.db_data_cobjects), cache_key)
        else:
            cache_key = ':db/build_database.cache'
            if use_cache and self.cacher.exists(cache_key):
                self.formula_cobjects, self.formula_to_id_cobjects = self.cacher.load(cache_key)
                logger.info(f'Loaded {len(self.formula_cobjects)} formula segments and'
                            f' {len(self.formula_to_id_cobjects)} formula-to-id chunks from cache')
            else:
                self.formula_cobjects, self.formula_to_id_cobjects = build_database(
                    self.lithops_executor, self.db_config, self.mols_dbs_cobjects
                )
                logger.info(f'Built {len(self.formula_cobjects)} formula segments and'
                            f' {len(self.formula_to_id_cobjects)} formula-to-id chunks')
                self.cacher.save((self.formula_cobjects, self.formula_to_id_cobjects), cache_key)

        if debug_validate:
            validate_formula_cobjects(self.storage, self.formula_cobjects)
    def run_fdr(self, use_cache=True):
        cache_key = ':ds/:db/run_fdr.cache'

        if use_cache and self.cacher.exists(cache_key):
            self.fdrs = self.cacher.load(cache_key)
            logger.info('Loaded fdrs from cache')
        else:
            if self.hybrid_impl:
                futures = self.lithops_vm_executor.call_async(
                    calculate_fdrs_vm,
                    (self.formula_metrics_df, self.db_data_cobjects),
                )
                self.fdrs, fdr_exec_time = self.lithops_vm_executor.get_result(futures)

                PipelineStats.append_vm('calculate_fdrs', fdr_exec_time)
            else:
                rankings_df = build_fdr_rankings(
                    self.lithops_executor, self.ds_config, self.db_config, self.mols_dbs_cobjects,
                    self.formula_to_id_cobjects, self.formula_metrics_df
                )
                self.fdrs = calculate_fdrs(self.lithops_executor, rankings_df)
            self.cacher.save(self.fdrs, cache_key)

        logger.info('Number of annotations with FDR less than:')
        for fdr_step in [0.05, 0.1, 0.2, 0.5]:
            logger.info(f'{fdr_step*100:2.0f}%: {(self.fdrs.fdr < fdr_step).sum()}')
def calculate_centroids(pw, formula_cobjects, ds_config):
    polarity = ds_config['polarity']
    isocalc_sigma = ds_config['isocalc_sigma']

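    # For each formula, emit one (formula_i, peak_i, mz, intensity) tuple per
    # isotopic peak; e.g. formula_i=7 with peaks at mz 100.0 and 101.1 yields
    # [(7, 0, 100.0, int0), (7, 1, 101.1, int1)].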
    def calculate_peaks_for_formula(formula_i, formula):
        mzs, ints = isocalc_wrapper.centroids(formula)
        if mzs is not None:
            return list(zip(repeat(formula_i), range(len(mzs)), mzs, ints))
        else:
            return []

    def calculate_peaks_chunk(segm_i, segm_cobject, storage):
        print(f'Calculating peaks from formulas chunk {segm_i}')
        chunk_df = deserialise(
            storage.get_cloudobject(segm_cobject, stream=True))
        peaks = [
            peak for formula_i, formula in chunk_df.items()
            for peak in calculate_peaks_for_formula(formula_i, formula)
        ]
        peaks_df = pd.DataFrame(peaks,
                                columns=['formula_i', 'peak_i', 'mz', 'int'])
        peaks_df.set_index('formula_i', inplace=True)

        print(f'Storing centroids chunk {segm_i}')
        peaks_cobject = storage.put_cloudobject(serialise(peaks_df))

        return peaks_cobject, peaks_df.shape[0]

    # Import lazily so that the rest of the pipeline still works if the dependency is missing
    from annotation_pipeline.isocalc_wrapper import IsocalcWrapper
    isocalc_wrapper = IsocalcWrapper({
        # These instrument settings are usually customized on a per-dataset basis out of a set of
        # 18 possible combinations, but most of EMBL's datasets are compatible with the following settings:
        'charge': {
            'polarity': polarity,
            'n_charges': 1,
        },
        # Rounding to match production implementation
        'isocalc_sigma': float(f'{isocalc_sigma:f}'),
    })

    memory_capacity_mb = 2048
    futures = pw.map(calculate_peaks_chunk,
                     list(enumerate(formula_cobjects)),
                     runtime_memory=memory_capacity_mb)
    results = pw.get_result(futures)
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(futures))

    num_centroids = sum(count for cobj, count in results)
    n_centroids_chunks = len(results)
    peaks_cobjects = [cobj for cobj, count in results]
    logger.info(
        f'Calculated {num_centroids} centroids in {n_centroids_chunks} chunks')
    return peaks_cobjects
    def __init__(self, ds_config, db_config, use_db_cache=True, use_ds_cache=True, hybrid_impl='auto'):
        self.config = default_config()
        self.ds_config = ds_config
        self.db_config = db_config
        self.use_db_cache = use_db_cache
        self.use_ds_cache = use_ds_cache
        if hybrid_impl == 'auto':
            self.hybrid_impl = (
                self.config['lithops']['mode'] == 'localhost'
                or self.config['lithops']['mode'] == 'serverless' and 'ibm_vpc' in self.config
            )
            if self.hybrid_impl:
                logger.info('Using the Hybrid implementation')
            else:
                logger.info('Using the pure Serverless implementation')
        else:
            self.hybrid_impl = hybrid_impl

        lithops_bucket = self.config['lithops']['storage_bucket']
        self.ds_bucket = self.config.get('storage', {}).get('ds_bucket', lithops_bucket)

        self.lithops_executor = lithops.FunctionExecutor(config=self.config, runtime_memory=2048)
        if self.hybrid_impl:
            if self.config['lithops']['mode'] == 'localhost':
                self.lithops_vm_executor = self.lithops_executor
            else:
                self.lithops_vm_executor = lithops.StandaloneExecutor(config=self.config)

        self.storage = Storage(config=self.config)

        cache_namespace = 'vm' if self.hybrid_impl else 'function'
        self.cacher = PipelineCacher(
            self.storage, lithops_bucket, cache_namespace, self.ds_config["name"], self.db_config["name"]
        )
        if not self.use_db_cache or not self.use_ds_cache:
            self.cacher.clean(database=not self.use_db_cache, dataset=not self.use_ds_cache)

        stats_path_cache_key = ':ds/:db/stats_path.cache'
        if self.cacher.exists(stats_path_cache_key):
            self.stats_path = self.cacher.load(stats_path_cache_key)
            PipelineStats.path = self.stats_path
            logger.info(f'Using cached {self.stats_path} for statistics')
        else:
            PipelineStats.init()
            self.stats_path = PipelineStats.path
            self.cacher.save(self.stats_path, stats_path_cache_key)
            logger.info(f'Initialised {self.stats_path} for statistics')

        self.ds_segm_size_mb = 128
        self.image_gen_config = {
            "q": 99,
            "do_preprocessing": False,
            "nlevels": 30,
            "ppm": 3.0
        }
def calculate_fdrs(pw, rankings_df):
    def run_ranking(target_cobject, decoy_cobject, storage):
        target = read_cloud_object_with_retry(storage, target_cobject,
                                              deserialise)
        decoy = read_cloud_object_with_retry(storage, decoy_cobject,
                                             deserialise)
        merged = pd.concat(
            [target.assign(is_target=1),
             decoy.assign(is_target=0)],
            sort=False)
        merged = merged.sort_values('msm', ascending=False)
        decoy_cumsum = (merged.is_target == 0).cumsum()
        target_cumsum = merged.is_target.cumsum()
        base_fdr = np.clip(decoy_cumsum / target_cumsum, 0, 1)
        base_fdr[np.isnan(base_fdr)] = 1
        target_fdrs = merged.assign(fdr=base_fdr)[lambda df: df.is_target == 1]
        target_fdrs = target_fdrs.drop('is_target', axis=1)
        target_fdrs = target_fdrs.sort_values('msm')
        target_fdrs = target_fdrs.assign(
            fdr=np.minimum.accumulate(target_fdrs.fdr))
        target_fdrs = target_fdrs.sort_index()
        return target_fdrs

    def merge_rankings(target_row, decoy_cobjects, storage):
        print("Merging rankings...")
        print(target_row)
        rankings = [
            run_ranking(target_row.cobject, decoy_cobject, storage)
            for decoy_cobject in decoy_cobjects
        ]
        mols = (pd.concat(rankings).rename_axis(
            'formula_i').reset_index().groupby('formula_i').agg({
                'fdr': np.nanmedian,
                'mol': 'first'
            }).assign(database_path=target_row.database_path,
                      adduct=target_row.adduct,
                      modifier=target_row.modifier))
        return mols

    ranking_jobs = []
    for group_i, group in rankings_df.groupby('group_i'):
        target_rows = group[group.is_target]
        decoy_rows = group[~group.is_target]

        for i, target_row in target_rows.iterrows():
            ranking_jobs.append((target_row, decoy_rows.cobject.tolist()))

    memory_capacity_mb = 256
    futures = pw.map(merge_rankings,
                     ranking_jobs,
                     runtime_memory=memory_capacity_mb)
    results = pw.get_result(futures)
    PipelineStats.append_func(futures, memory_mb=memory_capacity_mb)

    return pd.concat(results)
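# Standalone sketch of the target-decoy FDR estimate used in run_ranking
# (synthetic scores, not part of the pipeline):
#
#   import numpy as np
#   import pandas as pd
#   merged = pd.DataFrame({
#       'msm':       [0.9, 0.8, 0.7, 0.6, 0.5],
#       'is_target': [1,   1,   0,   1,   0],
#   })  # already sorted by msm descending
#   decoy_cumsum = (merged.is_target == 0).cumsum()
#   target_cumsum = merged.is_target.cumsum()
#   fdr = np.clip(decoy_cumsum / target_cumsum, 0, 1)  # 0, 0, 1/2, 1/3, 2/3
#   # At each score threshold, FDR ~ decoys passing / targets passing;
#   # np.minimum.accumulate then makes the curve monotonic in msm.
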
def get_imzml_reader(pw, imzml_cobject):
    def get_portable_imzml_reader(storage):
        imzml_stream = storage.get_cloudobject(imzml_cobject, stream=True)
        parser = ImzMLParser(imzml_stream, ibd_file=None)
        imzml_reader = parser.portable_spectrum_reader()
        imzml_reader_cobject = storage.put_cloudobject(serialise(imzml_reader))
        return imzml_reader, imzml_reader_cobject

    memory_capacity_mb = 1024
    future = pw.call_async(get_portable_imzml_reader, [],
                           runtime_memory=memory_capacity_mb)
    imzml_reader, imzml_reader_cobject = pw.get_result(future)
    PipelineStats.append_func(future,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=1)

    return imzml_reader, imzml_reader_cobject
def get_target_images(pw,
                      images_cloud_objs,
                      imzml_reader,
                      targets,
                      as_png=True,
                      only_first_isotope=True):
    def get_target_images_chunk(images_cobject, storage):
        images = {}
        segm_images = read_cloud_object_with_retry(storage, images_cobject,
                                                   deserialise)

        for k, imgs in segm_images.items():
            if k in targets:
                if only_first_isotope:
                    imgs = imgs[:1]
                if as_png:
                    imgs = [
                        to_png(img, mask) if img is not None else None
                        for img in imgs
                    ]
                images[k] = imgs
        return images

    mask = make_sample_area_mask(imzml_reader.coordinates)

    memory_capacity_mb = 1024
    futures = pw.map(
        get_target_images_chunk,
        [co for co in images_cloud_objs],
        runtime_memory=memory_capacity_mb,
    )

    all_images = {}
    for image_set in pw.get_result(futures):
        all_images.update(image_set)

    PipelineStats.append_func(futures, memory_mb=memory_capacity_mb)

    return all_images
def chunk_spectra(pw, ibd_cobject, imzml_reader_cobject, imzml_reader):
    MAX_CHUNK_SIZE = 512 * 1024**2  # 512MB

    sp_id_to_idx = get_pixel_indices(imzml_reader.coordinates)
    row_size = 3 * max(4,
                       np.dtype(imzml_reader.mzPrecision).itemsize,
                       np.dtype(imzml_reader.intensityPrecision).itemsize)

    def plan_chunks():
        chunk_sp_inds = []

        estimated_size_b = 0  # running chunk size in bytes (MAX_CHUNK_SIZE is also in bytes)
        # Iterate in the same order that intensities are laid out in the file;
        # hopefully this will prevent fragmented read patterns
        for sp_i in np.argsort(imzml_reader.intensityOffsets):
            spectrum_size = imzml_reader.mzLengths[sp_i] * row_size
            if estimated_size_b + spectrum_size > MAX_CHUNK_SIZE:
                estimated_size_b = 0
                yield np.array(chunk_sp_inds)
                chunk_sp_inds = []

            estimated_size_b += spectrum_size
            chunk_sp_inds.append(sp_i)

        if chunk_sp_inds:
            yield np.array(chunk_sp_inds)

    def upload_chunk(ch_i, storage):
        chunk_sp_inds = chunks[ch_i]
        # Get imzml_reader from COS because it's too big to include via Lithops captured vars
        imzml_reader = read_cloud_object_with_retry(storage,
                                                    imzml_reader_cobject,
                                                    deserialise)
        # Total number of (pixel_idx, mz, int) rows across the chunk's spectra
        n_rows = sum(imzml_reader.mzLengths[sp_i] for sp_i in chunk_sp_inds)
        sp_mz_int_buf = np.zeros((n_rows, 3),
                                 dtype=imzml_reader.mzPrecision)

        chunk_start = 0
        for sp_i, mzs, ints in get_spectra(storage, ibd_cobject, imzml_reader,
                                           chunk_sp_inds):
            chunk_end = chunk_start + len(mzs)
            sp_mz_int_buf[chunk_start:chunk_end, 0] = sp_id_to_idx[sp_i]
            sp_mz_int_buf[chunk_start:chunk_end, 1] = mzs
            sp_mz_int_buf[chunk_start:chunk_end, 2] = ints
            chunk_start = chunk_end

        by_mz = np.argsort(sp_mz_int_buf[:, 1])
        sp_mz_int_buf = sp_mz_int_buf[by_mz]
        del by_mz

        chunk = serialise(sp_mz_int_buf)
        size_mb = sys.getsizeof(chunk) / 1024**2
        logger.info(f'Uploading spectra chunk {ch_i} - {size_mb:.2f} MB')
        chunk_cobject = storage.put_cloudobject(chunk)
        logger.info(f'Spectra chunk {ch_i} finished')
        return chunk_cobject

    chunks = list(plan_chunks())
    memory_capacity_mb = 3072
    futures = pw.map(upload_chunk, [(i, ) for i in range(len(chunks))],
                     runtime_memory=memory_capacity_mb)
    ds_chunks_cobjects = pw.get_result(futures)
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(chunks))

    return ds_chunks_cobjects
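# Standalone sketch of the greedy packing performed by plan_chunks (toy
# per-spectrum byte counts and a hypothetical cap):
#
#   sizes = [300, 300, 200, 400]
#   cap = 512
#   chunks, current, acc = [], [], 0
#   for i, size in enumerate(sizes):
#       if acc + size > cap:  # flush before the cap would be exceeded
#           chunks.append(current)
#           current, acc = [], 0
#       current.append(i)
#       acc += size
#   if current:
#       chunks.append(current)
#   # chunks == [[0], [1, 2], [3]]
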
def segment_spectra(pw, ds_chunks_cobjects, ds_segments_bounds,
                    ds_segm_size_mb, ds_segm_dtype):
    ds_segm_n = len(ds_segments_bounds)

    # extend boundaries of the first and last segments
    # to include all mzs outside of the spectra sample mz range
    ds_segments_bounds = ds_segments_bounds.copy()
    ds_segments_bounds[0, 0] = 0
    ds_segments_bounds[-1, 1] = MAX_MZ_VALUE

    # define first level segmentation and then segment each one into desired number
    first_level_segm_size_mb = 512
    first_level_segm_n = (len(ds_segments_bounds) *
                          ds_segm_size_mb) // first_level_segm_size_mb
    first_level_segm_n = max(first_level_segm_n, 1)
    ds_segments_bounds = np.array_split(ds_segments_bounds, first_level_segm_n)

    def segment_spectra_chunk(chunk_cobject, id, storage):
        print(f'Segmenting spectra chunk {id}')
        sp_mz_int_buf = read_cloud_object_with_retry(storage, chunk_cobject,
                                                     deserialise)

        def _first_level_segment_upload(segm_i):
            l = ds_segments_bounds[segm_i][0, 0]
            r = ds_segments_bounds[segm_i][-1, 1]
            segm_start, segm_end = np.searchsorted(
                sp_mz_int_buf[:, 1], (l, r))  # mz expected to be in column 1
            segm = sp_mz_int_buf[segm_start:segm_end]
            return storage.put_cloudobject(serialise(segm))

        with ThreadPoolExecutor(max_workers=128) as pool:
            sub_segms_cobjects = list(
                pool.map(_first_level_segment_upload,
                         range(len(ds_segments_bounds))))

        return sub_segms_cobjects

    memory_safe_mb = 1536
    memory_capacity_mb = first_level_segm_size_mb * 2 + memory_safe_mb
    first_futures = pw.map(
        segment_spectra_chunk,
        [(co, ) for co in ds_chunks_cobjects],
        runtime_memory=memory_capacity_mb,
    )
    first_level_segms_cobjects = pw.get_result(first_futures)
    if not isinstance(first_futures, list):
        first_futures = [first_futures]
    PipelineStats.append_func(first_futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(first_futures) *
                              len(ds_segments_bounds))

    def merge_spectra_chunk_segments(segm_cobjects, id, storage):
        print(f'Merging segment {id} spectra chunks')

        def _merge(ch_i):
            segm_spectra_chunk = read_cloud_object_with_retry(
                storage, segm_cobjects[ch_i], deserialise)
            return segm_spectra_chunk

        with ThreadPoolExecutor(max_workers=128) as pool:
            segm = list(pool.map(_merge, range(len(segm_cobjects))))

        segm = np.concatenate(segm)

        # Alternative in-place sorting (slower) :
        # segm.view(f'{ds_segm_dtype},{ds_segm_dtype},{ds_segm_dtype}').sort(order=['f1'], axis=0)
        segm = segm[segm[:, 1].argsort()]

        bounds_list = ds_segments_bounds[id]

        segms_len = []
        segms_cobjects = []
        for segm_j in range(len(bounds_list)):
            l, r = bounds_list[segm_j]
            segm_start, segm_end = np.searchsorted(
                segm[:, 1], (l, r))  # mz expected to be in column 1
            sub_segm = segm[segm_start:segm_end]
            segms_len.append(len(sub_segm))
            base_id = sum([len(bounds) for bounds in ds_segments_bounds[:id]])
            segm_i = base_id + segm_j
            print(f'Storing dataset segment {segm_i}')
            segms_cobjects.append(storage.put_cloudobject(serialise(sub_segm)))

        return segms_len, segms_cobjects

    second_level_segms_cobjects = np.transpose(
        first_level_segms_cobjects).tolist()
    second_level_segms_cobjects = [
        (segm_cobjects, ) for segm_cobjects in second_level_segms_cobjects
    ]

    # same memory capacity
    second_futures = pw.map(merge_spectra_chunk_segments,
                            second_level_segms_cobjects,
                            runtime_memory=memory_capacity_mb)
    ds_segms_len, ds_segms_cobjects = list(zip(*pw.get_result(second_futures)))
    ds_segms_len = list(np.concatenate(ds_segms_len))
    ds_segms_cobjects = list(np.concatenate(ds_segms_cobjects))
    PipelineStats.append_func(second_futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=ds_segm_n)

    assert len(ds_segms_cobjects) == len(
        set(co.key for co in
            ds_segms_cobjects)), 'Duplicate CloudObjects in ds_segms_cobjects'

    pw.clean(cs=np.concatenate(first_level_segms_cobjects).tolist())
    return ds_segms_cobjects, ds_segms_len
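# Standalone sketch of the np.searchsorted slicing used above: because the
# buffer is sorted by mz, a (lower, upper) pair maps to one contiguous
# [start:end) row range.
#
#   import numpy as np
#   mzs = np.array([100.0, 150.0, 200.0, 250.0, 300.0])  # sorted
#   start, end = np.searchsorted(mzs, (150.0, 250.0))
#   # start == 1, end == 3 -> rows with 150.0 <= mz < 250.0
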
def segment_centroids(pw, clip_centr_chunks_cobjects, centr_segm_lower_bounds,
                      ds_segms_bounds, ds_segm_size_mb,
                      max_ds_segms_size_per_db_segm_mb, ppm):
    centr_segm_n = len(centr_segm_lower_bounds)

    # define first level segmentation and then segment each one into desired number
    first_level_centr_segm_n = min(32, len(centr_segm_lower_bounds))
    centr_segm_lower_bounds = np.array_split(centr_segm_lower_bounds,
                                             first_level_centr_segm_n)
    first_level_centr_segm_bounds = np.array(
        [bounds[0] for bounds in centr_segm_lower_bounds])

    def segment_centr_df(centr_df, db_segm_lower_bounds):
        first_peak_df = centr_df[centr_df.peak_i == 0].copy()
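        # searchsorted(..., side='right') - 1 maps each mz to the index of the
        # last lower bound <= mz, i.e. the segment the first peak falls into.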
        segment_mapping = np.searchsorted(
            db_segm_lower_bounds, first_peak_df.mz.values, side='right') - 1
        first_peak_df['segm_i'] = segment_mapping
        centr_segm_df = pd.merge(centr_df,
                                 first_peak_df[['formula_i', 'segm_i']],
                                 on='formula_i').sort_values('mz')
        return centr_segm_df

    def segment_centr_chunk(cobject, id, storage):
        print(f'Segmenting clipped centroids dataframe chunk {id}')
        centr_df = read_cloud_object_with_retry(storage, cobject, deserialise)
        centr_segm_df = segment_centr_df(centr_df,
                                         first_level_centr_segm_bounds)

        def _first_level_upload(args):
            segm_i, df = args
            del df['segm_i']
            return segm_i, storage.put_cloudobject(serialise(df))

        with ThreadPoolExecutor(max_workers=128) as pool:
            sub_segms = [(segm_i, df)
                         for segm_i, df in centr_segm_df.groupby('segm_i')]
            sub_segms_cobjects = list(pool.map(_first_level_upload, sub_segms))

        return dict(sub_segms_cobjects)

    memory_capacity_mb = 512
    first_futures = pw.map(
        segment_centr_chunk,
        clip_centr_chunks_cobjects,
        runtime_memory=memory_capacity_mb,
    )
    first_level_segms_cobjects = pw.get_result(first_futures)
    PipelineStats.append_func(first_futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(first_futures) *
                              len(centr_segm_lower_bounds))

    def merge_centr_df_segments(segm_cobjects, id, storage):
        print(f'Merging segment {id} clipped centroids chunks')

        def _merge(cobject):
            segm_centr_df_chunk = read_cloud_object_with_retry(
                storage, cobject, deserialise)
            return segm_centr_df_chunk

        with ThreadPoolExecutor(max_workers=128) as pool:
            segm = pd.concat(list(pool.map(_merge, segm_cobjects)))

        def _second_level_segment(segm, sub_segms_n):
            segm_bounds_q = [i / sub_segms_n for i in range(sub_segms_n)]
            sub_segms_lower_bounds = np.quantile(
                segm[segm.peak_i == 0].mz.values, segm_bounds_q)
            centr_segm_df = segment_centr_df(segm, sub_segms_lower_bounds)

            sub_segms = []
            for segm_i, df in centr_segm_df.groupby('segm_i'):
                del df['segm_i']
                sub_segms.append(df)
            return sub_segms

        init_segms = _second_level_segment(segm,
                                           len(centr_segm_lower_bounds[id]))

        segms = []
        for init_segm in init_segms:
            first_ds_segm_i, last_ds_segm_i = choose_ds_segments(
                ds_segms_bounds, init_segm, ppm)
            ds_segms_to_download_n = last_ds_segm_i - first_ds_segm_i + 1
            segms.append((ds_segms_to_download_n, init_segm))
        segms = sorted(segms, key=lambda x: x[0], reverse=True)
        max_ds_segms_to_download_n, max_segm = segms[0]

        max_iterations_n = 100
        iterations_n = 1
        while (max_ds_segms_to_download_n * ds_segm_size_mb > max_ds_segms_size_per_db_segm_mb
               and iterations_n < max_iterations_n):
            sub_segms = []
            sub_segms_n = math.ceil(max_ds_segms_to_download_n *
                                    ds_segm_size_mb /
                                    max_ds_segms_size_per_db_segm_mb)
            for sub_segm in _second_level_segment(max_segm, sub_segms_n):
                first_ds_segm_i, last_ds_segm_i = choose_ds_segments(
                    ds_segms_bounds, sub_segm, ppm)
                ds_segms_to_download_n = last_ds_segm_i - first_ds_segm_i + 1
                sub_segms.append((ds_segms_to_download_n, sub_segm))

            segms = sub_segms + segms[1:]
            segms = sorted(segms, key=lambda x: x[0], reverse=True)
            iterations_n += 1
            max_ds_segms_to_download_n, max_segm = segms[0]

        def _second_level_upload(df):
            return storage.put_cloudobject(serialise(df))

        print(f'Storing {len(segms)} centroids segments')
        with ThreadPoolExecutor(max_workers=128) as pool:
            segms = [df for _, df in segms]
            segms_cobjects = list(pool.map(_second_level_upload, segms))

        return segms_cobjects

    second_level_segms_cobjects = defaultdict(list)
    for sub_segms_cobjects in first_level_segms_cobjects:
        for first_level_segm_i in sub_segms_cobjects:
            second_level_segms_cobjects[first_level_segm_i].append(
                sub_segms_cobjects[first_level_segm_i])
    second_level_segms_cobjects = sorted(second_level_segms_cobjects.items(),
                                         key=lambda x: x[0])
    second_level_segms_cobjects = [
        (cobjects, ) for segm_i, cobjects in second_level_segms_cobjects
    ]

    first_level_cobjs = [
        co for cos in first_level_segms_cobjects for co in cos.values()
    ]
    assert len(first_level_cobjs) == len(
        set(co.key for co in first_level_cobjs)
    ), 'Duplicate CloudObject key in first_level_segms_cobjects'

    memory_capacity_mb = 2048
    second_futures = pw.map(merge_centr_df_segments,
                            second_level_segms_cobjects,
                            runtime_memory=memory_capacity_mb)
    db_segms_cobjects = list(np.concatenate(pw.get_result(second_futures)))
    PipelineStats.append_func(second_futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=centr_segm_n)

    assert len(db_segms_cobjects) == len(
        set(co.key for co in db_segms_cobjects)
    ), 'Duplicate CloudObject key in db_segms_cobjects'

    pw.clean(cs=first_level_cobjs)
    return db_segms_cobjects
def build_fdr_rankings(pw, config_ds, config_db, mol_dbs_cobjects,
                       formula_to_id_cobjects, formula_scores_df):
    mol_db_path_to_cobj = dict(zip(config_db['databases'], mol_dbs_cobjects))

    def build_ranking(group_i, ranking_i, database, modifier, adduct, id,
                      storage):
        print("Building ranking...")
        print(f'job_i: {id}')
        print(f'ranking_i: {ranking_i}')
        print(f'database: {database}')
        print(f'modifier: {modifier}')
        print(f'adduct: {adduct}')
        # For every unmodified formula in `database`, look up the MSM score for the molecule
        # that it would become after the modifier and adduct are applied
        mols = read_cloud_object_with_retry(storage,
                                            mol_db_path_to_cobj[database],
                                            deserialise)
        if adduct is not None:
            # Target rankings use the same adduct for all molecules
            mol_formulas = list(
                map(safe_generate_ion_formula, mols, repeat(modifier),
                    repeat(adduct)))
        else:
            # Decoy rankings use a consistent random adduct for each molecule, chosen so that it doesn't overlap
            # with other decoy rankings for this molecule
            adducts = _get_random_adduct_set(len(mols), decoy_adducts,
                                             ranking_i)
            mol_formulas = list(
                map(safe_generate_ion_formula, mols, repeat(modifier),
                    adducts))

        formula_to_id = {}
        for cobject in formula_to_id_cobjects:
            formula_to_id_chunk = read_cloud_object_with_retry(
                storage, cobject, deserialise)

            for formula in mol_formulas:
                formula_i = formula_to_id_chunk.get(formula)
                if formula_i is not None:
                    formula_to_id[formula] = formula_i

        formula_is = [
            formula and formula_to_id.get(formula) for formula in mol_formulas
        ]
        msm = [
            formula_i and msm_lookup.get(formula_i) for formula_i in formula_is
        ]
        if adduct is not None:
            ranking_df = pd.DataFrame({'mol': mols, 'msm': msm}, index=formula_is)
            ranking_df = ranking_df[~ranking_df.msm.isna()]
        else:
            # Specific molecules don't matter in the decoy rankings, only their msm distribution
            ranking_df = pd.DataFrame({'msm': msm})
            ranking_df = ranking_df[~ranking_df.msm.isna()]

        return id, storage.put_cloudobject(serialise(ranking_df))

    decoy_adducts = sorted(set(DECOY_ADDUCTS).difference(config_db['adducts']))
    n_decoy_rankings = config_ds.get('num_decoys', len(decoy_adducts))
    # Ideally this data would stay in COS so it doesn't have to be re-uploaded
    msm_lookup = formula_scores_df.msm.to_dict()

    # Create a job for each list of molecules to be ranked
    ranking_jobs = []
    for group_i, (database, modifier) in enumerate(
            product(config_db['databases'], config_db['modifiers'])):
        # Target and decoy rankings are treated differently. Decoy rankings are identified by not having an adduct.
        ranking_jobs.extend(
            (group_i, ranking_i, database, modifier, adduct)
            for ranking_i, adduct in enumerate(config_db['adducts']))
        ranking_jobs.extend((group_i, ranking_i, database, modifier, None)
                            for ranking_i in range(n_decoy_rankings))

    memory_capacity_mb = 1536
    futures = pw.map(build_ranking,
                     ranking_jobs,
                     runtime_memory=memory_capacity_mb)
    ranking_cobjects = [
        cobject for job_i, cobject in sorted(pw.get_result(futures))
    ]
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(futures))

    rankings_df = pd.DataFrame(ranking_jobs,
                               columns=[
                                   'group_i', 'ranking_i', 'database_path',
                                   'modifier', 'adduct'
                               ])
    rankings_df = rankings_df.assign(is_target=~rankings_df.adduct.isnull(),
                                     cobject=ranking_cobjects)

    return rankings_df
def build_database(pw, db_config, mols_dbs_cobjects):
    adduct_jobs = [(adduct, )
                   for adduct in [*db_config['adducts'], *DECOY_ADDUCTS]]
    modifiers = db_config['modifiers']

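    # md5 is used instead of the built-in hash() because hash() of a str is
    # randomised per interpreter process, while every worker must assign a
    # given formula to the same chunk.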
    def hash_formula_to_chunk(formula):
        m = hashlib.md5()
        m.update(formula.encode('utf-8'))
        return int(m.hexdigest(), 16) % N_HASH_CHUNKS

    def generate_formulas(adduct, storage):
        print(f'Generating formulas for adduct {adduct}')

        def _get_mols(mols_cobj):
            return read_cloud_object_with_retry(storage, mols_cobj,
                                                deserialise)

        with ThreadPoolExecutor(max_workers=128) as pool:
            mols_list = list(pool.map(_get_mols, mols_dbs_cobjects))

        formulas = set()

        for mols in mols_list:
            for modifier in modifiers:
                formulas.update(
                    map(safe_generate_ion_formula, mols, repeat(modifier),
                        repeat(adduct)))

        if None in formulas:
            formulas.remove(None)

        formulas_chunks = {}
        for formula in formulas:
            chunk_i = hash_formula_to_chunk(formula)
            if chunk_i in formulas_chunks:
                formulas_chunks[chunk_i].append(formula)
            else:
                formulas_chunks[chunk_i] = [formula]

        def _store(chunk_i):
            return chunk_i, storage.put_cloudobject(
                serialise(formulas_chunks[chunk_i]))

        with ThreadPoolExecutor(max_workers=128) as pool:
            cobjects = dict(pool.map(_store, formulas_chunks.keys()))

        return cobjects

    memory_capacity_mb = 512
    futures = pw.map(generate_formulas,
                     adduct_jobs,
                     runtime_memory=memory_capacity_mb)
    results = pw.get_result(futures)
    chunk_cobjects = [[] for _ in range(N_HASH_CHUNKS)]
    for cobjects_dict in results:
        for chunk_i, cobject in cobjects_dict.items():
            chunk_cobjects[chunk_i].append(cobject)
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=sum(map(len, chunk_cobjects)))

    def deduplicate_formulas_chunk(chunk_i, chunk_cobjects, storage):
        print(f'Deduplicating formulas chunk {chunk_i}')
        chunk = set()
        for cobject in chunk_cobjects:
            formulas_chunk_part = read_cloud_object_with_retry(
                storage, cobject, deserialise)
            chunk.update(formulas_chunk_part)

        return chunk

    def get_formulas_number_per_chunk(chunk_i, chunk_cobjects, storage):
        chunk = deduplicate_formulas_chunk(chunk_i, chunk_cobjects, storage)
        return len(chunk)

    memory_capacity_mb = 512
    futures = pw.map(get_formulas_number_per_chunk,
                     list(enumerate(chunk_cobjects)),
                     runtime_memory=memory_capacity_mb)
    formulas_nums = pw.get_result(futures)
    PipelineStats.append_func(futures, memory_mb=memory_capacity_mb)

    def store_formulas_segments(chunk_i, chunk_cobjects, storage):
        chunk = deduplicate_formulas_chunk(chunk_i, chunk_cobjects, storage)
        formula_i_start = sum(formulas_nums[:chunk_i])
        formula_i_end = formula_i_start + len(chunk)
        chunk = pd.Series(sorted(chunk),
                          name='ion_formula',
                          index=pd.RangeIndex(formula_i_start,
                                              formula_i_end,
                                              name='formula_i'))

        n_threads = N_FORMULAS_SEGMENTS // N_HASH_CHUNKS
        segm_size = math.ceil(len(chunk) / n_threads)
        segm_list = [
            chunk[i:i + segm_size] for i in range(0, chunk.shape[0], segm_size)
        ]

        def _store(segm_i):
            segm_id = chunk_i * n_threads + segm_i
            print(f'Storing formulas segment {segm_id}')
            return storage.put_cloudobject(serialise(segm_list[segm_i]))

        with ThreadPoolExecutor(max_workers=128) as pool:
            segm_cobjects = list(pool.map(_store, range(n_threads)))

        return segm_cobjects

    memory_capacity_mb = 512
    futures = pw.map(store_formulas_segments,
                     list(enumerate(chunk_cobjects)),
                     runtime_memory=memory_capacity_mb)
    results = pw.get_result(futures)
    formula_cobjects = [segm for segms in results for segm in segms]
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=len(formula_cobjects))

    num_formulas = sum(formulas_nums)
    n_formulas_chunks = sum([len(result) for result in results])
    logger.info(
        f'Generated {num_formulas} formulas in {n_formulas_chunks} chunks')

    formulas_bytes = 200 * num_formulas
    formula_to_id_chunk_mb = 512
    n_formula_to_id = int(
        math.ceil(formulas_bytes / (formula_to_id_chunk_mb * 1024**2)))
    formula_to_id_bounds = [
        N_FORMULAS_SEGMENTS * ch_i // n_formula_to_id
        for ch_i in range(n_formula_to_id + 1)
    ]
    formula_to_id_ranges = list(
        zip(formula_to_id_bounds[:-1], formula_to_id_bounds[1:]))
    formula_to_id_inputs = [
        formula_cobjects[start:end] for start, end in formula_to_id_ranges
        if start != end
    ]

    def store_formula_to_id_chunk(ch_i, input_cobjects, storage):
        print(f'Storing formula_to_id dictionary chunk {ch_i}')

        def _get(cobj):
            formula_chunk = read_cloud_object_with_retry(
                storage, cobj, deserialise)
            formula_to_id_chunk = dict(
                zip(formula_chunk.values, formula_chunk.index))
            return formula_to_id_chunk

        formula_to_id = {}
        with ThreadPoolExecutor(max_workers=128) as pool:
            for chunk_dict in pool.map(_get, input_cobjects):
                formula_to_id.update(chunk_dict)

        return storage.put_cloudobject(serialise(formula_to_id))

    safe_mb = 512
    memory_capacity_mb = formula_to_id_chunk_mb * 2 + safe_mb
    futures = pw.map(store_formula_to_id_chunk,
                     list(enumerate(formula_to_id_inputs)),
                     runtime_memory=memory_capacity_mb)
    formula_to_id_cobjects = pw.get_result(futures)
    PipelineStats.append_func(futures,
                              memory_mb=memory_capacity_mb,
                              cloud_objects_n=n_formula_to_id)
    logger.info(f'Built {n_formula_to_id} formula_to_id dictionaries chunks')

    return formula_cobjects, formula_to_id_cobjects
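
# A minimal usage sketch (hypothetical names; `pw` is assumed to be a
# lithops.FunctionExecutor, `db_config` the database section of the pipeline
# config, and `mols_dbs_cobjects` CloudObjects holding serialised molecule
# lists, one per entry in db_config['databases']):
#
#   formula_cobjects, formula_to_id_cobjects = build_database(
#       pw, db_config, mols_dbs_cobjects)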