def __exit__(self, except_type, except_value, except_trace):
    '''Exits the runtime context, translating Java-bridge errors.

    Parameters
    ----------
    except_type: type
        type of the exception raised in the ``with`` block
        (``None`` if no exception occurred)
    except_value: BaseException
        the raised exception instance (``None`` if none occurred)
    except_trace: types.TracebackType
        traceback of the raised exception

    Raises
    ------
    NotSupportedError
        when a :class:`javabridge.JavaException` was raised, which
        indicates that the file format is not supported
    '''
    if except_type is javabridge.JavaException:
        # Chain the underlying Java exception so the original cause
        # is preserved for debugging instead of being discarded.
        raise NotSupportedError(
            'File format is not supported.'
        ) from except_value
    if except_value:
        # Errors belong on stderr; the original wrote to stdout, which
        # hides failures from error streams and log redirection.
        sys.stderr.write(
            'The following error occurred:\n%s' % str(except_value)
        )
        for tb in traceback.format_tb(except_trace):
            sys.stderr.write(tb)
def create_run_batches(self, args):
    '''Creates job descriptions for parallel computing.

    Parameters
    ----------
    args: tmlib.workflow.align.args.AlignBatchArguments
        step-specific arguments

    Returns
    -------
    generator
        job descriptions

    Raises
    ------
    ValueError
        when `args.ref_wavelength` does not exist across all cycles
    '''
    job_count = 0
    with tm.utils.ExperimentSession(self.experiment_id) as session:
        cycles = session.query(tm.Cycle).all()
        # Alignment computes shifts relative to a reference cycle and
        # therefore only makes sense with at least two cycles.
        if len(cycles) < 2:
            raise NotSupportedError(
                'Alignment requires more than one cycle.')
        if args.ref_cycle >= len(cycles):
            raise JobDescriptionError(
                'Cycle index must not exceed total number of cycles.')
        site_ids = session.query(tm.Site.id).\
            order_by(tm.Site.id).\
            all()
        batches = self._create_batches(site_ids, args.batch_size)
        for batch in batches:
            job_count += 1
            input_ids = {
                'reference_file_ids': list(),
                'target_file_ids': defaultdict(list)
            }
            for cycle in cycles:
                # NOTE(review): ``join(tm.Site)`` added — the original
                # query filtered on ``~tm.Site.omitted`` without joining
                # tm.Site, yielding an implicit cross join and a wrong
                # count. Join order mirrors the per-site query below.
                n = session.query(tm.ChannelImageFile.id).\
                    join(tm.Site).\
                    join(tm.Cycle).\
                    join(tm.Channel).\
                    filter(tm.Cycle.id == cycle.id).\
                    filter(tm.Channel.wavelength == args.ref_wavelength).\
                    filter(~tm.Site.omitted).\
                    count()
                if n == 0:
                    raise ValueError(
                        'No image files found for cycle %d and '
                        'wavelength "%s"'
                        % (cycle.id, args.ref_wavelength)
                    )
                for s in batch:
                    files = session.query(tm.ChannelImageFile.id).\
                        join(tm.Site).\
                        join(tm.Cycle).\
                        join(tm.Channel).\
                        filter(tm.Site.id == s).\
                        filter(tm.Cycle.id == cycle.id).\
                        filter(tm.Channel.wavelength == args.ref_wavelength).\
                        filter(~tm.Site.omitted).\
                        all()
                    if not files:
                        # We don't raise an Exception here, because
                        # there may be situations where an acquisition
                        # failed at a given site in one cycle, but
                        # is present in the other cycles.
                        logger.warning(
                            'no files for site %d and cycle %d',
                            s, cycle.id
                        )
                        continue
                    ids = [f.id for f in files]
                    if cycle.index == args.ref_cycle:
                        input_ids['reference_file_ids'].extend(ids)
                    input_ids['target_file_ids'][cycle.id].extend(ids)
            yield {
                'id': job_count,
                'input_ids': input_ids,
                'illumcorr': args.illumcorr,
                'robust_align': args.robust_align,
                'rescale_percentile': args.rescale_percentile
            }