  def mpi_reduce_p(self, p, root=0):
    l = p.get_lattice().as_numpy_array()
    c = p.get_counts().as_numpy_array()
    if self.mpi_helper.rank == root:
      lt = np.zeros_like(l)
      ct = np.zeros_like(c)
    else:
      lt = None
      ct = None
    self.mpi_helper.comm.Reduce(l, lt, op=self.mpi_helper.MPI.SUM, root=root)
    self.mpi_helper.comm.Reduce(c, ct, op=self.mpi_helper.MPI.SUM, root=root)
    if self.mpi_helper.rank == root:
      p.set_lattice(flex.double(lt))
      p.set_counts(flex.int(ct))
    return p

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(lunus)
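# --- Illustrative sketch (not part of the worker above) ---
# mpi_reduce_p uses the buffer-based (capital-R) Reduce: an element-wise SUM
# of per-rank numpy arrays into a preallocated receive buffer on the root
# rank, with None passed on every other rank. A minimal standalone mpi4py
# example of the same pattern, assuming it is launched under mpirun:
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

local = np.arange(4, dtype='d') * (rank + 1)         # each rank's partial array
total = np.zeros_like(local) if rank == 0 else None  # receive buffer on root only
comm.Reduce(local, total, op=MPI.SUM, root=0)
if rank == 0:
  print("element-wise sum over ranks:", total)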
      # added as a diagnostic
      #prange=P_nought_vec - P_prime
      #other_F_prime = 1.0
      #otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec
      #otherprange=P_nought_vec - otherP_prime
      #diff2 = flex.abs(prange - otherprange)
      #print >> out, "mean diff is",flex.mean(diff2), "range",flex.min(diff2), flex.max(diff2)
      # done

      correction = 1 / (P_nought_vec - P_prime)
      refls['intensity.sum.value'] = refls['intensity.sum.value'] * correction
      refls['intensity.sum.variance'] = refls['intensity.sum.variance'] * correction**2 # propagated error
      # This corrects observations for polarization assuming 100% polarization on
      # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0)
      # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre
      # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15.

      result.extend(refls)

    if len(reflections) > 0:
      self.logger.log("Applied polarization correction. Mean intensity changed from %.2f to %.2f" %
                      (flex.mean(reflections['intensity.sum.value']), flex.mean(result['intensity.sum.value'])))

    self.logger.log_step_time("POLARIZATION_CORRECTION", True)
    return experiments, result

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(polarization)
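# --- Illustrative sketch (hypothetical values, not the worker's code path) ---
# The error propagation used above: for a deterministic multiplicative factor
# c = 1 / (P_nought - P_prime), the corrected intensity is c*I and the
# variance scales by the square of the factor, Var(c*X) = c^2 * Var(X).
import numpy as np

P_nought = np.array([0.95, 0.90, 0.85])  # hypothetical polarization terms
P_prime  = np.array([0.10, 0.12, 0.15])
intensity = np.array([100., 200., 300.])
variance  = np.array([10., 20., 30.])

correction = 1.0 / (P_nought - P_prime)
intensity_corr = intensity * correction
variance_corr = variance * correction**2  # propagated error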
    f_wavelengths = flex.double([b.get_wavelength() for b in experiments.beams()])
    flex_all_wavelengths = self.mpi_helper.aggregate_flex(f_wavelengths, flex.double)
    if self.mpi_helper.rank == 0:
      average_wavelength = flex.mean(flex_all_wavelengths)
      self.logger.main_log("Wavelength: %f" % average_wavelength)
    else:
      average_wavelength = None

    self.logger.log_step_time("BROADCAST_WAVELENGTH")
    average_wavelength = self.mpi_helper.comm.bcast(average_wavelength, root=0)
    self.logger.log_step_time("BROADCAST_WAVELENGTH", True)

    # save the average wavelength to the phil parameters
    if self.mpi_helper.rank == 0:
      self.logger.main_log("Average wavelength (%f A) is saved to phil parameters" % average_wavelength)
    if 'average_wavelength' not in (self.params.statistics).__dict__:
      self.params.statistics.__inject__('average_wavelength', average_wavelength)
    else:
      self.params.statistics.__setattr__('average_wavelength', average_wavelength)

    self.logger.log_step_time("BEAM_STATISTICS", True)

    return experiments, reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(beam_statistics)
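# --- Illustrative sketch ---
# The compute-on-root / broadcast-to-all pattern above, reduced to a minimal
# mpi4py example (the lowercase bcast pickles arbitrary Python objects):
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:
  average_wavelength = 1.3  # hypothetical mean computed from gathered data
else:
  average_wavelength = None
average_wavelength = comm.bcast(average_wavelength, root=0)
# after the call, every rank holds the same value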
    if self.mpi_helper.rank == 0:
      if ucd.is_valid():
        ucd.show_histograms()
      average_unit_cell = ucd.get_average_cell()
    else:
      average_unit_cell = None # non-root ranks receive the value via the broadcast below

    self.logger.log_step_time("BROADCAST_UNIT_CELL")
    average_unit_cell = self.mpi_helper.comm.bcast(average_unit_cell, root=0)
    self.logger.log_step_time("BROADCAST_UNIT_CELL", True)

    # save the average unit cell to the phil parameters
    if self.mpi_helper.rank == 0:
      self.logger.main_log("Average unit_cell %s is saved to phil parameters" % str(average_unit_cell))
    if 'average_unit_cell' not in (self.params.statistics).__dict__:
      self.params.statistics.__inject__('average_unit_cell', average_unit_cell)
    else:
      self.params.statistics.__setattr__('average_unit_cell', average_unit_cell)

    self.logger.log_step_time("UNIT_CELL_STATISTICS", True)

    return experiments, reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(unit_cell_statistics)
        ignore_line_search_failed_maxfev=False,
        ignore_line_search_failed_xtol=False,
        ignore_search_direction_not_descent=False))

  def compute_functional_and_gradients(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150, "B-factor out of range (+/-150) within rs2 functional and gradients"
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.refinery.WEIGHTS * self.func * self.func)
    self.f = functional
    jacobian = self.refinery.jacobian_callable(values)
    self.g = flex.double(self.n)
    for ix in range(self.n):
      self.g[ix] = flex.sum(2. * self.refinery.WEIGHTS * self.func * jacobian[ix])
    print("rms %10.3f" % math.sqrt(flex.sum(self.refinery.WEIGHTS * self.func * self.func) /
                                   flex.sum(self.refinery.WEIGHTS)), file=self.out, end='')
    values.show(self.out)
    return self.f, self.g

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(postrefinement_rs2)
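# --- Illustrative sketch ---
# The functional/gradient algebra above is weighted least squares:
#   f(p) = sum_k w_k * r_k(p)^2,   df/dp_i = sum_k 2 * w_k * r_k * J_ki
# A standalone numpy check of that identity on hypothetical residuals and a
# hypothetical Jacobian:
import numpy as np

w = np.array([1.0, 2.0, 0.5])   # weights
r = np.array([0.3, -0.1, 0.2])  # residuals at the current parameters
J = np.array([[1.0, 0.0],       # J[k, i] = d r_k / d p_i
              [0.5, 1.0],
              [0.0, 2.0]])

f = np.sum(w * r * r)
g = 2.0 * (w * r) @ J           # gradient, one entry per parameter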
    current_imageset = None
    current_imageset_path = None
    for expt_id, expt in enumerate(experiments):
      assert len(expt.imageset.paths()) == 1 and len(expt.imageset) == 1
      self.logger.log("Starting integration experiment %d" % expt_id)
      refls = reflections.select(reflections['exp_id'] == expt.identifier)
      if expt.imageset.paths()[0] != current_imageset_path:
        current_imageset_path = expt.imageset.paths()[0]
        current_imageset = ImageSetFactory.make_imageset(expt.imageset.paths())
      idx = expt.imageset.indices()[0]
      expt.imageset = current_imageset[idx:idx+1]
      idents = refls.experiment_identifiers()
      del idents[expt_id]
      idents[0] = expt.identifier
      refls['id'] = flex.int(len(refls), 0)
      integrated = processor.integrate(experiments[expt_id:expt_id+1], refls)
      idents = integrated.experiment_identifiers()
      del idents[0]
      idents[expt_id] = expt.identifier
      integrated['id'] = flex.int(len(integrated), expt_id)
      all_integrated.extend(integrated)

    self.logger.log("Integration done, %d experiments, %d reflections" %
                    (len(experiments), len(all_integrated)))
    return experiments, all_integrated

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(integrate)
    import copy

    # Build target symmetry. The exact experiment unit cell values don't matter
    # for converting HKLs to asu HKLs.
    target_unit_cell = self.params.scaling.unit_cell
    target_space_group_info = self.params.scaling.space_group
    target_symmetry = symmetry(unit_cell=target_unit_cell, space_group_info=target_space_group_info)
    target_space_group = target_symmetry.space_group()

    # generate and add an asu hkl column
    reflections['miller_index_asymmetric'] = copy.deepcopy(reflections['miller_index'])
    miller.map_to_asu(target_space_group.type(),
                      not self.params.merging.merge_anomalous,
                      reflections['miller_index_asymmetric'])

  '''
  def get_min_max_experiment_unit_cell_volume(self, experiments):
    vols = []
    for experiment in experiments:
      vols.append(experiment.crystal.get_crystal_symmetry().unit_cell().volume())
    return min(vols), max(vols)
  '''

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(crystal_model)
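# --- Illustrative sketch ---
# What the asu-mapping step above accomplishes, shown through the higher-level
# miller.set.map_to_asu() wrapper on a couple of indices; the cell and space
# group here are hypothetical, and a cctbx installation is assumed:
from cctbx import crystal, miller
from cctbx.array_family import flex

symm = crystal.symmetry(unit_cell=(78, 78, 37, 90, 90, 90),
                        space_group_symbol="P 43 21 2")
ms = miller.set(crystal_symmetry=symm,
                indices=flex.miller_index([(-1, -2, -3), (1, 2, 3)]),
                anomalous_flag=False)
asu = ms.map_to_asu()
# symmetry-equivalent indices now share one representative per ASU
for orig, mapped in zip(ms.indices(), asu.indices()):
  print(orig, "->", mapped)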
    else:
      self.logger.main_log("Output merged HKLs limited by (%f - %f) A resolution range" %
                           (self.params.merging.d_max, self.params.merging.d_min))

    all_obs = miller.array(
      miller_set=miller.set(final_symm, reflections['miller_index'], not self.params.merging.merge_anomalous),
      data=reflections['intensity'],
      sigmas=reflections['sigma']).resolution_filter(
        d_min=self.params.merging.d_min,
        d_max=self.params.merging.d_max).set_observation_type_xray_intensity()

    mtz_file = os.path.join(self.params.output.output_dir, "%s_%s.mtz" % (self.params.output.prefix, filename_postfix))
    mtz_out = all_obs.as_mtz_dataset(
      column_root_label="Iobs",
      title=self.params.output.title,
      wavelength=wavelength)
    mtz_out.add_miller_array(
      miller_array=all_obs.average_bijvoet_mates(),
      column_root_label="IMEAN")
    mtz_obj = mtz_out.mtz_object()
    mtz_obj.write(mtz_file)

    self.logger.main_log("Output anomalous and mean data:\n %s" % os.path.abspath(mtz_file))
    self.logger.main_log("Output data summary:")
    out = StringIO()
    all_obs.show_summary(prefix=" ", f=out)
    self.logger.main_log(out.getvalue())

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(merge)
    Cross_Correlation_Table.cumulative_observed_matching_asu_count = cumulative_observed_matching_asu_count
    Cross_Correlation_Table.cumulative_theor_asu_count = cumulative_theor_asu_count
    Cross_Correlation_Table.cumulative_cross_correlation = self.cross_correlation_formula(
      cumulative_observed_matching_asu_count,
      cumulative_sum_xx, cumulative_sum_yy, cumulative_sum_xy,
      cumulative_sum_x, cumulative_sum_y)

    return Cross_Correlation_Table

  def cross_correlation_formula(self, count, sum_xx, sum_yy, sum_xy, sum_x, sum_y):
    numerator = (count * sum_xy - sum_x * sum_y)
    denominator = (math.sqrt(count * sum_xx - sum_x**2) * math.sqrt(count * sum_yy - sum_y**2))
    cross_correlation = 0.0
    if denominator != 0.0:
      cross_correlation = numerator / denominator
    return cross_correlation

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(intensity_resolution_statistics)
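# --- Illustrative sketch ---
# cross_correlation_formula is the textbook Pearson coefficient computed from
# running sums:
#   r = (n*Sxy - Sx*Sy) / (sqrt(n*Sxx - Sx^2) * sqrt(n*Syy - Sy^2))
# A standalone check on hypothetical data:
import math

x = [1.0, 2.0, 3.0, 4.0]
y = [1.1, 1.9, 3.2, 3.8]
n = len(x)
sum_x, sum_y = sum(x), sum(y)
sum_xx = sum(v * v for v in x)
sum_yy = sum(v * v for v in y)
sum_xy = sum(a * b for a, b in zip(x, y))

r = (n * sum_xy - sum_x * sum_y) / (
    math.sqrt(n * sum_xx - sum_x**2) * math.sqrt(n * sum_yy - sum_y**2))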
    reflections = self.setup_work_arrays(reflections)
    # Make sure every rank knows the global mean/stdev for deltas and use them to get the bin limits
    self.calculate_delta_statistics()
    self.calculate_delta_bin_limits()
    # assign deltas for each reflection to appropriate bin
    self.distribute_deltas_over_bins()
    # Each rank gets its own bin. Make sure all deltas in that bin are on that rank and sorted.
    self.distribute_deltas_over_ranks()
    # calculate rankits; each rank does its own rankits calculation
    self.calculate_delta_rankits()
    # initial ev11 params using slope and offset of fit to rankits
    self.calculate_initial_ev11_parameters()
    # Now moving to intensities, find the bin limits using global min/max of the means of each reflection
    self.calculate_intensity_bin_limits()
    # Once bin limits are determined, assign intensities on each rank to appropriate bin limits
    self.distribute_reflections_over_intensity_bins()
    # Run the LBFGSB minimizer -- only rank 0 does the minimization, but gradients/functionals are calculated using all ranks
    self.run_minimizer()
    # Finally, update the variances of each reflection as per Eq. (10) in Brewster et al. (2019)
    reflections['intensity.sum.variance'] = (self.sfac**2) * (
      reflections['intensity.sum.variance'] +
      self.sb * self.sb * reflections['biased_mean'] +
      self.sadd * self.sadd * reflections['biased_mean']**2)
    del reflections['biased_mean']
    return reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(error_modifier)
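# --- Illustrative sketch ---
# The final variance update above applies the error model of Brewster et al.
# (2019), Eq. (10): var' = sfac^2 * (var + sb^2 * <I> + sadd^2 * <I>^2),
# where <I> is the reflection's biased mean intensity. A minimal numpy
# version with hypothetical refined parameter values:
import numpy as np

sfac, sb, sadd = 1.1, 0.5, 0.02        # hypothetical refined error-model params
variance = np.array([25.0, 100.0])     # raw intensity.sum.variance
biased_mean = np.array([50.0, 400.0])  # per-reflection biased mean intensity

variance_updated = sfac**2 * (variance + sb**2 * biased_mean + sadd**2 * biased_mean**2)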
      refl=selected_reflections,
      data=transmitted,
      symms=[E.crystal.get_crystal_symmetry() for E in result_experiments_for_cosym],
      uuids=[E.identifier for E in result_experiments_for_cosym],
      co=global_coset_decomposition,
      anomalous_flag=not self.params.merging.merge_anomalous,
      verbose=False)
    # this should have re-indexed the refls in place, no need for return value
    self.mpi_helper.comm.barrier()
    # Note: this handles the simple case of lattice ambiguity (P63 in P/mmm lattice group).
    # In this use case we assume all inputs and outputs are in P63.
    # More complex use cases would have to reset the space group in the crystal, and recalculate
    # the ASU "miller_indices" in the reflections table.
    self.logger.log_step_time("COSYM", True)
    self.logger.log("Memory usage: %d MB" % get_memory_usage())

    from xfel.merging.application.utils.data_counter import data_counter
    data_counter(self.params).count(result_experiments_for_cosym, selected_reflections)
    return result_experiments_for_cosym, selected_reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(reindex_to_reference)
      return

    all_keys = list()
    for key in reflections[0]:
      all_keys.append(key)
    for key in all_keys:
      if key not in ['intensity.sum.value', 'intensity.sum.variance', 'miller_index',
                     'miller_index_asymmetric', 'exp_id', 'odd_frame']:
        del reflections[key]

  def run(self, experiments, reflections):
    self.logger.log_step_time("ADD_ASU_HKL_COLUMN")
    self.add_asu_miller_indices_column(experiments, reflections)
    self.logger.log_step_time("ADD_ASU_HKL_COLUMN", True)

    self.logger.log_step_time("PRUNE_COLUMNS")
    self.prune_reflection_columns(reflections)
    self.logger.log_step_time("PRUNE_COLUMNS", True)

    return experiments, reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(reflection_table_editor)
      title=self.params.output.title,
      wavelength=wavelength)
    mtz_out.add_miller_array(miller_array=all_obs.average_bijvoet_mates(), column_root_label="IMEAN")
    mtz_obj = mtz_out.mtz_object()
    mtz_obj.write(mtz_file)

    self.logger.log(" Anomalous and mean data:\n %s" % \
                    os.path.abspath(mtz_file))
    self.logger.log("")
    self.logger.log("Final data:")
    #all_obs.show_summary(self.log, prefix=" ") # don't have a buffer object for this logger
    all_obs.show_summary(prefix=" ")

  def run(self, experiments, reflections):
    if self.mpi_helper.rank == 0:
      # write the final merged reflection table out to an ASCII file -- for testing/debugging
      self.logger.log_step_time("WRITE ASCII")
      self.output_reflections_ascii(reflections)
      self.logger.log_step_time("WRITE ASCII", True)

      # write the final merged reflection table out to an MTZ file
      self.logger.log_step_time("WRITE MTZ")
      self.output_reflections_mtz(experiments, reflections)
      self.logger.log_step_time("WRITE MTZ", True)

    return None, None

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(output)
    comm = self.mpi_helper.comm
    MPI = self.mpi_helper.MPI

    if self.mpi_helper.rank == 0:
      self.logger.log_step_time("INTENSITY_HISTOGRAM")
      self.histogram(reflections['intensity'])
      self.logger.log_step_time("INTENSITY_HISTOGRAM", True)

    return experiments, reflections

  def histogram(self, data):
    from matplotlib import pyplot as plt
    nslots = 100
    histogram = flex.histogram(data=data, n_slots=nslots)
    out = StringIO()
    histogram.show(f=out, prefix=" ", format_cutoffs="%6.2f")
    self.logger.main_log(out.getvalue() + '\n' + "Total: %d" % data.size() + '\n')

    if False:
      fig = plt.figure()
      plt.bar(histogram.slot_centers(), histogram.slots(), align="center", width=histogram.slot_width())
      plt.show()

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(intensity_histogram)
self.logger.log_step_time("SORT") self.logger.log("Sorting reflection table...") reflections.sort('miller_index_asymmetric') self.logger.log_step_time("SORT", True) # Initialize a dictionary to store unique experiment ids in resolution bins experiments_per_resolution_bins = {} for i_bin in range(self.n_bins): experiments_per_resolution_bins[i_bin] = set() # Accumulate experiment ids in the resolution bins where those experiments contributed reflections for refls in reflection_table_utils.get_next_hkl_reflection_table( reflections=reflections): if refls.size() == 0: break # unless the input "reflections" list is empty, generated "refls" lists cannot be empty hkl = refls[0]['miller_index_asymmetric'] if hkl in self.hkl_resolution_bins: i_bin = self.hkl_resolution_bins[hkl] for refl in refls.rows(): experiments_per_resolution_bins[i_bin].add(refl['exp_id']) # For each bin, reduce the sets of unique experiment ids to their count for i_bin in range(self.resolution_binner.n_bins_all()): self.experiment_count_per_resolution_bins[i_bin] = len( experiments_per_resolution_bins[i_bin]) if __name__ == '__main__': from xfel.merging.application.worker import exercise_worker exercise_worker(experiment_resolution_statistics)
else: self.logger.log("Received a list of 0 json/pickle file pairs") self.logger.log_step_time("LOAD", True) self.logger.log('Read %d experiments consisting of %d reflections' % (len(all_experiments) - starting_expts_count, len(all_reflections) - starting_refls_count)) self.logger.log("Memory usage: %d MB" % get_memory_usage()) all_reflections = self.prune_reflection_table_keys(all_reflections) # Do we have any data? from xfel.merging.application.utils.data_counter import data_counter data_counter(self.params).count(all_experiments, all_reflections) return all_experiments, all_reflections def prune_reflection_table_keys(self, reflections): from xfel.merging.application.reflection_table_utils import reflection_table_utils reflections = reflection_table_utils.prune_reflection_table_keys(reflections=reflections, keys_to_keep=['intensity.sum.value', 'intensity.sum.variance', 'miller_index', 'miller_index_asymmetric', \ 'exp_id', 's1', 'intensity.sum.value.unmodified', 'intensity.sum.variance.unmodified', 'kapton_absorption_correction', 'flags']) self.logger.log("Pruned reflection table") self.logger.log("Memory usage: %d MB" % get_memory_usage()) return reflections if __name__ == '__main__': from xfel.merging.application.worker import exercise_worker exercise_worker(simple_file_loader)
      sum_xy += I_w * I_o * I_r
      sum_x += I_w * I_o
      sum_y += I_w * I_r
      sum_w += I_w

    # calculate Pearson correlation coefficient between X and Y and test it
    DELTA_1 = result.data_count * sum_xx - sum_x**2
    DELTA_2 = result.data_count * sum_yy - sum_y**2
    if (abs(DELTA_1) < sys.float_info.epsilon) or (abs(DELTA_2) < sys.float_info.epsilon):
      result.error = scaling_result.err_low_signal
      return result
    result.correlation = (result.data_count * sum_xy - sum_x * sum_y) / (math.sqrt(DELTA_1) * math.sqrt(DELTA_2))
    if result.correlation < self.params.filter.outlier.min_corr:
      result.error = scaling_result.err_low_correlation
      return result

    DELTA = sum_w * sum_xx
    if abs(DELTA) < sys.float_info.epsilon:
      result.error = scaling_result.err_low_signal
      return result
    result.slope = sum_w * sum_xy / DELTA

    return result

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(experiment_scaler)
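# --- Illustrative sketch ---
# With the weights already folded into the running sums (sum_xx = sum(w*x*x),
# sum_xy = sum(w*x*y)), the slope above reduces to
#   m = sum(w*x*y) / sum(w*x*x),
# i.e. the weighted least-squares slope of a zero-intercept line y = m*x,
# here scaling observed intensities onto a reference set. A standalone numpy
# check with hypothetical data:
import numpy as np

w = np.array([1.0, 2.0, 1.5])     # per-observation weights
x = np.array([10.0, 20.0, 30.0])  # observed intensities
y = np.array([21.0, 39.0, 62.0])  # matching reference intensities

m = np.sum(w * x * y) / np.sum(w * x * x)
residual = np.sum(w * (y - m * x)**2)  # this m minimizes the weighted residual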
    self.split_reflections = []
    for i in range(len(self.split_experiments)):
      self.split_reflections.append(self.reflection_table_stub())

    if reflection_count > 0:
      # set up two lists to be passed to the C++ extension: experiment ids and chunk ids.
      # It's basically a hash table to look up chunk ids by experiment identifier.
      exp_id_list = flex.std_string()
      chunk_id_list = flex.int()
      for i in range(len(self.split_experiments)):
        for exp in self.split_experiments[i]:
          exp_id_list.append(exp.identifier)
          chunk_id_list.append(i)

      # distribute reflections over the experiment chunks using a C++ extension
      from xfel.merging import split_reflections_by_experiment_chunks_cpp
      split_reflections_by_experiment_chunks_cpp(reflections, exp_id_list, chunk_id_list, self.split_reflections)

      for ref_table in self.split_reflections:
        distributed_reflection_count += ref_table.size()
      self.logger.log("Distributed %d out of %d reflections" % (distributed_reflection_count, reflection_count))

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(load_balancer)
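# --- Illustrative sketch ---
# The routing logic delegated to the C++ extension above, re-expressed in
# plain Python over lists of dicts: build a hash from experiment identifier
# to chunk id, then append each reflection to its chunk. All names here are
# hypothetical stand-ins for the flex/C++ types.
split_experiments_stub = [
  [{'identifier': 'a'}, {'identifier': 'b'}],  # chunk 0
  [{'identifier': 'c'}],                       # chunk 1
]
reflections_stub = [{'exp_id': 'a'}, {'exp_id': 'c'}, {'exp_id': 'a'}]

chunk_id_by_exp_id = {}
for chunk_id, experiment_chunk in enumerate(split_experiments_stub):
  for exp in experiment_chunk:
    chunk_id_by_exp_id[exp['identifier']] = chunk_id

split_refls = [[] for _ in split_experiments_stub]
for refl in reflections_stub:
  split_refls[chunk_id_by_exp_id[refl['exp_id']]].append(refl)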
    self.params.statistics.__setattr__('resolution_binner', resolution_binner)

    # Provide resolution bin number for each asu hkl in the full miller set
    hkl_resolution_bins = {} # hkl vs resolution bin number
    hkls_with_assigned_bin = 0
    for i_bin in resolution_binner.range_used():
      bin_hkl_selection = resolution_binner.selection(i_bin)
      bin_hkls = full_miller_set.select(bin_hkl_selection)
      for hkl in bin_hkls.indices():
        assert hkl not in hkl_resolution_bins # each hkl should be assigned a bin number only once
        hkl_resolution_bins[hkl] = i_bin
        hkls_with_assigned_bin += 1
    self.logger.log("Provided resolution bin number for %d asu hkls" % hkls_with_assigned_bin)

    # Save hkl bin assignments to the parameters
    if 'hkl_resolution_bins' not in (self.params.statistics).__dict__:
      self.params.statistics.__inject__('hkl_resolution_bins', hkl_resolution_bins)
    else:
      self.params.statistics.__setattr__('hkl_resolution_bins', hkl_resolution_bins)

    return experiments, reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(resolution_binner)
    for j in range(number_of_slices):
      hkl_chunks_for_alltoall = list()
      for i in range(len(self.hkl_chunks)):
        hkl_chunks_for_alltoall.append(list_of_sliced_hkl_chunks[i][j]) # [Aj, Bj, Cj, ...]

      self.logger.log_step_time("ALL-TO-ALL")
      self.logger.log("Executing MPI all-to-all...")
      self.logger.log("Memory usage: %d MB" % get_memory_usage())

      received_hkl_chunks = comm.alltoall(hkl_chunks_for_alltoall)

      self.logger.log("After all-to-all received %d hkl chunks" % len(received_hkl_chunks))
      self.logger.log_step_time("ALL-TO-ALL", True)

      self.logger.log_step_time("CONSOLIDATE")
      self.logger.log("Consolidating reflection tables...")
      for chunk in received_hkl_chunks:
        result_reflections.extend(chunk)
      self.logger.log_step_time("CONSOLIDATE", True)

    return result_reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(hkl_group)
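# --- Illustrative sketch ---
# The lowercase comm.alltoall used above exchanges one Python object with
# every rank: rank i sends element j of its send list to rank j, and receives
# a list whose element j came from rank j. A minimal mpi4py example, assuming
# it runs under mpirun with any number of ranks:
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

send = ["rank %d -> rank %d" % (rank, dest) for dest in range(size)]
received = comm.alltoall(send)  # one inbound item per source rank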
    for x in range(self.n):
      templist = list(self.x)
      templist[x] += DELTA
      dvalues = flex.double(templist)
      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc * dfunc)
      # calculate by finite difference
      self.g.append((dfunctional - functional) / DELTA)
    self.g[2] = 0.

    print("rms %10.3f; " % math.sqrt(flex.mean(self.func * self.func)), file=self.out, end='')
    values.show(self.out)
    return self.f, self.g

  def __del__(self):
    values = self.parameterization(self.x)
    print("FINALMODEL", file=self.out)
    print("rms %10.3f; " % math.sqrt(flex.mean(self.func * self.func)), file=self.out, end='')
    values.show(self.out)

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(postrefinement)
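# --- Illustrative sketch ---
# The gradient loop above is one-sided (forward) finite differencing:
#   g_i ~= (f(x + DELTA * e_i) - f(x)) / DELTA
# A standalone numpy version on a hypothetical quadratic, where the exact
# gradient 2*x makes the approximation easy to verify:
import numpy as np

def f(x):
  return float(np.sum(x * x))

x = np.array([1.0, -2.0, 0.5])
DELTA = 1.e-7
f0 = f(x)
g = np.zeros_like(x)
for i in range(x.size):
  xd = x.copy()
  xd[i] += DELTA
  g[i] = (f(xd) - f0) / DELTA  # approximates 2*x[i]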
    # MPI-reduce total counts
    comm = self.mpi_helper.comm
    MPI = self.mpi_helper.MPI
    total_removed_for_unit_cell = comm.reduce(removed_for_unit_cell, MPI.SUM, 0)
    total_removed_for_space_group = comm.reduce(removed_for_space_group, MPI.SUM, 0)
    total_reflections_removed = comm.reduce(removed_reflections, MPI.SUM, 0)

    # rank 0: log total counts
    if self.mpi_helper.rank == 0:
      self.logger.main_log("Total experiments rejected because of unit cell dimensions: %d" % total_removed_for_unit_cell)
      self.logger.main_log("Total experiments rejected because of space group: %d" % total_removed_for_space_group)
      self.logger.main_log("Total reflections rejected because of rejected experiments: %d" % total_reflections_removed)

    self.logger.log_step_time("FILTER_EXPERIMENTS", True)

    return new_experiments, new_reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(experiment_filter)
                    removed_reflections)
    self.logger.log("Experiments rejected because of significance filter: %d" % removed_experiments)

    # MPI-reduce total counts
    comm = self.mpi_helper.comm
    MPI = self.mpi_helper.MPI
    total_removed_reflections = comm.reduce(removed_reflections, MPI.SUM, 0)
    total_removed_experiments = comm.reduce(removed_experiments, MPI.SUM, 0)

    # rank 0: log total counts
    if self.mpi_helper.rank == 0:
      self.logger.main_log("Total reflections rejected because of significance filter: %d" % total_removed_reflections)
      self.logger.main_log("Total experiments rejected because of significance filter: %d" % total_removed_experiments)

    self.logger.log_step_time("SIGNIFICANCE_FILTER", True)

    return new_experiments, new_reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(reflection_filter)
    # START OUTPUT ALL UNIT CELLS
    all_results = []
    for experiment_id, experiment in enumerate(all_experiments):
      if experiment.identifier is None or len(experiment.identifier) == 0:
        experiment.identifier = create_experiment_identifier(experiment, experiments_filename, experiment_id)
      uc = experiment.crystal.get_unit_cell()
      sg = "".join(experiment.crystal.get_space_group().type().lookup_symbol().split())
      uc_ps = uc.parameters()
      result = "%f %f %f %f %f %f %s" % (uc_ps[0], uc_ps[1], uc_ps[2], uc_ps[3], uc_ps[4], uc_ps[5], sg)
      all_results.append(result)

    comm = self.mpi_helper.comm
    MPI = self.mpi_helper.MPI
    global_results = comm.reduce(all_results, MPI.SUM, 0)

    if self.mpi_helper.rank == 0:
      file_cells = open("%s.tdata" % self.params.tdata.output_path, "w")
      for result in global_results:
        line = str(result) + "\n"
        file_cells.write(line)
      file_cells.close()
      self.logger.main_log("output a list of %d unit cells" % len(global_results))
    # END OUTPUT ALL UNIT CELLS

    return all_experiments, all_reflections

if __name__ == '__main__':
  from xfel.merging.application.worker import exercise_worker
  exercise_worker(simple_cell_listing)
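# --- Illustrative sketch ---
# comm.reduce(..., MPI.SUM, 0) above is the lowercase, pickle-based reduce.
# Applied to Python lists, MPI.SUM falls back to Python's "+" operator, so
# rank 0 receives the concatenation of every rank's list (other ranks get
# None). A minimal mpi4py example of the same gather-by-concatenation:
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

local_lines = ["cell from rank %d" % rank]        # per-rank partial results
all_lines = comm.reduce(local_lines, MPI.SUM, 0)  # list concatenation on root
if rank == 0:
  print("\n".join(all_lines))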