def _iteratively_build_table(config, last_id=None, resume_helper=None):
    """Build all indicator rows for ``config`` in ID_CHUNK_SIZE batches.

    :param config: the data source configuration being built
    :param last_id: document id to resume iteration after (passed to the
        document store's id iterator), or None to start from the beginning
    :param resume_helper: optional DataSourceResumeHelper; a new one is
        created from ``config`` when not supplied
    """
    resume_helper = resume_helper or DataSourceResumeHelper(config)
    indicator_config_id = config._id
    relevant_ids = []
    document_store = get_document_store(config.domain, config.referenced_doc_type)
    for relevant_id in document_store.iter_document_ids(last_id):
        relevant_ids.append(relevant_id)
        if len(relevant_ids) >= ID_CHUNK_SIZE:
            # record the chunk before building it so an interrupted run can resume here
            resume_helper.set_ids_to_resume_from(relevant_ids)
            _build_indicators(config, document_store, relevant_ids, resume_helper)
            relevant_ids = []
    if relevant_ids:
        # flush the final, partial chunk
        resume_helper.set_ids_to_resume_from(relevant_ids)
        _build_indicators(config, document_store, relevant_ids, resume_helper)

    if not id_is_static(indicator_config_id):
        # dynamic configs track build state on the document itself
        resume_helper.clear_ids()
        config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            # someone else saved the config while we were building
            current_config = DataSourceConfiguration.get(config._id)
            # check that a new build has not yet started
            if config.meta.build.initiated == current_config.meta.build.initiated:
                current_config.meta.build.finished = True
                current_config.save()
def _iteratively_build_table(config, last_id=None, resume_helper=None):
    """Build all indicator rows for ``config`` in ID_CHUNK_SIZE batches,
    then run the adapter's post-build hook.

    :param config: the data source configuration being built
    :param last_id: document id to resume iteration after (passed to the
        document store's id iterator), or None to start from the beginning
    :param resume_helper: optional DataSourceResumeHelper; a new one is
        created from ``config`` when not supplied
    """
    resume_helper = resume_helper or DataSourceResumeHelper(config)
    indicator_config_id = config._id
    relevant_ids = []
    document_store = get_document_store(config.domain, config.referenced_doc_type)
    for relevant_id in document_store.iter_document_ids(last_id):
        relevant_ids.append(relevant_id)
        if len(relevant_ids) >= ID_CHUNK_SIZE:
            # record the chunk before building it so an interrupted run can resume here
            resume_helper.set_ids_to_resume_from(relevant_ids)
            _build_indicators(config, document_store, relevant_ids, resume_helper)
            relevant_ids = []
    if relevant_ids:
        # flush the final, partial chunk
        resume_helper.set_ids_to_resume_from(relevant_ids)
        _build_indicators(config, document_store, relevant_ids, resume_helper)

    if not id_is_static(indicator_config_id):
        # dynamic configs track build state on the document itself
        resume_helper.clear_ids()
        config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            # someone else saved the config while we were building
            current_config = DataSourceConfiguration.get(config._id)
            # check that a new build has not yet started
            if config.meta.build.initiated == current_config.meta.build.initiated:
                current_config.meta.build.finished = True
                current_config.save()

    # give the adapter a chance to finalize (e.g. post-build maintenance)
    adapter = get_indicator_adapter(config, raise_errors=True, can_handle_laboratory=True)
    adapter.after_table_build()
def rebuild_indicators(indicator_config_id, initiated_by=None, limit=-1):
    """Drop and rebuild the table for the given data source config.

    Notifies ``initiated_by`` on success/failure, but only for full
    rebuilds (``limit == -1``) and only when the rebuild-info toggle is
    enabled for that user.
    """
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding in {}').format(
        config.table_id, config.domain)
    failure = _(
        'There was an error rebuilding Your UCR table {} in {}.').format(
        config.table_id, config.domain)
    # limited rebuilds (report-builder previews) never send notifications
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by) if limit == -1 else False
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config)
        if not id_is_static(indicator_config_id):
            # Save the start time now in case anything goes wrong. This way we'll be
            # able to see if the rebuild started a long time ago without finishing.
            build_meta = config.meta.build
            build_meta.initiated = datetime.utcnow()
            build_meta.finished = False
            build_meta.rebuilt_asynchronously = False
            config.save()
        adapter.rebuild_table()
        _iteratively_build_table(config, limit=limit)
def rebuild_indicators(indicator_config_id, initiated_by=None, limit=-1, source=None, engine_id=None, diffs=None):
    """Drop and rebuild the table for the given data source config.

    :param indicator_config_id: id of the data source configuration
    :param initiated_by: user to notify on completion (full rebuilds only)
    :param limit: max documents to process; -1 means no limit
    :param source: origin tag forwarded to the adapter's rebuild log
    :param engine_id: when set, rebuild only the adapter bound to this engine
    :param diffs: forwarded to ``adapter.rebuild_table``
    :raises AssertionError: if ``engine_id`` matches none of the adapters
    """
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding Your UCR table {} in {}.').format(config.table_id, config.domain)
    send = False
    if limit == -1:
        send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config)
        if engine_id:
            if getattr(adapter, 'all_adapters', None):
                matching = [
                    adapter_ for adapter_ in adapter.all_adapters
                    if adapter_.engine_id == engine_id
                ]
                # previously raised an opaque IndexError when nothing matched;
                # fail explicitly, consistent with the single-adapter branch
                if not matching:
                    raise AssertionError("Engine ID does not match adapter")
                adapter = matching[0]
            elif adapter.engine_id != engine_id:
                raise AssertionError("Engine ID does not match adapter")
        if not id_is_static(indicator_config_id):
            # Save the start time now in case anything goes wrong. This way we'll be
            # able to see if the rebuild started a long time ago without finishing.
            config.meta.build.initiated = datetime.utcnow()
            config.meta.build.finished = False
            config.meta.build.rebuilt_asynchronously = False
            config.save()
        skip_log = bool(limit > 0)  # don't store log for temporary report builder UCRs
        adapter.rebuild_table(initiated_by=initiated_by, source=source, skip_log=skip_log, diffs=diffs)
        _iteratively_build_table(config, limit=limit)
def _iteratively_build_table(config, resume_helper=None, in_place=False, limit=-1):
    """Build indicator rows for ``config``, iterating per case type / xmlns,
    then run the adapter's post-build hook.

    :param config: data source configuration to build
    :param resume_helper: optional DataSourceResumeHelper (created if omitted)
    :param in_place: when True, mark the ``*_in_place`` build flags on finish
    :param limit: stop after this many documents per case type/xmlns; -1 = no limit
    """
    resume_helper = resume_helper or DataSourceResumeHelper(config)
    indicator_config_id = config._id
    case_type_or_xmlns_list = config.get_case_type_or_xmlns_filter()
    completed_ct_xmlns = resume_helper.get_completed_case_type_or_xmlns()
    if completed_ct_xmlns:
        # skip case types / xmlnses already finished by an interrupted earlier run
        case_type_or_xmlns_list = [
            case_type_or_xmlns
            for case_type_or_xmlns in case_type_or_xmlns_list
            if case_type_or_xmlns not in completed_ct_xmlns
        ]

    for case_type_or_xmlns in case_type_or_xmlns_list:
        relevant_ids = []
        document_store = get_document_store(
            config.domain, config.referenced_doc_type,
            case_type_or_xmlns=case_type_or_xmlns)

        for i, relevant_id in enumerate(document_store.iter_document_ids()):
            # honor the document limit (only applies when limit > -1)
            if i >= limit > -1:
                break
            relevant_ids.append(relevant_id)
            if len(relevant_ids) >= ID_CHUNK_SIZE:
                _build_indicators(config, document_store, relevant_ids)
                relevant_ids = []

        if relevant_ids:
            # flush the final, partial chunk
            _build_indicators(config, document_store, relevant_ids)

        resume_helper.add_completed_case_type_or_xmlns(case_type_or_xmlns)

    resume_helper.clear_resume_info()
    if not id_is_static(indicator_config_id):
        # dynamic configs track build state on the document itself
        if in_place:
            config.meta.build.finished_in_place = True
        else:
            config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            # someone else saved the config while we were building
            current_config = DataSourceConfiguration.get(config._id)
            # check that a new build has not yet started
            if in_place:
                if config.meta.build.initiated_in_place == current_config.meta.build.initiated_in_place:
                    current_config.meta.build.finished_in_place = True
            else:
                if config.meta.build.initiated == current_config.meta.build.initiated:
                    current_config.meta.build.finished = True
            current_config.save()

    # give the adapter a chance to finalize (e.g. post-build maintenance)
    adapter = get_indicator_adapter(config, raise_errors=True, can_handle_laboratory=True)
    adapter.after_table_build()
def _iteratively_build_table(config, resume_helper=None, in_place=False, limit=-1):
    """Build indicator rows for ``config``, iterating over every
    (domain, case type / xmlns) combination the config covers.

    :param config: data source configuration to build
    :param resume_helper: optional DataSourceResumeHelper (created if omitted)
    :param in_place: when True, mark the ``*_in_place`` build flags on finish
    :param limit: stop after this many documents per iteration; -1 = no limit
    """
    resume_helper = resume_helper or DataSourceResumeHelper(config)
    indicator_config_id = config._id
    case_type_or_xmlns_list = config.get_case_type_or_xmlns_filter()
    domains = config.data_domains
    # one build pass per (domain, case type/xmlns) pair
    loop_iterations = list(itertools.product(domains, case_type_or_xmlns_list))
    completed_iterations = resume_helper.get_completed_iterations()
    if completed_iterations:
        # skip pairs already finished by an interrupted earlier run
        loop_iterations = list(
            set(loop_iterations) - set(completed_iterations))

    for domain, case_type_or_xmlns in loop_iterations:
        relevant_ids = []
        document_store = get_document_store_for_doc_type(
            domain, config.referenced_doc_type,
            case_type_or_xmlns=case_type_or_xmlns,
            load_source="build_indicators",
        )

        for i, relevant_id in enumerate(document_store.iter_document_ids()):
            # honor the document limit (only applies when limit > -1)
            if i >= limit > -1:
                break
            relevant_ids.append(relevant_id)
            if len(relevant_ids) >= ID_CHUNK_SIZE:
                _build_indicators(config, document_store, relevant_ids)
                relevant_ids = []

        if relevant_ids:
            # flush the final, partial chunk
            _build_indicators(config, document_store, relevant_ids)

        resume_helper.add_completed_iteration(domain, case_type_or_xmlns)

    resume_helper.clear_resume_info()
    if not id_is_static(indicator_config_id):
        # dynamic configs track build state on the document itself
        if in_place:
            config.meta.build.finished_in_place = True
        else:
            config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            # someone else saved the config while we were building
            current_config = get_ucr_datasource_config_by_id(config._id)
            # check that a new build has not yet started
            if in_place:
                if config.meta.build.initiated_in_place == current_config.meta.build.initiated_in_place:
                    current_config.meta.build.finished_in_place = True
            else:
                if config.meta.build.initiated == current_config.meta.build.initiated:
                    current_config.meta.build.finished = True
            current_config.save()
def get_ucr_datasource_config_by_id(indicator_config_id, allow_deleted=False):
    """Fetch a data source config by id, static or database-backed.

    :param indicator_config_id: id of the data source configuration
    :param allow_deleted: forwarded to the doc wrapper for DB-backed configs
    """
    # local import to avoid a circular dependency with the models module
    from corehq.apps.userreports.models import (
        id_is_static,
        StaticDataSourceConfiguration,
        DataSourceConfiguration,
    )
    if id_is_static(indicator_config_id):
        return StaticDataSourceConfiguration.by_id(indicator_config_id)
    doc = DataSourceConfiguration.get_db().get(indicator_config_id)
    return _wrap_data_source_by_doc_type(doc, allow_deleted)
def _iteratively_build_table(config, resume_helper=None, in_place=False, limit=-1):
    """Build indicator rows for ``config``, iterating per case type / xmlns.

    :param config: data source configuration to build
    :param resume_helper: optional DataSourceResumeHelper (created if omitted)
    :param in_place: when True, mark the ``*_in_place`` build flags on finish
    :param limit: stop after this many documents per case type/xmlns; -1 = no limit
    """
    resume_helper = resume_helper or DataSourceResumeHelper(config)
    indicator_config_id = config._id
    case_type_or_xmlns_list = config.get_case_type_or_xmlns_filter()
    completed_ct_xmlns = resume_helper.get_completed_case_type_or_xmlns()
    if completed_ct_xmlns:
        # skip case types / xmlnses already finished by an interrupted earlier run
        case_type_or_xmlns_list = [
            case_type_or_xmlns
            for case_type_or_xmlns in case_type_or_xmlns_list
            if case_type_or_xmlns not in completed_ct_xmlns
        ]

    for case_type_or_xmlns in case_type_or_xmlns_list:
        relevant_ids = []
        document_store = get_document_store_for_doc_type(
            config.domain, config.referenced_doc_type,
            case_type_or_xmlns=case_type_or_xmlns,
            load_source="build_indicators",
        )

        for i, relevant_id in enumerate(document_store.iter_document_ids()):
            # honor the document limit (only applies when limit > -1)
            if i >= limit > -1:
                break
            relevant_ids.append(relevant_id)
            if len(relevant_ids) >= ID_CHUNK_SIZE:
                _build_indicators(config, document_store, relevant_ids)
                relevant_ids = []

        if relevant_ids:
            # flush the final, partial chunk
            _build_indicators(config, document_store, relevant_ids)

        resume_helper.add_completed_case_type_or_xmlns(case_type_or_xmlns)

    resume_helper.clear_resume_info()
    if not id_is_static(indicator_config_id):
        # dynamic configs track build state on the document itself
        if in_place:
            config.meta.build.finished_in_place = True
        else:
            config.meta.build.finished = True
        try:
            config.save()
        except ResourceConflict:
            # someone else saved the config while we were building
            current_config = DataSourceConfiguration.get(config._id)
            # check that a new build has not yet started
            if in_place:
                if config.meta.build.initiated_in_place == current_config.meta.build.initiated_in_place:
                    current_config.meta.build.finished_in_place = True
            else:
                if config.meta.build.initiated == current_config.meta.build.initiated:
                    current_config.meta.build.finished = True
            current_config.save()
def rebuild_indicators_in_place(indicator_config_id, initiated_by=None):
    """Rebuild a UCR table in place (without dropping it first),
    notifying ``initiated_by`` if the rebuild-info toggle allows."""
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding').format(config.table_id)
    failure = _('There was an error rebuilding Your UCR table {}.').format(config.table_id)
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config, can_handle_laboratory=True)
        if not id_is_static(indicator_config_id):
            # record the start so a stalled in-place rebuild is visible
            build_meta = config.meta.build
            build_meta.initiated_in_place = datetime.utcnow()
            build_meta.finished_in_place = False
            config.save()
        adapter.build_table()
        _iteratively_build_table(config, in_place=True)
def rebuild_indicators_in_place(indicator_config_id, initiated_by=None, source=None):
    """Rebuild a UCR table in place (without dropping it first),
    notifying ``initiated_by`` if the rebuild-info toggle allows.

    :param source: origin tag forwarded to ``adapter.build_table``
    """
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding Your UCR table {} in {}.').format(config.table_id, config.domain)
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config)
        if not id_is_static(indicator_config_id):
            # record the start so a stalled in-place rebuild is visible
            build_meta = config.meta.build
            build_meta.initiated_in_place = datetime.utcnow()
            build_meta.finished_in_place = False
            build_meta.rebuilt_asynchronously = False
            config.save()
        adapter.build_table(initiated_by=initiated_by, source=source)
        _iteratively_build_table(config, in_place=True)
def rebuild_indicators(indicator_config_id, initiated_by=None):
    """Drop and rebuild the SQL table for the given data source config,
    notifying ``initiated_by`` if the rebuild-info toggle allows."""
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding').format(config.table_id)
    failure = _('There was an error rebuilding Your UCR table {}.').format(config.table_id)
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = IndicatorSqlAdapter(config)
        if not id_is_static(indicator_config_id):
            # Save the start time now in case anything goes wrong. This way we'll be
            # able to see if the rebuild started a long time ago without finishing.
            build_meta = config.meta.build
            build_meta.initiated = datetime.datetime.utcnow()
            build_meta.finished = False
            config.save()
        adapter.rebuild_table()
        _iteratively_build_table(config)
def rebuild_indicators(indicator_config_id, initiated_by=None, limit=-1, source=None):
    """Drop and rebuild the table for the given data source config.

    Notifies ``initiated_by`` only for full rebuilds (``limit == -1``)
    when the rebuild-info toggle is enabled for that user.

    :param source: origin tag forwarded to the adapter's rebuild log
    """
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding Your UCR table {} in {}.').format(config.table_id, config.domain)
    # limited rebuilds (report-builder previews) never send notifications
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by) if limit == -1 else False
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config)
        if not id_is_static(indicator_config_id):
            # Save the start time now in case anything goes wrong. This way we'll be
            # able to see if the rebuild started a long time ago without finishing.
            build_meta = config.meta.build
            build_meta.initiated = datetime.utcnow()
            build_meta.finished = False
            build_meta.rebuilt_asynchronously = False
            config.save()
        # don't store log for temporary report builder UCRs
        skip_log = bool(limit > 0)
        adapter.rebuild_table(initiated_by=initiated_by, source=source, skip_log=skip_log)
        _iteratively_build_table(config, limit=limit)
def rebuild_indicators_in_place(indicator_config_id, initiated_by=None):
    """Rebuild a UCR table in place (without dropping it first),
    notifying ``initiated_by`` if the rebuild-info toggle allows."""
    config = _get_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding').format(
        config.table_id)
    failure = _('There was an error rebuilding Your UCR table {}.').format(
        config.table_id)
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config, can_handle_laboratory=True)
        if not id_is_static(indicator_config_id):
            # Save the start time now in case anything goes wrong. This way we'll be
            # able to see if the rebuild started a long time ago without finishing.
            # NOTE(review): this sets .initiated/.finished rather than the
            # *_in_place flags other in-place variants use — confirm intended.
            build_meta = config.meta.build
            build_meta.initiated = datetime.datetime.utcnow()
            build_meta.finished = False
            config.save()
        adapter.build_table()
        _iteratively_build_table(config)
def rebuild_indicators_in_place(indicator_config_id, initiated_by=None, source=None, domain=None):
    """Rebuild a UCR table in place (without dropping it first),
    notifying ``initiated_by`` if the rebuild-info toggle allows.

    :param source: origin tag forwarded to ``adapter.build_table``
    :param domain: accepted for interface compatibility; not used here
    """
    config = get_ucr_datasource_config_by_id(indicator_config_id)
    success = _('Your UCR table {} has finished rebuilding in {}').format(
        config.table_id, config.domain)
    failure = _(
        'There was an error rebuilding Your UCR table {} in {}.').format(
        config.table_id, config.domain)
    send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
    with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
        adapter = get_indicator_adapter(config)
        if not id_is_static(indicator_config_id):
            # record the start so a stalled in-place rebuild is visible
            build_meta = config.meta.build
            build_meta.initiated_in_place = datetime.utcnow()
            build_meta.finished_in_place = False
            build_meta.rebuilt_asynchronously = False
            config.save()
        adapter.build_table(initiated_by=initiated_by, source=source)
        _iteratively_build_table(config, in_place=True)
def _get_config_by_id(indicator_config_id):
    """Load a data source config by id, whether static or DB-backed."""
    if id_is_static(indicator_config_id):
        return StaticDataSourceConfiguration.by_id(indicator_config_id)
    return DataSourceConfiguration.get(indicator_config_id)
def get_redis_key_for_config(config):
    """Return the redis queue key for ``config``.

    Static configs have no couch revision, so a fixed 'static' marker is
    used in place of ``_rev``.
    """
    revision = 'static' if id_is_static(config._id) else config._rev
    return 'ucr_queue-{}:{}'.format(config._id, revision)
def read_only(self):
    """Static configs are read-only; everything else (or no config) is not."""
    if self.config_id is None:
        return False
    return id_is_static(self.config_id)
def read_only(self):
    """Static configs are read-only; everything else (or no config) is not."""
    if self.config_id is not None:
        return id_is_static(self.config_id)
    return False