def run(self, num_tweets_to_push):
    """Run the Twitter bot under an inter-process lock.

    :param num_tweets_to_push: number of job offers to push to Twitter.
    """
    self._logging(logging.INFO, 'Starting the Twitter bot.')
    with acquire_inter_process_lock('twitter_bot') as acquired:
        if acquired:
            self._push_job_offers_to_twitter(num_tweets_to_push)
        else:
            warn_msg = 'Another instance of the Twitter bot is already ' \
                       'running, aborting now.'
            self._logging(logging.WARNING, warn_msg)
def run(self, num_tweets_to_push):
    """Run the Twitter bot under an inter-process lock.

    :param num_tweets_to_push: number of job offers to push to Twitter.
    """
    self._logging(logging.INFO, 'Starting the Twitter bot.')
    with acquire_inter_process_lock('twitter_bot') as acquired:
        if acquired:
            self._push_job_offers_to_twitter(num_tweets_to_push)
        else:
            warn_msg = 'Another instance of the Twitter bot is already ' \
                       'running, aborting now.'
            self._logging(logging.WARNING, warn_msg)
def _geocode_job_offers(self):
    """Geocode the job offers, guarded by an inter-process lock.

    Aborts with a warning when another process already holds the
    'geocode_job_offers' lock.
    """
    log_msg = 'Starting job offers geocoding operations...'
    self._logging(logging.INFO, log_msg)
    with acquire_inter_process_lock('geocode_job_offers') as acquired:
        if not acquired:
            # Bug fix: the original wrapped literal was missing the
            # trailing space and logged "geocodingoperations".
            err_msg = 'Another process is already performing geocoding ' \
                      'operations on the job offers, aborting now.'
            self._logging(logging.WARNING, err_msg)
        else:
            self._geocode(model.JobAlchemy, self._job_id_logging)
def _geocode_companies(self):
    """Geocode the companies, guarded by an inter-process lock."""
    self._logging(logging.INFO,
                  'Starting companies geocoding operations...')
    with acquire_inter_process_lock('geocode_companies') as acquired:
        if acquired:
            self._geocode(model.CompanyAlchemy, self._company_id_logging)
        else:
            self._logging(
                logging.WARNING,
                'Another process is already performing geocoding '
                'operations on the companies, aborting now.')
def _geocode_job_offers(self):
    """Geocode the job offers, guarded by an inter-process lock.

    Aborts with a warning when another process already holds the
    'geocode_job_offers' lock.
    """
    log_msg = 'Starting job offers geocoding operations...'
    self._logging(logging.INFO, log_msg)
    with acquire_inter_process_lock('geocode_job_offers') as acquired:
        if not acquired:
            # Bug fix: the original wrapped literal was missing the
            # trailing space and logged "geocodingoperations".
            err_msg = 'Another process is already performing geocoding ' \
                      'operations on the job offers, aborting now.'
            self._logging(logging.WARNING, err_msg)
        else:
            self._geocode(model.JobAlchemy, self._job_id_logging)
def _geocode_companies(self):
    """Geocode the companies, guarded by an inter-process lock."""
    self._logging(logging.INFO,
                  'Starting companies geocoding operations...')
    with acquire_inter_process_lock('geocode_companies') as acquired:
        if acquired:
            self._geocode(model.CompanyAlchemy, self._company_id_logging)
        else:
            self._logging(
                logging.WARNING,
                'Another process is already performing geocoding '
                'operations on the companies, aborting now.')
def _purge_index(self, index_name, index_settings, doc_type_class):
    """Purge the given Elasticsearch index, guarded by a per-index lock.

    :param index_name: name of the index to purge.
    :param index_settings: settings used to re-create the index.
    :param doc_type_class: DocType class associated with the index.
    """
    self._logging(logging.INFO, 'Purging index %s.' % index_name)
    lock_name = 'purge_%s' % index_name
    with acquire_inter_process_lock(lock_name) as acquired:
        if acquired:
            self._perform_index_purge(index_name, index_settings,
                                      doc_type_class)
        else:
            self._logging(logging.WARNING,
                          'Another process is already purging the %s '
                          'index, aborting now.' % index_name)
def _reset_sync(self, index_name, sqlalchemy_table_class):
    """Reset the synchronization data for *index_name*, under a lock.

    :param index_name: name of the index whose sync data is reset.
    :param sqlalchemy_table_class: SQLAlchemy table class passed to
        the sync-reset helper.
    """
    err_msg = 'Resetting synchronization data for index %s.' % index_name
    self._logging(logging.WARNING, err_msg)
    # NOTE(review): the lock name reuses the 'purge_%s' prefix, so a
    # reset and a purge of the same index mutually exclude each other.
    # Presumably intentional (both touch the same index) — confirm.
    with acquire_inter_process_lock('purge_%s' % index_name) as acquired:
        if not acquired:
            err_msg = 'Another process is already resetting the %s ' \
                      'index synchronization data, aborting now.' \
                      % index_name
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_sync_reset(sqlalchemy_table_class)
def _reset_sync(self, index_name, sqlalchemy_table_class):
    """Reset the synchronization data for *index_name*, under a lock.

    :param index_name: name of the index whose sync data is reset.
    :param sqlalchemy_table_class: SQLAlchemy table class passed to
        the sync-reset helper.
    """
    err_msg = 'Resetting synchronization data for index %s.' % index_name
    self._logging(logging.WARNING, err_msg)
    # NOTE(review): the lock name reuses the 'purge_%s' prefix, so a
    # reset and a purge of the same index mutually exclude each other.
    # Presumably intentional (both touch the same index) — confirm.
    with acquire_inter_process_lock('purge_%s' % index_name) as acquired:
        if not acquired:
            err_msg = 'Another process is already resetting the %s ' \
                      'index synchronization data, aborting now.' \
                      % index_name
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_sync_reset(sqlalchemy_table_class)
def _purge_index(self, index_name, index_settings, doc_type_class):
    """Purge the given Elasticsearch index, guarded by a per-index lock.

    :param index_name: name of the index to purge.
    :param index_settings: settings used to re-create the index.
    :param doc_type_class: DocType class associated with the index.
    """
    self._logging(logging.INFO, 'Purging index %s.' % index_name)
    lock_name = 'purge_%s' % index_name
    with acquire_inter_process_lock(lock_name) as acquired:
        if acquired:
            self._perform_index_purge(index_name, index_settings,
                                      doc_type_class)
        else:
            self._logging(logging.WARNING,
                          'Another process is already purging the %s '
                          'index, aborting now.' % index_name)
def run(self):
    """Update the job file if there are new jobs, then make a push.

    Aborts with a warning when another Github bot instance holds the
    inter-process lock.
    """
    self._logging(logging.INFO, 'Starting the Github bot.')
    with acquire_inter_process_lock('github_bot') as acquired:
        if not acquired:
            err_msg = 'Another instance of the Github bot is already ' \
                      'running, aborting now.'
            # Consistency fix: use the class logging helper like the
            # INFO line above (the original bypassed it with
            # logging.getLogger(__name__).log).
            self._logging(logging.WARNING, err_msg)
        else:
            self._push_new_job_offers_to_github()
def _populate_geocomplete_index(self, max_doc=1000):
    """Populate the geocomplete index, guarded by an inter-process lock.

    :param max_doc: maximum number of documents used for the
        population run.
    """
    log_msg = 'Populating geocomplete index.'
    self._logging(logging.INFO, log_msg)
    with acquire_inter_process_lock('populate_geocomplete') as acquired:
        if not acquired:
            err_msg = 'Another process is already populating the ' \
                      'geocomplete index, aborting now.'
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_geocomplete_index_population(max_doc)
            # Fixed typo ('gecomplete') and only report success when
            # the population actually ran — the original logged this
            # even after aborting on a held lock.
            log_msg = 'geocomplete index populated and refreshed.'
            self._logging(logging.INFO, log_msg)
def run(self):
    """Update the job file if there are new jobs, then make a push.

    Aborts with a warning when another Github bot instance holds the
    inter-process lock.
    """
    self._logging(logging.INFO, 'Starting the Github bot.')
    with acquire_inter_process_lock('github_bot') as acquired:
        if not acquired:
            err_msg = 'Another instance of the Github bot is already ' \
                      'running, aborting now.'
            # Consistency fix: use the class logging helper like the
            # INFO line above (the original bypassed it with
            # logging.getLogger(__name__).log).
            self._logging(logging.WARNING, err_msg)
        else:
            self._push_new_job_offers_to_github()
def _populate_geocomplete_index(self, max_doc=1000):
    """Populate the geocomplete index, guarded by an inter-process lock.

    :param max_doc: maximum number of documents used for the
        population run.
    """
    log_msg = 'Populating geocomplete index.'
    self._logging(logging.INFO, log_msg)
    with acquire_inter_process_lock('populate_geocomplete') as acquired:
        if not acquired:
            err_msg = 'Another process is already populating the ' \
                      'geocomplete index, aborting now.'
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_geocomplete_index_population(max_doc)
            # Fixed typo ('gecomplete') and only report success when
            # the population actually ran — the original logged this
            # even after aborting on a held lock.
            log_msg = 'geocomplete index populated and refreshed.'
            self._logging(logging.INFO, log_msg)
def _synchronise_index(self, sql_table_cls, es_doc_cls, id_logger):
    """Synchronize an Elasticsearch index with its SQL table, under a lock.

    :param sql_table_cls: SQLAlchemy table class acting as the source.
    :param es_doc_cls: DocType class identifying the target index.
    :param id_logger: logger callable passed to the sync helper.
    """
    es_doc = es_doc_cls()
    self._logging(logging.INFO, 'Synchronizing %s index.' % es_doc.index)
    with acquire_inter_process_lock('sync_%s' % es_doc.index) as acquired:
        if not acquired:
            # Removed a redundant second es_doc_cls() instantiation
            # that shadowed the one created above.
            err_msg = 'Another process is already synchronizing the %s ' \
                      'index, aborting now.' % es_doc.index
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_index_sync(sql_table_cls, es_doc_cls, id_logger)
            # Only report success when the sync actually ran — the
            # original logged this even after aborting on a held lock.
            self._logging(logging.INFO,
                          'Index %s is now synchronized.' % es_doc.index)
def _synchronise_index(self, sql_table_cls, es_doc_cls, id_logger):
    """Synchronize an Elasticsearch index with its SQL table, under a lock.

    :param sql_table_cls: SQLAlchemy table class acting as the source.
    :param es_doc_cls: DocType class identifying the target index.
    :param id_logger: logger callable passed to the sync helper.
    """
    es_doc = es_doc_cls()
    self._logging(logging.INFO, 'Synchronizing %s index.' % es_doc.index)
    with acquire_inter_process_lock('sync_%s' % es_doc.index) as acquired:
        if not acquired:
            # Removed a redundant second es_doc_cls() instantiation
            # that shadowed the one created above.
            err_msg = 'Another process is already synchronizing the %s ' \
                      'index, aborting now.' % es_doc.index
            self._logging(logging.WARNING, err_msg)
        else:
            self._perform_index_sync(sql_table_cls, es_doc_cls, id_logger)
            # Only report success when the sync actually ran — the
            # original logged this even after aborting on a held lock.
            self._logging(logging.INFO,
                          'Index %s is now synchronized.' % es_doc.index)