Example #1
    def worker_handler(self, should_stop):
        mq = get_mq(seafes_config.subscribe_mq, seafes_config.subscribe_server,
                    seafes_config.subscribe_port,
                    seafes_config.subscribe_password)
        logger.info('%s starting work' % threading.current_thread().name)
        try:
            while not should_stop.isSet():
                try:
                    res = mq.brpop('index_task', timeout=30)
                    if res is not None:
                        key, value = res
                        msg = value.split('\t')
                        if len(msg) != 3:
                            logger.info('Bad message: %s' % str(msg))
                        else:
                            repo_id, commit_id = msg[1], msg[2]
                            self.worker_task_handler(mq, repo_id, commit_id,
                                                     should_stop)
                except (ResponseError, NoMQAvailable, TimeoutError) as e:
                    logger.error(
                        'The connection to the redis server failed: %s' % e)
        except Exception as e:
            logger.error('%s Handle Worker Task Error' %
                         threading.current_thread().name)
            logger.error(e, exc_info=True)
            # avoid a tight loop if redis breaks while the program is running
            time.sleep(0.3)
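
How the worker threads that run worker_handler are created is outside this snippet. Below is a minimal sketch of one plausible wiring, assuming an index_worker object that exposes the worker_handler method above and a shared threading.Event as the stop flag; the start_workers helper name and the thread count are illustrative only, not part of seafes.

import threading

def start_workers(index_worker, num_workers=2):
    should_stop = threading.Event()
    threads = []
    for _ in range(num_workers):
        # each worker blocks on mq.brpop('index_task') until should_stop is set
        t = threading.Thread(target=index_worker.worker_handler,
                             args=(should_stop,))
        t.start()
        threads.append(t)
    return should_stop, threads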
Example #2
def clear(should_stop):
    seafes_config.load_index_slave_conf()
    global locked_keys
    mq = get_mq(seafes_config.subscribe_mq, seafes_config.subscribe_server,
                seafes_config.subscribe_port, seafes_config.subscribe_password)
    # stop the worker threads
    logger.info("stop work thread")
    should_stop.set()
    # if a thread has just locked a key, wait for it to add the lock to the list.
    time.sleep(1)
    # del redis locked key
    for key in locked_keys:
        mq.delete(key)
        logger.info("redis lock key %s has been deleted" % key)
    # terminate the process immediately (os._exit skips cleanup handlers)
    logger.info("Exit the process")
    os._exit(0)
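
The snippet does not show how clear() gets invoked. A plausible sketch is to register it as a signal handler so the process shuts down cleanly on SIGTERM/SIGINT; the wiring below is an assumption, not seafes's actual code, and should_stop is assumed to be the same Event shared with the worker threads.

import signal
import threading

should_stop = threading.Event()

def handle_signal(signum, frame):
    # stop the workers, delete the redis lock keys, then exit the process
    clear(should_stop)

signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)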
Example #3
    def refresh_lock(self):
        mq = get_mq(seafes_config.subscribe_mq, seafes_config.subscribe_server,
                    seafes_config.subscribe_port,
                    seafes_config.subscribe_password)
        logger.info('%s Starting refresh locks' % self.tname)
        while True:
            try:
                # workaround for the RuntimeError: Set changed size during iteration
                copy = locked_keys.copy()

                for lock in copy:
                    ttl = mq.ttl(lock)
                    new_ttl = ttl + self.REFRESH_INTERVAL
                    mq.expire(lock, new_ttl)
                    logger.debug('%s Refresh lock [%s] timeout from %s to %s' %
                                 (self.tname, lock, ttl, new_ttl))

                time.sleep(self.REFRESH_INTERVAL)
            except Exception as e:
                logger.error(e)
                time.sleep(1)
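
refresh_lock only extends the TTL of keys already registered in locked_keys; acquiring a lock is not shown. A minimal sketch of how such a lock could be taken with redis-py's atomic SET NX EX, assuming the acquire_lock helper and the key naming are illustrative rather than seafes's real scheme:

def acquire_lock(mq, repo_id, ttl=60):
    key = 'index_lock_' + repo_id          # key format is an assumption
    # SET with nx=True only succeeds if the key does not exist yet
    if mq.set(key, 1, nx=True, ex=ttl):
        locked_keys.add(key)               # registered so refresh_lock keeps it alive
        return key
    return None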
Example #4
    def __init__(self):
        Thread.__init__(self)
        self.mq = get_mq(seafes_config.subscribe_mq,
                         seafes_config.subscribe_server,
                         seafes_config.subscribe_port,
                         seafes_config.subscribe_password)
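
This fragment is the constructor of a Thread subclass; the rest of the class is not shown. A minimal sketch of the surrounding pattern, assuming the work happens in a run() method (the class name MQThread and the run body are placeholders, not seafes code):

from threading import Thread

class MQThread(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.mq = get_mq(seafes_config.subscribe_mq,
                         seafes_config.subscribe_server,
                         seafes_config.subscribe_port,
                         seafes_config.subscribe_password)

    def run(self):
        # the thread's work loop would go here, e.g. consuming tasks or
        # refreshing lock keys via self.mq as in the earlier examples
        pass

MQThread().start()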
Example #5
import logging
import argparse

from seafes.config import seafes_config
from seafes.repo_data import repo_data
from seafes.mq import get_mq

seafes_config.load_index_master_conf()
mq = get_mq(seafes_config.subscribe_mq,
            seafes_config.subscribe_server,
            seafes_config.subscribe_port,
            seafes_config.subscribe_password)

def put_to_redis(repo_id, cmt_id):
    msg = "index_recover\t%s\t%s" % (repo_id, cmt_id)
    mq.lpush('index_task', msg)

def show_all_task():
    logging.info("index task count: %s" % mq.llen('index_task'))

def restore_all_repo():
    start, count = 0, 1000
    while True:
        try:
            repo_commits = repo_data.get_repo_id_commit_id(start, count)
        except Exception as e:
            logging.error("Error: %s" % e)
            return
        else:
            if len(repo_commits) == 0:
                break