class BackupWorkerMethod(BaseBackupWorker):
    """Backup worker that picks nodes fit to run a backup, based on the
    workload/disk/memory statistics of every started node known to ZooKeeper.
    """

    def __init__(self):
        BaseBackupWorker.__init__(self)
        # Dedicated ZooKeeper operator for this worker.
        self.zkOper = Requests_ZkOpers()

    def _get_usable_ips(self):
        """Return the IPs of nodes deemed usable as backup targets.

        Raises:
            UserVisiableException: when no node is started, or when any of
                the per-node stat queries / the analysis step fails (the
                traceback is also recorded in ZooKeeper in that case).
        """
        online_node_list = self.zkOper.retrieve_started_nodes()
        if not online_node_list:
            raise UserVisiableException('not started node, please check zk node!')

        # Per-node stat endpoints polled on every started node.
        url_system_path = "/node/stat/workload"
        url_disk_path = "/node/stat/disk/available"
        url_memory_path = "/node/stat/memory/available"
        url_diskenough_path = "/node/stat/disk/enough"

        try:
            system_loads = self._retrieve_nodes_load(online_node_list, url_system_path)
            available_spaces = self._retrieve_nodes_load(online_node_list, url_disk_path)
            available_memory = self._retrieve_nodes_load(online_node_list, url_memory_path)
            disk_enough = self._retrieve_nodes_load(online_node_list, url_diskenough_path)

            usable_ips = self._analysis_usable_backup_node(system_loads, available_spaces, available_memory, disk_enough)

        # Fix: ``except Exception, e`` is Python-2-only syntax; ``as`` works
        # on Python 2.6+ and 3.x alike.
        except Exception as e:
            # Persist the full traceback to ZooKeeper before surfacing the
            # user-visible error.
            self.zkOper.write_backup_backup_info({'error': traceback.format_exc()})
            raise UserVisiableException(e)

        return usable_ips
# Example #2
    def __init__(self, backup_mode='full', incr_basedir=None):
        """Set up the worker thread and its collaborators.

        Args:
            backup_mode: 'full' (default) or any other value for an
                incremental backup.
            incr_basedir: base directory for incremental backups; unused in
                full mode.
        """
        threading.Thread.__init__(self)
        self._backup_mode = backup_mode
        self.backup_record = {}
        self.zkOpers = Requests_ZkOpers()
        self.dba_opers = DBAOpers()
        # The mode selects the concrete backup strategy object.
        self.backupOpers = (FullBackupOpers()
                            if self._backup_mode == "full"
                            else IncrementBackupOpers(incr_basedir))
# Example #3
class DMLBatch(RequestHandler):
    """/db/{db_name}/dml/batch

    Runs a ';'-separated batch of DML statements asynchronously; progress
    and the final result are published to ZooKeeper.
    """
    def initialize(self):
        # Per-request ZooKeeper operator.
        self.zk = Requests_ZkOpers()

    @asynchronous
    @engine
    def post(self, db_name):
        """Accept a DML batch, reply immediately, then execute it.

        The client gets an "in processing" status right away; the actual
        execution happens on the executor after the response is finished.
        """
        # NOTE: dropped ``encoding='utf-8'`` — it was json's default anyway
        # and the kwarg is rejected by Python 3.9+.
        body = json.loads(self.request.body, strict=False)
        dml_sqls = body.get("dmlSqls")
        if not dml_sqls:
            self.set_status(400)
            self.finish({
                "errmsg": "required argument is none",
                "errcode": 40001
            })
            return

        # Record the "started" state in ZooKeeper before answering.
        dml_status = dict(isFinished=False,
                          isSuccessed=False,
                          status='sql batch dml is in processing')
        yield self.set_start_status(db_name, dml_status)
        self.finish(dml_status)

        # Execute after the client already received its reply.
        yield self.dml_execute(db_name, dml_sqls)

    @run_on_executor()
    @run_callback
    def set_start_status(self, db_name, dml_status):
        # Publish the initial (in-progress) status to ZooKeeper.
        self.zk.write_sqlbatch_dml_info(db_name, dml_status)

    @run_on_executor()
    @run_callback
    def dml_execute(self, db_name, dml_sqls):
        """Split the batch on ';', run it, and publish the final status."""
        sqls = [sql for sql in dml_sqls.split(";") if sql]

        batch = SQLBatch(db_name)
        error = batch.dml(sqls)

        # Record the final state in ZooKeeper.
        is_successed = not error  # idiomatic form of ``False if error else True``
        status = error or 'sql batch dml is successed'
        dml_status = dict(isFinished=True,
                          isSuccessed=is_successed,
                          status=status)
        self.zk.write_sqlbatch_dml_info(db_name, dml_status)
# Example #4
    def post(self):
        """Start a backup in a background worker and reply immediately.

        Raises:
            HTTPAPIErrorException: 417 when 'backup_type' is missing/empty.
        """
        incr_basedir = self.get_argument("incr_basedir", None)
        backup_type = self.get_argument("backup_type")
        if not backup_type:
            raise HTTPAPIErrorException(
                "backup params is not given, please check 'backup_type' params.",
                status_code=417)

        # Mark the backup as building in ZooKeeper, then hand off to the
        # dispatcher thread.
        zkOper = Requests_ZkOpers()
        zkOper.write_backup_backup_info({"backup type": "backup is building"})
        worker = DispatchBackupWorker(backup_type, incr_basedir)
        worker.start()

        # The backup runs in the background; tell the caller to wait.
        self.finish({"message": "backup process is running, please waiting"})
# Example #5
class BackupWorkers(threading.Thread):
    """Worker thread performing a full or incremental database backup."""

    def __init__(self, backup_mode='full', incr_basedir=None):
        """Prepare the worker and pick the backup strategy.

        Args:
            backup_mode: 'full' (default) or any other value for an
                incremental backup.
            incr_basedir: base directory for incremental backups; unused in
                full mode.
        """
        threading.Thread.__init__(self)
        self._backup_mode = backup_mode
        self.backup_record = {}
        self.zkOpers = Requests_ZkOpers()
        self.dba_opers = DBAOpers()
        # The mode selects the concrete backup strategy object.
        self.backupOpers = (FullBackupOpers()
                            if self._backup_mode == "full"
                            else IncrementBackupOpers(incr_basedir))

    def run(self):
        isLock, lock = self.zkOpers.lock_backup_action()
        if not isLock:
            logging.info('zk is not lock')
            return

        try:
            _password = retrieve_monitor_password()
            conn = self.dba_opers.get_mysql_connection(user="******",
                                                       passwd=_password)
            if None == conn:
                raise UserVisiableException("Can\'t connect to mysql server")

            db_status = self.dba_opers.show_status(conn)
            if 'Synced' != db_status[-14][1]:
                self.backup_record[
                    'error: '] = 'Mcluster is not start %s' % datetime.datetime.now(
                    ).strftime(TIME_FORMAT)
                self.backupOpers._write_info_to_local(
                    self.backupOpers.path, self.backupOpers.file_name,
                    self.backup_record)
                self.zkOpers.write_backup_backup_info(self.backup_record)
                return

            if '0' == self.__run_comm(CHECK_DMP_DATA_CMD):
                self.backup_record[
                    'error: '] = 'No have /data partition %s' % datetime.datetime.now(
                    ).strftime(TIME_FORMAT)
                self.backupOpers._write_info_to_local(
                    self.backupOpers.path, self.backupOpers.file_name,
                    self.backup_record)
                self.zkOpers.write_backup_backup_info(self.backup_record)
                return

            self.backupOpers.create_backup_directory()
            self.backupOpers.remove_expired_backup_file()

            self.backupOpers.backup_action(self.zkOpers)
            self.backupOpers.trans_backup_file(self.zkOpers)

            record = {
                "recently_backup_ip: ": str(get_localhost_ip()),
                'time: ': datetime.datetime.now().strftime(TIME_FORMAT),
                'backup_type: ': self._backup_mode
            }
            self.zkOpers.write_backup_backup_info(record)

        except Exception, e:
            record = {
                "error: ": 'backup is wrong, please check it!',
                'time:': datetime.datetime.now().strftime(TIME_FORMAT),
                'backup_type: ': self._backup_mode
            }
            self.zkOpers.write_backup_backup_info(record)
            logging.error(e, exc_info=True)

        finally:
 def __init__(self):
     """Create the worker on top of the base class and open its own
     ZooKeeper operator."""
     BaseBackupWorker.__init__(self)
     # Dedicated ZooKeeper operator for this worker instance.
     self.zkOper = Requests_ZkOpers()
# Example #7
 def retrieve_zkOper(self):
     """Return the ZooKeeper operator, creating it lazily on first use.

     Returns:
         The ``Requests_ZkOpers`` instance cached on ``self.zkOper``.
     """
     # ``is None`` is the idiomatic identity check; ``None == x`` goes
     # through __eq__ and can misfire on objects with custom equality.
     if self.zkOper is None:
         self.zkOper = Requests_ZkOpers()
     return self.zkOper
# Example #8
 def initialize(self):
     """Per-request setup hook: create the ZooKeeper operator.

     NOTE(review): looks like a tornado ``RequestHandler.initialize`` —
     confirm against the enclosing class, which is not visible here.
     """
     self.zk = Requests_ZkOpers()
# Example #9
 def get_status(self, db_name):
     """Fetch the batch-DML status recorded in ZooKeeper for *db_name*."""
     return Requests_ZkOpers().retrieve_sqlbatch_dml_status_info(db_name)
# Example #10
class DDLBatch(RequestHandler):
    """/db/{db_name}/ddl/batch

    Runs a batch of DDL jobs asynchronously. ALTER statements go through
    the PT (pt-online-schema-change) tooling, everything else is executed
    directly; progress and the final result are published to ZooKeeper.
    """
    def initialize(self):
        # Per-request ZooKeeper operator.
        self.zk = Requests_ZkOpers()

    @asynchronous
    @engine
    def post(self, db_name):
        """Accept a JSON list of per-table DDL jobs, reply, then execute.

        The client gets an "in processing" status right away; the actual
        execution happens on the executor after the response is finished.
        """
        # NOTE: dropped ``encoding='utf-8'`` — it was json's default anyway
        # and the kwarg is rejected by Python 3.9+.
        body = json.loads(self.request.body, strict=False)
        pts = json.loads(body.get('pts', '[]'))
        if not pts:
            self.set_status(400)
            self.finish({
                "errmsg": "required argument is empty",
                "errcode": 40001
            })
            return

        # Record the "started" state in ZooKeeper before answering.
        ddl_status = dict(isFinished=False,
                          isSuccessed=False,
                          status='sql batch ddl is in processing')
        yield self.set_start_status(db_name, ddl_status)
        self.finish(ddl_status)

        yield self.ddl_execute(db_name, pts)

    @run_on_executor()
    @run_callback
    def set_start_status(self, db_name, ddl_status):
        # Publish the initial (in-progress) status to ZooKeeper.
        self.zk.write_sqlbatch_ddl_info(db_name, ddl_status)

    @run_on_executor()
    @run_callback
    def ddl_execute(self, db_name, pts):
        """Run each DDL job in order, stopping at the first failure, and
        publish the final status to ZooKeeper."""
        batch = SQLBatch(db_name)
        is_finished = True
        is_successed = True
        ddl_status = {}
        for pt in pts:
            ddl_sqls = pt.get("ddlSqls")
            tb_name = pt.get("tbName")
            ddl_type = pt.get('type', 'ALTER')
            # ALTER statements run via the PT tool; anything else goes
            # straight to the MySQL client.
            if ddl_type == DDL_TYPE.ALTER:
                ret = self.execute_one(batch, db_name, tb_name, ddl_sqls)
            else:
                ret = self.execute_direct(batch, db_name, ddl_sqls)
            if not ret:
                is_successed = False
                ddl_status = dict(
                    isFinished=is_finished,
                    isSuccessed=is_successed,
                    status='sql batch ddl({0}) is failed'.format(ddl_sqls))
                break
        if is_successed:
            ddl_status = dict(isFinished=is_finished,
                              isSuccessed=is_successed,
                              status='sql batch ddl is successed')
        self.zk.write_sqlbatch_ddl_info(db_name, ddl_status)

    def execute_direct(self, batch, db_name, sql):
        """Run *sql* directly via the batch helper; True on success."""
        logging.info("[DDL Batch] execute_direct sqls: {0}".format(sql))
        error = batch.sql_excute(sql)
        # Idiomatic form of ``False if error else True``.
        return not error

    def execute_one(self, batch, db_name, tb_name, ddl_sqls):
        """Run an ALTER through the PT tool in two steps; True on success."""
        # Step 1: dry run — the tool reports "Dry run complete." on success.
        logging.info("[DDL Batch] test sqls: {0}".format(ddl_sqls))
        ret = batch.ddl_test(ddl_sqls, tb_name)
        if not ret:
            logging.error("[DDL Batch] test error: {0}".format(ddl_sqls))
            return False

        # Step 2: real run — the tool reports "Successfully." on success.
        logging.info("[DDL Batch] result sqls: {0}".format(ddl_sqls))
        ret = batch.ddl(ddl_sqls, tb_name)
        if not ret:
            logging.error("[DDL Batch] error: {0}".format(ddl_sqls))
            return False
        return True