def __get_cmdb_host_info(self):
    """Fetch the CMDB host record for this task's source address.

    Returns:
        Whatever ``select_database_info`` yields for table
        ``cmdb_host_information`` filtered by ``self.source_addr``
        (presumably a row dict — confirm against the helper).
    """
    db = dbControl(POOL)
    try:
        # BUG FIX: previously the pooled connection leaked if the query raised.
        return select_database_info(
            db, PROJ_DB_CONFIG["database"], "cmdb_host_information",
            source_addr=self.source_addr)
    finally:
        db.close()
def __get_db_conn_info(self):
    """Fetch DB-backup connection info for this task's source address.

    Returns:
        The ``backup_host_manager`` row for ``self.source_addr``, augmented
        with ``my_files`` (config path, default ``/etc/my.cnf``) and
        ``db_host`` (the source address itself).
    """
    db = dbControl(POOL)
    try:
        # BUG FIX: previously the pooled connection leaked if the query raised.
        result = select_database_info(
            db, PROJ_DB_CONFIG["database"], "backup_host_manager",
            source_addr=self.source_addr)
    finally:
        db.close()
    result["my_files"] = result.get("db_conf", '/etc/my.cnf')
    result["db_host"] = self.source_addr
    return result
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """Celery failure hook: persist the failure traceback on the task's
    history row.

    Best-effort: a DB error here is logged, never re-raised, so the
    original task failure is not masked.
    """
    db = dbControl(POOL)
    try:
        # NOTE(review): pymysql.escape_string was removed in PyMySQL >= 1.0
        # -- confirm the pinned version, or move to a parameterized update.
        db.select_database(PROJ_DB_CONFIG["database"]) \
            .select_table("backup_task_history") \
            .set({"message": pymysql.escape_string(str(einfo))}) \
            .where({"task_id": task_id}).update()
    except Exception as e:
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning(str(e))
    finally:
        db.close()
def after_db_backup_status_update(self, data):
    """Apply *data* to this scheduled run's row in backup_task_history.

    The row is keyed by the composite id ``<p_id>-<t_id>-<stat_time>``.
    DB errors are logged and swallowed (best-effort status update).
    """
    run_id = '-'.join([self.p_id, self.t_id, self.stat_time])
    conn = dbControl(POOL)
    try:
        history = conn.select_database(PROJ_DB_CONFIG["database"]) \
                      .select_table("backup_task_history")
        history.where({"task_id": run_id}).set(data).update()
    except Exception as exc:
        logger.error(str(exc))
    finally:
        conn.close()
def on_success(self, retval, task_id, args, kwargs):
    """Celery success hook: mark the history row done and store the result.

    ``task_status`` 1 appears to mean "finished" -- TODO confirm status codes.
    Best-effort: DB errors are printed and logged, never re-raised.
    """
    db = dbControl(POOL)
    try:
        db.select_database(PROJ_DB_CONFIG["database"]) \
            .select_table("backup_task_history") \
            .where({"task_id": task_id}) \
            .set({"task_status": 1,
                  "message": pymysql.escape_string(str(retval))}).update()
    except Exception as e:
        traceback.print_exc()
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning(str(e))
    finally:
        db.close()
def celery_filesystem_full_backup(cmdb_host_info, backup_path, backup_to_local_path, action):
    """Drive a remote filesystem backup: start, stop, or take a full copy.

    Args:
        cmdb_host_info: host row with source_addr / host_user / host_passwd /
            host_port keys.
        backup_path: path on the remote host to back up.
        backup_to_local_path: local destination path for the backup copy.
        action: "start", "stop", or "fs_full_backup".

    Returns:
        The message/result returned by the distribute_filesystem_backup
        helper ('' for an unrecognized action).
    """
    result = ''
    db = dbControl(POOL)
    sshObj = controlHost(cmdb_host_info["source_addr"], cmdb_host_info["host_user"],
                         cmdb_host_info["host_passwd"], cmdb_host_info["host_port"])
    d = distribute_filesystem_backup(sshObj, cmdb_host_info["source_addr"],
                                     backup_path, backup_to_local_path)
    # The same WHERE clause identifies this task in filesystem_backup_task.
    task_key = {"source_addr": cmdb_host_info["source_addr"],
                "backup_path": backup_path,
                "backup_to_local_path": backup_to_local_path}
    try:
        if action == "start":
            # 2 appears to mean "starting", 1 "running" -- TODO confirm codes.
            db.select_database(PROJ_DB_CONFIG["database"]).select_table("filesystem_backup_task") \
                .where(task_key).set({"backup_status": 2}).update()
            result = d.fs_backup_start()  # celery
            db.select_database(PROJ_DB_CONFIG["database"]).select_table("filesystem_backup_task") \
                .where(task_key).set({"backup_status": 1}).update()
        elif action == "stop":
            result = d.fs_backup_stop()
            db.select_database(PROJ_DB_CONFIG["database"]).select_table("filesystem_backup_task") \
                .where(task_key).set({"backup_status": 0}).update()
        elif action == "fs_full_backup":
            today = ControlTime.date_today(_format="%Y%m%d%H%M%S")[0]
            # Splice 'filesystem_full_backup/<timestamp>' into the local path
            # right after the source-address segment.
            fs_full_backup_path = list(backup_to_local_path.partition(cmdb_host_info["source_addr"]))
            fs_full_backup_path[1] = os.path.join(fs_full_backup_path[1], 'filesystem_full_backup')
            fs_full_backup_path.append('/%s' % today)
            fs_full_backup_path = ''.join(fs_full_backup_path)
            task_id = celery_filesystem_full_backup.request.id
            db.select_database(PROJ_DB_CONFIG["database"]).select_table("backup_task_history") \
                .where({"task_id": task_id}) \
                .set({"backup_path": backup_to_local_path,
                      "backup_to_local_path": fs_full_backup_path}).update()
            result = d.fs_full_backup(backup_to_local_path, fs_full_backup_path)  # celery
    finally:
        # BUG FIX: the DB connection was never closed; close both resources
        # even when the backup helper raises.
        db.close()
        if hasattr(sshObj, "close"):
            sshObj.close()
    return result
def before_db_backup_status_add(self, backup_to_local_path):
    """Insert the initial (pending) history row for a scheduled DB backup.

    ``task_status`` 0 marks the run as not yet finished; DB errors are
    logged and swallowed.
    """
    run_id = '-'.join([self.p_id, self.t_id, self.stat_time])
    row = {
        "stat_time": self.stat_time,
        "task_id": run_id,
        "source_addr": self.source_addr,
        "svc_type": "db",
        "createor": "sched",  # [sic] column spelling matches the schema
        "task_status": 0,
        "backup_to_local_path": backup_to_local_path,
    }
    conn = dbControl(POOL)
    try:
        conn.select_database(PROJ_DB_CONFIG["database"]) \
            .select_table("backup_task_history").add([row])
    except Exception as exc:
        logger.error(str(exc))
    finally:
        conn.close()
def celery_filesystem_agent_install(cmdb_host_info, svc_type):
    """Install backup agent(s) on a host and record install status.

    Args:
        cmdb_host_info: host row with source_addr / host_user / host_passwd /
            host_port keys.
        svc_type: 'fs' (rsync+sersync), 'db' (xtrabackup), or 'all' (both).

    Returns:
        Concatenated agent-install messages ('' for an unknown svc_type).
    """
    result = ''
    db = dbControl(POOL)
    sshObj = controlHost(cmdb_host_info["source_addr"], cmdb_host_info["host_user"],
                         cmdb_host_info["host_passwd"], cmdb_host_info["host_port"],)
    try:
        # BUG FIX: the install calls used to run outside any try/finally, so
        # both the DB connection and the SSH session leaked on failure.
        if svc_type == 'fs':
            data = {"rsync_status": 1, "sersync_status": 1}
            fs_agent_install = backup_agent_install(sshObj)  # celery
            res = fs_agent_install.fs_backup_agent()
            result += res["msg"]
        elif svc_type == 'db':
            data = {"xtrabackup_status": 1}
            db_agent_install = backup_agent_install(sshObj)  # celery
            res = db_agent_install.db_backup_agent()
            result += res["msg"]
        elif svc_type == "all":
            data = {"rsync_status": 1, "sersync_status": 1, "xtrabackup_status": 1}
            fs_agent_install = backup_agent_install(sshObj)  # celery
            res1 = fs_agent_install.fs_backup_agent()
            result += res1["msg"]
            db_agent_install = backup_agent_install(sshObj)  # celery
            res2 = db_agent_install.db_backup_agent()
            result += res2["msg"]
        else:
            data = {}
        try:
            db.select_database(PROJ_DB_CONFIG["database"]).select_table("backup_host_manager") \
                .where({"source_addr": cmdb_host_info["source_addr"]}).set(data).update()
        except Exception as e:
            # FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning(str(e))
    finally:
        # BUG FIX: the DB connection was never closed at all.
        db.close()
        if hasattr(sshObj, "close"):
            sshObj.close()
    return result
from django.conf import settings
from lib.sshConn import controlHost
from lib.dbControl import dbControl

# Celery app, logger, project DB config and the connection pool are all
# injected via Django settings.
app = settings.CELERY
logger = settings.LOGGER
PROJ_DB_CONFIG = settings.PROJ_DB_CONFIG
POOL = settings.POOL
# NOTE(review): module-level connection opened at import time -- presumably
# shared, but every task below opens its own dbControl(POOL); verify this
# global is actually used before removing it.
db = dbControl(POOL)
# from lib.db_backup_tools import *
# import time
# import os
# import sys
# PROJ_LIB_DIR = settings.PROJ_LIB_DIR
# sys.path.insert(0, PROJ_LIB_DIR)


class celery_ssh_check_task(app.Task):
    """Celery task base class whose hooks dump outcome details to stdout."""

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Invoked by Celery when the bound task raises.
        print('{0!r} failed: {1!r}'.format(task_id, exc))
        print(args, 'args')
        print(kwargs, 'kwargs')
        print(einfo, 'einfo')
    #

    def on_success(self, retval, task_id, args, kwargs):
        # Invoked by Celery when the bound task completes normally.
        print(retval, 'retval')
        print(task_id, 'taskid')
        print(args, 'args')
def process_request(self, request):
    """Django middleware hook: attach a pooled DB handle to the request.

    The handle is stashed under ``request.META['db']`` for downstream views;
    nothing here closes it -- presumably a later hook does. TODO confirm.
    """
    handle = dbControl(POOL)
    request.META['db'] = handle
    print(handle, '--process_request--')
def getBackupPolicy(self):
    """Build the scheduler job table from ``backup_sched_task_manager``.

    Returns:
        dict mapping task id (str) -> {"func": callable, "args": [...]},
        each entry optionally extended with day_of_week / hour / minute
        trigger fields taken from the row when set.

    Raises:
        ValueError: if the policy table cannot be read.
    """
    db = dbControl(POOL)
    result = {}
    try:
        res = db.select_database(PROJ_DB_CONFIG["database"]) \
            .select_table("backup_sched_task_manager").select('*', final="dict")
    except Exception as e:
        traceback.print_exc()
        # FIX: logger.warn is deprecated; also corrected the typo in the log
        # message ("调取器" -> "调度器") so both messages match.
        logger.warning("调度器获取任务调度策略失败! err: %s" % (str(e)))
        raise ValueError("调度器获取任务调度策略失败! err: %s" % (str(e)))
    else:
        for j in res:
            t_id = str(j.get("t_id", 0))
            p_id = str(j.get("p_id", 0))
            # Only carry trigger fields that are actually set on the row.
            day_of_week = {"day_of_week": j.get("day_of_week", None)} if j.get("day_of_week", None) else {}
            hour = {"hour": j.get('sched_hour', None)} if j.get('sched_hour', None) else {}
            minute = {"minute": j.get("sched_minute", None)} if j.get("sched_minute", None) else {}
            source_addr = j.get("source_addr")
            svc_type = j.get("svc_type")
            backup_to_local_path = j.get("backup_to_local_path")
            backup_path = j.get("backup_path")
            if svc_type == "db":
                func = sched_task.sched_db_backup_tasks.db_backup_tools(
                    source_addr, p_id, t_id)
                result[t_id] = {
                    "func": func.db_backup_start,
                    "args": [backup_to_local_path, ]
                }
            elif svc_type == "fs":
                func = sched_task.sched_fs_backup_tasks.fs_backup_tools(
                    source_addr, p_id, t_id)
                result[t_id] = {
                    "func": func.fs_backup_start,
                    "args": [backup_path, backup_to_local_path]
                }
            if t_id == "1000000001":
                # Reserved id: automatic backup-duplicate cleanup job.
                func = sched_task.sched_clean_backup_duplicate.duplicate_claen_tools(
                    p_id, t_id)
                result[t_id] = {
                    "func": func.duplicate_clean_start,
                    "args": []
                }
            # NOTE(review): a row with an unknown svc_type and a non-reserved
            # t_id would KeyError here -- pre-existing behavior, left as-is.
            result[t_id].update(day_of_week)
            result[t_id].update(hour)
            result[t_id].update(minute)
        print('--------getBackupPolicy_result------', result)
        return result
    finally:
        # BUG FIX: the DB connection previously leaked on every call.
        db.close()