def echo_start(self):
    """Print the banner that marks the beginning of a backup run."""
    bar = "=" * 90
    print(bar)
    print("★Start backup[{}]".format(public.format_date()))
    print(bar)
def backup_path_to(self, spath, dfile, exclude=None, siteName=None):
    """Compress the directory `spath` into the tarball `dfile`.

    Args:
        spath: source directory to back up.
        dfile: destination .tar.gz path.
        exclude: optional list of exclusion patterns (passed to
            self.get_exclude). Defaults to an empty list; a `None`
            default replaces the original mutable `[]` default.
        siteName: when set, log messages refer to a website backup.

    Returns:
        dfile on success, False on any failure.
    """
    if exclude is None:  # avoid shared mutable default argument
        exclude = []
    if not os.path.exists(spath):
        self.echo_error(
            'The specified directory {} does not exist!'.format(spath))
        return False
    # Normalize away a trailing slash so basename/dirname behave.
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600 — restrictive backup dir mode
    p_size = public.get_path_size(spath)
    self.get_exclude(exclude)
    exclude_config = self._exclude if self._exclude else "Not set"
    if siteName:
        self.echo_info('Backup site: {}'.format(siteName))
        self.echo_info('Website root directory: {}'.format(spath))
    else:
        self.echo_info('Backup directory: {}'.format(spath))
    self.echo_info("Directory size: {}".format(public.to_size(p_size)))
    self.echo_info('Exclusion setting: {}'.format(exclude_config))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info(
        "Partition {} available disk space is: {}, available Inode is: {}".
        format(disk_path, public.to_size(disk_free), disk_inode))
    # Refuse to start when the target partition clearly cannot hold the data.
    if disk_path:
        if disk_free < p_size:
            self.echo_error(
                "The available disk space of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!"
                .format(public.to_size(p_size)))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                "The available Inode of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!"
                .format(self._inode_min))
            return False
    stime = time.time()
    self.echo_info("Start compressing files: {}".format(
        public.format_date(times=stime)))
    if os.path.exists(dfile):
        os.remove(dfile)
    # NOTE(review): spath/dfile are interpolated into a shell command —
    # a path containing a single quote would break/abuse this command.
    public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" +
                     dfile + "' " + self._exclude + " '" + dirname +
                     "' 2>{err_log} 1> /dev/null".format(
                         err_log=self._err_log))
    # Guard against tar failing so hard that no archive was created at all;
    # the original getsize() call would raise FileNotFoundError here.
    tar_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if tar_size < 1:
        self.echo_error("Data compression failed")
        self.echo_info(public.readFile(self._err_log))
        return False
    self.echo_info(
        "File compression completed, took {:.2f} seconds, compressed package size: {}"
        .format(time.time() - stime, public.to_size(tar_size)))
    if siteName:
        self.echo_info("Site backed up to: {}".format(dfile))
    else:
        self.echo_info("Directory has been backed up to: {}".format(dfile))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    return dfile
def backup_database(self, db_name, dfile=None, save=3):
    """Dump database `db_name` to a gzip'd SQL file and record the backup.

    Args:
        db_name: database name to export via mysqldump.
        dfile: optional explicit destination; defaults to
            <self._path>/database/db_<name>_<timestamp>.sql.gz.
        save: number of backups to keep (passed to delete_old).

    Returns:
        dfile on success; False on failure; None when the database is empty.
    """
    self.echo_start()
    if not dfile:
        fname = 'db_{}_{}.sql.gz'.format(
            db_name, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'database', fname)
    else:
        fname = os.path.basename(dfile)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600
    import panelMysql
    if not self._db_mysql:
        self._db_mysql = panelMysql.panelMysql()
    d_tmp = self._db_mysql.query(
        "select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'"
        % db_name)
    p_size = self.map_to_list(d_tmp)[0][0]
    if p_size is None:  # fixed: identity comparison instead of `== None`
        self.echo_error(
            'The specified database [ {} ] has no data!'.format(db_name))
        return
    character = public.get_database_character(db_name)
    self.echo_info('Backup database:{}'.format(db_name))
    self.echo_info("Database size: {}".format(public.to_size(p_size)))
    self.echo_info("Database character set: {}".format(character))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info(
        "Partition {} available disk space is: {}, available Inode is: {}".
        format(disk_path, public.to_size(disk_free), disk_inode))
    if disk_path:
        if disk_free < p_size:
            self.echo_error(
                "The available disk space of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!"
                .format(public.to_size(p_size)))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                "The available Inode of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!"
                .format(self._inode_min))
            return False
    stime = time.time()
    self.echo_info("Start exporting database: {}".format(
        public.format_date(times=stime)))
    if os.path.exists(dfile):
        os.remove(dfile)
    # mypass(True) injects credentials; guarantee they are cleared even if
    # ExecShell raises (the original skipped mypass(False) on exception).
    self.mypass(True)
    try:
        public.ExecShell(
            "/www/server/mysql/bin/mysqldump --default-character-set=" +
            character + " --force --hex-blob --opt " + db_name + " 2>" +
            self._err_log + "| gzip > " + dfile)
    finally:
        self.mypass(False)
    # A valid gzip'd dump is always at least a few hundred bytes.
    gz_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if gz_size < 400:
        self.echo_error("Database export failed!")
        self.echo_info(public.readFile(self._err_log))
        return False
    self.echo_info(
        "Database backup completed, took {:.2f} seconds, compressed package size: {}"
        .format(time.time() - stime, public.to_size(gz_size)))
    if self._cloud:
        self.echo_info("Uploading to {}, please wait ...".format(
            self._cloud._title))
        if self._cloud.upload_file(dfile, 'database'):
            self.echo_info("Successfully uploaded to {}".format(
                self._cloud._title))
        else:
            self.echo_error('Error: File upload failed, skip this backup!')
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    self.echo_info("Database has been backed up to: {}".format(dfile))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    pid = public.M('databases').where('name=?',
                                      (db_name, )).getField('id')
    pdata = {
        'type': '1',
        'name': fname,
        'pid': pid,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info(
                "User settings do not retain local backups, deleted {}"
                .format(dfile))
    # Clean up surplus backups.
    if not self._cloud:
        # Fixed: select only LOCAL records. The original `LIKE '%/%'` also
        # matched cloud records (their filenames contain '/'); sibling
        # implementations use `NOT LIKE '%|%'`, which excludes the
        # '|'-delimited cloud entries.
        backups = public.M('backup').where(
            "type=? and pid=? and filename NOT LIKE '%|%'",
            ('1', pid)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            'type=? and pid=? and filename LIKE "%{}%"'.format(
                self._cloud._name),
            ('1', pid)).field('id,name,filename').select()
    self.delete_old(backups, save, 'database')
    self.echo_end()
    return dfile
def GetCrontab(self, get):
    """Return all crontab entries with human-readable type/cycle labels.

    Args:
        get: request object (unused here; kept for the panel's handler
            calling convention).

    Returns:
        list[dict]: one dict per crontab row, with 'type' and 'cycle'
        replaced by localized descriptions and 'addtime' refreshed from
        the task's log file mtime when available.
    """
    self.checkBackup()
    self.__clean_log()
    cront = public.M('crontab').order("id desc").field(self.field).select()
    # A string result means the query failed because the table predates
    # newer columns; add them all, then retry the select.
    if isinstance(cront, str):
        for sql in (
                "ALTER TABLE 'crontab' ADD 'status' INTEGER DEFAULT 1",
                "ALTER TABLE 'crontab' ADD 'save' INTEGER DEFAULT 3",
                "ALTER TABLE 'crontab' ADD 'backupTo' TEXT DEFAULT off",
                "ALTER TABLE 'crontab' ADD 'sName' TEXT",
                "ALTER TABLE 'crontab' ADD 'sBody' TEXT",
                "ALTER TABLE 'crontab' ADD 'sType' TEXT",
                "ALTER TABLE 'crontab' ADD 'urladdress' TEXT"):
            public.M('crontab').execute(sql, ())
        cront = public.M('crontab').order("id desc").field(
            self.field).select()
    data = []
    # Iterate rows directly (original used range(len(...)) and a dead
    # `tmp = {}` assignment immediately overwritten by the row alias).
    for tmp in cront:
        row_type = tmp['type']
        if row_type == "day":
            tmp['type'] = public.getMsg('CRONTAB_TODAY')
            tmp['cycle'] = public.getMsg('CRONTAB_TODAY_CYCLE', (str(
                tmp['where_hour']), str(tmp['where_minute'])))
        elif row_type == "day-n":
            tmp['type'] = public.getMsg('CRONTAB_N_TODAY',
                                        (str(tmp['where1']), ))
            tmp['cycle'] = public.getMsg(
                'CRONTAB_N_TODAY_CYCLE',
                (str(tmp['where1']), str(tmp['where_hour']),
                 str(tmp['where_minute'])))
        elif row_type == "hour":
            tmp['type'] = public.getMsg('CRONTAB_HOUR')
            tmp['cycle'] = public.getMsg('CRONTAB_HOUR_CYCLE',
                                         (str(tmp['where_minute']), ))
        elif row_type == "hour-n":
            tmp['type'] = public.getMsg('CRONTAB_N_HOUR',
                                        (str(tmp['where1']), ))
            tmp['cycle'] = public.getMsg(
                'CRONTAB_N_HOUR_CYCLE',
                (str(tmp['where1']), str(tmp['where_minute'])))
        elif row_type == "minute-n":
            tmp['type'] = public.getMsg('CRONTAB_N_MINUTE',
                                        (str(tmp['where1']), ))
            tmp['cycle'] = public.getMsg('CRONTAB_N_MINUTE_CYCLE',
                                         (str(tmp['where1']), ))
        elif row_type == "week":
            tmp['type'] = public.getMsg('CRONTAB_WEEK')
            if not tmp['where1']:
                tmp['where1'] = '0'  # default to Sunday when unset
            tmp['cycle'] = public.getMsg(
                'CRONTAB_WEEK_CYCLE',
                (self.toWeek(int(tmp['where1'])), str(tmp['where_hour']),
                 str(tmp['where_minute'])))
        elif row_type == "month":
            tmp['type'] = public.getMsg('CRONTAB_MONTH')
            tmp['cycle'] = public.getMsg(
                'CRONTAB_MONTH_CYCLE',
                (str(tmp['where1']), str(tmp['where_hour']),
                 str(tmp['where_minute'])))
        # Prefer the log file's mtime as the effective last-run time.
        log_file = '/www/server/cron/{}.log'.format(tmp['echo'])
        if os.path.exists(log_file):
            tmp['addtime'] = public.format_date(
                times=int(os.path.getmtime(log_file)))
        data.append(tmp)
    return data
def backup_path(self, spath, dfile=None, exclude=None, save=3):
    """Back up directory `spath`, optionally upload it, and prune old copies.

    Args:
        spath: source directory.
        dfile: optional destination tarball; auto-named when omitted.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        save: number of backups to keep.

    Returns:
        dfile on success, False on failure.
    """
    if exclude is None:
        exclude = []
    self.echo_start()
    if not os.path.exists(spath):
        self.echo_error(
            'The specified directory {} does not exist!'.format(spath))
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    if not dfile:
        fname = 'path_{}_{}.tar.gz'.format(
            dirname, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'path', fname)
    else:
        # Fixed: the original never set fname when dfile was supplied,
        # causing a NameError below when building the cloud record name.
        fname = os.path.basename(dfile)
    if not self.backup_path_to(spath, dfile, exclude):
        return False
    if self._cloud:
        self.echo_info("Uploading to {}, please wait ...".format(
            self._cloud._title))
        if self._cloud.upload_file(dfile, 'path'):
            self.echo_info("Successfully uploaded to {}".format(
                self._cloud._title))
        else:
            self.echo_error('Error: File upload failed, skip this backup!')
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    pdata = {
        'type': '2',
        'name': spath,
        'pid': 0,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info(
                "User settings do not retain local backups, deleted {}"
                .format(dfile))
    # Prune surplus backups of this path.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename NOT LIKE '%|%'",
            ('2', 0, spath)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename LIKE '%{}%'".format(
                self._cloud._name),
            ('2', 0, spath)).field('id,name,filename').select()
    self.delete_old(backups, save, 'path')
    self.echo_end()
    return dfile
def backup_path_to(self, spath, dfile, exclude=None, siteName=None):
    """Compress directory `spath` into tarball `dfile` (Chinese-UI variant).

    Args:
        spath: source directory.
        dfile: destination .tar.gz path.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        siteName: when set, log messages refer to a website backup.

    Returns:
        dfile on success, False on any failure.
    """
    if exclude is None:  # avoid shared mutable default argument
        exclude = []
    if not os.path.exists(spath):
        self.echo_error('指定目录{}不存在!'.format(spath))
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600
    p_size = public.get_path_size(spath)
    self.get_exclude(exclude)
    exclude_config = self._exclude if self._exclude else "未设置"
    if siteName:
        self.echo_info('备份网站:{}'.format(siteName))
        self.echo_info('网站根目录:{}'.format(spath))
    else:
        self.echo_info('备份目录:{}'.format(spath))
    self.echo_info("目录大小:{}".format(public.to_size(p_size)))
    self.echo_info('排除设置:{}'.format(exclude_config))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info("分区{}可用磁盘空间为:{},可用Inode为:{}".format(
        disk_path, public.to_size(disk_free), disk_inode))
    # Abort early when the target partition cannot hold the data.
    if disk_path:
        if disk_free < p_size:
            self.echo_error(
                "目标分区可用的磁盘空间小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                    public.to_size(p_size)))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                "目标分区可用的Inode小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                    self._inode_min))
            return False
    stime = time.time()
    self.echo_info("开始压缩文件:{}".format(public.format_date(times=stime)))
    if os.path.exists(dfile):
        os.remove(dfile)
    # NOTE(review): spath/dfile flow into a shell command unquoted beyond
    # single quotes — paths containing quotes would break this call.
    public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" +
                     dfile + "' " + self._exclude + " '" + dirname +
                     "' 2>{err_log} 1> /dev/null".format(
                         err_log=self._err_log))
    # Guard: if tar created nothing, getsize() on the original would raise.
    tar_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if tar_size < 1:
        self.echo_error("数据压缩失败")
        self.echo_info(public.readFile(self._err_log))
        return False
    self.echo_info("文件压缩完成,耗时{:.2f}秒,压缩包大小:{}".format(
        time.time() - stime, public.to_size(tar_size)))
    if siteName:
        self.echo_info("网站已备份到:{}".format(dfile))
    else:
        self.echo_info("目录已备份到:{}".format(dfile))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    return dfile
def GetConcifInfo(self, get=None):
    """Collect the panel's environment configuration: installed web server,
    PHP versions, phpMyAdmin, and service status for MySQL/Redis/etc.

    Args:
        get: request object (unused; kept for the handler convention).

    Returns:
        dict of configuration/status info keyed by component name.
    """
    # Fetch environment configuration info (cached in the session).
    if not 'config' in session:
        session['config'] = public.M('config').where(
            "id=?", ('1', )).field('webserver,sites_path,backup_path,status,mysql_root'
                                   ).find()
    if not 'email' in session['config']:
        session['config']['email'] = public.M('users').where(
            "id=?", ('1', )).getField('email')
    data = session['config']
    data['webserver'] = public.get_webserver()
    # PHP versions: probe each known version's binary on disk.
    phpVersions = ('52', '53', '54', '55', '56', '70', '71', '72', '73',
                   '74')
    data['php'] = []
    for version in phpVersions:
        tmp = {}
        tmp['setup'] = os.path.exists(self.setupPath + '/php/' + version +
                                      '/bin/php')
        if tmp['setup']:
            phpConfig = self.GetPHPConfig(version)
            tmp['version'] = version
            tmp['max'] = phpConfig['max']
            tmp['maxTime'] = phpConfig['maxTime']
            tmp['pathinfo'] = phpConfig['pathinfo']
            # Running state inferred from the php-cgi unix socket.
            tmp['status'] = os.path.exists('/tmp/php-cgi-' + version +
                                           '.sock')
            data['php'].append(tmp)
    # Detect which web server is installed and read phpMyAdmin's
    # port / PHP version / auth flag out of its config files.
    tmp = {}
    data['webserver'] = ''
    serviceName = 'nginx'
    tmp['setup'] = False
    phpversion = "54"  # fallback defaults used when parsing fails
    phpport = '888'
    pstatus = False
    pauth = False
    if os.path.exists(self.setupPath + '/nginx'):
        data['webserver'] = 'nginx'
        serviceName = 'nginx'
        tmp['setup'] = os.path.exists(self.setupPath + '/nginx/sbin/nginx')
        configFile = self.setupPath + '/nginx/conf/nginx.conf'
        # Best-effort parsing: any read/regex failure leaves the defaults.
        try:
            if os.path.exists(configFile):
                conf = public.readFile(configFile)
                rep = "listen\s+([0-9]+)\s*;"
                rtmp = re.search(rep, conf)
                if rtmp: phpport = rtmp.groups()[0]
                if conf.find('AUTH_START') != -1: pauth = True
                # phpMyAdmin is considered enabled unless the 'stop' stub
                # path appears in the config.
                if conf.find(self.setupPath + '/stop') == -1: pstatus = True
            configFile = self.setupPath + '/nginx/conf/enable-php.conf'
            conf = public.readFile(configFile)
            rep = "php-cgi-([0-9]+)\.sock"
            rtmp = re.search(rep, conf)
            if rtmp: phpversion = rtmp.groups()[0]
        except:
            pass
    elif os.path.exists(self.setupPath + '/apache'):
        data['webserver'] = 'apache'
        serviceName = 'httpd'
        tmp['setup'] = os.path.exists(self.setupPath + '/apache/bin/httpd')
        configFile = self.setupPath + '/apache/conf/extra/httpd-vhosts.conf'
        try:
            if os.path.exists(configFile):
                conf = public.readFile(configFile)
                rep = "php-cgi-([0-9]+)\.sock"
                rtmp = re.search(rep, conf)
                if rtmp: phpversion = rtmp.groups()[0]
                rep = "Listen\s+([0-9]+)\s*\n"
                rtmp = re.search(rep, conf)
                if rtmp: phpport = rtmp.groups()[0]
                if conf.find('AUTH_START') != -1: pauth = True
                if conf.find(self.setupPath + '/stop') == -1: pstatus = True
        except:
            pass
    elif os.path.exists('/usr/local/lsws/bin/lswsctrl'):
        data['webserver'] = 'openlitespeed'
        serviceName = 'openlitespeed'
        tmp['setup'] = os.path.exists('/usr/local/lsws/bin/lswsctrl')
        configFile = '/usr/local/lsws/bin/lswsctrl'
        try:
            if os.path.exists(configFile):
                conf = public.readFile(
                    '/www/server/panel/vhost/openlitespeed/detail/phpmyadmin.conf'
                )
                rep = "/usr/local/lsws/lsphp(\d+)/bin/lsphp"
                rtmp = re.search(rep, conf)
                if rtmp: phpversion = rtmp.groups()[0]
                conf = public.readFile(
                    '/www/server/panel/vhost/openlitespeed/listen/888.conf'
                )
                rep = "address\s+\*\:(\d+)"
                rtmp = re.search(rep, conf)
                if rtmp: phpport = rtmp.groups()[0]
                if conf.find('AUTH_START') != -1: pauth = True
                if conf.find(self.setupPath + '/stop') == -1: pstatus = True
        except:
            pass
    tmp['type'] = data['webserver']
    tmp['version'] = public.readFile(self.setupPath + '/' +
                                     data['webserver'] + '/version.pl')
    tmp['status'] = False
    # Service status via the init script's output.
    result = public.ExecShell('/etc/init.d/' + serviceName + ' status')
    if result[0].find('running') != -1: tmp['status'] = True
    data['web'] = tmp
    # phpMyAdmin block (uses the port/version/auth parsed above).
    tmp = {}
    vfile = self.setupPath + '/phpmyadmin/version.pl'
    tmp['version'] = public.readFile(vfile)
    if tmp['version']: tmp['version'] = tmp['version'].strip()
    tmp['setup'] = os.path.exists(vfile)
    tmp['status'] = pstatus
    tmp['phpversion'] = phpversion.strip()
    tmp['port'] = phpport
    tmp['auth'] = pauth
    data['phpmyadmin'] = tmp
    # Tomcat: presence of the init script doubles as the status flag.
    tmp = {}
    tmp['setup'] = os.path.exists('/etc/init.d/tomcat')
    tmp['status'] = tmp['setup']
    #if public.ExecShell('ps -aux|grep tomcat|grep -v grep')[0] == "": tmp['status'] = False
    tmp['version'] = public.readFile(self.setupPath + '/tomcat/version.pl')
    data['tomcat'] = tmp
    # MySQL: running state inferred from its unix socket.
    tmp = {}
    tmp['setup'] = os.path.exists(self.setupPath + '/mysql/bin/mysql')
    tmp['version'] = public.readFile(self.setupPath + '/mysql/version.pl')
    tmp['status'] = os.path.exists('/tmp/mysql.sock')
    data['mysql'] = tmp
    # Redis / Memcached: running state inferred from their pid files.
    tmp = {}
    tmp['setup'] = os.path.exists(self.setupPath + '/redis/runtest')
    tmp['status'] = os.path.exists('/var/run/redis_6379.pid')
    data['redis'] = tmp
    tmp = {}
    tmp['setup'] = os.path.exists('/usr/local/memcached/bin/memcached')
    tmp['status'] = os.path.exists('/var/run/memcached.pid')
    data['memcached'] = tmp
    tmp = {}
    tmp['setup'] = os.path.exists(self.setupPath + '/pure-ftpd/bin/pure-pw')
    tmp['version'] = public.readFile(self.setupPath +
                                     '/pure-ftpd/version.pl')
    tmp['status'] = os.path.exists('/var/run/pure-ftpd.pid')
    data['pure-ftpd'] = tmp
    data['panel'] = self.GetPanelInfo()
    data['systemdate'] = public.format_date(
        "%Y-%m-%d %H:%M:%S %Z %z"
    )  #public.ExecShell('date +"%Y-%m-%d %H:%M:%S %Z %z"')[0].strip()
    return data
def backup_database(self, db_name, dfile=None, save=3):
    """Dump `db_name` via mysqldump (getMsg-localized variant).

    Args:
        db_name: database name to export.
        dfile: optional explicit destination; auto-named when omitted.
        save: number of backups to keep.

    Returns:
        dfile on success; False on failure; None when the database is empty.
    """
    self.echo_start()
    if not dfile:
        fname = 'db_{}_{}.sql.gz'.format(
            db_name, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'database', fname)
    else:
        fname = os.path.basename(dfile)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600
    import panelMysql
    if not self._db_mysql:
        self._db_mysql = panelMysql.panelMysql()
    d_tmp = self._db_mysql.query(
        "select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'"
        % db_name)
    p_size = self.map_to_list(d_tmp)[0][0]
    if p_size is None:  # fixed: identity comparison instead of `== None`
        self.echo_error(public.getMsg('DB_BACKUP_ERR', (db_name, )))
        return
    character = public.get_database_character(db_name)
    self.echo_info(public.getMsg('DB_BACKUP', (db_name, )))
    self.echo_info(public.getMsg("DB_SIZE", (public.to_size(p_size), )))
    self.echo_info(public.getMsg("DB_CHARACTER", (character, )))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info(
        public.getMsg(
            "PARTITION_INFO",
            (disk_path, str(public.to_size(disk_free)), str(disk_inode))))
    if disk_path:
        if disk_free < p_size:
            # Fixed misplaced tuple comma: (str(x, )) passed a bare string
            # to getMsg instead of a 1-tuple.
            self.echo_error(
                public.getMsg("PARTITION_LESS_THEN",
                              (str(public.to_size(p_size)), )))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                public.getMsg("INODE_LESS_THEN", (self._inode_min, )))
            return False
    stime = time.time()
    self.echo_info(
        public.getMsg("EXPORT_DB", (public.format_date(times=stime), )))
    if os.path.exists(dfile):
        os.remove(dfile)
    # Pass the root password through MYSQL_PWD so it never appears on the
    # command line; always scrub it afterwards (the original's
    # `except Exception as e: raise` was redundant next to finally).
    try:
        password = public.M('config').where('id=?',
                                            (1, )).getField('mysql_root')
        os.environ["MYSQL_PWD"] = password
        backup_cmd = "/www/server/mysql/bin/mysqldump -E -R --default-character-set=" + character + " --force --hex-blob --opt " + db_name + " -u root" + " 2>" + self._err_log + "| gzip > " + dfile
        public.ExecShell(backup_cmd)
    finally:
        os.environ["MYSQL_PWD"] = ""
    # A valid gzip'd dump is always at least a few hundred bytes.
    gz_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if gz_size < 400:
        self.echo_error(public.getMsg("EXPORT_DB_ERR"))
        self.echo_info(public.readFile(self._err_log))
        return False
    compressed_time = str('{:.2f}'.format(time.time() - stime))
    self.echo_info(
        public.getMsg(
            "COMPRESS_TIME",
            (str(compressed_time), str(public.to_size(gz_size)))))
    if self._cloud:
        self.echo_info(
            public.getMsg("BACKUP_UPLOADING", (self._cloud._title, )))
        if self._cloud.upload_file(dfile, 'database'):
            self.echo_info(
                public.getMsg("BACKUP_UPLOAD_SUCCESS",
                              (self._cloud._title, )))
        else:
            # Fixed: the original printed the raw key 'BACKUP_UPLOAD_FAILED'
            # instead of the localized message.
            self.echo_error(public.getMsg('BACKUP_UPLOAD_FAILED'))
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    self.echo_info(public.getMsg("DB_BACKUP_TO", (dfile, )))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    pid = public.M('databases').where('name=?',
                                      (db_name, )).getField('id')
    pdata = {
        'type': '1',
        'name': fname,
        'pid': pid,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            # Fixed: (dfile) was not a tuple.
            self.echo_info(public.getMsg("BACKUP_DEL", (dfile, )))
    # Clean up surplus backups.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and filename NOT LIKE '%|%'",
            ('1', pid)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            'type=? and pid=? and filename LIKE "%{}%"'.format(
                self._cloud._name),
            ('1', pid)).field('id,name,filename').select()
    self.delete_old(backups, save, 'database')
    self.echo_end()
    return dfile
def echo_start(self):
    """Print the localized banner that marks the beginning of a backup run."""
    bar = "=" * 90
    header = "★" + public.getMsg('START_BACKUP') + "[{}]".format(
        public.format_date())
    print(bar)
    print(header)
    print(bar)
def backup_path(self, spath, dfile=None, exclude=None, save=3):
    """Back up directory `spath` (getMsg-localized variant).

    Args:
        spath: source directory.
        dfile: optional destination tarball; auto-named when omitted.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        save: number of backups to keep.

    Returns:
        dfile on success, False on failure.
    """
    if exclude is None:
        exclude = []
    self.echo_start()
    if not os.path.exists(spath):
        self.echo_error(public.getMsg('BACKUP_DIR_NOT_EXIST', (spath, )))
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    if not dfile:
        fname = 'path_{}_{}.tar.gz'.format(
            dirname, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'path', fname)
    else:
        # Fixed: fname was unbound when dfile was supplied, causing a
        # NameError when building the cloud record name below.
        fname = os.path.basename(dfile)
    if not self.backup_path_to(spath, dfile, exclude):
        return False
    if self._cloud:
        self.echo_info(
            public.getMsg("BACKUP_UPLOADING", (self._cloud._title, )))
        if self._cloud.upload_file(dfile, 'path'):
            self.echo_info(
                public.getMsg("BACKUP_UPLOAD_SUCCESS",
                              (self._cloud._title, )))
        else:
            self.echo_error(public.getMsg('BACKUP_UPLOAD_FAILED'))
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    pdata = {
        'type': '2',
        'name': spath,
        'pid': 0,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info(public.getMsg("BACKUP_DEL", (dfile, )))
    # Prune surplus backups of this path.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename NOT LIKE '%|%'",
            ('2', 0, spath)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename LIKE '%{}%'".format(
                self._cloud._name),
            ('2', 0, spath)).field('id,name,filename').select()
    self.delete_old(backups, save, 'path')
    self.echo_end()
    return dfile
def backup_path_to(self, spath, dfile, exclude=None, siteName=None):
    """Compress directory `spath` into tarball `dfile` (getMsg variant).

    Args:
        spath: source directory.
        dfile: destination .tar.gz path.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        siteName: when set, log messages refer to a website backup.

    Returns:
        dfile on success, False on any failure.
    """
    if exclude is None:
        exclude = []
    if not os.path.exists(spath):
        self.echo_error(public.getMsg('BACKUP_DIR_NOT_EXIST', (spath, )))
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600
    p_size = public.get_path_size(spath)
    self.get_exclude(exclude)
    exclude_config = self._exclude if self._exclude else "Not set"
    if siteName:
        # Fixed: (siteName) / (spath) were not tuples; getMsg expects a
        # tuple of substitution args like every other call site.
        self.echo_info(public.getMsg('BACKUP_SITE', (siteName, )))
        self.echo_info(public.getMsg('WEBSITE_DIR', (spath, )))
    else:
        self.echo_info(public.getMsg('BACKUP_DIR', (spath, )))
    # Fixed misplaced comma: str(x, ) inside the parens made this a bare
    # string rather than a 1-tuple.
    self.echo_info(
        public.getMsg("DIR_SIZE", (str(public.to_size(p_size)), )))
    self.echo_info(public.getMsg('BACKUP_EXCLUSION', (exclude_config, )))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info(
        public.getMsg(
            "PARTITION_INFO",
            (disk_path, str(public.to_size(disk_free)), str(disk_inode))))
    if disk_path:
        if disk_free < p_size:
            self.echo_error(
                public.getMsg("PARTITION_LESS_THEN",
                              (str(public.to_size(p_size)), )))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                public.getMsg("INODE_LESS_THEN",
                              (str(self._inode_min), )))
            return False
    stime = time.time()
    self.echo_info(
        public.getMsg("START_COMPRESS",
                      (public.format_date(times=stime), )))
    if os.path.exists(dfile):
        os.remove(dfile)
    # NOTE(review): spath/dfile are interpolated into a shell command —
    # paths containing single quotes would break/abuse this command.
    public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" +
                     dfile + "' " + self._exclude + " '" + dirname +
                     "' 2>{err_log} 1> /dev/null".format(
                         err_log=self._err_log))
    # Guard: if tar created nothing, getsize() on the original would raise.
    tar_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if tar_size < 1:
        self.echo_error(public.getMsg('ZIP_ERR'))
        self.echo_info(public.readFile(self._err_log))
        return False
    compression_time = str('{:.2f}'.format(time.time() - stime))
    self.echo_info(
        public.getMsg('COMPRESS_TIME',
                      (compression_time, str(public.to_size(tar_size)))))
    if siteName:
        self.echo_info(public.getMsg("WEBSITE_BACKUP_TO", (dfile, )))
    else:
        self.echo_info(public.getMsg("DIR_BACKUP_TO", (dfile, )))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    return dfile
def echo_end(self):
    """Print the banner that marks the end of a backup run."""
    bar = "=" * 90
    print(bar)
    print("☆备份完成[{}]".format(public.format_date()))
    print(bar)
    print("\n")
def echo_start(self):
    """Print the banner that marks the beginning of a backup run."""
    bar = "=" * 90
    print(bar)
    print("★开始备份[{}]".format(public.format_date()))
    print(bar)
def backup_database(self, db_name, dfile=None, save=3):
    """Dump `db_name` via mysqldump (Chinese-UI variant).

    Args:
        db_name: database name to export.
        dfile: optional explicit destination; auto-named when omitted.
        save: number of backups to keep.

    Returns:
        dfile on success; False on failure; None when the database is empty.
    """
    self.echo_start()
    if not dfile:
        fname = 'db_{}_{}.sql.gz'.format(
            db_name, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'database', fname)
    else:
        fname = os.path.basename(dfile)
    dpath = os.path.dirname(dfile)
    if not os.path.exists(dpath):
        os.makedirs(dpath, 384)  # 384 == 0o600
    import panelMysql
    if not self._db_mysql:
        self._db_mysql = panelMysql.panelMysql()
    d_tmp = self._db_mysql.query(
        "select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'"
        % db_name)
    p_size = self.map_to_list(d_tmp)[0][0]
    if p_size is None:  # fixed: identity comparison instead of `== None`
        self.echo_error('指定数据库 `{}` 没有任何数据!'.format(db_name))
        return
    character = public.get_database_character(db_name)
    self.echo_info('备份数据库:{}'.format(db_name))
    self.echo_info("数据库大小:{}".format(public.to_size(p_size)))
    self.echo_info("数据库字符集:{}".format(character))
    disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
    self.echo_info("分区{}可用磁盘空间为:{},可用Inode为:{}".format(
        disk_path, public.to_size(disk_free), disk_inode))
    if disk_path:
        if disk_free < p_size:
            self.echo_error(
                "目标分区可用的磁盘空间小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                    public.to_size(p_size)))
            return False
        if disk_inode < self._inode_min:
            self.echo_error(
                "目标分区可用的Inode小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                    self._inode_min))
            return False
    stime = time.time()
    self.echo_info("开始导出数据库:{}".format(public.format_date(times=stime)))
    if os.path.exists(dfile):
        os.remove(dfile)
    # mypass(True) injects credentials; guarantee they are cleared even if
    # ExecShell raises (the original skipped mypass(False) on exception).
    self.mypass(True)
    try:
        public.ExecShell(
            "/www/server/mysql/bin/mysqldump --default-character-set=" +
            character + " --force --hex-blob --opt " + db_name + " 2>" +
            self._err_log + "| gzip > " + dfile)
    finally:
        self.mypass(False)
    # A valid gzip'd dump is always at least a few hundred bytes.
    gz_size = os.path.getsize(dfile) if os.path.exists(dfile) else 0
    if gz_size < 400:
        self.echo_error("数据库导出失败!")
        self.echo_info(public.readFile(self._err_log))
        return False
    self.echo_info("数据库备份完成,耗时{:.2f}秒,压缩包大小:{}".format(
        time.time() - stime, public.to_size(gz_size)))
    if self._cloud:
        self.echo_info("正在上传到{},请稍候...".format(self._cloud._title))
        if self._cloud.upload_file(dfile, 'database'):
            self.echo_info("已成功上传到{}".format(self._cloud._title))
        else:
            self.echo_error('错误:文件上传失败,跳过本次备份!')
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    self.echo_info("数据库已备份到:{}".format(dfile))
    if os.path.exists(self._err_log):
        os.remove(self._err_log)
    pid = public.M('databases').where('name=?',
                                      (db_name, )).getField('id')
    pdata = {
        'type': '1',
        'name': fname,
        'pid': pid,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info("用户设置不保留本地备份,已删除{}".format(dfile))
    # Clean up surplus backups.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and filename NOT LIKE '%|%'",
            ('1', pid)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            'type=? and pid=? and filename LIKE "%{}%"'.format(
                self._cloud._name),
            ('1', pid)).field('id,name,filename').select()
    self.delete_old(backups, save, 'database')
    self.echo_end()
    return dfile
def echo_end(self):
    """Print the banner that marks the end of a backup run."""
    bar = "=" * 90
    print(bar)
    print("☆Backup completed[{}]".format(public.format_date()))
    print(bar)
    print("\n")
def echo_end(self):
    """Print the localized banner that marks the end of a backup run."""
    bar = "=" * 90
    footer = "☆" + public.getMsg('BACKUP_COMPLETED') + "[{}]".format(
        public.format_date())
    print(bar)
    print(footer)
    print(bar)
    print("\n")
def backup_path(self, spath, dfile=None, exclude=None, save=3):
    """Back up directory `spath` with failure notifications.

    Args:
        spath: source directory.
        dfile: optional destination tarball; auto-named when omitted.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        save: number of backups to keep.

    Returns:
        dfile on success, False on failure (a failure notification is
        sent via self.send_failture_notification).
    """
    if exclude is None:
        exclude = []
    error_msg = ""
    self.echo_start()
    if not os.path.exists(spath):
        error_msg = public.getMsg('BACKUP_DIR_NOT_EXIST', (spath, ))
        self.echo_error(error_msg)
        self.send_failture_notification(error_msg)
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    if not dfile:
        fname = 'path_{}_{}.tar.gz'.format(
            dirname, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'path', fname)
    else:
        # Fixed: fname was unbound when dfile was supplied, causing a
        # NameError when building the cloud record name below.
        fname = os.path.basename(dfile)
    if not self.backup_path_to(spath, dfile, exclude):
        if self._error_msg:
            error_msg = self._error_msg
        self.send_failture_notification(error_msg)
        return False
    if self._cloud:
        self.echo_info(
            public.getMsg("BACKUP_UPLOADING", (self._cloud._title, )))
        if self._cloud.upload_file(dfile, 'path'):
            self.echo_info(
                public.getMsg("BACKUP_UPLOAD_SUCCESS",
                              (self._cloud._title, )))
        else:
            # Prefer the cloud driver's own error message when available.
            if hasattr(self._cloud, "error_msg"):
                if self._cloud.error_msg:
                    error_msg = self._cloud.error_msg
            if not error_msg:
                error_msg = public.getMsg('BACKUP_UPLOAD_FAILED')
            self.echo_error(error_msg)
            if os.path.exists(dfile):
                os.remove(dfile)
            remark = "Backup to " + self._cloud._title
            self.send_failture_notification(error_msg, remark=remark)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    pdata = {
        'type': '2',
        'name': spath,
        'pid': 0,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        # The cron task's save_local flag overrides the panel-wide
        # _is_save_local setting.
        _not_save_local = True
        save_local = 0
        if self.cron_info:
            save_local = self.cron_info["save_local"]
        if save_local:
            _not_save_local = False
        else:
            if self._is_save_local:
                _not_save_local = False
        if _not_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info(public.getMsg("BACKUP_DEL", (dfile, )))
        else:
            self.echo_info(public.getMsg('KEEP_LOCAL'))
    # Prune surplus backups of this path.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename NOT LIKE '%|%'",
            ('2', 0, spath)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename LIKE '%{}%'".format(
                self._cloud._name),
            ('2', 0, spath)).field('id,name,filename').select()
    self.delete_old(backups, save, 'path')
    self.echo_end()
    return dfile
def backup_path(self, spath, dfile=None, exclude=None, save=3):
    """Back up directory `spath` (Chinese-UI variant).

    Args:
        spath: source directory.
        dfile: optional destination tarball; auto-named when omitted.
        exclude: optional exclusion patterns (defaults to []; `None`
            default replaces the original mutable `[]`).
        save: number of backups to keep.

    Returns:
        dfile on success, False on failure.
    """
    if exclude is None:
        exclude = []
    self.echo_start()
    if not os.path.exists(spath):
        self.echo_error('指定目录{}不存在!'.format(spath))
        return False
    if spath[-1] == '/':
        spath = spath[:-1]
    dirname = os.path.basename(spath)
    if not dfile:
        fname = 'path_{}_{}.tar.gz'.format(
            dirname, public.format_date("%Y%m%d_%H%M%S"))
        dfile = os.path.join(self._path, 'path', fname)
    else:
        # Fixed: fname was unbound when dfile was supplied, causing a
        # NameError when building the cloud record name below.
        fname = os.path.basename(dfile)
    if not self.backup_path_to(spath, dfile, exclude):
        return False
    if self._cloud:
        self.echo_info("正在上传到{},请稍候...".format(self._cloud._title))
        if self._cloud.upload_file(dfile, 'path'):
            self.echo_info("已成功上传到{}".format(self._cloud._title))
        else:
            self.echo_error('错误:文件上传失败,跳过本次备份!')
            if os.path.exists(dfile):
                os.remove(dfile)
            return False
    filename = dfile
    if self._cloud:
        # Cloud records encode "<path>|<provider>|<basename>".
        filename = dfile + '|' + self._cloud._name + '|' + fname
    pdata = {
        'type': '2',
        'name': spath,
        'pid': 0,
        'filename': filename,
        'addtime': public.format_date(),
        'size': os.path.getsize(dfile)
    }
    public.M('backup').insert(pdata)
    if self._cloud:
        if not self._is_save_local:
            if os.path.exists(dfile):
                os.remove(dfile)
            self.echo_info("用户设置不保留本地备份,已删除{}".format(dfile))
    # Prune surplus backups of this path.
    if not self._cloud:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename NOT LIKE '%|%'",
            ('2', 0, spath)).field('id,name,filename').select()
    else:
        backups = public.M('backup').where(
            "type=? and pid=? and name=? and filename LIKE '%{}%'".format(
                self._cloud._name),
            ('2', 0, spath)).field('id,name,filename').select()
    self.delete_old(backups, save, 'path')
    self.echo_end()
    return dfile