Code Example #1
    def backup_path_to(self,spath,dfile,exclude = [],siteName = None):
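        # Back up the directory at spath into the gzip tarball dfile, applying any
        # exclude rules; free disk space and inodes on the target partition are
        # checked first. Returns the archive path on success, or False on failure.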
        if not os.path.exists(spath):
            self.echo_error('The specified directory {} does not exist!'.format(spath))
            return False

        if spath[-1] == '/':
            spath = spath[:-1]

        dirname = os.path.basename(spath)
        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath,384)  # 384 == 0o600
        
        p_size = public.get_path_size(spath)
        self.get_exclude(exclude)
        exclude_config = self._exclude
        if not self._exclude:
            exclude_config = "Not set"
        
        if siteName:
            self.echo_info('Backup site: {}'.format(siteName))
            self.echo_info('Website root directory: {}'.format(spath))
        else:
            self.echo_info('Backup directory: {}'.format(spath))
        
        self.echo_info("Directory size: {}".format(public.to_size(p_size)))
        self.echo_info('Exclusion setting: {}'.format(exclude_config))
        disk_path,disk_free,disk_inode = self.get_disk_free(dfile)
        self.echo_info("Partition {} available disk space is: {}, available Inode is: {}".format(disk_path,public.to_size(disk_free),disk_inode))
        if disk_path:
            if disk_free < p_size:
                self.echo_error("The available disk space of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!".format(public.to_size(p_size)))
                return False

            if disk_inode < self._inode_min:
                self.echo_error("The available Inode of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!".format(self._inode_min))
                return False

        stime = time.time()
        self.echo_info("Start compressing files: {}".format(public.format_date(times=stime)))
        if os.path.exists(dfile):
            os.remove(dfile)
        public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" + dfile + "' " + self._exclude + " '" + dirname + "' 2>{err_log} 1> /dev/null".format(err_log = self._err_log))
        tar_size = os.path.getsize(dfile)
        if tar_size < 1:
            self.echo_error("Data compression failed")
            self.echo_info(public.readFile(self._err_log))
            return False
        self.echo_info("File compression completed, took {:.2f} seconds, compressed package size: {}".format(time.time() - stime,public.to_size(tar_size)))
        if siteName:
            self.echo_info("Site backed up to: {}".format(dfile))
        else:
            self.echo_info("Directory has been backed up to: {}".format(dfile))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)
        return dfile
Code Example #2
    def get_task_log(self, id, task_type, num=5):
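        # Return progress for a background task. For download tasks (task_type == '1')
        # the wget log is parsed into a dict (name/total/used/pre/speed/time); for
        # other task types the last `num` log lines are returned with <br> line breaks.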
        log_file = self.__task_path + str(id) + '.log'
        if not os.path.exists(log_file):
            data = ''
            if(task_type == '1'):
                data = {'name': '下载文件', 'total': 0, 'used': 0,
                        'pre': 0, 'speed': 0, 'time': 0}
            return data

        if(task_type == '1'):
            total = 0
            if not os.path.exists(self.down_log_total_file):
                # Read the head of the wget log once to capture the Content-Length.
                with open(log_file, 'r') as f:
                    head = f.read(4096)
                content_length = re.findall(r"Length:\s+(\d+)", head)
                if content_length:
                    total = int(content_length[0])
                    public.writeFile(self.down_log_total_file,
                                     content_length[0])
            else:
                total = public.readFile(self.down_log_total_file)
                if not total:
                    total = 0
                total = int(total)

            filename = public.M(self.__table).where(
                'id=?', (id,)).getField('shell')

            # Parse the wget progress line at the end of the log:
            # downloaded size, percent complete, current speed, time remaining.
            speed_tmp = public.ExecShell("tail -n 2 {}".format(log_file))[0]
            speed_total = re.findall(
                r"([\d\.]+[BbKkMmGg]).+\s+(\d+)%\s+([\d\.]+[KMBGkmbg])\s+(\w+[sS])", speed_tmp)
            if not speed_total:
                data = {'name': '下载文件{}'.format(
                    filename), 'total': 0, 'used': 0, 'pre': 0, 'speed': 0, 'time': 0}
            else:
                speed_total = speed_total[0]
                used = speed_total[0]
                # remaining time, e.g. '28s' -> '28秒'
                u_time = speed_total[3].replace(
                    'h', '小时').replace('m', '分').replace('s', '秒')
                if speed_total[0].lower().find('k') != -1:
                    used = public.to_size(
                        float(speed_total[0].lower().replace('k', '')) * 1024)
                data = {'name': '下载文件{}'.format(
                    filename), 'total': total, 'used': used, 'pre': speed_total[1],
                    'speed': speed_total[2], 'time': u_time}
        else:
            data = public.ExecShell("tail -n {} {}".format(num, log_file))[0]
            if type(data) == list:
                return ''
            data = data.replace('\x08', '').replace('\n', '<br>')
        return data
Code Example #3
    def get_import_bak_list(self, args):  # Get the list of imported files
        args = self.processing_parameter(args)  # Process the parameters passed from the front end
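        # Collect the name, creation time, and size of every file under the upload directory in self.db_back_dir.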
        file_list = os.listdir(os.path.join(self.db_back_dir, "upload"))
        file_list_json = []
        for i in file_list:
            file_path = os.path.join(os.path.join(self.db_back_dir, "upload"), i)
            file_info = os.stat(file_path)
            create_time = file_info.st_ctime
            time_local = time.localtime(int(create_time))
            create_time = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
            file_size = file_info.st_size
            file_size = public.to_size(file_size)
            file_list_json.append({"filename": i, "create_time": create_time, "file_size": file_size, "file_path": file_path})

        # Return data to the front end
        return {'data': file_list_json, "status": True}
Code Example #4
    def get_pgsql_bak_list(self, args):  # Get the list of database backup files
        args = self.processing_parameter(args)  # Process the parameters passed from the front end
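        # Keep only backups whose name segment before the first '_' starts with args.database.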
        file_list = os.listdir(self.db_back_dir)
        file_list_json = []
        for i in file_list:
            if i.split("_")[0].startswith(args.database):
                file_path = os.path.join(self.db_back_dir, i)
                file_info = os.stat(file_path)
                create_time = file_info.st_ctime
                time_local = time.localtime(int(create_time))
                create_time = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
                file_size = file_info.st_size
                file_size = public.to_size(file_size)
                file_list_json.append({"filename": i, "create_time": create_time, "file_size": file_size, "file_path": file_path})

        # Return data to the front end
        return {'data': file_list_json, "status": True}
Code Example #5
    def backup_database(self,db_name,dfile = None,save=3):
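        # Dump db_name with mysqldump and gzip it to dfile, optionally upload the
        # archive to the configured cloud storage, record the backup in the panel
        # database, and prune old copies beyond `save`.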
        self.echo_start()
        if not dfile:
            fname = 'db_{}_{}.sql.gz'.format(db_name,public.format_date("%Y%m%d_%H%M%S"))
            dfile = os.path.join(self._path,'database',fname)
        else:
            fname = os.path.basename(dfile)
        
        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath,384)

        import panelMysql
        if not self._db_mysql:self._db_mysql = panelMysql.panelMysql()
        d_tmp = self._db_mysql.query("select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'" % db_name)
        p_size = self.map_to_list(d_tmp)[0][0]
        
        if p_size is None:
            self.echo_error('The specified database [ {} ] has no data!'.format(db_name))
            return False

        character = public.get_database_character(db_name)

        self.echo_info('Backup database:{}'.format(db_name))
        self.echo_info("Database size: {}".format(public.to_size(p_size)))
        self.echo_info("Database character set: {}".format(character))
        disk_path,disk_free,disk_inode = self.get_disk_free(dfile)
        self.echo_info("Partition {} available disk space is: {}, available Inode is: {}".format(disk_path,public.to_size(disk_free),disk_inode))
        if disk_path:
            if disk_free < p_size:
                self.echo_error("The available disk space of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!".format(public.to_size(p_size)))
                return False

            if disk_inode < self._inode_min:
                self.echo_error("The available Inode of the target partition is less than {}, and the backup cannot be completed. Please increase the disk capacity or change the default backup directory on the settings page!".format(self._inode_min))
                return False
        
        stime = time.time()
        self.echo_info("Start exporting database: {}".format(public.format_date(times=stime)))
        if os.path.exists(dfile):
            os.remove(dfile)
        self.mypass(True)
        public.ExecShell("/www/server/mysql/bin/mysqldump --default-character-set="+ character +" --force --hex-blob --opt " + db_name + " 2>"+self._err_log+"| gzip > " + dfile)
        self.mypass(False)
        gz_size = os.path.getsize(dfile)
        if gz_size < 400:
            self.echo_error("Database export failed!")
            self.echo_info(public.readFile(self._err_log))
            return False
        self.echo_info("Database backup completed, took {:.2f} seconds, compressed package size: {}".format(time.time() - stime,public.to_size(gz_size)))
        if self._cloud:
            self.echo_info("Uploading to {}, please wait ...".format(self._cloud._title))
            if self._cloud.upload_file(dfile, 'database'):
                self.echo_info("Successfully uploaded to {}".format(self._cloud._title))
            else:
                self.echo_error('Error: File upload failed, skip this backup!')
                if os.path.exists(dfile):
                    os.remove(dfile)
                return False

        filename = dfile
        if self._cloud:
            filename = self._cloud._name
        self.echo_info("Database has been backed up to: {}".format(dfile))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)

        pid = public.M('databases').where('name=?',(db_name)).getField('id')
        pdata = {
            'type': '1',
            'name': fname,
            'pid': pid,
            'filename': filename,
            'addtime': public.format_date(),
            'size': os.path.getsize(dfile)
        }
        public.M('backup').insert(pdata)


        if self._cloud:
            if not self._is_save_local:
                if os.path.exists(dfile):
                    os.remove(dfile)
                    self.echo_info("User settings do not retain local backups, deleted {}".format(dfile))

        # Clean up redundant backups
        if not self._cloud:
            backups = public.M('backup').where("type=? and pid=? and filename LIKE '%/%'",('1',pid)).field('id,name,filename').select()
        else:
            backups = public.M('backup').where('type=? and pid=? and filename=?',('1',pid,filename)).field('id,name,filename').select()

        self.echo_info(str(backups))
        self.delete_old(backups,save,'database')
        self.echo_end()
        return dfile
Code Example #6
    def backup_database(self,db_name,dfile = None,save=3):
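        # Variant of backup_database that localizes messages via public.getMsg(),
        # authenticates mysqldump through the MYSQL_PWD environment variable, and
        # sends a failure notification whenever a step aborts the backup.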
        self.echo_start()
        if not dfile:
            fname = 'db_{}_{}.sql.gz'.format(db_name,public.format_date("%Y%m%d_%H%M%S"))
            dfile = os.path.join(self._path,'database',fname)
        else:
            fname = os.path.basename(dfile)
        
        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath,384)

        error_msg = ""
        import panelMysql
        if not self._db_mysql:self._db_mysql = panelMysql.panelMysql()
        d_tmp = self._db_mysql.query("select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'" % db_name)
        try:
            p_size = self.map_to_list(d_tmp)[0][0]
        except:
            error_msg = public.getMsg('DB_CONN_ERR')
            self.echo_error(error_msg)
            self.send_failture_notification(error_msg)
            return False
        
        if p_size is None:
            error_msg = public.getMsg('DB_BACKUP_ERR',(db_name,))
            self.echo_error(error_msg)
            self.send_failture_notification(error_msg)
            return False

        character = public.get_database_character(db_name)

        self.echo_info(public.getMsg('DB_BACKUP',(db_name,)))
        self.echo_info(public.getMsg("DB_SIZE",(public.to_size(p_size),)))
        self.echo_info(public.getMsg("DB_CHARACTER",(character,)))
        disk_path,disk_free,disk_inode = self.get_disk_free(dfile)
        self.echo_info(public.getMsg(
            "PARTITION_INFO",(
                disk_path,str(public.to_size(disk_free)),str(disk_inode)
            )
        ))
        if disk_path:
            if disk_free < p_size:
                error_msg = public.getMsg("PARTITION_LESS_THEN",(
                        str(public.to_size(p_size)),
                    ))
                self.echo_error(error_msg)
                self.send_failture_notification(error_msg)
                return False

            if disk_inode < self._inode_min:
                error_msg = public.getMsg("INODE_LESS_THEN",(self._inode_min,))
                self.echo_error(error_msg)
                self.send_failture_notification(error_msg)
                return False
        
        stime = time.time()
        self.echo_info(public.getMsg("EXPORT_DB",(public.format_date(times=stime),)))
        if os.path.exists(dfile):
            os.remove(dfile)
        #self.mypass(True)
        try:
            password = public.M('config').where('id=?',(1,)).getField('mysql_root')
            os.environ["MYSQL_PWD"] = password
            backup_cmd = "/www/server/mysql/bin/mysqldump -E -R --default-character-set="+ character +" --force --hex-blob --opt " + db_name + " -u root" + " 2>"+self._err_log+"| gzip > " + dfile
            public.ExecShell(backup_cmd)
        except Exception as e:
            raise
        finally:
            os.environ["MYSQL_PWD"] = ""
        #public.ExecShell("/www/server/mysql/bin/mysqldump --default-character-set="+ character +" --force --hex-blob --opt " + db_name + " 2>"+self._err_log+"| gzip > " + dfile)
        #self.mypass(False)
        gz_size = os.path.getsize(dfile)
        if gz_size < 400:
            error_msg = public.getMsg("EXPORT_DB_ERR")
            self.echo_error(error_msg)
            self.send_failture_notification(error_msg)
            self.echo_info(public.readFile(self._err_log))
            return False
        compressed_time = str('{:.2f}'.format(time.time() - stime))
        self.echo_info(
            public.getMsg("COMPRESS_TIME",(str(compressed_time),
            str(public.to_size(gz_size))
            ))
        )
        if self._cloud:
            self.echo_info(public.getMsg("BACKUP_UPLOADING",(self._cloud._title,)))
            if self._cloud.upload_file(dfile, 'database'):
                self.echo_info(public.getMsg("BACKUP_UPLOAD_SUCCESS",(self._cloud._title,)))
            else:
                if hasattr(self._cloud, "error_msg"):
                    if self._cloud.error_msg:
                        error_msg = self._cloud.error_msg
                if not error_msg:
                    error_msg = public.getMsg('BACKUP_UPLOAD_FAILED')
                self.echo_error(error_msg)
                if os.path.exists(dfile):
                    os.remove(dfile)

                remark = "Backup to " + self._cloud._title
                self.send_failture_notification(error_msg, remark=remark)
                return False

        filename = dfile
        if self._cloud:
            filename = dfile + '|' + self._cloud._name + '|' + fname
        self.echo_info(public.getMsg("DB_BACKUP_TO",(dfile,)))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)

        pid = public.M('databases').where('name=?',(db_name)).getField('id')
        pdata = {
            'type': '1',
            'name': fname,
            'pid': pid,
            'filename': filename,
            'addtime': public.format_date(),
            'size': os.path.getsize(dfile)
        }
        public.M('backup').insert(pdata)

        if self._cloud:
            _not_save_local = True
            save_local = 0
            if self.cron_info:
                save_local = self.cron_info["save_local"]
            if save_local:
                _not_save_local = False
            else:
                if self._is_save_local:
                    _not_save_local = False

                    pdata = {
                        'type': '1',
                        'name': fname,
                        'pid': pid,
                        'filename': dfile,
                        'addtime': public.format_date(),
                        'size': os.path.getsize(dfile)
                    }
                    public.M('backup').insert(pdata)

            if _not_save_local:
                if os.path.exists(dfile):
                    os.remove(dfile)
                    self.echo_info(public.getMsg("BACKUP_DEL",(dfile,)))
            else:
                self.echo_info(public.getMsg('KEEP_LOCAL'))

        # Clean up redundant backups
        if not self._cloud:
            backups = public.M('backup').where("type=? and pid=? and filename NOT LIKE '%|%'",('1',pid)).field('id,name,filename').select()
        else:
            backups = public.M('backup').where('type=? and pid=? and filename LIKE "%{}%"'.format(self._cloud._name),('1',pid)).field('id,name,filename').select()
        self.delete_old(backups,save,'database')
        self.echo_end()
        return dfile
Code Example #7
    def backup_path_to(self,spath,dfile,exclude = [],siteName = None):
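        # Variant of backup_path_to that localizes messages via public.getMsg() and
        # subtracts the excluded paths when measuring the source directory size.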
        if not os.path.exists(spath):
            self.echo_error(public.getMsg('BACKUP_DIR_NOT_EXIST',(spath,)))
            return False

        if spath[-1] == '/':
            spath = spath[:-1]

        dirname = os.path.basename(spath)
        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath,384)

        self.get_exclude(exclude)
        exclude_config = self._exclude
        exclude_list = self.get_exclude_list(exclude)
        p_size = public.get_path_size(spath, exclude=exclude_list)
        if not self._exclude:
            exclude_config = "Not set"
        
        if siteName:
            self.echo_info(public.getMsg('BACKUP_SITE',(siteName,)))
            self.echo_info(public.getMsg('WEBSITE_DIR',(spath,)))
        else:
            self.echo_info(public.getMsg('BACKUP_DIR',(spath,)))
        
        self.echo_info(public.getMsg(
            "DIR_SIZE",
            (str(public.to_size(p_size)),)
        ))
        self.echo_info(public.getMsg('BACKUP_EXCLUSION',(exclude_config,)))
        disk_path,disk_free,disk_inode = self.get_disk_free(dfile)
        self.echo_info(public.getMsg(
            "PARTITION_INFO",
            (disk_path,str(public.to_size(disk_free)),str(disk_inode))
        ))
        if disk_path:
            if disk_free < p_size:
                self.echo_error(public.getMsg(
                    "PARTITION_LESS_THEN",
                    (str(public.to_size(p_size)),)
                ))
                return False

            if disk_inode < self._inode_min:
                self.echo_error(public.getMsg(
                    "INODE_LESS_THEN",
                    (str(self._inode_min),)
                ))
                return False

        stime = time.time()
        self.echo_info(public.getMsg("START_COMPRESS",(public.format_date(times=stime),)))
        if os.path.exists(dfile):
            os.remove(dfile)
        public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" + dfile + "' " + self._exclude + " '" + dirname + "' 2>{err_log} 1> /dev/null".format(err_log = self._err_log))
        tar_size = os.path.getsize(dfile)
        if tar_size < 1:
            self.echo_error(public.getMsg('ZIP_ERR'))
            self.echo_info(public.readFile(self._err_log))
            return False
        compression_time = str('{:.2f}'.format(time.time() - stime))
        self.echo_info(public.getMsg(
            'COMPRESS_TIME',
            (compression_time,str(public.to_size(tar_size)))
        ))
        if siteName:
            self.echo_info(public.getMsg("WEBSITE_BACKUP_TO",(dfile,)))
        else:
            self.echo_info(public.getMsg("DIR_BACKUP_TO",(dfile,)))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)
        return dfile
Code Example #8
    def backup_database(self, db_name, dfile=None, save=3):
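        # Same backup_database flow with Chinese status messages: dump with mysqldump,
        # gzip to dfile, optionally upload to cloud storage, record the backup, and
        # prune old copies beyond `save`.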
        self.echo_start()
        if not dfile:
            fname = 'db_{}_{}.sql.gz'.format(
                db_name, public.format_date("%Y%m%d_%H%M%S"))
            dfile = os.path.join(self._path, 'database', fname)
        else:
            fname = os.path.basename(dfile)

        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath, 384)

        import panelMysql
        if not self._db_mysql: self._db_mysql = panelMysql.panelMysql()
        d_tmp = self._db_mysql.query(
            "select sum(DATA_LENGTH)+sum(INDEX_LENGTH) from information_schema.tables where table_schema='%s'"
            % db_name)
        p_size = self.map_to_list(d_tmp)[0][0]

        if p_size is None:
            self.echo_error('指定数据库 `{}` 没有任何数据!'.format(db_name))
            return False

        character = public.get_database_character(db_name)

        self.echo_info('备份数据库:{}'.format(db_name))
        self.echo_info("数据库大小:{}".format(public.to_size(p_size)))
        self.echo_info("数据库字符集:{}".format(character))
        disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
        self.echo_info("分区{}可用磁盘空间为:{},可用Inode为:{}".format(
            disk_path, public.to_size(disk_free), disk_inode))
        if disk_path:
            if disk_free < p_size:
                self.echo_error(
                    "目标分区可用的磁盘空间小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                        public.to_size(p_size)))
                return False

            if disk_inode < self._inode_min:
                self.echo_error(
                    "目标分区可用的Inode小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                        self._inode_min))
                return False

        stime = time.time()
        self.echo_info("开始导出数据库:{}".format(public.format_date(times=stime)))
        if os.path.exists(dfile):
            os.remove(dfile)
        self.mypass(True)
        public.ExecShell(
            "/www/server/mysql/bin/mysqldump --default-character-set=" +
            character + " --force --hex-blob --opt " + db_name + " 2>" +
            self._err_log + "| gzip > " + dfile)
        self.mypass(False)
        gz_size = os.path.getsize(dfile)
        if gz_size < 400:
            self.echo_error("数据库导出失败!")
            self.echo_info(public.readFile(self._err_log))
            return False
        self.echo_info("数据库备份完成,耗时{:.2f}秒,压缩包大小:{}".format(
            time.time() - stime, public.to_size(gz_size)))

        if self._cloud:
            self.echo_info("正在上传到{},请稍候...".format(self._cloud._title))
            if self._cloud.upload_file(dfile, 'database'):
                self.echo_info("已成功上传到{}".format(self._cloud._title))
            else:
                self.echo_error('错误:文件上传失败,跳过本次备份!')
                if os.path.exists(dfile):
                    os.remove(dfile)
                return False

        filename = dfile
        if self._cloud:
            filename = dfile + '|' + self._cloud._name + '|' + fname

        self.echo_info("数据库已备份到:{}".format(dfile))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)

        pid = public.M('databases').where('name=?', (db_name)).getField('id')

        pdata = {
            'type': '1',
            'name': fname,
            'pid': pid,
            'filename': filename,
            'addtime': public.format_date(),
            'size': os.path.getsize(dfile)
        }
        public.M('backup').insert(pdata)

        if self._cloud:
            if not self._is_save_local:
                if os.path.exists(dfile):
                    os.remove(dfile)
                    self.echo_info("用户设置不保留本地备份,已删除{}".format(dfile))

        # Clean up redundant backups
        if not self._cloud:
            backups = public.M('backup').where(
                "type=? and pid=? and filename NOT LIKE '%|%'",
                ('1', pid)).field('id,name,filename').select()
        else:
            backups = public.M('backup').where(
                'type=? and pid=? and filename LIKE "%{}%"'.format(
                    self._cloud._name),
                ('1', pid)).field('id,name,filename').select()

        self.delete_old(backups, save, 'database')
        self.echo_end()
        return dfile
Code Example #9
    def backup_path_to(self, spath, dfile, exclude=[], siteName=None):
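        # Same backup_path_to flow with Chinese status messages: verify free space and
        # inodes, tar the directory, and report the resulting archive path.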
        if not os.path.exists(spath):
            self.echo_error('指定目录{}不存在!'.format(spath))
            return False

        if spath[-1] == '/':
            spath = spath[:-1]

        dirname = os.path.basename(spath)
        dpath = os.path.dirname(dfile)
        if not os.path.exists(dpath):
            os.makedirs(dpath, 384)

        p_size = public.get_path_size(spath)
        self.get_exclude(exclude)
        exclude_config = self._exclude
        if not self._exclude:
            exclude_config = "未设置"

        if siteName:
            self.echo_info('备份网站:{}'.format(siteName))
            self.echo_info('网站根目录:{}'.format(spath))
        else:
            self.echo_info('备份目录:{}'.format(spath))

        self.echo_info("目录大小:{}".format(public.to_size(p_size)))
        self.echo_info('排除设置:{}'.format(exclude_config))
        disk_path, disk_free, disk_inode = self.get_disk_free(dfile)
        self.echo_info("分区{}可用磁盘空间为:{},可用Inode为:{}".format(
            disk_path, public.to_size(disk_free), disk_inode))
        if disk_path:
            if disk_free < p_size:
                self.echo_error(
                    "目标分区可用的磁盘空间小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                        public.to_size(p_size)))
                return False

            if disk_inode < self._inode_min:
                self.echo_error(
                    "目标分区可用的Inode小于{},无法完成备份,请增加磁盘容量,或在设置页面更改默认备份目录!".format(
                        self._inode_min))
                return False

        stime = time.time()
        self.echo_info("开始压缩文件:{}".format(public.format_date(times=stime)))
        if os.path.exists(dfile):
            os.remove(dfile)
        public.ExecShell("cd " + os.path.dirname(spath) + " && tar zcvf '" +
                         dfile + "' " + self._exclude + " '" + dirname +
                         "' 2>{err_log} 1> /dev/null".format(
                             err_log=self._err_log))
        tar_size = os.path.getsize(dfile)
        if tar_size < 1:
            self.echo_error("数据压缩失败")
            self.echo_info(public.readFile(self._err_log))
            return False
        self.echo_info("文件压缩完成,耗时{:.2f}秒,压缩包大小:{}".format(
            time.time() - stime, public.to_size(tar_size)))
        if siteName:
            self.echo_info("网站已备份到:{}".format(dfile))
        else:
            self.echo_info("目录已备份到:{}".format(dfile))
        if os.path.exists(self._err_log):
            os.remove(self._err_log)
        return dfile