def has_space(self, directory):
    """
    Return True when this device can hold *directory*.

    The space currently occupied by the existing target copy
    (``directory.target``) would be reclaimed by the transfer, so it
    counts toward the available room: the test is
    ``free space + target size > source size``.
    """
    stats = os.statvfs(self.path)
    free_bytes = stats.f_frsize * stats.f_bavail
    reclaimable = get_directory_size(directory.target)
    required = get_directory_size(directory.source)
    return (free_bytes + reclaimable) > required
def main(): dir = '/mfs/backup/mysql/' pattern = os.path.join(dir, '*/snapshots/*') # pattern = os.path.join(dir, '*/binlogs/*') # pattern = os.path.join('/mfs/user/dba/recovery/*') total_size = 0 for i in glob.glob(pattern): info = os.stat(i) t1 = time.gmtime(info.st_ctime) t2 = time.strftime('%Y-%m-%d', t1) year, month, day =t2.split('-') time1 = datetime.datetime(int(year),int(month),int(day)) m1 = time.gmtime() m2 = time.strftime('%Y-%m-%d', m1) year, month, day =m2.split('-') time2 = datetime.datetime(int(year),int(month),int(day)) days = (time2 - time1).days # print time1, time2, days if days>20: total_size = total_size + get_directory_size(i)/1024**3 print i, time1, total_size try: pass shutil.rmtree(i) # os.remove(i) except Exception as exc: print i, exc
def main(): dir = '/backup/mysql/mysql-longterm-backups' pattern = os.path.join(dir, '*/*') cn = 0 for j, i in sorted([(l.split('/')[-1], l) for l in glob.glob(pattern)]): try: a = i.split('/') backup_start_time_str = a[-1].split('-')[0] farm = a[-2] origin_id = 0 backup_start_time = datetime.datetime.strptime(backup_start_time_str, "%Y.%m.%d.%H.%M.%S") backup_size = int(get_directory_size(i)//1024**2) snapshot_dir = i origin_dir = None LongtermInfo.add(farm, origin_id, backup_start_time, backup_size, snapshot_dir, origin_dir) cn += 1 print cn, i, backup_size except Exception as exc: cn += 1 print cn, i print i, exc
def home():
    """Build the template context for the landing page.

    Reads display settings from ``config``, the current size of the
    ``uploads`` directory, and the total post count.
    """
    nsfw_enabled = ('True' == config['threads.show_nsfw'])
    uploads_size = get_directory_size('uploads')
    post_count = Post.select().count()
    return dict(
        title=config['app.title'],
        welcome_message=config['app.welcome_message'],
        show_nsfw=nsfw_enabled,
        active_content_size=uploads_size,
        number_of_messages=post_count,
        basename=basename,
    )
'Ableton Folder Info/Properties.cfg') as fp: metadata = fp.read() for match in re.finditer('PackUniqueID = \"([^\"]+)\"', metadata, re.DOTALL): this_pack["packuniqueid"] = match.group(1) for match in re.finditer('PackDisplayName = \"([^\"]+)\"', metadata, re.DOTALL): this_pack["packdisplayname"] = match.group(1) for match in re.finditer('PackVendor = \"([^\"]+)\"', metadata, re.DOTALL): this_pack["packvendor"] = match.group(1) for match in re.finditer('PackRevision = ([^;]+);', metadata, re.DOTALL): this_pack["packrevision"] = match.group(1) this_pack["total_size"] = utils.get_size_format( utils.get_directory_size(pack.path)) with os.scandir(pack.path) as folders: for folder in folders: if folder.is_dir( ) and folder.name != "Ableton Folder Info" and folder.name != "Ableton Project Info" and folder.name != "Lessons": for root, d_names, f_names in os.walk(folder.path): if f_names: for f in f_names: this_pack["items"].append(f) factory_packs["packs"].append(this_pack) print(json.dumps(factory_packs))
def main():
    """Run TokuMX hot backups for every mongo farm this host serves.

    For each ``*-mongo`` alias of this host (or only ``--farm`` when
    given), the function verifies this is the right node, performs a hot
    backup into ``<dest>/<farm>/hot_backup/<timestamp>``, records the
    outcome in ``MongodbBackupInfo``, and — for scheduled runs only —
    checks that a recent backup exists and prunes expired ones.

    Returns a process exit code: 0 on success, 1 when any farm failed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dest', default='/mfs/backup/mongodb',
                        help='Backup destination, default /mfs/backup/mongodb')
    parser.add_argument('-f', '--farm', dest='farm', metavar='FARM',
                        help='The mongo replica set name, e.g. audit/shuai/biz/alg and so on')
    parser.add_argument('-l', '--bwlimit', type=int, default=40000,
                        help='Limit backup speed(KB/s).default: 40000')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--force-use-primary', action='store_true',
                        help='Force to backup on primary node')
    args = parser.parse_args()
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags; the config file is operator-controlled, but prefer
    # yaml.safe_load if the format allows it.
    with open(CONF_FILE) as conf_fp:
        conf_dict = yaml.load(conf_fp)
    _, aliases, _ = socket.gethostbyname_ex(socket.gethostname())
    if args.farm:
        if not am_i_the_right_instance(args.farm + '-mongo', likely=1):
            if args.verbose:
                print >> sys.stderr, 'I am not "{}", exit...'.format(args.farm)
            return 1
        aliases = [args.farm + '-mongo', ]
    # A manual run is one where the operator named an explicit farm.
    manually = 1 if args.farm else 0
    returncode = 0
    _returncode = 0
    for alias in aliases:
        if alias.find('-mongo') == -1:
            continue
        farm = alias.split('-')[0]
        if farm not in conf_dict['replsets']:
            if args.verbose:
                print >> sys.stderr, 'No such mongo replica set(farm): %s.' % farm
            continue
        port = conf_dict['replsets'][farm]['port']
        user = conf_dict['common']['user']
        password = conf_dict['common']['password']
        host = socket.gethostname()
        force_use_primary = 1 if args.force_use_primary else 0
        if force_use_primary and args.verbose:
            print >> sys.stdout, 'Force to backup on %s\'s primary node!' % farm
        if check_is_right_node(port, force_use_primary, manually):
            pass
        elif manually and args.verbose:
            # Message fixes: a space was missing after "job!" and
            # "bakcup" was misspelled in the original.
            msg = ('This is %s\'s primary node, will not do the backup job! '
                   'If you really want to backup on primary node, '
                   'please add --force-use-primary args.')
            print >> sys.stderr, msg % farm
            return 1
        else:
            _returncode = 1
            continue
        time_str = time.strftime('%Y%m%d%H%M%S')
        backup_dest = os.path.join(args.dest, farm, 'hot_backup', time_str)
        ensure_dir(backup_dest)
        # Before starting the backup job, insert the basic information into
        # the database so a crash mid-backup still leaves a trace.
        info_id = MongodbBackupInfo.add(farm, None, None, host, backup_dest,
                                        'hot_backup')
        backup_obj = MongodbBackupInfo.get(info_id)
        if not path_chown(backup_dest, BACKUP_USER):
            _returncode = 1
            continue
        if not tokumx_hot_backup(user, password, host, port, args.bwlimit,
                                 backup_dest, args.verbose):
            print >> sys.stderr, 'Hot backup on %s(%s) failed.' % (farm, host)
            _returncode = 1
            backup_obj.set_backup_success(0)
            continue
        backup_obj.set_backup_success(1)
        size = get_directory_size(backup_dest)
        backup_obj.set_backup_size_snap_used_size(size, 0)
        if _returncode > returncode:
            returncode = _returncode
        # Manual runs stop here: no retention check or snapshot pruning.
        if args.farm:
            return returncode
        # Warn when no backup exists for the farm within WARNING_DAYS days.
        if not MongodbBackupInfo.check_backup_by_farm(farm, WARNING_DAYS):
            _returncode = 1
            msg = 'There is no backup within %s days for %s'
            print >> sys.stderr, msg % (WARNING_DAYS, farm)
        # Remove every backup directory past its retention period.
        delete_list = MongodbBackupInfo.get_backup_dir_will_delete(
            farm, DELETE_DAYS)
        if not delete_list:
            continue
        for del_dict in delete_list:
            try:
                shutil.rmtree(del_dict['backup_dir'])
                del_backup = MongodbBackupInfo.get(del_dict['id'])
                del_backup.set_is_delete()
            except OSError as ex:
                if ex.errno == errno.ENOENT:
                    # Directory already gone: just mark it deleted.
                    del_backup = MongodbBackupInfo.get(del_dict['id'])
                    del_backup.set_is_delete()
                    continue
                _returncode = 1
                # BUG FIX: the original formatted this message with the
                # builtin ``dir`` instead of the directory being removed.
                print >> sys.stderr, 'Remove mongodb backup %s fail!' % del_dict['backup_dir'], ex
            except Exception as ex:
                _returncode = 1
                print >> sys.stderr, 'Remove mongodb backup %s fail!' % del_dict['backup_dir'], ex
    if _returncode > returncode:
        returncode = _returncode
    return returncode
def main():
    """Deploy a MongoDB (TokuMX) instance in Docker from a backup snapshot.

    Optionally rsyncs a backup set (explicit ``--source`` or the latest
    snapshot under ``--snapshot-store``) into ``<data-root>/mongo-<farm>``
    after verifying there is enough space, probes the replica set's three
    nodes for ``maxConns``/``cacheSize``, and launches the container.

    Returns a process exit code: 0 on success, 1 on any failure.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i', '--docker-image', default='dba-registry:5000/tokumx-2.0:online',
        help='which docker image to use. default: dba-registry:5000/tokumx-2.0:online')
    parser.add_argument('-f', '--farm', required=True,
                        help='MongoDB replSet name.')
    parser.add_argument(
        '--rsync-data', action='store_true',
        help='really need to rsync backup data(source) to this server(destination)? default: False')
    parser.add_argument('--bwlimit', type=int, default=30000,
                        help='default: 30000')
    parser.add_argument('-s', '--source',
                        help='Which backup set to use (full path is needed)')
    parser.add_argument('-l', '--use-latest-snapshot', action='store_true',
                        help='Use latest backupSet')
    parser.add_argument('--snapshot-store', default='/mfs/backup/mongodb')
    parser.add_argument('--data-root', default='/data', help='default: /data')
    parser.add_argument('--log-root', default='/data', help='default: /data')
    parser.add_argument('-v', '--verbose', action='count', default=0)
    args = parser.parse_args()
    # -v maps to INFO, -vv (or more) to DEBUG; default is WARNING.
    if args.verbose >= 2:
        level = logging.DEBUG
    elif args.verbose == 1:
        level = logging.INFO
    else:
        level = logging.WARNING
    basic_format = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
    logging.basicConfig(format=basic_format, level=level)
    logger = logging.getLogger(__name__)
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags; the config file is operator-controlled, but prefer
    # yaml.safe_load if the format allows it.
    with open(CONF_FILE) as conf_fp:
        conf_dict = yaml.load(conf_fp)
    if args.farm not in conf_dict['replsets']:
        if args.verbose:
            print >> sys.stderr, 'No such MongoDB replica set(farm): %s.' % (
                args.farm)
        return 1
    farm_dir = 'mongo-{}'.format(args.farm)
    data_dir = os.path.join(args.data_root, farm_dir)
    log_dir = os.path.join(args.log_root, 'mongo-{}-log'.format(args.farm))
    ensure_dir(data_dir)
    ensure_dir(log_dir)
    if args.rsync_data:
        if args.use_latest_snapshot:
            source = find_snapshot(args.snapshot_store, args.farm,
                                   datetime.now())
            if not source:
                # BUG FIX: this failure path returned 1 but logged at DEBUG,
                # so it was invisible at the default WARNING level.
                logger.error('No snapshot found for "%s" farm under %s',
                             args.farm, args.snapshot_store)
                return 1
        else:
            source = args.source
            if not source:
                logger.error(
                    'either --source or --use-latest-snapshot should be provided')
                return 1
        if not os.path.isdir(source):
            logger.error('%s as a "snapshot" should be a directory', source)
            return 1
        rsync = ['rsync', '-a', '--drop-cache', '--bwlimit', str(args.bwlimit)]
        # Presumably get_directory_size returns the unit is_space_enough
        # expects; the log message claims GB — TODO confirm.
        data_size = get_directory_size(source)
        if not is_space_enough(args.data_root, data_size, SPACE_TO_KEEP):
            logger.error('%sGB is needed, there is no enough space on %s',
                         data_size, args.data_root)
            return 1
        # Trailing slash: rsync copies the *contents* of source into data_dir.
        source = source.rstrip('/') + '/'
        cmd = rsync + [source, data_dir]
        proc = subprocess.Popen(cmd)
        proc.wait()
    user = conf_dict['common']['user']
    password = conf_dict['common']['password']
    port = conf_dict['replsets'][args.farm]['port']
    host = '%s-mongo' % (args.farm)
    key = conf_dict['replsets'][args.farm]['key']
    # Probe nodes <farm>-mongo1..3 until one answers.
    mongodb_info = None
    for i in range(1, 4):
        mongodb_info = get_mongodb_info(host + str(i), port, user, password)
        if mongodb_info:
            break
    # BUG FIX: when no node answered, mongodb_info could be None and the
    # .get() calls crashed with AttributeError; guard against that first.
    if (not mongodb_info or not mongodb_info.get('maxConns')
            or not mongodb_info.get('cacheSize')):
        if args.verbose:
            msg = ('May be not any %s node Available.'
                   'The value of maxConns and cacheSize must be get.') % (
                       args.farm)
            print >> sys.stderr, msg
        return 1
    logger.debug('%r', mongodb_info)
    return run_instance_deploy(args.docker_image, args.farm, port, data_dir,
                               log_dir, BACKUP_DIR, key, **mongodb_info)