def _expire_due_onetime_backups(self):
    """Scan all onetime backups matching the expiration query and either
    mark them never-expirable or expire them.

    Honors ``self.stop_requested`` for cooperative shutdown mid-scan.
    Logs totals (expired / never-expire / processed) at the end.
    """
    # process onetime backups
    logger.info("BackupExpirationManager: Finding all onetime backups "
                "due for expiration")
    total_processed = 0
    total_expired = 0
    total_dont_expire = 0
    q = self._check_to_expire_query()
    # onetime backups are distinguished by having no owning plan
    q["plan._id"] = {"$exists": False}
    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(q))
    # no_cursor_timeout: the scan may take longer than the server cursor
    # idle timeout
    onetime_backups_iter = get_mbs().backup_collection.find_iter(
        query=q, no_cursor_timeout=True)

    for onetime_backup in onetime_backups_iter:
        # allow a clean stop between documents
        if self.stop_requested:
            break
        total_processed += 1
        if self.is_onetime_backup_not_expirable(onetime_backup):
            # flag so future scans skip it
            self.mark_backup_never_expire(onetime_backup)
            total_dont_expire += 1
        elif self.is_onetime_backup_due_for_expiration(onetime_backup):
            self.expire_backup(onetime_backup)
            total_expired += 1

    logger.info("BackupExpirationManager: Finished processing Onetime"
                " Backups.\nTotal Expired=%s, Total Don't Expire=%s, "
                "Total Processed=%s" % (total_expired, total_dont_expire,
                                        total_processed))
def _expire_due_onetime_backups(self):
    """Walk every onetime backup (no owning plan) matching the expiration
    query; mark non-expirable ones to never expire, expire the due ones.
    Stops early when ``self.stop_requested`` is set.
    """
    logger.info("BackupExpirationManager: Finding all onetime backups "
                "due for expiration")
    processed = 0
    expired = 0
    never_expire = 0

    query = self._check_to_expire_query()
    # onetime backups are those without a plan
    query["plan._id"] = {"$exists": False}
    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(query))

    cursor = get_mbs().backup_collection.find_iter(query=query,
                                                   no_cursor_timeout=True)
    for backup in cursor:
        if self.stop_requested:
            break
        processed += 1
        if self.is_onetime_backup_not_expirable(backup):
            self.mark_backup_never_expire(backup)
            never_expire += 1
        elif self.is_onetime_backup_due_for_expiration(backup):
            self.expire_backup(backup)
            expired += 1

    logger.info("BackupExpirationManager: Finished processing Onetime"
                " Backups.\nTotal Expired=%s, Total Don't Expire=%s, "
                "Total Processed=%s" %
                (expired, never_expire, processed))
def create_default_config(): from mbs.mbs_config import MBS_CONF_PATH mbs_conf = os.path.expanduser(MBS_CONF_PATH) # do nothing if conf already exists print "Checking if configuration '%s' exists..." % mbs_conf if os.path.exists(mbs_conf): print "Config '%s' already exists" % mbs_conf return print "Configuration '%s' does not exist. Creating default..." % mbs_conf login = os.getlogin() conf_dir = os.path.dirname(mbs_conf) owner = pwd.getpwnam(login) owner_uid = owner[2] owner_gid = owner[3] # if the conf dir does not exist then create it and change owner # This is needs so when pip install is run with sudo then the owner # should be logged in user instead of root if not os.path.exists(conf_dir): print "Creating conf dir '%s'" % conf_dir os.makedirs(conf_dir) print "chown conf dir '%s' to owner uid %s, gid %s" % (conf_dir, owner_uid, owner_gid) os.chown(conf_dir, owner_uid, owner_gid) print "Chmod conf dir '%s' to 00755" % conf_dir os.chmod(conf_dir, 00755) default_conf = { "databaseURI": "YOUR DATABASE URI", "engines": [{ "_type": "BackupEngine", "_id": "DEFAULT", "maxWorkers": 10, "tempDir": "~/backup_temp", "commandPort": 8888, "tags": None }] } from mbs.utils import document_pretty_string conf_file = open(mbs_conf, mode="w") conf_file.write(document_pretty_string(default_conf)) # chown conf file print "chown conf file '%s' to owner uid %s, gid %s" % ( mbs_conf, owner_uid, owner_gid) os.chown(mbs_conf, owner_uid, owner_gid) print "Chmod conf file '%s' to 00644" % mbs_conf os.chmod(mbs_conf, 00644) print "Successfully created configuration '%s'!" % mbs_conf
def create_default_config(): from mbs.mbs_config import MBS_CONF_PATH mbs_conf = os.path.expanduser(MBS_CONF_PATH) # do nothing if conf already exists print "Checking if configuration '%s' exists..." % mbs_conf if os.path.exists(mbs_conf): print "Config '%s' already exists" % mbs_conf return print "Configuration '%s' does not exist. Creating default..." % mbs_conf login = os.getlogin() conf_dir = os.path.dirname(mbs_conf) owner = pwd.getpwnam(login) owner_uid = owner[2] owner_gid = owner[3] # if the conf dir does not exist then create it and change owner # This is needs so when pip install is run with sudo then the owner # should be logged in user instead of root if not os.path.exists(conf_dir): print "Creating conf dir '%s'" % conf_dir os.makedirs(conf_dir) print "chown conf dir '%s' to owner uid %s, gid %s" % (conf_dir, owner_uid, owner_gid) os.chown(conf_dir, owner_uid, owner_gid) print "Chmod conf dir '%s' to 00755" % conf_dir os.chmod(conf_dir, 00755) default_conf = { "databaseURI": "YOUR DATABASE URI", "engines": [ { "_type": "BackupEngine", "_id": "DEFAULT", "maxWorkers": 10, "tempDir": "~/backup_temp", "commandPort": 8888, "tags": None, } ], } from mbs.utils import document_pretty_string conf_file = open(mbs_conf, mode="w") conf_file.write(document_pretty_string(default_conf)) # chown conf file print "chown conf file '%s' to owner uid %s, gid %s" % (mbs_conf, owner_uid, owner_gid) os.chown(mbs_conf, owner_uid, owner_gid) print "Chmod conf file '%s' to 00644" % mbs_conf os.chmod(mbs_conf, 00644) print "Successfully created configuration '%s'!" % mbs_conf
def delete_backup_plan(self, plan_id): try: result = self.backup_system.remove_plan(plan_id) return document_pretty_string(result) except Exception, e: msg = ("Error while trying to delete backup plan %s: %s" % (plan_id, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("delete-backup-plan", e) return error_response(msg)
def get_backup_database_names(self, backup_id): try: dbnames = self.backup_system.get_backup_database_names(backup_id) return document_pretty_string(dbnames) except Exception, e: msg = ("Error while trying to get backup database" " names %s: %s" %(backup_id, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("get-backup-database-names", e) return error_response(msg)
def delete_backup_plan(self, plan_id): try: deleted = self.backup_system.remove_plan(plan_id) return document_pretty_string({"deleted": deleted}) except Exception, e: msg = ("Error while trying to delete backup plan %s: %s" % (plan_id, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("delete-backup-plan", e) return error_response(msg)
def expire_backup(self, backup_id): try: exp_man = self.backup_system.backup_expiration_manager backup = persistence.get_backup(backup_id) result = exp_man.expire_backup(backup, force=True) return document_pretty_string(result) except Exception, e: msg = ("Error while trying to expire backup %s: %s" % (backup_id, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("expire-backup", e) return error_response(msg)
def get_destination_restore_status(self): destination_uri = request.args.get('destinationUri') try: status = self.backup_system.get_destination_restore_status( destination_uri) return document_pretty_string({"status": status}) except Exception, e: msg = ("Error while trying to get restore status for" " destination '%s': %s" % (destination_uri, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("get-destination-restore-status", e) return error_response(msg)
def _delete_backups_targets_due(self): logger.info("BackupSweeper: Starting a sweep cycle...") # clear stats self._cycle_total_processed = 0 self._cycle_total_errored = 0 self._cycle_total_deleted = 0 # compute # of workers based on cpu count self._worker_count = multiprocessing.cpu_count() * 2 + 1 self._sweep_workers = [] self._start_workers() if self.test_mode: logger.info("BackupSweeper: Running in TEST MODE. Nothing will" " be really deleted") logger.info("BackupSweeper: Finding all backups" " due for deletion") q = self._check_to_delete_query() logger.info("BackupSweeper: Executing query :\n%s" % document_pretty_string(q)) backups_iter = get_mbs().backup_collection.find_iter(query=q, no_cursor_timeout=True) backups_iterated = 0 # process all plan backups for backup in backups_iter: self._sweep_queue.put(backup) backups_iterated += 1 # PERFORMANCE OPTIMIZATION # process 10 * worker at max # This is needed because making backup objects (from within the backups_iter) takes up a lot of CPU/Memory # This is needed to give it a breath if backups_iterated % (self._worker_count * 10) == 0: self._wait_for_queue_to_be_empty() self._finish_cycle() logger.info("BackupSweeper: Finished sweep cycle. " "Total Deleted=%s, Total Errored=%s, " "Total Processed=%s" % (self._cycle_total_deleted, self._cycle_total_errored, self._cycle_total_processed))
def _expire_due_recurring_backups(self):
    """Scan all recurring (plan-owned) backups due for expiration,
    grouped by plan, and run expiration per plan.

    Backups are iterated sorted by ``plan._id`` so consecutive documents
    of the same plan form one group; each complete group is handed to
    ``self._process_plan``. Honors ``self.stop_requested``.

    Fix: the don't-expire total was being overwritten
    (``total_dont_expire = plan_total_dont_expire``) instead of
    accumulated, so the final log only reflected the last plan.
    """
    total_processed = 0
    total_expired = 0
    total_dont_expire = 0

    logger.info("BackupExpirationManager: Finding all recurring backups"
                " due for expiration")
    q = self._check_to_expire_query()
    # recurring backups are those owned by a plan
    q["plan._id"] = {"$exists": True}
    # sort by plan id so each plan's backups arrive consecutively
    s = [("plan._id", -1)]

    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(q))

    backups_iter = get_mbs().backup_collection.find_iter(
        query=q, sort=s, no_cursor_timeout=True)

    current_backup = next(backups_iter, None)
    plan = current_backup.plan if current_backup else None
    plan_backups = []

    # process all plan backups
    while current_backup and not self.stop_requested:
        total_processed += 1
        if current_backup.plan.id == plan.id:
            plan_backups.append(current_backup)

        current_backup = next(backups_iter, None)
        # process the current plan once its group is complete (next backup
        # belongs to a different plan, or the cursor is exhausted)
        if not current_backup or current_backup.plan.id != plan.id:
            plan_total_expired, plan_total_dont_expire = \
                self._process_plan(plan, plan_backups)
            total_expired += plan_total_expired
            # accumulate (was '=' — overwrote previous plans' counts)
            total_dont_expire += plan_total_dont_expire

            plan = current_backup.plan if current_backup else None
            plan_backups = []

    logger.info("BackupExpirationManager: Finished processing Recurring "
                "Backups.\nTotal Expired=%s, Total Don't Expire=%s, "
                "Total Processed=%s" % (total_expired, total_dont_expire,
                                        total_processed))
def get_destination_restore_status(self): destination_uri = request.args.get('destinationUri') try: status = self.backup_system.get_destination_restore_status( destination_uri) return document_pretty_string({ "status": status }) except Exception, e: msg = ("Error while trying to get restore status for" " destination '%s': %s" % (destination_uri, e)) logger.error(msg) logger.error(traceback.format_exc()) send_api_error("get-destination-restore-status", e) return error_response(msg)
def _expire_due_recurring_backups(self):
    """Expire due recurring backups, processed one plan at a time.

    The cursor is sorted by ``plan._id`` so all backups of a plan are
    consecutive; each completed group is passed to ``self._process_plan``
    and its per-plan totals are added to the cycle totals. Supports
    cooperative stop via ``self.stop_requested``.

    Fix: ``total_dont_expire`` was assigned (``=``) rather than
    accumulated (``+=``) per plan, under-reporting the total.
    """
    total_processed = 0
    total_expired = 0
    total_dont_expire = 0

    logger.info("BackupExpirationManager: Finding all recurring backups"
                " due for expiration")
    q = self._check_to_expire_query()
    q["plan._id"] = {"$exists": True}
    s = [("plan._id", -1)]

    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(q))

    backups_iter = get_mbs().backup_collection.find_iter(
        query=q, sort=s, no_cursor_timeout=True)

    current_backup = next(backups_iter, None)
    plan = current_backup.plan if current_backup else None
    plan_backups = []

    # process all plan backups
    while current_backup and not self.stop_requested:
        total_processed += 1
        if current_backup.plan.id == plan.id:
            plan_backups.append(current_backup)

        current_backup = next(backups_iter, None)
        # process the current plan when the group ends (plan changed or
        # cursor exhausted)
        if not current_backup or current_backup.plan.id != plan.id:
            plan_total_expired, plan_total_dont_expire = \
                self._process_plan(plan, plan_backups)
            total_expired += plan_total_expired
            # accumulate across plans (previously overwritten)
            total_dont_expire += plan_total_dont_expire

            plan = current_backup.plan if current_backup else None
            plan_backups = []

    logger.info("BackupExpirationManager: Finished processing Recurring "
                "Backups.\nTotal Expired=%s, Total Don't Expire=%s, "
                "Total Processed=%s" % (total_expired, total_dont_expire,
                                        total_processed))
def _delete_backups_targets_due(self): logger.info("BackupSweeper: Starting a sweep cycle...") # clear stats self._cycle_total_processed = 0 self._cycle_total_errored = 0 self._cycle_total_deleted = 0 # compute # of workers based on cpu count self._worker_count = multiprocessing.cpu_count() * 2 + 1 self._sweep_workers = [] self._start_workers() if self.test_mode: logger.info("BackupSweeper: Running in TEST MODE. Nothing will" " be really deleted") logger.info("BackupSweeper: Finding all backups" " due for deletion") q = self._check_to_delete_query() logger.info("BackupSweeper: Executing query :\n%s" % document_pretty_string(q)) backups_iter = get_mbs().backup_collection.find_iter( query=q, no_cursor_timeout=True) backups_iterated = 0 # process all plan backups for backup in backups_iter: self._sweep_queue.put(backup) backups_iterated += 1 # PERFORMANCE OPTIMIZATION # process 10 * worker at max # This is needed because making backup objects (from within the backups_iter) takes up a lot of CPU/Memory # This is needed to give it a breath if backups_iterated % (self._worker_count * 10) == 0: self._wait_for_queue_to_be_empty() self._finish_cycle() logger.info("BackupSweeper: Finished sweep cycle. " "Total Deleted=%s, Total Errored=%s, " "Total Processed=%s" % (self._cycle_total_deleted, self._cycle_total_errored, self._cycle_total_processed))
def _expire_due_canceled_backups(self):
    """Expire every canceled backup created before the canceled-cutoff
    date. Canceled backups are always expired immediately — there is no
    never-expire path here. Honors ``self.stop_requested``.
    """
    logger.info("BackupExpirationManager: Finding all canceled backups "
                "due for expiration")

    query = self._check_to_expire_query()
    query["state"] = State.CANCELED
    query["createdDate"] = {"$lt": self.expired_canceled_cutoff_date()}

    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(query))

    cursor = get_mbs().backup_collection.find_iter(query=query,
                                                   no_cursor_timeout=True)
    for canceled_backup in cursor:
        if self.stop_requested:
            break
        # for canceled backups, we always expire them immediately
        self.expire_backup(canceled_backup)

    logger.info("BackupExpirationManager: Finished processing canceled"
                " Backups")
def _expire_due_canceled_backups(self):
    """Immediately expire all canceled backups older than the configured
    canceled-cutoff date, stopping early if a stop was requested.
    """
    logger.info("BackupExpirationManager: Finding all canceled backups "
                "due for expiration")

    q = self._check_to_expire_query()
    q["state"] = State.CANCELED
    cutoff = self.expired_canceled_cutoff_date()
    q["createdDate"] = {"$lt": cutoff}

    logger.info("BackupExpirationManager: Executing query :\n%s" %
                document_pretty_string(q))

    for backup in get_mbs().backup_collection.find_iter(
            query=q, no_cursor_timeout=True):
        if self.stop_requested:
            break
        # canceled backups are expired unconditionally
        self.expire_backup(backup)

    logger.info("BackupExpirationManager: Finished processing canceled"
                " Backups")
def status_request():
    """Route handler: log the status command and return the current
    status (closure over ``self``) as pretty-printed JSON.
    """
    logger.info("Received a status command")
    current_status = self.status()
    return document_pretty_string(current_status)
def error_response(message, **kwargs):
    """Build a pretty-printed JSON error document: ``kwargs`` plus an
    'error' key carrying ``message`` (mutates and reuses kwargs).
    """
    kwargs["error"] = message
    return document_pretty_string(kwargs)
def ok_response(ok=True):
    """Return a pretty-printed JSON document of the form {"ok": <ok>}."""
    response_doc = {"ok": ok}
    return document_pretty_string(response_doc)
def stop_api_server_request():
    """Route handler: log the stop command and return the result of
    stopping the API server (closure over ``self``) as pretty JSON.
    """
    logger.info("Received a stop command")
    stop_result = self.stop_api_server()
    return document_pretty_string(stop_result)