def __init__(self, conf_dict, storage):
    self.conf = conf_dict
    self.storage = storage
    self.engine = conf_dict.engine
    self.client = client_manager.get_client_manager(CONF)
    self.nova = self.client.get_nova()
    self.cinder = self.client.get_cinder()
    self.glance = self.client.get_glance()
    self._general_validation()
    self._validate()

    # Resolve resource names from the job configuration to OpenStack IDs.
    if self.conf.nova_inst_name:
        self.nova_instance_ids = [
            server.id for server in
            self.nova.servers.list(detailed=False)
            if server.name == self.conf.nova_inst_name]

    if self.conf.cinder_vol_name:
        self.cinder_vol_ids = [
            volume.id for volume in self.cinder.volumes.list()
            if volume.name == self.conf.cinder_vol_name]

    if self.conf.glance_image_name:
        self.glance_image_ids = [
            image.id for image in self.glance.images.list()
            if image.name == self.conf.glance_image_name]

    if self.conf.glance_image_name_filter:
        # Keep images whose names contain the filter substring.
        self.glance_image_ids = [
            image.id for image in self.glance.images.list()
            if self.conf.glance_image_name_filter in image.name]
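# Illustrative sketch only (not freezer code): the name-to-ID resolution above
# reduces to a comprehension over whatever the OpenStack client lists. The
# Server namedtuple and sample data below are hypothetical stand-ins for the
# objects returned by nova.servers.list().
from collections import namedtuple

Server = namedtuple('Server', ['id', 'name'])
listed_servers = [Server('id-1', 'web01'), Server('id-2', 'db01')]

nova_inst_name = 'web01'
nova_instance_ids = [server.id for server in listed_servers
                     if server.name == nova_inst_name]
assert nova_instance_ids == ['id-1']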
def __init__(self, storage, **kwargs):
    super(NovaEngine, self).__init__(storage=storage)
    self.client = client_manager.get_client_manager(CONF)
    self.nova = self.client.create_nova()
    self.glance = self.client.create_glance()
    self.cinder = self.client.create_cinder()
    self.neutron = self.client.create_neutron()
    self.server_info = None
def __init__(self, storage, **kwargs):
    super(GlanceEngine, self).__init__(storage=storage)
    self.client = client_manager.get_client_manager(CONF)
    self.glance = self.client.create_glance()
    self.encrypt_pass_file = kwargs.get('encrypt_key')
    self.exclude = kwargs.get('exclude')
    self.server_info = None
    self.openssl_path = None
    self.compression_algo = 'gzip'
    self.is_windows = None
    self.dry_run = kwargs.get('dry_run', False)
    self.max_segment_size = kwargs.get('max_segment_size')
    self.storage = storage
    self.dereference_symlink = kwargs.get('symlinks')
def __init__(self, storage, **kwargs):
    super(OsbrickEngine, self).__init__(storage=storage)
    self.client = client_manager.get_client_manager(CONF)
    self.cinder = self.client.create_cinder()
    self.volume_info = None
    self.compression_algo = kwargs.get('compression')
    self.encrypt_pass_file = kwargs.get('encrypt_key')
    self.dereference_symlink = kwargs.get('symlinks')
    self.exclude = kwargs.get('exclude')
    self.storage = storage
    self.is_windows = winutils.is_windows()
    self.dry_run = kwargs.get('dry_run', False)
    self.max_segment_size = kwargs.get('max_segment_size')
def __init__(self, storage, **kwargs):
    super(NovaEngine, self).__init__(storage=storage)
    self.client = client_manager.get_client_manager(CONF)
    self.nova = self.client.create_nova()
    self.glance = self.client.create_glance()
    self.cinder = self.client.create_cinder()
    self.neutron = self.client.create_neutron()
    self.encrypt_pass_file = kwargs.get('encrypt_key')
    self.exclude = kwargs.get('exclude')
    self.server_info = None
    self.openssl_path = None
    self.compression_algo = 'gzip'
    self.is_windows = None
    self.dry_run = kwargs.get('dry_run', False)
    self.max_segment_size = kwargs.get('max_segment_size')
    self.storage = storage
    self.dereference_symlink = kwargs.get('symlinks')
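# A minimal sketch, assuming the engine constructors above receive the same
# keyword arguments that freezer_main() below passes to
# engine_manager.EngineManager().load_engine(). The literal values here are
# placeholders for illustration, not freezer defaults.
engine_kwargs = {
    'compression': 'gzip',         # read by OsbrickEngine as compression_algo
    'symlinks': False,             # stored as dereference_symlink
    'exclude': None,               # paths to skip during backup
    'encrypt_key': None,           # stored as encrypt_pass_file
    'dry_run': False,
    'max_segment_size': 33554432,  # upload segment size in bytes
}
# e.g. GlanceEngine(storage=some_storage, **engine_kwargs)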
def freezer_main(backup_args):
    """Freezer main loop for job execution."""

    if not backup_args.quiet:
        LOG.info("Begin freezer agent process with args: {0}".format(
            sys.argv))
        LOG.info('log file at {0}'.format(CONF.get('log_file')))

    if backup_args.max_priority:
        utils.set_max_process_priority()

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    max_segment_size = backup_args.max_segment_size
    if (backup_args.storage == 'swift' or
            backup_args.backup_media in ['nova', 'cinder', 'cindernative',
                                         'cinderbrick']):
        backup_args.client_manager = client_manager.get_client_manager(
            backup_args.__dict__)

    if backup_args.storage == 's3':
        # Fall back to environment variables for missing S3 settings.
        if backup_args.__dict__['access_key'] == '' \
                and 'ACCESS_KEY' in os.environ:
            backup_args.__dict__['access_key'] = os.environ.get('ACCESS_KEY')
        if backup_args.__dict__['access_key'] == '':
            raise Exception('No access key found for S3 compatible storage')

        if backup_args.__dict__['secret_key'] == '' \
                and 'SECRET_KEY' in os.environ:
            backup_args.__dict__['secret_key'] = os.environ.get('SECRET_KEY')
        if backup_args.__dict__['secret_key'] == '':
            raise Exception('No secret key found for S3 compatible storage')

        if backup_args.__dict__['endpoint'] == '' \
                and 'ENDPOINT' in os.environ:
            backup_args.__dict__['endpoint'] = os.environ.get('ENDPOINT')
        if backup_args.__dict__['endpoint'] == '':
            raise Exception('No endpoint found for S3 compatible storage')

    if backup_args.storages:
        # pylint: disable=abstract-class-instantiated
        storage = multiple.MultipleStorage(
            [storage_from_dict(x, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, max_segment_size)

    engine_loader = engine_manager.EngineManager()
    backup_args.engine = engine_loader.load_engine(
        compression=backup_args.compression,
        symlinks=backup_args.dereference_symlink,
        exclude=backup_args.exclude,
        storage=storage,
        max_segment_size=backup_args.max_segment_size,
        rsync_block_size=backup_args.rsync_block_size,
        encrypt_key=backup_args.encrypt_pass_file,
        dry_run=backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("Trickle seems to be not working, switching "
                             "to normal mode")
                return run_job(backup_args, storage)

        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()

        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)

        if process.returncode:
            LOG.warning("Trickle Error: {0}".format(error))
            LOG.info("Switching to work without trickle ...")
            return run_job(backup_args, storage)
    else:
        return run_job(backup_args, storage)
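# Hypothetical helper (not part of freezer) that captures the S3 settings
# fallback pattern used in freezer_main(): prefer the parsed argument, then
# the environment variable, otherwise fail loudly.
import os

def require_s3_setting(args_dict, key, env_var):
    """Fill args_dict[key] from env_var if empty; raise if still missing."""
    if args_dict.get(key, '') == '' and env_var in os.environ:
        args_dict[key] = os.environ[env_var]
    if args_dict.get(key, '') == '':
        raise Exception(
            'No {0} found for S3 compatible storage'.format(key))

# e.g. require_s3_setting(backup_args.__dict__, 'access_key', 'ACCESS_KEY')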