Example #1
def test_compression_bad_mode():
    global tmpdir

    f = compression.open_stream(os.path.join(tmpdir, 'foo'), 'w', 'gzip')
    f.write('foo')
    f.close()

    f = compression.open_stream(os.path.join(tmpdir, 'foo'), 'bad', 'gzip')
    f.write('foo')
    f.close()
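
For contrast with the bad-mode test above, a minimal round-trip sketch of the happy path, assuming the same compression module and a writable tmpdir; the ok_ assertion helper (from nose.tools) is the one used by the full test later in this listing:

    # write with gzip, read back with the same method, verify the payload
    f = compression.open_stream(os.path.join(tmpdir, 'roundtrip'), 'w', 'gzip')
    f.write('foo')
    f.close()

    f = compression.open_stream(os.path.join(tmpdir, 'roundtrip'), 'r', 'gzip')
    ok_(f.read(3) == 'foo')
    f.close()
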
Example #2
def test_compression_wrong_method():
    global tmpdir
    
    # write as gzip, then read it back with the wrong method (bzip2)
    f = compression.open_stream(os.path.join(tmpdir, 'foo'), 'w', 'gzip')
    f.write('foo')
    f.close()
    
    f = compression.open_stream(os.path.join(tmpdir, 'foo'), 'r', 'bzip2')
    foo = f.read(3)
    f.close()
Example #5
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config["xtrabackup"]
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config["stream"])
     if stream:
         # XXX: bounce through compression
         if stream == "tar":
             archive_path = join(backup_directory, "backup.tar")
             zconfig = self.config["compression"]
             try:
                 return open_stream(
                     archive_path,
                     "w",
                     method=zconfig["method"],
                     level=zconfig["level"],
                     extra_args=zconfig["options"],
                 )
              except OSError as exc:
                 raise BackupError("Unable to create output file: %s" % exc)
         elif stream == "xbstream":
             archive_path = join(backup_directory, "backup.xb")
             return open(archive_path, "w")
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
Example #6
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config["xtrabackup"]
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config["stream"])
     if stream:
         if stream == "tar":
             archive_path = join(backup_directory, "backup.tar")
         elif stream == "xbstream":
             archive_path = join(backup_directory, "backup.xb")
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
         zconfig = self.config["compression"]
         try:
             return open_stream(
                 archive_path,
                 "w",
                 method=zconfig["method"],
                 level=zconfig["level"],
                 extra_args=zconfig["options"],
                 inline=zconfig["inline"],
             )
         except OSError as exc:
             raise BackupError("Unable to create output file: %s" % exc)
     else:
         return open("/dev/null", "w")
Example #7
    def backup(self):
        """
        Create backup
        """
        if self.dry_run:
            return
        if not os.path.exists(self.config["tar"]["directory"]) or not os.path.isdir(
            self.config["tar"]["directory"]
        ):
            raise BackupError("{0} is not a directory!".format(self.config["tar"]["directory"]))
        out_name = "{0}.tar".format(self.config["tar"]["directory"].lstrip("/").replace("/", "_"))
        outfile = os.path.join(self.target_directory, out_name)
        args = ["tar", "c", self.config["tar"]["directory"]]
        errlog = TemporaryFile()
        stream = open_stream(outfile, "w", **self.config["compression"])
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
        finally:
            errlog.close()

        if status != 0:
            raise BackupError("tar failed (status={0})".format(status))
Example #8
    def run(self, cmd, opts, *databases):
        # 1) find the directory in the backupset
        # 2) if !file-per-database, use all-databases.sql
        # 3) otherwise, loop over data/MANIFEST.txt
        # 3a) apply database files before parsing file-per-database files
        #     to avoid doing too much work
        # 3b) Try to apply table exclusion filters if they have a '.'
        config = self.backup.config
        config.validate_config(CONFIGSPEC)
        if 'mysqldump' not in config:
            logging.info("Backupset %s is not a mysqldump backup.", self.backup.name)
            return 1

        if not opts.output:
            logging.error("No output destination was specified.  Specify '-' to output to the console.")
            return 1

        if not databases and not opts.all_databases:
            logging.info("No databases specified to restore.  Please specify some "
                  "database or use the --all-databases option to restore "
                  "everything from the backupset.")
            return 1

        databases = [db.decode('utf8') for db in databases]

        dbrewriter = create_rewriter(databases)

        if opts.all_databases:
            logging.info("Restoring all databases.")
            databases = None # restore all databases
        else:
            databases = dbrewriter.dbmap.keys() # only restore specified databases

        if opts.force:
            logging.warning("Skipping confirmation")
        else:
            logging.info("Confirmation should be done here.")

        schema_filter = SchemaFilter(databases=databases)

        if opts.output == 'mysql':
            pid = start_mysql()
            outfile = pid.stdin
        elif opts.output == '-':
            outfile = sys.stdout
        else:
            if os.path.exists(opts.output):
                logging.error("Refusing to overwrite %s", os.path.abspath(opts.output))
                return 1
            outfile = open(opts.output, 'w')

        if not config['mysqldump']['file-per-database']:
            path = os.path.join(self.backup.path, 'backup_data.sql')
            try:
                stream = open_stream(path, 'r', config['compression']['method'])
                stream = codecs.getreader('utf8')(stream)
            except IOError as exc:
                logging.error("Failed to open backup data stream: %s", exc)
                return 1
            handle_stream(stream, schema_filter, opts.binlog, opts.progress, dbrewriter=dbrewriter, outfile=outfile)
Example #9
def backup_pgsql(backup_directory, config, databases):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """
    connection_params = pgauth2args(config)
    extra_options = pg_extra_options(config)

    pgenv = dict(os.environ)

    if config['pgauth']['password'] is not None:
        pgpass_file = generate_pgpassfile(backup_directory,
                                          config['pgauth']['password'])
        if 'PGPASSFILE' in pgenv:
            LOG.warning(
                "Overriding PGPASSFILE in environment with %s because "
                "a password is specified.", pgpass_file)
        pgenv['PGPASSFILE'] = pgpass_file

    backup_globals(backup_directory, config, connection_params, env=pgenv)

    ext_map = {
        'custom': '.dump',
        'plain': '.sql',
        'tar': '.tar',
    }

    backups = []
    for dbname in databases:
        out_format = config['pgdump']['format']

        dump_name = encode_safe(dbname)
        if dump_name != dbname:
            LOG.warning("Encoded database %s as filename %s", dbname,
                        dump_name)

        filename = os.path.join(backup_directory,
                                dump_name + ext_map[out_format])

        zopts = config['compression']
        stream = open_stream(filename,
                             'w',
                             method=zopts['method'],
                             level=zopts['level'],
                             extra_args=zopts['options'],
                             inline=zopts['inline'])

        backups.append((dbname, stream.name))

        run_pgdump(dbname=dbname,
                   output_stream=stream,
                   connection_params=connection_params + extra_options,
                   out_format=out_format,
                   env=pgenv)

        stream.close()

    generate_manifest(backups, backup_directory)
Example #10
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
    """Setup actions for a LVM snapshot based on the provided
    configuration.

    Optional actions:
        * MySQL locking
        * InnoDB recovery
        * Recording MySQL replication
    """
    if config['mysql-lvm']['lock-tables']:
        extra_flush = config['mysql-lvm']['extra-flush-tables']
        act = FlushAndLockMySQLAction(client, extra_flush)
        snapshot.register('pre-snapshot', act, priority=100)
        snapshot.register('post-snapshot', act, priority=100)
    if config['mysql-lvm'].get('replication', True):
        repl_cfg = config.setdefault('mysql:replication', {})
        act = RecordMySQLReplicationAction(client, repl_cfg)
        snapshot.register('pre-snapshot', act, 0)
    if config['mysql-lvm']['innodb-recovery']:
        mysqld_config = dict(config['mysqld'])
        mysqld_config['datadir'] = snap_datadir
        if not mysqld_config['tmpdir']:
            mysqld_config['tmpdir'] = tempfile.gettempdir()
        ib_log_size = client.show_variable('innodb_log_file_size')
        mysqld_config['innodb-log-file-size'] = ib_log_size
        act = InnodbRecoveryAction(mysqld_config)
        snapshot.register('post-mount', act, priority=100)

    archive_stream = open_stream(os.path.join(spooldir, 'backup.tar'), 'w',
                                 **config['compression'])
    act = TarArchiveAction(snap_datadir, archive_stream, config['tar'])
    snapshot.register('post-mount', act, priority=50)

    snapshot.register('pre-remove', log_final_snapshot_size)
Example #11
    def backup(self):
        """
        Create backup
        """
        if self.dry_run:
            return
        if not os.path.exists(
                self.config["tar"]["directory"]) or not os.path.isdir(
                    self.config["tar"]["directory"]):
            raise BackupError("{0} is not a directory!".format(
                self.config["tar"]["directory"]))
        out_name = "{0}.tar".format(
            self.config["tar"]["directory"].lstrip("/").replace("/", "_"))
        outfile = os.path.join(self.target_directory, out_name)
        args = ["tar", "c", self.config["tar"]["directory"]]
        errlog = TemporaryFile()
        stream = open_stream(outfile, "w", **self.config["compression"])
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(args,
                    stdout=stream.fileno(),
                    stderr=errlog.fileno(),
                    close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid,
                          line.rstrip())
        finally:
            errlog.close()

        if status != 0:
            raise BackupError("tar failed (status={0})".format(status))
Example #12
def backup_pgsql(backup_directory, config, databases):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """
    connection_params = pgauth2args(config)
    extra_options = pg_extra_options(config)

    pgenv = dict(os.environ)

    if config["pgauth"]["password"] is not None:
        pgpass_file = generate_pgpassfile(backup_directory, config["pgauth"]["password"])
        if "PGPASSFILE" in pgenv:
            LOG.warning(
                "Overriding PGPASSFILE in environment with %s because " "a password is specified.",
                pgpass_file,
            )
        pgenv["PGPASSFILE"] = pgpass_file

    backup_globals(backup_directory, config, connection_params, env=pgenv)

    ext_map = {"custom": ".dump", "plain": ".sql", "tar": ".tar"}

    backups = []
    for dbname in databases:
        out_format = config["pgdump"]["format"]

        dump_name = encode_safe(dbname)
        if dump_name != dbname:
            LOG.warning("Encoded database %s as filename %s", dbname, dump_name)

        filename = os.path.join(backup_directory, dump_name + ext_map[out_format])

        zopts = config["compression"]
        stream = open_stream(
            filename,
            "w",
            method=zopts["method"],
            level=zopts["level"],
            extra_args=zopts["options"],
            inline=zopts["inline"],
        )

        backups.append((dbname, stream.name))

        run_pgdump(
            dbname=dbname,
            output_stream=stream,
            connection_params=connection_params + extra_options,
            out_format=out_format,
            env=pgenv,
        )

        stream.close()

    generate_manifest(backups, backup_directory)
Example #13
 def _open_stream(self, path, mode, method=None):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     path = os.path.join(self.target_directory, 'backup_data', path)
     compression_method = method or self.config['compression']['method']
     compression_level = self.config['compression']['level']
     stream = open_stream(path, mode, compression_method, compression_level)
     return stream
Example #14
 def _open_stream(self, path, mode, method=None):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     path = os.path.join(self.target_directory, "backup_data", path)
     compression_method = method or self.config["compression"]["method"]
     compression_level = self.config["compression"]["level"]
     stream = open_stream(path, mode, compression_method, compression_level)
     return stream
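
A hypothetical call site for this helper, to show how it resolves paths: the name passed in lands under <target_directory>/backup_data/ and is written through the configured compression method. The file name and the dump_output variable are illustrative only.

    # writes through the compression layer into backup_data/
    stream = self._open_stream("all_databases.sql", "w")
    stream.write(dump_output)
    stream.close()
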
Example #15
def backup_pgsql(backup_directory, config, databases):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """
    connection_params = pgauth2args(config)
    extra_options = pg_extra_options(config)

    pgenv = dict(os.environ)

    if config['pgauth']['password'] is not None:
        pgpass_file = generate_pgpassfile(backup_directory,
                                          config['pgauth']['password'])
        if 'PGPASSFILE' in pgenv:
            LOG.warn("Overriding PGPASSFILE in environment with %s because "
                     "a password is specified.",
                      pgpass_file)
        pgenv['PGPASSFILE'] = pgpass_file

    backup_globals(backup_directory, config, connection_params, env=pgenv)

    ext_map = {
        'custom' : '.dump',
        'plain' : '.sql',
        'tar' : '.tar',
    }


    backups = []
    for dbname in databases:
        format = config['pgdump']['format']

        dump_name, _ = encode_safe(dbname)
        if dump_name != dbname:
            LOG.warn("Encoded database %s as filename %s", dbname, dump_name)

        filename = os.path.join(backup_directory, dump_name + ext_map[format])

        zopts = config['compression']
        stream = open_stream(filename, 'w',
                             method=zopts['method'],
                             level=zopts['level'],
                             extra_args=zopts['options'])

        backups.append((dbname, stream.name))

        run_pgdump(dbname=dbname,
                   output_stream=stream,
                   connection_params=connection_params + extra_options,
                   format=format,
                   env=pgenv)

        stream.close()

    generate_manifest(backups, backup_directory)
Example #16
 def _open_stream(self, path, mode, method=None):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     path = str(os.path.join(self.target_directory, "backup_data", path))
     config = deepcopy(self.config["compression"])
     if method:
         config["method"] = method
     stream = open_stream(path, mode, **config)
     return stream
Example #18
def test_compression():
    global tmpdir
    
    # gzip - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'gzip_foo'), 'w', 'gzip')
    f.write('foo')
    f.close()
    
    f = compression.open_stream(os.path.join(tmpdir, 'gzip_foo'), 'r', 'gzip')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')
    
    # bzip2 - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'bzip2_foo'), 'w', 'bzip2')
    f.write('foo')
    f.close()
    
    f = compression.open_stream(os.path.join(tmpdir, 'bzip2_foo'), 'r', 'bzip2')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')
    
    # lzop - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'lzop_foo'), 'w', 'lzop')
    f.write('foo')
    f.close()
    
    f = compression.open_stream(os.path.join(tmpdir, 'lzop_foo'), 'r', 'lzop')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')
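
Example #2 earlier opens a gzip file through the bzip2 codec; presumably the read fails on the mismatched container format. A hedged sketch of asserting that in the same nose style (the exact exception type is backend-specific and assumed here):

    from nose.tools import assert_raises

    f = compression.open_stream(os.path.join(tmpdir, 'gzip_foo'), 'r', 'bzip2')
    assert_raises(IOError, f.read, 3)  # IOError is an assumption
    f.close()
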
Example #19
    def backup(self):
        """
        Do what is necessary to perform and validate a successful backup.
        """
        command = ["mongodump"]
        username = self.config["mongodump"].get("username")
        if username:
            command += ["-u", username]
            password = self.config["mongodump"].get("password")
            if password:
                command += ["-p", password]
        command += ["--host", self.config["mongodump"].get("host")]
        command += ["--out", self.target_directory]
        add_options = self.config["mongodump"].get("additional-options")
        if add_options:
            command.extend(add_options)

        if self.dry_run:
            LOG.info("[Dry run] MongoDump Plugin - test backup run")
            LOG.info("MongoDump command: %s", subprocess.list2cmdline(command))
        else:
            LOG.info("MongoDump command: %s", subprocess.list2cmdline(command))
            logfile = open(
                os.path.join(self.target_directory, "mongodump.log"), "w")
            proc = subprocess.Popen(command, stdout=logfile, stderr=logfile)
            ret = proc.wait()

            if ret != 0:
                raise BackupError("Mongodump returned %d" % ret)

            zopts = self.config["compression"]
            for root, _, files in os.walk(self.target_directory):
                for file_object in files:
                    if ".log" in file_object or ".conf" in file_object:
                        continue
                    if ".gz" in file_object:
                        continue
                    path = os.path.join(root, file_object)
                    LOG.info("Compressing file %s", path)
                    ostream = open_stream(
                        path,
                        "w",
                        method=zopts["method"],
                        level=zopts["level"],
                        extra_args=zopts["options"],
                        inline=zopts["inline"],
                        split=zopts["split"],
                    )
                    with open(path, "rb") as file_object:
                        ostream.write(file_object.read())
                    ostream.close()
                    os.remove(path)
Example #20
 def _open_stream(self, path, mode, method=None):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     compression_method = method or self.config['compression']['method']
     compression_level = self.config['compression']['level']
     compression_options = self.config['compression']['options']
     stream = open_stream(path,
                          mode,
                          compression_method,
                          compression_level,
                          extra_args=compression_options)
     return stream
Example #21
    def run(self, cmd, opts, directory):
        # LVM backups are strictly tar backups through some (or no) compression
        config = self.backup.config
        if "lvmbackup" not in config:
            logging.error("Backupset %s is not a mysqldump backup.", self.backup.name)
            return 1

        path = os.path.join(self.backup.path, "backup.tar")
        try:
            stream = open_stream(path, "r", config["compression"]["method"])
        except IOError as exc:
            logging.error("Failed to open stream %s: %s", path, exc)
            return 1
Example #22
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
    """Setup actions for a LVM snapshot based on the provided
    configuration.

    Optional actions:
        * MySQL locking
        * InnoDB recovery
        * Recording MySQL replication
    """
    mysql = connect_simple(config['mysql:client'])
    if mysql.show_variable('have_innodb') == 'YES':
        try:
            pathinfo = MySQLPathInfo.from_mysql(mysql)
        finally:
            mysql.close()
        try:
            check_innodb(pathinfo, ensure_subdir_of_datadir=True)
        except BackupError:
            if not config['mysql-lvm']['force-innodb-backup']:
                raise

    if config['mysql-lvm']['lock-tables']:
        extra_flush = config['mysql-lvm']['extra-flush-tables']
        act = FlushAndLockMySQLAction(client, extra_flush)
        snapshot.register('pre-snapshot', act, priority=100)
        snapshot.register('post-snapshot', act, priority=100)
    if config['mysql-lvm'].get('replication', True):
        repl_cfg = config.setdefault('mysql:replication', {})
        act = RecordMySQLReplicationAction(client, repl_cfg)
        snapshot.register('pre-snapshot', act, 0)
    if config['mysql-lvm']['innodb-recovery']:
        mysqld_config = dict(config['mysqld'])
        mysqld_config['datadir'] = snap_datadir
        if not mysqld_config['tmpdir']:
            mysqld_config['tmpdir'] = tempfile.gettempdir()
        ib_log_size = client.show_variable('innodb_log_file_size')
        mysqld_config['innodb-log-file-size'] = ib_log_size
        act = InnodbRecoveryAction(mysqld_config)
        snapshot.register('post-mount', act, priority=100)

    try:
        archive_stream = open_stream(
            os.path.join(spooldir, 'backup.tar'),
            'w',
            method=config['compression']['method'],
            level=config['compression']['level'],
            extra_args=config['compression']['options'])
    except OSError as exc:
        raise BackupError("Unable to create archive file '%s': %s" %
                          (os.path.join(spooldir, 'backup.tar'), exc))
Example #23
def backup_pgsql(backup_directory, config, databases):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """
    connection_params = pgauth2args(config)
    extra_options = pg_extra_options(config)

    pgenv = dict(os.environ)

    if config["pgauth"]["password"] is not None:
        pgpass_file = generate_pgpassfile(backup_directory, config["pgauth"]["password"])
        if "PGPASSFILE" in pgenv:
            LOG.warning(
                "Overriding PGPASSFILE in environment with %s because " "a password is specified.",
                pgpass_file,
            )
        pgenv["PGPASSFILE"] = pgpass_file

    backup_globals(backup_directory, config, connection_params, env=pgenv)

    ext_map = {"custom": ".dump", "plain": ".sql", "tar": ".tar"}

    backups = []
    for dbname in databases:
        out_format = config["pgdump"]["format"]

        dump_name = encode_safe(dbname)
        if dump_name != dbname:
            LOG.warning("Encoded database %s as filename %s", dbname, dump_name)

        filename = os.path.join(backup_directory, dump_name + ext_map[out_format])

        stream = open_stream(filename, "w", **config["compression"])
        backups.append((dbname, stream.name))

        run_pgdump(
            dbname=dbname,
            output_stream=stream,
            connection_params=connection_params + extra_options,
            out_format=out_format,
            env=pgenv,
        )

        stream.close()

    generate_manifest(backups, backup_directory)
Example #24
    def backup(self):
        """Run a database backup with xtrabackup"""
        defaults_file = os.path.join(self.target_directory,
                                     'my.xtrabackup.cnf')
        args = [
            self.config['xtrabackup']['innobackupex'],
            '--defaults-file=%s' % defaults_file,
            '--stream=tar4ibd',
            tempfile.gettempdir(),
        ]

        if self.config['xtrabackup']['slave-info']:
            args.insert(3, '--slave-info')
        if self.config['xtrabackup']['no-lock']:
            args.insert(2, '--no-lock')

        LOG.info("%s", list2cmdline(args))

        if self.dry_run:
            return

        config = build_mysql_config(self.config['mysql:client'])
        write_options(config, defaults_file)
        shutil.copyfileobj(
            open(self.config['xtrabackup']['global-defaults'], 'r'),
            open(defaults_file, 'a'))

        backup_path = os.path.join(self.target_directory, 'backup.tar')
        compression_stream = open_stream(backup_path, 'w',
                                         **self.config['compression'])
        error_log_path = os.path.join(self.target_directory, 'xtrabackup.log')
        error_log = open(error_log_path, 'wb')
        try:
            try:
                check_call(args,
                           stdout=compression_stream,
                           stderr=error_log,
                           close_fds=True)
            except OSError as exc:
                LOG.info("Command not found: %s", args[0])
                raise BackupError("%s not found. Is xtrabackup installed?" %
                                  args[0])
            except CalledProcessError as exc:
                LOG.info("%s failed", list2cmdline(exc.cmd))
                for line in open(error_log_path, 'r'):
                    if line.startswith('>>'):
                        continue
                    LOG.info("%s", line.rstrip())
                raise BackupError("%s failed" % exc.cmd[0])
Example #25
def backup_globals(backup_directory, config, connection_params, env=None):
    """Backup global Postgres data that wouldn't otherwise
    be captured by pg_dump.

    Runs pg_dumpall -g > $backup_dir/globals.sql

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """

    path = os.path.join(backup_directory, 'global.sql')
    zopts = config['compression']
    output_stream = open_stream(path,
                                'w',
                                method=zopts['method'],
                                level=zopts['level'],
                                extra_args=zopts['options'],
                                inline=zopts['inline'])

    args = [
        'pg_dumpall',
        '-g',
    ] + connection_params

    LOG.info('%s > %s', subprocess.list2cmdline(args), output_stream.name)
    stderr = tempfile.TemporaryFile()
    try:
        try:
            returncode = subprocess.call(args,
                                         stdout=output_stream,
                                         stderr=stderr,
                                         env=env,
                                         close_fds=True)
        except OSError as exc:
            raise PgError("Failed to execute '%s': [%d] %s" %
                          (args[0], exc.errno, exc.strerror))

        output_stream.close()
        stderr.flush()
        stderr.seek(0)
        for line in stderr:
            LOG.error('%s', line.rstrip())
    finally:
        stderr.close()

    if returncode != 0:
        raise PgError("pg_dumpall command exited with failure code %d." %
                      returncode)
Example #26
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
    """Setup actions for a LVM snapshot based on the provided
    configuration.

    Optional actions:
        * MySQL locking
        * InnoDB recovery
        * Recording MySQL replication
    """
    if client is not None:
        mysql = connect_simple(config['mysql:client'])
        if mysql.show_variable('have_innodb') == 'YES':
            try:
                pathinfo = MySQLPathInfo.from_mysql(mysql)
            finally:
                mysql.close()
            try:
                check_innodb(pathinfo, ensure_subdir_of_datadir=True)
            except BackupError:
                if not config['mysql-lvm']['force-innodb-backup']:
                    raise
    
        if config['mysql-lvm']['lock-tables']:
            extra_flush = config['mysql-lvm']['extra-flush-tables']
            act = FlushAndLockMySQLAction(client, extra_flush)
            snapshot.register('pre-snapshot', act, priority=100)
            snapshot.register('post-snapshot', act, priority=100)
        if config['mysql-lvm'].get('replication', True):
            repl_cfg = config.setdefault('mysql:replication', {})
            act = RecordMySQLReplicationAction(client, repl_cfg)
            snapshot.register('pre-snapshot', act, 0)
        if config['mysql-lvm']['innodb-recovery']:
            mysqld_config = dict(config['mysqld'])
            mysqld_config['datadir'] = snap_datadir
            if not mysqld_config['tmpdir']:
                mysqld_config['tmpdir'] = tempfile.gettempdir()
            ib_log_size = client.show_variable('innodb_log_file_size')
            mysqld_config['innodb-log-file-size'] = ib_log_size
            act = InnodbRecoveryAction(mysqld_config)
            snapshot.register('post-mount', act, priority=100)


    archive_stream = open_stream(os.path.join(spooldir, 'backup.tar'),
                                 'w',
                                 **config['compression'])
    act = TarArchiveAction(snap_datadir, archive_stream, config['tar'])
    snapshot.register('post-mount', act, priority=50)

    snapshot.register('pre-remove', log_final_snapshot_size)
Example #27
 def _open_stream(self, path, mode, method=None):
     """
     Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     stream = open_stream(
         path,
         mode,
         method or self.config["compression"]["method"],
         self.config["compression"]["level"],
         extra_args=self.config["compression"]["options"],
         inline=self.config["compression"]["inline"],
         split=self.config["compression"]["split"],
     )
     return stream
Example #28
 def _open_stream(self, path, mode):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     path = os.path.join(self.target_directory, 'backup_data', path)
     compression_method = self.config['compression']['method']
     compression_level = self.config['compression']['level']
      if compression_method == 'none':
          compression_info = '(uncompressed)'
      else:
          compression_info = '(%s compressed level %d)' % \
                              (compression_method, compression_level)
     stream = open_stream(path, mode, compression_method, compression_level)
     LOG.info("Saving mysqldump output to %s %s",
             os.path.basename(stream.name), compression_info)
     return stream
Example #29
    def backup(self):
        """Run a database backup with xtrabackup"""
        defaults_file = os.path.join(self.target_directory, 'my.xtrabackup.cnf')
        args = [
            self.config['xtrabackup']['innobackupex'],
            '--defaults-file=%s' % defaults_file,
            '--stream=tar4ibd',
            tempfile.gettempdir(),
        ]

        if self.config['xtrabackup']['slave-info']:
            args.insert(3, '--slave-info')
        if self.config['xtrabackup']['no-lock']:
            args.insert(2, '--no-lock')

        LOG.info("%s", list2cmdline(args))

        if self.dry_run:
            return

        config = build_mysql_config(self.config['mysql:client'])
        write_options(config, defaults_file)
        shutil.copyfileobj(open(self.config['xtrabackup']['global-defaults'], 'r'),
                           open(defaults_file, 'a'))

        backup_path = os.path.join(self.target_directory, 'backup.tar')
        compression_stream = open_stream(backup_path, 'w',
                                         **self.config['compression'])
        error_log_path = os.path.join(self.target_directory, 'xtrabackup.log')
        error_log = open(error_log_path, 'wb')
        try:
            try:
                check_call(args,
                           stdout=compression_stream,
                           stderr=error_log,
                           close_fds=True)
            except OSError as exc:
                LOG.info("Command not found: %s", args[0])
                raise BackupError("%s not found. Is xtrabackup installed?" %
                                  args[0])
            except CalledProcessError as exc:
                LOG.info("%s failed", list2cmdline(exc.cmd))
                for line in open(error_log_path, 'r'):
                    if line.startswith('>>'):
                        continue
                    LOG.info("%s", line.rstrip())
                raise BackupError("%s failed" % exc.cmd[0])
Example #30
    def backup(self):
        """
        Use the internal '.dump' functionality built into SQLite to dump the
        pure ASCII SQL Text and write that to disk.
        """

        zopts = (
            self.config["compression"]["method"],
            int(self.config["compression"]["level"]),
            self.config["compression"]["options"],
            self.config["compression"]["inline"],
            self.config["compression"]["split"],
        )
        LOG.info("SQLite binary is [%s]", self.sqlite_bin)
        for database in self.databases:
            path = os.path.abspath(os.path.expanduser(database))

            if database in self.invalid_databases:
                LOG.warning("Skipping invalid SQLite database at [%s]", path)
                continue

            if self.dry_run:
                LOG.info("Backing up SQLite database at [%s] (dry run)", path)
                dest = open("/dev/null", "w")
            else:
                LOG.info("Backing up SQLite database at [%s]", path)
                dest = os.path.join(self.target_directory,
                                    "%s.sql" % os.path.basename(path))
                dest = open_stream(dest, "w", *zopts)

            process = Popen(
                [self.sqlite_bin, path, ".dump"],
                stdin=open("/dev/null", "r"),
                stdout=dest,
                stderr=PIPE,
            )
            _, stderroutput = process.communicate()
            dest.close()

            if process.returncode != 0:
                LOG.error(stderroutput)
                raise BackupError("SQLite '.dump' of [%s] failed" % path)

        # Raise for invalid databases after we successfully backup the others
        if self.invalid_databases:
            raise BackupError("Invalid database(s): %s" %
                              self.invalid_databases)
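
The zopts tuple above is expanded positionally, which assumes open_stream's parameter order after the path and mode is (method, level, extra_args, inline, split), consistent with the keyword-style calls elsewhere in this listing. The assumed-equivalent keyword form:

            # keyword spelling of open_stream(dest, "w", *zopts)
            dest = open_stream(dest, "w",
                               method=zopts[0],
                               level=zopts[1],
                               extra_args=zopts[2],
                               inline=zopts[3],
                               split=zopts[4])
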
Example #31
def backup_pgsql(backup_directory, config):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """
    backup_globals(backup_directory, config)

    databases = pg_databases(config)
    for dbname in databases:
        # FIXME: potential problems with weird database names
        #        Consider: 'foo/bar' or unicode names
        # FIXME: compression usually doesn't make sense with --format=custom
        stream = open_stream(dbname + '.dump', 'w', **config['compression'])
        run_pgdump(dbname, stream)
        stream.close()
Example #32
    def backup(self):
        """
        Do what is necessary to perform and validate a successful backup.
        """
        command = ["mongodump"]
        username = self.config["mongodump"].get("username")
        if username:
            command += ["-u", username]
            password = self.config["mongodump"].get("password")
            if password:
                # TODO: find a better way to inform the password
                command += ["-p", password]
        command += ["--host", self.config["mongodump"].get("host")]
        command += ["--out", self.target_directory]

        if self.dry_run:
            LOG.info("[Dry run] MongoDump Plugin - test backup run")
            LOG.info("MongoDump command: %s" %
                     subprocess.list2cmdline(command))
        else:
            LOG.info("MongoDump command: %s" %
                     subprocess.list2cmdline(command))
            logfile = open(
                os.path.join(self.target_directory, "mongodump.log"), "w")
            p = subprocess.Popen(command, stdout=logfile, stderr=logfile)
            ret = p.wait()

            if ret != 0:
                raise BackupError("Mongodump returned %d" % ret)

            zopts = self.config['compression']
            for root, _, files in os.walk(self.target_directory):
                for f in files:
                    if '.log' in f or '.conf' in f:
                        continue
                    path = os.path.join(root, f)
                    LOG.info("Compressing file %s" % path)
                    ostream = open_stream(path,
                                          'w',
                                          method=zopts['method'],
                                          level=zopts['level'],
                                          extra_args="")
                    with open(path, 'rb') as f:
                        ostream.write(f.read())
                    ostream.close()
                    os.remove(path)
Example #33
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config['xtrabackup']
     backup_directory = self.target_directory
     if config['stream'] in ('tar', 'tar4ibd', 'xbstream'):
         # XXX: bounce through compression
         if 'tar' in config['stream']:
             archive_path = join(backup_directory, 'backup.tar')
             zconfig = self.config['compression']
             return open_stream(archive_path, 'w',
                                     method=zconfig['method'],
                                     level=zconfig['level'])
         elif 'xbstream' in config['stream']:
             archive_path = join(backup_directory, 'backup.xb')
             return open(archive_path, 'w')
     else:
         return open('/dev/null', 'w')
Example #34
    def backup(self):
        """
        Do what is necessary to perform and validate a successful backup.
        """
        command = ["mongodump"]
        uri = self.config["mongodump"].get("uri")
        if uri and uri != ['']:
            command.extend(['--uri', ','.join(uri)])
        else:
            username = self.config["mongodump"].get("username")
            if username:
                command += ["-u", username]
                password = self.config["mongodump"].get("password")
                if password:
                    command += ["-p", password]
            command += ["--host", self.config["mongodump"].get("host")]
        command += ["--out", self.target_directory]
        add_options = self.config["mongodump"].get("additional-options")
        if add_options:
            command.extend(add_options)

        if self.dry_run:
            LOG.info("[Dry run] MongoDump Plugin - test backup run")
            LOG.info("MongoDump command: %s", subprocess.list2cmdline(command))
        else:
            LOG.info("MongoDump command: %s", subprocess.list2cmdline(command))
            logfile = open(os.path.join(self.target_directory, "mongodump.log"), "w")
            proc = subprocess.Popen(command, stdout=logfile, stderr=logfile)
            ret = proc.wait()

            if ret != 0:
                raise BackupError("Mongodump returned %d" % ret)
            for root, _, files in os.walk(self.target_directory):
                for file_object in files:
                    if ".log" in file_object or ".conf" in file_object:
                        continue
                    if ".gz" in file_object:
                        continue
                    path = os.path.join(root, file_object)
                    LOG.info("Compressing file %s", path)
                    ostream = open_stream(path, "w", **self.config["compression"])
                    with open(path, "rb") as file_object:
                        ostream.write(file_object.read())
                    ostream.close()
                    os.remove(path)
Example #35
 def open_mb_stdout(self):
     """Open the stdout output for a streaming mariabackup run"""
     config = self.config["mariabackup"]
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config["stream"])
     if stream:
         if stream == "tar":
             archive_path = join(backup_directory, "backup.tar")
         elif "stream" in stream:
             archive_path = join(backup_directory, "backup.mb")
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
         try:
             return open_stream(archive_path, "w", **self.config["compression"])
         except OSError as exc:
             raise BackupError("Unable to create output file: %s" % exc)
     else:
         return open("/dev/null", "w")
Example #36
 def _open_stream(self, path, mode, method=None):
     """Open a stream through the holland compression api, relative to
     this instance's target directory
     """
     path = str(os.path.join(self.target_directory, "backup_data", path))
     compression_method = method or self.config["compression"]["method"]
     compression_level = self.config["compression"]["level"]
     compression_options = self.config["compression"]["options"]
     compression_inline = self.config["compression"]["inline"]
     stream = open_stream(
         path,
         mode,
         compression_method,
         compression_level,
         extra_args=compression_options,
         inline=compression_inline,
     )
     return stream
Example #37
 def open_mb_stdout(self):
     """Open the stdout output for a streaming mariabackup run"""
     config = self.config["mariabackup"]
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config["stream"])
     if stream:
         if stream == "tar":
             archive_path = join(backup_directory, "backup.tar")
         elif stream == "xbstream":
             archive_path = join(backup_directory, "backup.mb")
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
         try:
             return open_stream(archive_path, "w", **self.config["compression"])
         except OSError as exc:
             raise BackupError("Unable to create output file: %s" % exc)
     else:
         return open("/dev/null", "w")
Example #38
 def backup(self):
     if self.dry_run:
         return self._dry_run()
     destination = os.path.join(self.directory, 'backup.tar')
     zopts = compression_options(self.config['compression'])
     destination = open_stream(destination, 'w', *zopts)
     LOGGER.info("Saving snapshot to %s (compression=%s)", destination.name, zopts[0])
     myauth = mysql_auth_options(self.config['mysql:client'])
     mylvmcfg = mysql_lvm_options(self.config)
     LOGGER.debug("Instantiating a new LVM snapshot lifecycle")
     lifecycle = mysql_snapshot_lifecycle(destination,
                                          mysql_auth=myauth,
                                          replication_info_callback=self._save_replication_info,
                                          **mylvmcfg
                                         )
     try:
         lifecycle.run()
     except EnvironmentError as exc:
         raise BackupError(str(exc))
Example #39
File: base.py Project: abg/holland
def backup_globals(backup_directory, config, connection_params, env=None):
    """Backup global Postgres data that wouldn't otherwise
    be captured by pg_dump.

    Runs pg_dumpall -g > $backup_dir/globals.sql

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """

    path = os.path.join(backup_directory, 'global.sql')
    zopts = config['compression']
    output_stream = open_stream(path, 'w',
                                method=zopts['method'],
                                level=zopts['level'],
                                extra_args=zopts['options'])

    args = [
        'pg_dumpall',
        '-g',
    ] + connection_params

    LOG.info('%s > %s', subprocess.list2cmdline(args),
                        output_stream.name)
    stderr = tempfile.TemporaryFile()
    try:
        try:
            returncode = subprocess.call(args,
                                         stdout=output_stream,
                                         stderr=stderr,
                                         env=env,
                                         close_fds=True)
        except OSError as exc:
            raise PgError("Failed to execute '%s': [%d] %s" %
                          (args[0], exc.errno, exc.strerror))

        output_stream.close()
        stderr.flush()
        stderr.seek(0)
        for line in stderr:
            LOG.error('%s', line.rstrip())
    finally:
        stderr.close()

    if returncode != 0:
        raise PgError("pg_dumpall command exited with failure code %d." %
                      returncode)
Example #40
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config['xtrabackup']
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config['stream'])
     if stream:
         # XXX: bounce through compression
         if stream == 'tar':
             archive_path = join(backup_directory, 'backup.tar')
             zconfig = self.config['compression']
             return open_stream(archive_path, 'w',
                                method=zconfig['method'],
                                level=zconfig['level'])
         elif stream == 'xbstream':
             archive_path = join(backup_directory, 'backup.xb')
             return open(archive_path, 'w')
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
     else:
         return open('/dev/null', 'w')
Example #41
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config['xtrabackup']
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config['stream'])
     if stream:
         if stream == 'tar':
             archive_path = join(backup_directory, 'backup.tar')
         elif stream == 'xbstream':
             archive_path = join(backup_directory, 'backup.xb')
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
         zconfig = self.config['compression']
         try:
             return open_stream(archive_path, 'w',
                                method=zconfig['method'],
                                level=zconfig['level'],
                                extra_args=zconfig['options'])
          except OSError as exc:
             raise BackupError("Unable to create output file: %s" % exc)
Example #42
 def open_xb_stdout(self):
     """Open the stdout output for a streaming xtrabackup run"""
     config = self.config['xtrabackup']
     backup_directory = self.target_directory
     stream = util.determine_stream_method(config['stream'])
     if stream:
         if stream == 'tar':
             archive_path = join(backup_directory, 'backup.tar')
         elif stream == 'xbstream':
             archive_path = join(backup_directory, 'backup.xb')
         else:
             raise BackupError("Unknown stream method '%s'" % stream)
         zconfig = self.config['compression']
         try:
             return open_stream(archive_path,
                                'w',
                                method=zconfig['method'],
                                level=zconfig['level'],
                                extra_args=zconfig['options'])
          except OSError as exc:
             raise BackupError("Unable to create output file: %s" % exc)
Example #43
    def _open_stream(self, path, mode, method=None):
        """Open a stream through the holland compression api, relative to
        this instance's target directory
        """
        compression_method = method or self.config['compression']['method']
        compression_level = self.config['compression']['level']
        compression_options = self.config['compression']['options']
        stream = open_stream(path,
                             mode,
                             compression_method,
                             compression_level,
                             extra_args=compression_options)
        return stream

    def backup(self):
        if self.dry_run:
            return
        if not os.path.exists(self.config['tar']['directory']) \
                or not os.path.isdir(self.config['tar']['directory']):
            raise BackupError('{0} is not a directory!'.format(
                self.config['tar']['directory']))
        out_name = "{0}.tar".format(
            self.config['tar']['directory'].lstrip('/').replace('/', '_'))
        outfile = os.path.join(self.target_directory, out_name)
        args = ['tar', 'c', self.config['tar']['directory']]
        errlog = TemporaryFile()
        stream = self._open_stream(outfile, 'w')
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(args,
                    stdout=stream.fileno(),
                    stderr=errlog.fileno(),
                    close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
        finally:
            errlog.close()

        if status != 0:
            raise BackupError("tar failed (status={0})".format(status))
Example #44
    def backup(self):
        """
        Use the internal '.dump' functionality built into SQLite to dump the 
        pure ASCII SQL Text and write that to disk.
        """

        zopts = (self.config['compression']['method'],
                 int(self.config['compression']['level']))
        LOG.info("SQLite binary is [%s]" % self.sqlite_bin)
        for db in self.databases:
            path = os.path.abspath(os.path.expanduser(db))

            if db in self.invalid_databases:
                LOG.warn("Skipping invalid SQLite database at [%s]" % path)
                continue

            if self.dry_run:
                LOG.info("Backing up SQLite database at [%s] (dry run)" % path)
                dest = open('/dev/null', 'w')
            else:
                LOG.info("Backing up SQLite database at [%s]" % path)
                dest = os.path.join(self.target_directory, '%s.sql' % \
                                    os.path.basename(path))
                dest = open_stream(dest, 'w', *zopts)

            process = Popen([self.sqlite_bin, path, '.dump'],
                            stdin=open('/dev/null', 'r'),
                            stdout=dest,
                            stderr=PIPE)
            _, stderroutput = process.communicate()
            dest.close()

            if process.returncode != 0:
                LOG.error(stderroutput)
                raise BackupError("SQLite '.dump' of [%s] failed" % path)

        # Raise for invalid databases after we successfully backup the others
        if len(self.invalid_databases) > 0:
            raise BackupError("Invalid database(s): %s" %
                              self.invalid_databases)
Example #45
    def backup(self):
        """
        Use the internal '.dump' functionality built into SQLite to dump the
        pure ASCII SQL Text and write that to disk.
        """

        LOG.info("SQLite binary is [%s]", self.sqlite_bin)
        for database in self.databases:
            path = os.path.abspath(os.path.expanduser(database))

            if database in self.invalid_databases:
                LOG.warning("Skipping invalid SQLite database at [%s]", path)
                continue

            if self.dry_run:
                LOG.info("Backing up SQLite database at [%s] (dry run)", path)
                dest = open("/dev/null", "w")
            else:
                LOG.info("Backing up SQLite database at [%s]", path)
                dest = os.path.join(self.target_directory, "%s.sql" % os.path.basename(path))
                dest = open_stream(dest, "w", **self.config["compression"])

            process = Popen(
                [self.sqlite_bin, path, ".dump"],
                stdin=open("/dev/null", "r"),
                stdout=dest,
                stderr=PIPE,
            )
            _, stderroutput = process.communicate()
            dest.close()

            if process.returncode != 0:
                LOG.error(stderroutput)
                raise BackupError("SQLite '.dump' of [%s] failed" % path)

        # Raise for invalid databases after we successfully backup the others
        if self.invalid_databases:
            raise BackupError("Invalid database(s): %s" % self.invalid_databases)
Example #46
    def backup(self):
        """
        Use the internal '.dump' functionality built into SQLite to dump the 
        pure ASCII SQL Text and write that to disk.
        """
        
        zopts = (self.config['compression']['method'], 
                 int(self.config['compression']['level']))
        LOG.info("SQLite binary is [%s]" % self.sqlite_bin)         
        for db in self.databases:
            path = os.path.abspath(os.path.expanduser(db))
            
            if db in self.invalid_databases:
                LOG.warn("Skipping invalid SQLite database at [%s]" % path)
                continue
            
            if self.dry_run:
                LOG.info("Backing up SQLite database at [%s] (dry run)" % path)
                dest = open('/dev/null', 'w')
            else:
                LOG.info("Backing up SQLite database at [%s]" % path)
                dest = os.path.join(self.target_directory, '%s.sql' % \
                                    os.path.basename(path))                    
                dest = open_stream(dest, 'w', *zopts)
                
            process = Popen([self.sqlite_bin, path, '.dump'], 
                            stdin=open('/dev/null', 'r'), stdout=dest, 
                            stderr=PIPE)
            _, stderroutput = process.communicate()
            dest.close()

            if process.returncode != 0:
                LOG.error(stderroutput)
                raise BackupError("SQLite '.dump' of [%s] failed" % path)

        # Raise for invalid databases after we successfully backup the others
        if len(self.invalid_databases) > 0:
            raise BackupError("Invalid database(s): %s" % self.invalid_databases)
Example #47
def backup_globals(backup_directory, config):
    """Backup global Postgres data that wouldn't otherwise
    be captured by pg_dump.

    Runs pg_dumpall -g > $backup_dir/global.sql

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """

    path = os.path.join(backup_directory, 'global.sql')
    output_stream = open_stream(path, 'w', **config['compression'])

    # FIXME: use PGPASSFILE
    returncode = subprocess.call(['pg_dumpall', '-g'],
                                 stdout=output_stream,
                                 stderr=open('pgdump.err', 'a'),
                                 close_fds=True)
    output_stream.close()
    if returncode != 0:
        raise PgError("pg_dumpall exited with non-zero status[%d]" %
                      returncode)
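One way the FIXME above could be addressed is to point libpq at a password
file through the child environment. PGPASSFILE is a standard libpq variable;
the helper below is a sketch of that idea, not the plugin's actual code:

import os
import subprocess

def call_pg_dumpall_globals(output_stream, pgpassfile=None):
    """Run pg_dumpall -g without an interactive password prompt."""
    # Copy the parent environment and, if given, point libpq at a
    # ~/.pgpass-style password file.
    env = dict(os.environ)
    if pgpassfile is not None:
        env["PGPASSFILE"] = pgpassfile
    return subprocess.call(["pg_dumpall", "-g"], stdout=output_stream, env=env)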
Example No. 48
def backup_globals(backup_directory, config, connection_params, env=None):
    """Backup global Postgres data that wouldn't otherwise
    be captured by pg_dump.

    Runs pg_dumpall -g > $backup_dir/global.sql

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """

    path = os.path.join(backup_directory, 'global.sql')
    output_stream = open_stream(path, 'w', **config['compression'])

    args = [
        'pg_dumpall',
        '-g',
    ] + connection_params

    LOG.info('%s > %s', subprocess.list2cmdline(args), output_stream.name)
    stderr = tempfile.TemporaryFile()
    returncode = subprocess.call(args,
                                 stdout=output_stream,
                                 stderr=stderr,
                                 env=env,
                                 close_fds=True)
    output_stream.close()
    stderr.flush()
    stderr.seek(0)
    for line in stderr:
        LOG.error('%s', line.rstrip())
    stderr.close()

    if returncode != 0:
        raise PgError("pg_dumpall command exited with failure code %d." %
                      returncode)
Example No. 50
def backup_globals(backup_directory, config, connection_params, env=None):
    """Backup global Postgres data that wouldn't otherwise
    be captured by pg_dump.

    Runs pg_dumpall -g > $backup_dir/global.sql

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :raises: OSError, PgError on error
    """

    path = os.path.join(backup_directory, "global.sql")
    output_stream = open_stream(path, "w", **config["compression"])

    args = ["pg_dumpall", "-g"] + connection_params

    LOG.info("%s > %s", subprocess.list2cmdline(args), output_stream.name)
    stderr = tempfile.TemporaryFile()
    try:
        try:
            returncode = subprocess.call(
                args, stdout=output_stream, stderr=stderr, env=env, close_fds=True
            )
        except OSError as exc:
            raise PgError("Failed to execute '%s': [%d] %s"
                          % (args[0], exc.errno, exc.strerror))

        output_stream.close()
        stderr.flush()
        stderr.seek(0)
        for line in stderr:
            # tempfile.TemporaryFile() is opened in binary mode by default,
            # so decode each captured stderr line before logging it
            LOG.error("%s", line.rstrip().decode("utf-8", "replace"))
    finally:
        stderr.close()

    if returncode != 0:
        raise PgError("pg_dumpall command exited with failure code %d." % returncode)
Example No. 51
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
    """Setup actions for a LVM snapshot based on the provided
    configuration.

    Optional actions:
        * MySQL locking
        * InnoDB recovery
        * Recording MySQL replication
    """
    if config['mysql-lvm']['lock-tables']:
        extra_flush = config['mysql-lvm']['extra-flush-tables']
        act = FlushAndLockMySQLAction(client, extra_flush)
        snapshot.register('pre-snapshot', act, priority=100)
        snapshot.register('post-snapshot', act, priority=100)
    if config['mysql-lvm'].get('replication', True):
        repl_cfg = config.setdefault('mysql:replication', {})
        act = RecordMySQLReplicationAction(client, repl_cfg)
        snapshot.register('pre-snapshot', act, 0)
    if config['mysql-lvm']['innodb-recovery']:
        mysqld_config = dict(config['mysqld'])
        mysqld_config['datadir'] = snap_datadir
        if not mysqld_config['tmpdir']:
            mysqld_config['tmpdir'] = tempfile.gettempdir()
        ib_log_size = client.show_variable('innodb_log_file_size')
        mysqld_config['innodb-log-file-size'] = ib_log_size
        act = InnodbRecoveryAction(mysqld_config)
        snapshot.register('post-mount', act, priority=100)

    archive_stream = open_stream(os.path.join(spooldir, 'backup.tar'),
                                 'w',
                                 **config['compression'])
    act = TarArchiveAction(snap_datadir, archive_stream, config['tar'])
    snapshot.register('post-mount', act, priority=50)

    snapshot.register('pre-remove', log_final_snapshot_size)
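The register() calls above assume an event hub that runs actions in priority
order at each lifecycle stage (pre-snapshot, post-mount, pre-remove, and so
on). The sketch below illustrates that interface; it is an assumption about
the contract, not the framework's actual Snapshot class, and whether larger
priority values run first is likewise assumed:

from collections import defaultdict

class SnapshotEvents:
    """Toy priority-ordered callback registry (illustrative only)."""

    def __init__(self):
        self._callbacks = defaultdict(list)

    def register(self, event, callback, priority=0):
        self._callbacks[event].append((priority, callback))

    def fire(self, event, *args, **kwargs):
        # Assumed ordering: higher priority values run earlier.
        for _, callback in sorted(self._callbacks[event],
                                  key=lambda item: item[0], reverse=True):
            callback(*args, **kwargs)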
Example No. 52
    def test_compression(self):
        """Test Compression methods"""
        # gzip - write it, read it, verify it
        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "gzip_foo"), "w", "gzip")
        filep.write(bytes("foo", "ascii"))
        filep.close()

        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "gzip_foo"), "r", "gzip")
        foo_object = filep.read(3)
        filep.close()

        self.assertEqual(foo_object.decode(), "foo")

        # bzip2 - write it, read it, verify it
        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "bzip2_foo"), "w", "bzip2")
        filep.write(bytes("foo", "ascii"))
        filep.close()

        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "bzip2_foo"), "r", "bzip2")
        foo_object = filep.read(3)
        filep.close()

        self.assertEqual(foo_object.decode(), "foo")

        # lzop - write it, read it, verify it
        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "lzop_foo"), "w", "lzop")
        filep.write(bytes("foo", "ascii"))
        filep.close()

        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "lzop_foo"), "r", "lzop")
        foo_object = filep.read(3)
        filep.close()

        self.assertEqual(foo_object.decode(), "foo")
Example No. 53
def test_compression():
    global tmpdir

    # gzip - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'gzip_foo'), 'w', 'gzip')
    f.write('foo')
    f.close()

    f = compression.open_stream(os.path.join(tmpdir, 'gzip_foo'), 'r', 'gzip')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')

    # bzip2 - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'bzip2_foo'), 'w',
                                'bzip2')
    f.write('foo')
    f.close()

    f = compression.open_stream(os.path.join(tmpdir, 'bzip2_foo'), 'r',
                                'bzip2')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')

    # lzop - write it, read it, verify it
    f = compression.open_stream(os.path.join(tmpdir, 'lzop_foo'), 'w', 'lzop')
    f.write('foo')
    f.close()

    f = compression.open_stream(os.path.join(tmpdir, 'lzop_foo'), 'r', 'lzop')
    foo = f.read(3)
    f.close()

    ok_(foo == 'foo')
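The write/read/verify pattern above repeats once per compression method and
could be collapsed into one parametrized helper; a sketch in the same nose
style, assuming the same module-level tmpdir, the compression import, and
nose's ok_:

def roundtrip(method):
    """Write 'foo' through open_stream and read it back with one method."""
    path = os.path.join(tmpdir, method + '_foo')
    f = compression.open_stream(path, 'w', method)
    f.write('foo')
    f.close()
    f = compression.open_stream(path, 'r', method)
    data = f.read(3)
    f.close()
    ok_(data == 'foo')

def test_compression_roundtrip():
    for method in ('gzip', 'bzip2', 'lzop'):
        roundtrip(method)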
Example No. 54
    def test_compression_bad_mode(self):
        """Test bad mode"""
        filep = compression.open_stream(
            os.path.join(self.__class__.tmpdir, "foo"), "w", "gzip")
        filep.write(bytes("foo", "ascii"))
        filep.close()
Example No. 55
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
    """Setup actions for a LVM snapshot based on the provided
    configuration.

    Optional actions:
        * MySQL locking
        * InnoDB recovery
        * Recording MySQL replication
    """
    mysql = connect_simple(config["mysql:client"])
    try:
        pathinfo = None
        if mysql.show_variable("have_innodb") == "YES":
            pathinfo = MySQLPathInfo.from_mysql(mysql)
    finally:
        # close the connection even when the InnoDB checks are skipped
        mysql.close()
    if pathinfo is not None:
        try:
            check_innodb(pathinfo, ensure_subdir_of_datadir=True)
        except BackupError:
            if not config["mysql-lvm"]["force-innodb-backup"]:
                raise

    if config["mysql-lvm"]["lock-tables"]:
        extra_flush = config["mysql-lvm"]["extra-flush-tables"]
        act = FlushAndLockMySQLAction(client, extra_flush)
        snapshot.register("pre-snapshot", act, priority=100)
        snapshot.register("post-snapshot", act, priority=100)
    if config["mysql-lvm"].get("replication", True):
        repl_cfg = config.setdefault("mysql:replication", {})
        act = RecordMySQLReplicationAction(client, repl_cfg)
        snapshot.register("pre-snapshot", act, 0)
    if config["mysql-lvm"]["innodb-recovery"]:
        mysqld_config = dict(config["mysqld"])
        mysqld_config["datadir"] = snap_datadir
        if not mysqld_config["tmpdir"]:
            mysqld_config["tmpdir"] = tempfile.gettempdir()
        ib_log_size = client.show_variable("innodb_log_file_size")
        mysqld_config["innodb-log-file-size"] = ib_log_size
        act = InnodbRecoveryAction(mysqld_config)
        snapshot.register("post-mount", act, priority=100)
    if config["mysql-lvm"]["archive-method"] == "dir":
        try:
            backup_datadir = os.path.join(spooldir, "backup_data")
            os.mkdir(backup_datadir)
        except OSError as exc:
            raise BackupError("Unable to create archive directory '%s': %s" %
                              (backup_datadir, exc))
        act = DirArchiveAction(snap_datadir, backup_datadir, config["tar"])
        snapshot.register("post-mount", act, priority=50)
    else:
        try:
            archive_stream = open_stream(
                os.path.join(spooldir, "backup.tar"),
                "w",
                method=config["compression"]["method"],
                level=config["compression"]["level"],
                extra_args=config["compression"]["options"],
                inline=config["compression"]["inline"],
                split=config["compression"]["split"],
            )
        except OSError as exc:
            raise BackupError("Unable to create archive file '%s': %s" %
                              (os.path.join(spooldir, "backup.tar"), exc))
        act = TarArchiveAction(snap_datadir, archive_stream, config["tar"])
        snapshot.register("post-mount", act, priority=50)

    snapshot.register("pre-remove", log_final_snapshot_size)
Example No. 56
            mysqld_config['tmpdir'] = tempfile.gettempdir()
        ib_log_size = client.show_variable('innodb_log_file_size')
        mysqld_config['innodb-log-file-size'] = ib_log_size
        act = InnodbRecoveryAction(mysqld_config)
        snapshot.register('post-mount', act, priority=100)
    if config['mysql-lvm']['archive-method'] == "dir":
        try:
            backup_datadir = os.path.join(spooldir, 'backup_data')
            os.mkdir(backup_datadir)
        except OSError, exc:
            raise BackupError("Unable to create archive directory '%s': %s" %
                              (backup_datadir, exc))

        act = DirArchiveAction(snap_datadir, backup_datadir, config['tar'])
        snapshot.register('post-mount', act, priority=50)
    else:
        try:
            archive_stream = open_stream(
                os.path.join(spooldir, 'backup.tar'),
                'w',
                method=config['compression']['method'],
                level=config['compression']['level'],
                extra_args=config['compression']['options'])
        except OSError, exc:
            raise BackupError("Unable to create archive file '%s': %s" %
                              (os.path.join(spooldir, 'backup.tar'), exc))
        act = TarArchiveAction(snap_datadir, archive_stream, config['tar'])
        snapshot.register('post-mount', act, priority=50)

    snapshot.register('pre-remove', log_final_snapshot_size)