Example #1
    def move_to(self, dst):
        if not os.path.exists(dst):
            LOG.debug("creating %s" % dst)
            os.makedirs(dst)

        for config in ['postgresql.conf', 'pg_ident.conf', 'pg_hba.conf']:
            old_config = os.path.join(self.path, config)
            new_config = os.path.join(dst, config)
            if os.path.exists(old_config):
                LOG.debug('Moving %s' % config)
                shutil.move(old_config, new_config)
            elif os.path.exists(new_config):
                LOG.debug('%s is already in place. Skipping.' % config)
            else:
                raise BaseException('Postgresql config file not found: %s' %
                                    old_config)
            chown_r(new_config, DEFAULT_USER)

        #the following block needs revision

        #self._make_symlinks(dst)
        self._patch_sysconfig(dst)

        self.path = dst

        LOG.debug("configuring pid")
        conf = PostgresqlConf.find(self)
        conf.pid_file = os.path.join(dst, 'postmaster.pid')
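
All of these snippets call a recursive ownership helper named chown_r, imported from the agent's utility modules; its definition is not shown on this page. As a point of reference only, a minimal sketch of such a helper, assuming it resolves user and group names with pwd/grp and walks the tree with os.walk, might look like this:

import os
import pwd
import grp

def chown_r(path, user, group=None):
    # Hypothetical sketch, not the library's actual implementation:
    # resolve the symbolic names to numeric ids once, then apply them
    # to the path itself and to everything beneath it.
    pw = pwd.getpwnam(user)
    uid = pw.pw_uid
    gid = grp.getgrnam(group).gr_gid if group else pw.pw_gid
    os.chown(path, uid, gid)  # also covers single files (os.walk yields nothing for them)
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            os.chown(os.path.join(root, name), uid, gid)

Several examples below pass only a user (chown_r(pid_file, 'redis')), while the MySQL ones pass both a user and a group (chown_r(dest, "mysql", "mysql")), which is why the sketch defaults the group to the user's primary group.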
Example #2
    def start(cls):
        if not cls.is_running():
            cls._logger.info("Starting %s process" % MONGOS)
            args = [
                "sudo",
                "-u",
                DEFAULT_USER,
                MONGOS,
                "--fork",
                "--logpath",
                ROUTER_LOG_PATH,
                "--configdb",
                "mongo-0-0:%s" % CONFIG_SERVER_DEFAULT_PORT,
            ]
            if cls.keyfile and os.path.exists(cls.keyfile):
                chown_r(cls.keyfile, DEFAULT_USER)
                args.append("--keyFile=%s" % cls.keyfile)

            if cls.verbose and isinstance(cls.verbose, int) and 0 < cls.verbose < 6:
                args.append("-" + "v" * cls.verbose)

            if os.path.exists(ROUTER_LOG_PATH):
                chown_r(ROUTER_LOG_PATH, DEFAULT_USER)

            system2(args, close_fds=True, preexec_fn=mongo_preexec_fn)
            wait_until(lambda: cls.is_running(), timeout=MAX_START_TIMEOUT)
            wait_until(lambda: cls.get_cli().has_connection, timeout=MAX_START_TIMEOUT)
            cls._logger.debug("%s process has been started." % MONGOS)
Example #3
def get_pidfile(port=__redis__['defaults']['port']):

    pid_file = os.path.join(__redis__['pid_dir'], 'redis-server.%s.pid' % port)
    if not os.path.exists(pid_file):  # fix for ubuntu1004
        open(pid_file, 'w').close()
        chown_r(pid_file, 'redis')
    return pid_file
Example #4
 def move_to(self, dst, move_files=True):
     new_cluster_dir = os.path.join(dst, STORAGE_DATA_DIR)
     
     if not os.path.exists(dst):
         LOG.debug('Creating directory structure for postgresql cluster: %s' % dst)
         os.makedirs(dst)
     
     if move_files:
         source = self.path 
         if not os.path.exists(self.path):
             source = self.default_path
             LOG.debug('data_directory in postgresql.conf points to non-existing location, using %s instead' % source)
         if source != new_cluster_dir:
             LOG.debug("copying cluster files from %s into %s" % (source, new_cluster_dir))
             shutil.copytree(source, new_cluster_dir)    
     LOG.debug("changing directory owner to %s" % self.user)
     chown_r(dst, self.user)
     
     LOG.debug("Changing postgres user`s home directory")
     if disttool.is_redhat_based():
         #looks like ubuntu doesn`t need this
         system2([USERMOD, '-d', new_cluster_dir, self.user]) 
         
     self.path = new_cluster_dir
 
     return new_cluster_dir
Example #5
    def move_to(self, dst, move_files=True):
        new_cluster_dir = os.path.join(dst, STORAGE_DATA_DIR)

        if not os.path.exists(dst):
            LOG.debug(
                'Creating directory structure for postgresql cluster: %s' %
                dst)
            os.makedirs(dst)

        if move_files:
            source = self.path
            if not os.path.exists(self.path):
                source = self.default_path
                LOG.debug(
                    'data_directory in postgresql.conf points to non-existing location, using %s instead'
                    % source)
            if source != new_cluster_dir:
                LOG.debug("copying cluster files from %s into %s" %
                          (source, new_cluster_dir))
                shutil.copytree(source, new_cluster_dir)
        LOG.debug("changing directory owner to %s" % self.user)
        chown_r(dst, self.user)

        LOG.debug("Changing postgres user`s home directory")
        if linux.os.redhat_family:
            #looks like ubuntu doesn`t need this
            system2([USERMOD, '-d', new_cluster_dir, self.user])

        self.path = new_cluster_dir

        return new_cluster_dir
Example #6
    def move_mysqldir_to(self, storage_path):
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
                        ('mysqld/log_bin', os.path.join(storage_path,STORAGE_BINLOG)),
                        ('mysqld/datadir', os.path.join(storage_path,STORAGE_DATA_DIR) + '/')
                        ):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' % (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                if raw_value:
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
                            if not system2((selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'), 'mysqld_disable_trans'), raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P', 'mysqld_disable_trans', '1'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'', src_dir, dest)
                        rsync(src_dir, dest, archive=True, exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Example #7
    def move_to(self, dst):
        if not os.path.exists(dst):
            LOG.debug("creating %s" % dst)
            os.makedirs(dst)
        
        for config in ['postgresql.conf', 'pg_ident.conf', 'pg_hba.conf']:
            old_config = os.path.join(self.path, config)
            new_config = os.path.join(dst, config)
            if os.path.exists(old_config):
                LOG.debug('Moving %s' % config)
                shutil.move(old_config, new_config)
            elif os.path.exists(new_config):
                LOG.debug('%s is already in place. Skipping.' % config)
            else:
                raise BaseException('Postgresql config file not found: %s' % old_config)
            chown_r(new_config, DEFAULT_USER)

        #the following block needs revision
        
        #self._make_symlinks(dst)
        self._patch_sysconfig(dst)
        
        self.path = dst
        
        LOG.debug("configuring pid")
        conf = PostgresqlConf.find(self)
        conf.pid_file = os.path.join(dst, 'postmaster.pid')
Example #8
    def init_service(self, mpoint):
        if not os.path.exists(mpoint):
            os.makedirs(mpoint)
            LOG.debug('Created directory structure for redis db files: %s' %
                      mpoint)

        chown_r(mpoint, __redis__['defaults']['user'])

        self.redis_conf.requirepass = self.password
        self.redis_conf.daemonize = True
        self.redis_conf.dir = mpoint
        self.redis_conf.bind = None
        self.redis_conf.port = self.port
        self.redis_conf.pidfile = get_pidfile(self.port)

        persistence_type = __redis__["persistence_type"]
        if persistence_type == 'snapshotting':
            self.redis_conf.appendonly = False
            self.redis_conf.dbfilename = get_snap_db_filename(self.port)
            self.redis_conf.appendfilename = None
        elif persistence_type == 'aof':
            aof_path = get_aof_db_filename(self.port)
            self.redis_conf.appendonly = True
            self.redis_conf.appendfilename = aof_path
            self.redis_conf.dbfilename = None
            self.redis_conf.save = {}
        elif persistence_type == 'nopersistence':
            self.redis_conf.dbfilename = get_snap_db_filename(self.port)
            self.redis_conf.appendonly = False
            self.redis_conf.appendfsync = 'no'
            self.redis_conf.save = {}
            assert not self.redis_conf.save
        LOG.debug('Persistence type is set to %s' % persistence_type)
Example #9
    def init_service(self, mpoint):
        if not os.path.exists(mpoint):
            os.makedirs(mpoint)
            LOG.debug('Created directory structure for redis db files: %s' % mpoint)

        chown_r(mpoint, __redis__['defaults']['user'])

        self.redis_conf.requirepass = self.password
        self.redis_conf.daemonize = True
        self.redis_conf.dir = mpoint
        self.redis_conf.bind = None
        self.redis_conf.port = self.port
        self.redis_conf.pidfile = get_pidfile(self.port)

        persistence_type = __redis__["persistence_type"]
        if persistence_type == 'snapshotting':
            self.redis_conf.appendonly = False
            self.redis_conf.dbfilename = get_snap_db_filename(self.port)
            self.redis_conf.appendfilename = None
        elif persistence_type == 'aof':
            aof_path = get_aof_db_filename(self.port)
            self.redis_conf.appendonly = True
            self.redis_conf.appendfilename = aof_path
            self.redis_conf.dbfilename = None
            self.redis_conf.save = {}
        elif persistence_type == 'nopersistence':
            self.redis_conf.dbfilename = get_snap_db_filename(self.port)
            self.redis_conf.appendonly = False
            self.redis_conf.appendfsync = 'no'
            self.redis_conf.save = {}
            assert not self.redis_conf.save
        LOG.debug('Persistence type is set to %s' % persistence_type)
Example #10
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')
                
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  #?
                    dumps.append(dump_path)


                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

                suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))
                trn = LargeTransfer(dumps, cloud_storage_path, tags=backup_tags)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                    
                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='ok',
                                             backup_parts=result))

                return result
                            
            except (Exception, BaseException), e:
                LOG.exception(e)
                
                # Notify Scalr about error
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='error',
                                             last_error=str(e)))
Example #11
 def rollback_hook(self):
     try:
         pwd.getpwnam("apache")
         uname = "apache"
     except:
         uname = "www-data"
     for obj in self.config_data:
         coreutils.chown_r(obj.path, uname)
Example #12
    def do_backup(self):
        tmpdir = None
        dumps = []
        tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            if not os.path.exists(tmp_path):
                os.makedirs(tmp_path)

            # Dump all databases
            LOG.info("Dumping all databases")
            tmpdir = tempfile.mkdtemp(dir=tmp_path)
            chown_r(tmpdir, self.postgresql.root_user.name)

            def _single_backup(db_name):
                dump_path = tmpdir + os.sep + db_name + '.sql'
                pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                err = system2(su_args)[1]
                if err:
                    raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  # ?
                dumps.append(dump_path)

            for db_name in databases:
                _single_backup(db_name)

            cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

            suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
            backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

            LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))

            def progress_cb(progress):
                LOG.debug('Uploading %s bytes' % progress)

            uploader = largetransfer.Upload(dumps, cloud_storage_path, progress_cb=progress_cb)
            try:
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                LOG.debug(manifest.data)

                return transfer_result_to_backup_result(manifest)
            except:
                uploader.terminate()
                raise
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir, ignore_errors=True)
Example #13
def create_redis_conf_copy(port=__redis__['defaults']['port']):
    if not os.path.exists(__redis__['defaults']['redis.conf']):
        raise ServiceError('Default redis config %s does not exist' % __redis__['defaults']['redis.conf'])
    dst = get_redis_conf_path(port)
    if not os.path.exists(dst):
        LOG.debug('Copying %s to %s.' % (__redis__['defaults']['redis.conf'],dst))
        shutil.copy(__redis__['defaults']['redis.conf'], dst)
        chown_r(dst, 'redis')
    else:
        LOG.debug('%s already exists.' % dst)
Example #14
def create_redis_conf_copy(port=__redis__['defaults']['port']):
    if not os.path.exists(__redis__['defaults']['redis.conf']):
        raise ServiceError('Default redis config %s does not exist' % __redis__['defaults']['redis.conf'])
    dst = get_redis_conf_path(port)
    if not os.path.exists(dst):
        LOG.debug('Copying %s to %s.' % (__redis__['defaults']['redis.conf'],dst))
        shutil.copy(__redis__['defaults']['redis.conf'], dst)
        chown_r(dst, 'redis')
    else:
        LOG.debug('%s already exists.' % dst)
Example #15
def get_pidfile(port=__redis__['defaults']['port']):

    pid_file = os.path.join(__redis__['pid_dir'], 'redis-server.%s.pid' % port)
    '''
    fix for ubuntu1004
    '''
    if not os.path.exists(pid_file):
        open(pid_file, 'w').close()
    chown_r(pid_file, 'redis')
    return pid_file
Example #16
def get_pidfile(port=DEFAULT_PORT):

    pid_file = os.path.join(DEFAULT_PID_DIR,'redis-server.%s.pid' % port)
    '''
    fix for ubuntu1004
    '''
    if not os.path.exists(pid_file):
        open(pid_file, 'w').close()
    chown_r(pid_file, 'redis')
    return pid_file
Example #17
 def apply_private_ssh_key(self,source_path=None):
     source_path = source_path or self.private_key_path
     if not os.path.exists(source_path):
         LOG.error('Cannot apply private ssh key: source %s not found' % source_path)
     else:
         if not os.path.exists(self.ssh_dir):
             os.makedirs(self.ssh_dir)
             chown_r(self.ssh_dir, self.name)
             
         dst = os.path.join(self.ssh_dir, 'id_rsa')
         shutil.copyfile(source_path, dst)
         os.chmod(dst, 0400)
         chown_r(dst, self.name)
Example #18
 def _prepare_arbiter(self, rs_name):
     if os.path.isdir(ARBITER_DATA_DIR):
         shutil.rmtree(ARBITER_DATA_DIR)
     self._logger.debug("Creating datadir for arbiter: %s" % ARBITER_DATA_DIR)
     os.makedirs(ARBITER_DATA_DIR)
     chown_r(ARBITER_DATA_DIR, DEFAULT_USER)
     self._logger.debug("Preparing arbiter's config file")
     self.arbiter_conf.dbpath = ARBITER_DATA_DIR
     self.arbiter_conf.replSet = rs_name
     self.arbiter_conf.shardsvr = True
     self.arbiter_conf.port = ARBITER_DEFAULT_PORT
     self.arbiter_conf.logpath = ARBITER_LOG_PATH
     self.arbiter_conf.nojournal = True
Example #19
 def _prepare_arbiter(self, rs_name):
     if os.path.isdir(ARBITER_DATA_DIR):
         shutil.rmtree(ARBITER_DATA_DIR)
     self._logger.debug('Creating datadir for arbiter: %s' %
                        ARBITER_DATA_DIR)
     os.makedirs(ARBITER_DATA_DIR)
     chown_r(ARBITER_DATA_DIR, DEFAULT_USER)
     self._logger.debug("Preparing arbiter's config file")
     self.arbiter_conf.dbpath = ARBITER_DATA_DIR
     self.arbiter_conf.replSet = rs_name
     self.arbiter_conf.shardsvr = True
     self.arbiter_conf.port = ARBITER_DEFAULT_PORT
     self.arbiter_conf.logpath = ARBITER_LOG_PATH
     self.arbiter_conf.nojournal = True
Example #20
    def move_mysqldir_to(self, storage_path):
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
            ('mysqld/log_bin', os.path.join(storage_path, STORAGE_BINLOG)),
            ('mysqld/datadir',
             os.path.join(storage_path, STORAGE_DATA_DIR) + '/')):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' %
                         (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                if raw_value and node.__node__['platform'] != 'openstack':
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
                            if not system2(
                                (selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'),
                                                'mysqld_disable_trans'),
                                               raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P',
                                             'mysqld_disable_trans', '1'))
                                else:
                                    semanage = get_semanage()
                                    system2((semanage, 'fcontext', '-a', '-t',
                                             'bin_t', '/usr/bin/rsync'))
                                    system2((software.which('restorecon'),
                                             '-v', '/usr/bin/rsync'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'',
                                 src_dir, dest)
                        rsync(src_dir,
                              dest,
                              archive=True,
                              exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Example #21
    def args(self):
        s = ["--fork"]
        if self.configpath:
            s.append("--config=%s" % self.configpath)
        if self.dbpath:
            s.append("--dbpath=%s" % self.dbpath)
        if self.port:
            s.append("--port=%s" % self.port)
        if self.keyfile and os.path.exists(self.keyfile):
            chown_r(self.keyfile, DEFAULT_USER)
            s.append("--keyFile=%s" % self.keyfile)
        if self.verbose and isinstance(self.verbose, int) and 0 < self.verbose < 6:
            s.append("-" + "v" * self.verbose)

        return s
Example #22
    def move_to(self, dst, move_files=True):
        new_db_path = os.path.join(dst, os.path.basename(self.db_path))

        if not os.path.exists(dst):
            LOG.debug('Creating directory structure for redis db files: %s' % dst)
            os.makedirs(dst)

        if move_files and os.path.exists(os.path.dirname(self.db_path)) and os.path.isfile(self.db_path):
            LOG.debug("copying db file %s into %s" % (os.path.dirname(self.db_path), dst))
            shutil.copyfile(self.db_path, new_db_path)

        LOG.debug("changing directory owner to %s" % self.user)
        chown_r(dst, self.user)
        self.db_path = new_db_path
        return new_db_path
Example #23
def make_symlinks(source_dir, dst_dir, username='******'):
    #Vital hack for getting CentOS init script to work
    for obj in ['base', 'PG_VERSION', 'postmaster.pid']:
        
        src = os.path.join(source_dir, obj)
        dst = os.path.join(dst_dir, obj) 
        
        if os.path.islink(dst):
            os.unlink(dst)
        elif os.path.exists(dst):
            shutil.rmtree(dst)
            
        os.symlink(src, dst)
        
        if os.path.exists(src):
            chown_r(dst, username)
Example #24
def make_symlinks(source_dir, dst_dir, username='******'):
    #Vital hack to get init script to work on CentOS 5x/6x
    for obj in ['base', 'PG_VERSION', 'postmaster.pid']:

        src = os.path.join(source_dir, obj)
        dst = os.path.join(dst_dir, obj)

        if os.path.islink(dst):
            os.unlink(dst)
        elif os.path.exists(dst):
            shutil.rmtree(dst)

        os.symlink(src, dst)

        if os.path.exists(src):
            chown_r(dst, username)
Example #25
    def args(self):
        s = ['--fork']
        if self.configpath:
            s.append('--config=%s' % self.configpath)
        if self.dbpath:
            s.append('--dbpath=%s' % self.dbpath)
        if self.port:
            s.append('--port=%s' % self.port)
        if self.keyfile and os.path.exists(self.keyfile):
            chown_r(self.keyfile, DEFAULT_USER)
            s.append('--keyFile=%s' % self.keyfile)
        if self.verbose and isinstance(self.verbose,
                                       int) and 0 < self.verbose < 6:
            s.append('-' + 'v' * self.verbose)

        return s
Example #26
 def _prepare_config_server(self):
     self._logger.debug('Preparing config server')
     if not os.path.exists(CONFIG_SERVER_DATA_DIR):
         os.makedirs(CONFIG_SERVER_DATA_DIR)
     chown_r(CONFIG_SERVER_DATA_DIR, DEFAULT_USER)
     '''
     configsvr changes the default port and turns on the diaglog, 
     a log that keeps every action the config database performs 
     in a replayable format, just in case.
     For mongo 1.8+ use --port 27019 and --journal (instead of --diaglog). 
     Journaling gives mostly the same effect as the diaglog with better performance.
     P.S. Assume that MongoDB roles in Scalr will be built on x64 platforms only,
     which means the journal option will be on by default.
     '''
     self.config_server_conf.configsvr = True
     self.config_server_conf.port = CONFIG_SERVER_DEFAULT_PORT
     self.config_server_conf.logpath = CONFIG_SERVER_LOG_PATH
Example #27
 def _prepare_config_server(self):
     self._logger.debug("Preparing config server")
     if not os.path.exists(CONFIG_SERVER_DATA_DIR):
         os.makedirs(CONFIG_SERVER_DATA_DIR)
     chown_r(CONFIG_SERVER_DATA_DIR, DEFAULT_USER)
     """
     configsvr changes the default port and turns on the diaglog, 
     a log that keeps every action the config database performs 
     in a replayable format, just in case.
     For mongo 1.8+ use --port 27019 and --journal (instead of --diaglog). 
     Journaling gives mostly the same effect as the diaglog with better performance.
     P.S. Assume that MongoDB roles in Scalr will be built on x64 platforms only,
     which means the journal option will be on by default.
     """
     self.config_server_conf.configsvr = True
     self.config_server_conf.port = CONFIG_SERVER_DEFAULT_PORT
     self.config_server_conf.logpath = CONFIG_SERVER_LOG_PATH
Example #28
    def move_to(self, dst):
        datadir = os.path.join(__postgresql__['storage_dir'], STORAGE_DATA_DIR)
        centos7 = "centos" in linux.os['name'].lower(
        ) and linux.os["release"].version[0] == 7

        if not os.path.exists(dst):
            LOG.debug("creating %s" % dst)
            os.makedirs(dst)

        for config in ['postgresql.conf', 'pg_ident.conf', 'pg_hba.conf']:
            old_config = os.path.join(self.path, config)
            new_config = os.path.join(dst, config)
            if os.path.exists(
                    old_config) and not os.path.islink(old_config):  #???
                LOG.debug('Moving %s' % config)
                shutil.move(old_config, new_config)
            elif os.path.exists(new_config):
                LOG.debug('%s is already in place. Skipping.' % config)
            else:
                raise BaseException('Postgresql config file not found: %s' %
                                    old_config)
            chown_r(new_config, DEFAULT_USER)
            if centos7:
                new_link = os.path.join(datadir, config)
                if os.path.exists(new_link) and os.path.isfile(new_link):
                    os.remove(new_link)
                    LOG.debug("Duplicate config %s removed." % new_link)
                os.symlink(new_config, new_link)
                chown_r(new_link, DEFAULT_USER)

        #the following block needs revision
        if centos7:
            self._systemd_change_pgdata(datadir)
            system2([software.which("systemctl"),
                     "daemon-reload"])  # [SCALARIZR-1627]
        else:
            self._patch_sysconfig(dst)

        self.path = dst

        LOG.debug("configuring pid")
        conf = PostgresqlConf.find(self)
        if not centos7:
            conf.pid_file = os.path.join(dst,
                                         'postmaster.pid')  # [SCALARIZR-1685]
Example #29
 def apply_public_ssh_key(self, source_path=None):
     source_path = source_path or self.public_key_path 
     if not os.path.exists(self.ssh_dir):
         os.makedirs(self.ssh_dir)
         chown_r(self.ssh_dir, self.name)
     
     pub_key = '' 
     with open(source_path, 'r') as fp:
         pub_key = fp.read()
     path = os.path.join(self.ssh_dir, 'authorized_keys')
     keys = ''
     if os.path.exists(path):
         with open(path, 'r') as fp:
             keys = fp.read()
     
     if not keys or not pub_key in keys:
         with open(path, 'a') as fp:
             fp.write('\n%s %s\n' % (pub_key, self.name))
         chown_r(path, self.name)
Example #30
    def apply_public_ssh_key(self, source_path=None):
        source_path = source_path or self.public_key_path
        if not os.path.exists(self.ssh_dir):
            os.makedirs(self.ssh_dir)
            chown_r(self.ssh_dir, self.name)

        pub_key = ''
        with open(source_path, 'r') as fp:
            pub_key = fp.read()
        path = os.path.join(self.ssh_dir, 'authorized_keys')
        keys = ''
        if os.path.exists(path):
            with open(path, 'r') as fp:
                keys = fp.read()

        if not keys or not pub_key in keys:
            with open(path, 'a') as fp:
                fp.write('\n%s %s\n' % (pub_key, self.name))
            chown_r(path, self.name)
Example #31
    def start(cls):
        if not cls.is_running():
            cls._logger.info('Starting %s process' % MONGOS)
            args = ['sudo', '-u', DEFAULT_USER, MONGOS, '--fork',
                            '--logpath', ROUTER_LOG_PATH, '--configdb',
                            'mongo-0-0:%s' % CONFIG_SERVER_DEFAULT_PORT]
            if cls.keyfile and os.path.exists(cls.keyfile):
                chown_r(cls.keyfile, DEFAULT_USER)
                args.append('--keyFile=%s' % cls.keyfile)

            if cls.verbose and isinstance(cls.verbose, int) and 0<cls.verbose<6:
                args.append('-'+'v'*cls.verbose)


            if os.path.exists(ROUTER_LOG_PATH):
                chown_r(ROUTER_LOG_PATH, DEFAULT_USER)

            system2(args, close_fds=True, preexec_fn=mongo_preexec_fn)
            wait_until(lambda: cls.is_running(), timeout=MAX_START_TIMEOUT)
            wait_until(lambda: cls.get_cli().has_connection, timeout=MAX_START_TIMEOUT)
            cls._logger.debug('%s process has been started.' % MONGOS)
Example #32
    def start(cls):
        if not cls.is_running():
            cls._logger.info('Starting %s process' % MONGOS)
            args = [
                'sudo', '-u', DEFAULT_USER, MONGOS, '--fork', '--logpath',
                ROUTER_LOG_PATH, '--configdb',
                'mongo-0-0:%s' % CONFIG_SERVER_DEFAULT_PORT
            ]
            if cls.keyfile and os.path.exists(cls.keyfile):
                chown_r(cls.keyfile, DEFAULT_USER)
                args.append('--keyFile=%s' % cls.keyfile)

            if cls.verbose and isinstance(cls.verbose,
                                          int) and 0 < cls.verbose < 6:
                args.append('-' + 'v' * cls.verbose)

            if os.path.exists(ROUTER_LOG_PATH):
                chown_r(ROUTER_LOG_PATH, DEFAULT_USER)

            system2(args, close_fds=True, preexec_fn=mongo_preexec_fn)
            wait_until(lambda: cls.is_running(), timeout=MAX_START_TIMEOUT)
            wait_until(lambda: cls.get_cli().has_connection,
                       timeout=MAX_START_TIMEOUT)
            cls._logger.debug('%s process has been started.' % MONGOS)
Example #33
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)

        trn = largetransfer.Download(bak.cloudfs_source)
        trn.apply_async()

        streamer = xbstream.args(extract=True, directory=__mysql__['data_dir'])
        streamer.popen(stdin=trn.output)

        trn.join()

        LOG.info('Preparing the base backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     ibbackup='xtrabackup',
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)

                    trn = largetransfer.Download(inc.cloudfs_source)
                    trn.apply_async()

                    streamer = xbstream.args(extract=True, directory=inc_dir)
                    streamer.popen(stdin=trn.output)

                    trn.join()

                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 ibbackup='xtrabackup',
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()
Example #34
    def create_vhost(self,
                     hostname,
                     port,
                     template,
                     ssl,
                     ssl_certificate_id=None,
                     reload=True,
                     allow_port=False):
        """
        Creates Name-Based Apache VirtualHost

        @param hostname: Server Name
        @param port: port to listen to
        @param template: VirtualHost body with no certificate paths
        @param ssl: True if VirtualHost uses SSL certificate
        @param ssl_certificate_id: ID of SSL certificate
        @param reload: True if immediate apache reload is required.
        @return: path to VirtualHost file
        """
        #TODO: add Listen and NameVirtualHost directives to httpd.conf or ports.conf if needed

        name = "%s:%s" % (hostname, port)
        LOG.info("Creating Apache VirtualHost %s" % name)

        v_host = VirtualHost(template)

        if ssl:
            ssl_certificate = SSLCertificate(ssl_certificate_id)
            if not ssl_certificate.exists():
                ssl_certificate.ensure()

            v_host.use_certificate(
                ssl_certificate.cert_path, ssl_certificate.key_path,
                ssl_certificate.chain_path
                if os.path.exists(ssl_certificate.chain_path) else None)

            LOG.info("Certificate %s is set to VirtualHost %s" %
                     (ssl_certificate_id, name))

            #Compatibility with old apache handler
            if not self.mod_ssl.has_valid_certificate(
            ) or self.mod_ssl.is_system_certificate_used():
                self.mod_ssl.set_default_certificate(ssl_certificate)

        for directory in v_host.document_root_paths:
            docroot_parent_path = os.path.dirname(directory)

            if not os.path.exists(docroot_parent_path):
                os.makedirs(docroot_parent_path, 0755)
                LOG.info(
                    "Created parent directory of document root %s for %s" %
                    (directory, name))

            if not os.path.exists(directory):
                shutil.copytree(os.path.join(bus.share_path, "apache/html"),
                                directory)
                files = ", ".join(os.listdir(directory))
                LOG.debug("Copied document root files: %s" % files)

                try:
                    pwd.getpwnam("apache")
                    uname = "apache"
                except KeyError:
                    uname = "www-data"

                coreutils.chown_r(directory, uname)
                LOG.debug("Changed owner to %s: %s" %
                          (uname, ", ".join(os.listdir(directory))))
            else:
                LOG.debug("Document root %s already exists." % directory)

        try:
            clog_path = os.path.dirname(v_host.custom_log_path)
            if not os.path.exists(clog_path):
                os.makedirs(clog_path, 0755)
                LOG.info(
                    "Created CustomLog directory for VirtualHost %s:%s: %s" % (
                        hostname,
                        port,
                        clog_path,
                    ))
        except NoPathError:
            LOG.debug("Directive 'CustomLog' not found in %s" % name)

        try:
            errlog_path = os.path.dirname(v_host.error_log_path)
            if not os.path.exists(errlog_path):
                os.makedirs(errlog_path, 0755)
                LOG.info(
                    "Created ErrorLog directory for VirtualHost %s:%s: %s" % (
                        hostname,
                        port,
                        errlog_path,
                    ))
        except NoPathError:
            LOG.debug("Directive 'ErrorLog' not found in %s" % name)

        v_host_changed = True
        v_host_path = get_virtual_host_path(hostname, port)
        if os.path.exists(v_host_path):
            with open(v_host_path, "r") as old_v_host:
                if old_v_host.read() == v_host.body:
                    v_host_changed = False

        if v_host_changed:
            with open(v_host_path, "w") as fp:
                fp.write(v_host.body)
            LOG.info("VirtualHost %s configuration saved to %s" %
                     (name, v_host_path))
        else:
            LOG.info("VirtualHost %s configuration (%s) has no changes." %
                     (name, v_host_path))

        if allow_port:
            self._open_ports([port])

        if reload:
            try:
                self.configtest()
            except initdv2.InitdError, e:
                LOG.error("ConfigTest failed with error: '%s'." % str(e))
                raise
            else:
                self.reload_service("Applying Apache VirtualHost %s" % name)
Example #35
    def on_DbMsr_CreateBackup(self, message):
        #TODO: Think how to move the most part of it into Postgresql class 
        # Retrieve password for scalr pg user
        tmpdir = backup_path = None
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')
            
            
            op = operation(name=self._op_backup, phases=[{
                'name': self._phase_backup
            }])
            op.define()         
            
            with op.phase(self._phase_backup):
            
                if not os.path.exists(self._tmp_path):
                    os.makedirs(self._tmp_path)
                    
                # Defining archive name and path
                backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S')+'.tar.gz'
                backup_path = os.path.join(self._tmp_path, backup_filename)
                
                # Creating archive 
                backup = tarfile.open(backup_path, 'w:gz')
    
                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=self._tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                    backup.add(dump_path, os.path.basename(dump_path))  

                make_backup_steps(databases, op, _single_backup)                        

                backup.close()
                
                with op.step(self._step_upload_to_cloud_storage):
                    # Creating list of full paths to archive chunks
                    #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
                    #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
                    #else:
                    #    parts = [backup_path]
                    #sizes = [os.path.getsize(file) for file in parts]

                    cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
                    LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)

                    trn = LargeTransfer(backup_path, cloud_storage_path)
                    manifest = trn.run()
                    LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                                    cloud_storage_path, backup_filename)
            
            result = list(dict(path=os.path.join(cloud_storage_path, c[0]), size=c[2]) for c in
                            manifest['files'][0]['chunks'])
            op.ok(data=result)
                
            # Notify Scalr
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type = BEHAVIOUR,
                status = 'ok',
                backup_parts = result
            ))
                        
        except (Exception, BaseException), e:
            LOG.exception(e)
            
            # Notify Scalr about error
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type = BEHAVIOUR,
                status = 'error',
                last_error = str(e)
            ))
Example #36
    def on_init(self):      
        #temporary fix for starting-after-rebundle issue
        if not os.path.exists(PG_SOCKET_DIR):
            os.makedirs(PG_SOCKET_DIR)
            chown_r(PG_SOCKET_DIR, 'postgres')
            
        bus.on("host_init_response", self.on_host_init_response)
        bus.on("before_host_up", self.on_before_host_up)
        bus.on("before_reboot_start", self.on_before_reboot_start)

        self._insert_iptables_rules()       

        if __node__['state'] == ScalarizrState.BOOTSTRAPPING:
            
            if disttool.is_redhat_based():      
                    
                checkmodule_path = software.which('checkmodule')
                semodule_package_path = software.which('semodule_package')
                semodule_path = software.which('semodule')
            
                if all((checkmodule_path, semodule_package_path, semodule_path)):
                    
                    with open('/tmp/sshkeygen.te', 'w') as fp:
                        fp.write(SSH_KEYGEN_SELINUX_MODULE)
                    
                    self._logger.debug('Compiling SELinux policy for ssh-keygen')
                    system2((checkmodule_path, '-M', '-m', '-o',
                             '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'), logger=self._logger)
                    
                    self._logger.debug('Building SELinux package for ssh-keygen')
                    system2((semodule_package_path, '-o', '/tmp/sshkeygen.pp',
                             '-m', '/tmp/sshkeygen.mod'), logger=self._logger)
                    
                    self._logger.debug('Loading ssh-keygen SELinux package')                    
                    system2((semodule_path, '-i', '/tmp/sshkeygen.pp'), logger=self._logger)


        if __node__['state'] == 'running':

            vol = storage2.volume(__postgresql__['volume'])
            vol.ensure(mount=True)
            
            self.postgresql.service.start()
            self.accept_all_clients()
            
            self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
            root_password = self.root_password
            
            if not self.postgresql.root_user.exists():
                self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
                self.postgresql.root_user = self.postgresql.create_linux_user(ROOT_USER, root_password)
            else:
                try:
                    self.postgresql.root_user.check_system_password(root_password)
                    self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")              
                except ValueError:
                    self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
                    self.postgresql.root_user.change_system_password(root_password)
                    
            if self.is_replication_master:  
                #ALTER ROLE cannot be executed in a read-only transaction
                self._logger.debug("Checking password for pg_role scalr.")      
                if not self.postgresql.root_user.check_role_password(root_password):
                    LOG.warning("Scalr's root PgSQL role was changed. Recreating.")
                    self.postgresql.root_user.change_role_password(root_password)
Example #37
	def _run(self):
		if self.backup_type:
			self._check_backup_type()
		rst_volume = None
		exc_info = None
		'''
		# Create custom my.cnf
		# XXX: it's not a good think to do, but we should create this hacks, 
		# cause when handler calls restore.run() my.cnf is not patched yet 
		shutil.copy(__mysql__['my.cnf'], '/tmp/my.cnf')
		mycnf = metaconf.Configuration('mysql')
		mycnf.read('/tmp/my.cnf')
		try:
			mycnf.options('mysqld')
		except metaconf.NoPathError:
			mycnf.add('mysqld')
		mycnf.set('mysqld/datadir', __mysql__['data_dir'])
		mycnf.set('mysqld/log-bin', __mysql__['binlog_dir'])
		mycnf.write('/tmp/my.cnf')
		'''
		
		my_defaults = my_print_defaults('mysqld')
		rst_volume = None
		self._data_dir = os.path.normpath(my_defaults['datadir'])
		LOG.info('_run: datadir is "%s"' % self._data_dir)
		self._log_bin = os.path.normpath(my_defaults['log_bin'])
		if self._log_bin.startswith('/'):
			self._binlog_dir = os.path.dirname(self._log_bin)
		
		try:
			if self.snapshot:
				LOG.info('Creating restore volume from snapshot')
				if self.volume:
					# Clone volume object
					self.volume = storage2.volume(self.volume)
					rst_volume = self.volume.clone()
					rst_volume.snap = self.snapshot
				else:
					self.snapshot = storage2.snapshot(self.snapshot)
					rst_volume = storage2.volume(type=self.snapshot.type, 
											snap=self.snapshot)
				rst_volume.tags.update({'tmp': 1})
				rst_volume.mpoint = self.backup_dir
				rst_volume.ensure(mount=True)

	
			if not os.listdir(self.backup_dir):
				msg = 'Failed to find any backups in %s'
				raise Error(msg, self.backup_dir)
			
			backups = sorted(os.listdir(self.backup_dir))
			LOG.info('Preparing the base backup')
			base = backups.pop(0)
			target_dir = os.path.join(self.backup_dir, base)
			innobackupex(target_dir, 
						apply_log=True, 
						redo_only=True,
						user=__mysql__['root_user'],
						password=__mysql__['root_password'])
			for inc in backups:
				LOG.info('Preparing incremental backup %s', inc)
				innobackupex(target_dir,
							apply_log=True, 
							redo_only=True, 
							incremental_dir=os.path.join(self.backup_dir, inc),
							user=__mysql__['root_user'],
							password=__mysql__['root_password'])
			LOG.info('Preparing the full backup')
			innobackupex(target_dir, 
						apply_log=True, 
						user=__mysql__['root_user'],
						password=__mysql__['root_password'])
			
			LOG.info('Copying backup to datadir')
			self._mysql_init.stop()
			self._start_copyback()
			try:
				innobackupex(target_dir, copy_back=True)
				coreutils.chown_r(self._data_dir, 
								'mysql', 'mysql')
				self._mysql_init.start()
				self._commit_copyback()
			except:
				self._rollback_copyback()
				raise
		except:
			exc_info = sys.exc_info()
		finally:
			if rst_volume:
				LOG.info('Destroying restore volume')
				try:
					rst_volume.destroy(force=True)
				except:
					msg = 'Failed to destroy volume %s: %s'
					LOG.warn(msg, rst_volume.id, sys.exc_info()[1])
		if exc_info:
			raise exc_info[0], exc_info[1], exc_info[2]
Example #38
 def set_permissions(self):
     self._logger.debug("Changing working directory owner to %s" %
                        self.user)
     chown_r(self.path, self.user)
Example #39
    def on_DbMsr_CreateBackup(self, message):
        #TODO: Think how to move the most part of it into Postgresql class
        # Retrieve password for scalr pg user
        tmpdir = backup_path = None
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            op = operation(name=self._op_backup,
                           phases=[{
                               'name': self._phase_backup
                           }])
            op.define()

            with op.phase(self._phase_backup):

                if not os.path.exists(self._tmp_path):
                    os.makedirs(self._tmp_path)

                # Defining archive name and path
                backup_filename = time.strftime(
                    '%Y-%m-%d-%H:%M:%S') + '.tar.gz'
                backup_path = os.path.join(self._tmp_path, backup_filename)

                # Creating archive
                backup = tarfile.open(backup_path, 'w:gz')

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=self._tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))
                    backup.add(dump_path, os.path.basename(dump_path))

                make_backup_steps(databases, op, _single_backup)

                backup.close()

                with op.step(self._step_upload_to_cloud_storage):
                    # Creating list of full paths to archive chunks
                    if os.path.getsize(
                            backup_path) > __postgresql__['pgdump_chunk_size']:
                        parts = [
                            os.path.join(tmpdir, file) for file in split(
                                backup_path, backup_filename,
                                __postgresql__['pgdump_chunk_size'], tmpdir)
                        ]
                    else:
                        parts = [backup_path]
                    sizes = [os.path.getsize(file) for file in parts]

                    cloud_storage_path = self._platform.scalrfs.backups(
                        BEHAVIOUR)
                    LOG.info("Uploading backup to cloud storage (%s)",
                             cloud_storage_path)
                    trn = transfer.Transfer()
                    cloud_files = trn.upload(parts, cloud_storage_path)
                    LOG.info(
                        "Postgresql backup uploaded to cloud storage under %s/%s",
                        cloud_storage_path, backup_filename)

            result = list(
                dict(path=path, size=size)
                for path, size in zip(cloud_files, sizes))
            op.ok(data=result)

            # Notify Scalr
            self.send_message(
                DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

        except (Exception, BaseException), e:
            LOG.exception(e)

            # Notify Scalr about error
            self.send_message(
                DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
Example #40
    def create_vhost(self, hostname, port, template, ssl, ssl_certificate_id=None, reload=True, allow_port=False):
        """
        Creates Name-Based Apache VirtualHost

        @param hostname: Server Name
        @param port: port to listen to
        @param template: VirtualHost body with no certificate paths
        @param ssl: True if VirtualHost uses SSL certificate
        @param ssl_certificate_id: ID of SSL certificate
        @param reload: True if immediate apache reload is required.
        @return: path to VirtualHost file
        """
        #TODO: add Listen and NameVirtualHost directives to httpd.conf or ports.conf if needed

        name = "%s:%s" % (hostname, port)
        LOG.info("Creating Apache VirtualHost %s" % name)

        v_host = VirtualHost(template)

        if ssl:
            ssl_certificate = SSLCertificate(ssl_certificate_id)
            if not ssl_certificate.exists():
                ssl_certificate.ensure()

            v_host.use_certificate(
                ssl_certificate.cert_path,
                ssl_certificate.key_path,
                ssl_certificate.chain_path if os.path.exists(ssl_certificate.chain_path) else None
            )

            LOG.info("Certificate %s is set to VirtualHost %s" % (ssl_certificate_id, name))

            #Compatibility with old apache handler
            if not self.mod_ssl.has_valid_certificate() or self.mod_ssl.is_system_certificate_used():
                self.mod_ssl.set_default_certificate(ssl_certificate)

        for directory in v_host.document_root_paths:
            docroot_parent_path = os.path.dirname(directory)

            if not os.path.exists(docroot_parent_path):
                os.makedirs(docroot_parent_path, 0755)
                LOG.info("Created parent directory of document root %s for %s" % (directory, name))

            if not os.path.exists(directory):
                shutil.copytree(os.path.join(bus.share_path, "apache/html"), directory)
                files = ", ".join(os.listdir(directory))
                LOG.debug("Copied document root files: %s" % files)

                try:
                    pwd.getpwnam("apache")
                    uname = "apache"
                except KeyError:
                    uname = "www-data"

                coreutils.chown_r(directory, uname)
                LOG.debug("Changed owner to %s: %s" % (
                    uname, ", ".join(os.listdir(directory))))
            else:
                LOG.debug("Document root %s already exists." % directory)

        try:
            clog_path = os.path.dirname(v_host.custom_log_path)
            if not os.path.exists(clog_path):
                os.makedirs(clog_path, 0755)
                LOG.info("Created CustomLog directory for VirtualHost %s:%s: %s" % (
                    hostname,
                    port,
                    clog_path,
                ))
        except NoPathError:
            LOG.debug("Directive 'CustomLog' not found in %s" % name)

        try:
            errlog_path = os.path.dirname(v_host.error_log_path)
            if not os.path.exists(errlog_path):
                os.makedirs(errlog_path, 0755)
                LOG.info("Created ErrorLog directory for VirtualHost %s:%s: %s" % (
                    hostname,
                    port,
                    errlog_path,
                ))
        except NoPathError:
            LOG.debug("Directive 'ErrorLog' not found in %s" % name)

        v_host_changed = True
        v_host_path = get_virtual_host_path(hostname, port)
        if os.path.exists(v_host_path):
            with open(v_host_path, "r") as old_v_host:
                if old_v_host.read() == v_host.body:
                    v_host_changed = False

        if v_host_changed:
            with open(v_host_path, "w") as fp:
                fp.write(v_host.body)
            LOG.info("VirtualHost %s configuration saved to %s" % (name, v_host_path))
        else:
            LOG.info("VirtualHost %s configuration (%s) has no changes." % (name, v_host_path))

        if allow_port:
            self._open_ports([port])

        if reload:
            try:
                self.configtest()
            except initdv2.InitdError, e:
                LOG.error("ConfigTest failed with error: '%s'." % str(e))
                raise
            else:
                self.reload_service("Applying Apache VirtualHost %s" % name)
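A minimal usage sketch, not taken from the original source: how create_vhost might be invoked on an instance of this API class. The `api` object, the template string and the port are illustrative placeholders.

# Hypothetical caller; 'api' is an instance of the class defining create_vhost.
TEMPLATE = (
    "<VirtualHost *:80>\n"
    "    ServerName example.com\n"
    "    DocumentRoot /var/www/example.com\n"
    "    CustomLog /var/log/apache2/example.com-access.log combined\n"
    "    ErrorLog /var/log/apache2/example.com-error.log\n"
    "</VirtualHost>\n"
)

vhost_path = api.create_vhost(
    hostname="example.com",
    port=80,
    template=TEMPLATE,
    ssl=False,
    reload=True,       # run configtest and reload Apache immediately
    allow_port=True,   # also open port 80 via _open_ports()
)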
Ejemplo n.º 41
0
MONGO_DUMP = software.which('mongodump')
MONGOS = software.which('mongos')

ROUTER_DEFAULT_PORT = 27017
ARBITER_DEFAULT_PORT = 27020
REPLICA_DEFAULT_PORT = 27018
CONFIG_SERVER_DEFAULT_PORT = 27019

SERVICE_NAME = BuiltinBehaviours.MONGODB
STORAGE_PATH = "/mnt/mongodb-storage"

DEFAULT_USER = '******' if disttool.is_debian_based() else 'mongod'
LOG_DIR = '/var/log/mongodb'
if not os.path.isdir(LOG_DIR):
    os.makedirs(LOG_DIR)
chown_r(LOG_DIR, DEFAULT_USER)

LOG_PATH_DEFAULT = os.path.join(LOG_DIR, 'mongodb.shardsrv.log')
DEFAULT_UBUNTU_DB_PATH = '/var/lib/mongodb'
DEFAULT_CENTOS_DB_PATH = '/var/lib/mongo'
LOCK_FILE = 'mongod.lock'

SCALR_USER = '******'
STORAGE_DATA_DIR = os.path.join(STORAGE_PATH, 'data')

UBUNTU_CONFIG_PATH = '/etc/mongodb.conf'
CENTOS_CONFIG_PATH = '/etc/mongod.conf'
CONFIG_PATH_DEFAULT = '/etc/mongodb.shardsrv.conf'
ARBITER_DATA_DIR = '/tmp/arbiter'
ARBITER_LOG_PATH = os.path.join(LOG_DIR, 'mongodb.arbiter.log')
ARBITER_CONF_PATH = '/etc/mongodb.arbiter.conf'
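An illustrative sketch only (these assignments are not in the original module): the per-distro defaults above are typically resolved the same way DEFAULT_USER is chosen.

# Hedged illustration: pick platform-specific defaults with the same
# disttool check used for DEFAULT_USER above.
if disttool.is_debian_based():
    config_path = UBUNTU_CONFIG_PATH          # /etc/mongodb.conf
    default_db_path = DEFAULT_UBUNTU_DB_PATH  # /var/lib/mongodb
else:
    config_path = CENTOS_CONFIG_PATH          # /etc/mongod.conf
    default_db_path = DEFAULT_CENTOS_DB_PATH  # /var/lib/mongo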
Ejemplo n.º 42
0
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == "incremental":
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == "incremental":
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__["data_dir"])

        LOG.info("Downloading the base backup (LSN: 0..%s)", bak.to_lsn)
        trn = cloudfs.LargeTransfer(
            bak.cloudfs_source,
            __mysql__["data_dir"],
            streamer=xbstream.args(extract=True, directory=__mysql__["data_dir"]),
        )
        trn.run()

        LOG.info("Preparing the base backup")
        innobackupex(
            __mysql__["data_dir"],
            apply_log=True,
            redo_only=True,
            ibbackup="xtrabackup",
            user=__mysql__["root_user"],
            password=__mysql__["root_password"],
        )

        if incrementals:
            inc_dir = os.path.join(__mysql__["tmp_dir"], "xtrabackup-restore-inc")
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info("Downloading incremental backup #%d (LSN: %s..%s)", i, inc.from_lsn, inc.to_lsn)
                    trn = cloudfs.LargeTransfer(
                        inc.cloudfs_source, inc_dir, streamer=xbstream.args(extract=True, directory=inc_dir)
                    )

                    trn.run()  # todo: Largetransfer should support custom decompressor proc
                    LOG.info("Preparing incremental backup #%d", i)
                    innobackupex(
                        __mysql__["data_dir"],
                        apply_log=True,
                        redo_only=True,
                        incremental_dir=inc_dir,
                        ibbackup="xtrabackup",
                        user=__mysql__["root_user"],
                        password=__mysql__["root_password"],
                    )
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info("Preparing the full backup")
        innobackupex(
            __mysql__["data_dir"], apply_log=True, user=__mysql__["root_user"], password=__mysql__["root_password"]
        )
        coreutils.chown_r(__mysql__["data_dir"], "mysql", "mysql")

        self._mysql_init.start()
        if int(__mysql__["replication_master"]):
            LOG.info("Master will reset it's binary logs, " "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({"log_file": log_file, "log_pos": log_pos})
            mnf.meta = meta
            mnf.save()
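For reference, a hedged sketch (assuming Percona XtraBackup 2.x command-line behavior, not this project's innobackupex wrapper) of the prepare sequence performed above: the base backup is prepared with --redo-only, each incremental is applied with --redo-only, and a final full prepare rolls back uncommitted transactions.

import subprocess

def prepare_restored_backup(data_dir, incremental_dirs):
    # Base backup: replay the redo log only, leaving it open for incrementals.
    subprocess.check_call(['innobackupex', '--apply-log', '--redo-only', data_dir])
    for inc_dir in incremental_dirs:
        # Apply each incremental on top of the base, still redo-only.
        subprocess.check_call(['innobackupex', '--apply-log', '--redo-only',
                               '--incremental-dir=%s' % inc_dir, data_dir])
    # Final prepare: roll back uncommitted transactions so mysqld can start.
    subprocess.check_call(['innobackupex', '--apply-log', data_dir])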
Ejemplo n.º 43
0
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')

                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))  #?
                    dumps.append(dump_path)

                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(
                    BEHAVIOUR)

                suffix = 'master' if int(
                    __postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" %
                         (cloud_storage_path, backup_tags))

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dumps,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info(
                    "Postgresql backup uploaded to cloud storage under %s",
                    cloud_storage_path)
                LOG.debug(manifest.data)

                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result

            except (Exception, BaseException), e:
                LOG.exception(e)

                # Notify Scalr about error
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
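The helper transfer_result_to_backup_result is not shown in this excerpt. A hedged guess at its shape, assuming the upload manifest exposes the same files/chunks layout as the LargeTransfer manifest in the next example (chunk name at index 0, chunk size at index 2):

import os

def transfer_result_to_backup_result(manifest):
    # Hypothetical reconstruction, not the verified original implementation.
    base = os.path.dirname(manifest.cloudfs_path)
    return [dict(path=os.path.join(base, chunk[0]), size=chunk[2])
            for chunk in manifest['files'][0]['chunks']]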
Ejemplo n.º 44
0
        def do_backup(op):
            tmpdir = backup_path = None
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')
                
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)
                    
                # Defining archive name and path
                backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S')+'.tar.gz'
                backup_path = os.path.join(tmp_path, backup_filename)
                
                # Creating archive 
                backup_obj = tarfile.open(backup_path, 'w:gz')

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  #?
                    backup_obj.add(dump_path, os.path.basename(dump_path))  

                for db_name in databases:
                    _single_backup(db_name)
                       
                backup_obj.close()
                
                # Creating list of full paths to archive chunks
                #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
                #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
                #else:
                #    parts = [backup_path]
                #sizes = [os.path.getsize(file) for file in parts]

                cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

                suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))
                trn = LargeTransfer(backup_path, cloud_storage_path, tags=backup_tags)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                                cloud_storage_path, backup_filename)
                
                result = list(dict(path=os.path.join(os.path.dirname(manifest.cloudfs_path), c[0]), size=c[2]) for c in
                                manifest['files'][0]['chunks'])
                    
                # Notify Scalr
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='ok',
                                             backup_parts=result))

                return result  #?
                            
            except (Exception, BaseException), e:
                LOG.exception(e)
                
                # Notify Scalr about error
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='error',
                                             last_error=str(e)))
Ejemplo n.º 46
0
    def on_init(self):
        #temporary fix for starting-after-rebundle issue
        if not os.path.exists(PG_SOCKET_DIR):
            os.makedirs(PG_SOCKET_DIR)
            chown_r(PG_SOCKET_DIR, 'postgres')

        bus.on("host_init_response", self.on_host_init_response)
        bus.on("before_host_up", self.on_before_host_up)
        bus.on("before_reboot_start", self.on_before_reboot_start)

        self._insert_iptables_rules()

        if __node__['state'] == ScalarizrState.BOOTSTRAPPING:

            if linux.os.redhat_family:

                def selinux_enabled():
                    selinuxenabled = software.which('selinuxenabled')
                    if selinuxenabled:
                        _, _, ret_code = system2((selinuxenabled, ),
                                                 raise_exc=False)
                        return 0 == ret_code
                    # Consider it enabled by default
                    return True

                checkmodule_path = software.which('checkmodule')
                semodule_package_path = software.which('semodule_package')
                semodule_path = software.which('semodule')

                if all(
                    (checkmodule_path, semodule_package_path, semodule_path)):
                    if selinux_enabled():
                        with open('/tmp/sshkeygen.te', 'w') as fp:
                            fp.write(SSH_KEYGEN_SELINUX_MODULE)

                        self._logger.debug(
                            'Compiling SELinux policy for ssh-keygen')
                        system2((checkmodule_path, '-M', '-m', '-o',
                                 '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'),
                                logger=self._logger)

                        self._logger.debug(
                            'Building SELinux package for ssh-keygen')
                        system2(
                            (semodule_package_path, '-o', '/tmp/sshkeygen.pp',
                             '-m', '/tmp/sshkeygen.mod'),
                            logger=self._logger)

                        self._logger.debug(
                            'Loading ssh-keygen SELinux package')
                        system2((semodule_path, '-i', '/tmp/sshkeygen.pp'),
                                logger=self._logger)

        if __node__['state'] == 'running':

            vol = storage2.volume(__postgresql__['volume'])
            vol.ensure(mount=True)

            self.postgresql.service.start()
            self.accept_all_clients()

            self._logger.debug(
                "Checking presence of Scalr's PostgreSQL root user.")
            root_password = self.root_password

            if not self.postgresql.root_user.exists():
                self._logger.debug(
                    "Scalr's PostgreSQL root user does not exist. Recreating")
                self.postgresql.root_user = self.postgresql.create_linux_user(
                    ROOT_USER, root_password)
            else:
                try:
                    self.postgresql.root_user.check_system_password(
                        root_password)
                    self._logger.debug(
                        "Scalr's root PgSQL user is present. Password is correct."
                    )
                except ValueError:
                    self._logger.warning(
                        "Scalr's root PgSQL user was changed. Recreating.")
                    self.postgresql.root_user.change_system_password(
                        root_password)

            if self.is_replication_master:
                #ALTER ROLE cannot be executed in a read-only transaction
                self._logger.debug("Checking password for pg_role scalr.")
                if not self.postgresql.root_user.check_role_password(
                        root_password):
                    LOG.warning(
                        "Scalr's root PgSQL role was changed. Recreating.")
                    self.postgresql.root_user.change_role_password(
                        root_password)
Ejemplo n.º 47
0
    def _init_master(self, message):
        """
        Initialize MySQL master
        @type message: scalarizr.messaging.Message
        @param message: HostUp message
        """
        LOG.info("Initializing MySQL master")
        log = bus.init_op.logger

        log.info('Create storage')
        if 'restore' in __mysql__ and \
                        __mysql__['restore'].type == 'snap_mysql':
            __mysql__['restore'].run()
        else:
            if __node__['platform'].name == 'idcf':
                if __mysql__['volume'].id:
                    LOG.info('Cloning volume to work around reattachment limitations of IDCF')
                    __mysql__['volume'].snap = __mysql__['volume'].snapshot()

            __mysql__['volume'].ensure(mount=True, mkfs=True)
            LOG.debug('MySQL volume config after ensure: %s', dict(__mysql__['volume']))

        coreutils.clean_dir(__mysql__['defaults']['datadir'])
        self.mysql.flush_logs(__mysql__['data_dir'])
        self.mysql.move_mysqldir_to(__mysql__['storage_dir'])
        self._change_selinux_ctx()

        storage_valid = self._storage_valid()
        user_creds = self.get_user_creds()
        self._fix_percona_debian_cnf()
        #datadir = mysql2_svc.my_print_defaults('mysqld').get('datadir', __mysql__['defaults']['datadir'])
        #if not storage_valid and datadir.find(__mysql__['data_dir']) == 0:
        #    # When role was created from another mysql role it contains modified my.cnf settings
        #    #self.mysql.my_cnf.datadir = '/var/lib/mysql'
        #    self.mysql.my_cnf.delete_options(['mysqld/log_bin'])



        if not storage_valid:
            '''
            if linux.os['family'] == 'RedHat':
                try:
                    # Check if selinux enabled
                    selinuxenabled_bin = software.which('selinuxenabled')
                    if selinuxenabled_bin:
                        se_enabled = not system2((selinuxenabled_bin, ), raise_exc=False)[2]
                        if se_enabled:
                            # Set selinux context for new mysql datadir
                            semanage = mysql_svc.get_semanage()
                            linux.system('%s fcontext -a -t mysqld_db_t "%s(/.*)?"'
                                         % (semanage, __mysql__['storage_dir']), shell=True)
                            # Restore selinux context
                            restorecon = software.which('restorecon')
                            linux.system('%s -R -v %s' % (restorecon, __mysql__['storage_dir']), shell=True)
                except:
                    LOG.debug('Selinux context setup failed', exc_info=sys.exc_info())
                '''

            self.mysql.my_cnf.delete_options(['mysqld/log_bin'])
            linux.system(['mysql_install_db', '--user=mysql', '--datadir=%s' % __mysql__['data_dir']])

        # Patch configuration
        options = {
            'bind-address': '0.0.0.0',
            'datadir': __mysql__['data_dir'],
            'log_bin': os.path.join(__mysql__['binlog_dir'], 'binlog'),
            'log-bin-index': os.path.join(__mysql__['binlog_dir'], 'binlog.index'),  # MariaDB
            'sync_binlog': '1',
            'innodb_flush_log_at_trx_commit': '1',
            'expire_logs_days': '10'
        }
        for key, value in options.items():
            self.mysql.my_cnf.set('mysqld/' + key, value)

        if not storage_valid:
            if linux.os.debian_family and os.path.exists(__mysql__['debian.cnf']):
                self.mysql.service.start()
                debian_cnf = metaconf.Configuration('mysql')
                debian_cnf.read(__mysql__['debian.cnf'])
                sql = ("GRANT ALL PRIVILEGES ON *.* "
                        "TO 'debian-sys-maint'@'localhost' "
                        "IDENTIFIED BY '{0}'").format(debian_cnf.get('client/password'))
                linux.system(['mysql', '-u', 'root', '-e', sql])
                self.mysql.service.stop()

            coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')
        if 'restore' in __mysql__ and \
                        __mysql__['restore'].type == 'xtrabackup':
            # XXX: when restoring a data bundle on ephemeral storage, the data dir should be empty,
            # but the move_mysqldir_to call is still required to set several options in my.cnf
            coreutils.clean_dir(__mysql__['data_dir'])

        #self._change_selinux_ctx()

        log.info('Patch my.cnf configuration file')
        # Init replication
        self.mysql._init_replication(master=True)

        if 'restore' in __mysql__ and \
                        __mysql__['restore'].type == 'xtrabackup':
            __mysql__['restore'].run()


        # If It's 1st init of mysql master storage
        if not storage_valid:
            if os.path.exists(__mysql__['debian.cnf']):
                log.info("Copying debian.cnf file to mysql storage")
                shutil.copy(__mysql__['debian.cnf'], __mysql__['storage_dir'])

        # If volume has mysql storage directory structure (N-th init)
        else:
            log.info('InnoDB recovery')
            self._copy_debian_cnf_back()
            if 'restore' in __mysql__ and  __mysql__['restore'].type != 'xtrabackup':
                self._innodb_recovery()
                self.mysql.service.start()

        log.info('Create Scalr users')
        # Check and create mysql system users
        self.create_users(**user_creds)

        log.info('Create data bundle')
        if 'backup' in __mysql__:
            __mysql__['restore'] = __mysql__['backup'].run()

        # Update HostUp message
        log.info('Collect HostUp data')
        md = dict(
                replication_master=__mysql__['replication_master'],
                root_password=__mysql__['root_password'],
                repl_password=__mysql__['repl_password'],
                stat_password=__mysql__['stat_password'],
                master_password=__mysql__['master_password']
        )
        if __mysql__['compat_prior_backup_restore']:
            if 'restore' in __mysql__:
                md.update(dict(
                                log_file=__mysql__['restore'].log_file,
                                log_pos=__mysql__['restore'].log_pos,
                                snapshot_config=dict(__mysql__['restore'].snapshot)))
            elif 'log_file' in __mysql__:
                md.update(dict(
                                log_file=__mysql__['log_file'],
                                log_pos=__mysql__['log_pos']))
            md.update(dict(
                                    volume_config=dict(__mysql__['volume'])))
        else:
            md.update(dict(
                    volume=dict(__mysql__['volume'])
            ))
            for key in ('backup', 'restore'):
                if key in __mysql__:
                    md[key] = dict(__mysql__[key])


        message.db_type = __mysql__['behavior']
        setattr(message, __mysql__['behavior'], md)
Ejemplo n.º 48
0
    def set_permissions(self):
        self._logger.debug("Changing working directory owner to %s" % self.user)
        chown_r(self.path, self.user)