Example #1
    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop the database."""
        LOG.info(_("Stopping Vertica."))
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            # Stop vertica-agent service
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
            system.shell_execute(command)
            # Using Vertica adminTools to stop db.
            db_password = self._get_database_password()
            stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
            out, err = system.shell_execute(system.STATUS_ACTIVE_DB, "dbadmin")
            if out.strip() == DB_NAME:
                system.shell_execute(stop_db_command, "dbadmin")
                if not self.status._is_restarting:
                    if not self.status.wait_for_real_status_to_change_to(
                            rd_instance.ServiceStatuses.SHUTDOWN,
                            self.state_change_wait_time, update_db):
                        LOG.error(_("Could not stop Vertica."))
                        self.status.end_restart()
                        raise RuntimeError("Could not stop Vertica!")
                LOG.debug("Database stopped.")
            else:
                LOG.debug("Database is not running.")
        except exception.ProcessExecutionError:
            LOG.exception(_("Failed to stop database."))
            raise RuntimeError("Could not stop database.")
Example #2
 def install_if_needed(self, packages):
     """Prepare the guest machine with a MongoDB installation."""
     LOG.info(_("Preparing Guest as MongoDB."))
     if not system.PACKAGER.pkg_is_installed(packages):
         LOG.debug("Installing packages: %s." % str(packages))
         system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
     LOG.info(_("Finished installing MongoDB server."))
Example #3
def change_user_group(user, group, append=True, add_group=True, **kwargs):
    """Adds a user to groups by using the usermod linux command with -a and
    -G options.

    seealso:: _execute_shell_cmd for valid optional keyword arguments.

    :param user:            Username.
    :type user:             string

    :param group:           Group names.
    :type group:            comma separated string

    :param  append:         Adds user to a group.
    :type append:           boolean

    :param add_group:       Lists the groups that the user is a member of.
                            While adding a new groups to an existing user
                            with '-G' option alone, will remove all existing
                            groups that user belongs. Therefore, always add
                            the '-a' (append) with '-G' option to add or
                            append new groups.
    :type add_group:        boolean

    :raises:                :class:`UnprocessableEntity` if user or group not
                            given.
    """

    if not user:
        raise exception.UnprocessableEntity(_("Missing user."))
    elif not group:
        raise exception.UnprocessableEntity(_("Missing group."))

    options = (('a', append), ('G', add_group))
    _execute_shell_cmd('usermod', options, group, user, **kwargs)
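
A hedged usage sketch for the helper above; the user, the group list, and the as_root keyword are illustrative (other helpers in this set forward as_root to _execute_shell_cmd the same way).

# Hypothetical usage: append user 'dbadmin' to two supplementary groups,
# roughly equivalent to: sudo usermod -a -G sudo,backup dbadmin
change_user_group('dbadmin', 'sudo,backup', append=True, add_group=True,
                  as_root=True)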
Example #4
    def create(self, req, body, tenant_id, version_id):
        """Create configuration parameter for datastore version."""
        LOG.info(_("Creating configuration parameter for datastore"))
        LOG.debug("req : '%s'\n\n" % req)
        LOG.debug("body : '%s'\n\n" % body)
        if not body:
            raise exception.BadRequest(_("Invalid request body."))

        parameter = body['configuration-parameter']
        name = parameter['name']
        restart_required = bool(parameter['restart_required'])
        data_type, min_size, max_size = self._validate_data_type(parameter)
        datastore_version = ds_models.DatastoreVersion.load_by_uuid(version_id)

        rule = config_models.DatastoreConfigurationParameters.create(
            name=name,
            datastore_version_id=datastore_version.id,
            restart_required=restart_required,
            data_type=data_type,
            max_size=max_size,
            min_size=min_size
        )
        return wsgi.Result(
            views.MgmtConfigurationParameterView(rule).data(),
            200)
Example #5
def move(source, destination, force=False, **kwargs):
    """Move a given file or directory to a new location.
    Move attempts to preserve the original ownership, permissions and
    timestamps.

    :seealso: _execute_shell_cmd for valid optional keyword arguments.

    :param source:          Path to the source location.
    :type source:           string

    :param destination:     Path to the destination location.
    :type destination:      string

    :param force:           Do not prompt before overwriting.
    :type force:            boolean

    :raises:                :class:`UnprocessableEntity` if source or
                            destination not given.
    """

    if not source:
        raise exception.UnprocessableEntity(_("Missing source path."))
    elif not destination:
        raise exception.UnprocessableEntity(_("Missing destination path."))

    options = (('f', force),)
    _execute_shell_cmd('mv', options, source, destination, **kwargs)
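
A hedged usage sketch; the paths are hypothetical and as_root is assumed to be forwarded to _execute_shell_cmd as in the other operating_system helpers.

# Hypothetical usage: relocate a data directory onto a mounted volume,
# overwriting any existing target ('-f') and running the command via sudo.
move('/var/lib/mysql', '/mnt/volume/mysql', force=True, as_root=True)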
Example #6
 def initial_setup(self):
     self.ip_address = netutils.get_my_ipv4()
     mount_point = CONF.couchbase.mount_point
     try:
         LOG.info(_('Couchbase Server change data dir path.'))
         operating_system.chown(mount_point, 'couchbase', 'couchbase',
                                as_root=True)
         pwd = CouchbaseRootAccess.get_password()
         utils.execute_with_timeout(
             (system.cmd_node_init
              % {'data_path': mount_point,
                 'IP': self.ip_address,
                 'PWD': pwd}), shell=True)
         operating_system.remove(system.INSTANCE_DATA_DIR, force=True,
                                 as_root=True)
         LOG.debug('Couchbase Server initialize cluster.')
         utils.execute_with_timeout(
             (system.cmd_cluster_init
              % {'IP': self.ip_address, 'PWD': pwd}),
             shell=True)
         utils.execute_with_timeout(system.cmd_set_swappiness, shell=True)
         utils.execute_with_timeout(system.cmd_update_sysctl_conf,
                                    shell=True)
         LOG.info(_('Couchbase Server initial setup finished.'))
     except exception.ProcessExecutionError:
         LOG.exception(_('Error performing initial Couchbase setup.'))
         raise RuntimeError("Couchbase Server initial setup failed")
Example #7
def chown(path, user, group, recursive=True, force=False, **kwargs):
    """Changes the owner and group of a given file.

    :seealso: _execute_shell_cmd for valid optional keyword arguments.

    :param path:         Path to the modified file.
    :type path:          string

    :param user:         Owner.
    :type user:          string

    :param group:        Group.
    :type group:         string

    :param recursive:    Operate on files and directories recursively.
    :type recursive:     boolean

    :param force:        Suppress most error messages.
    :type force:         boolean

    :raises:             :class:`UnprocessableEntity` if path not given.
    :raises:             :class:`UnprocessableEntity` if owner/group not given.
    """

    if not path:
        raise exception.UnprocessableEntity(
            _("Cannot change ownership of a blank file or directory."))
    if not user and not group:
        raise exception.UnprocessableEntity(
            _("Please specify owner or group, or both."))

    owner_group_modifier = _build_user_group_pair(user, group)
    options = (('f', force), ('R', recursive))
    _execute_shell_cmd('chown', options, owner_group_modifier, path, **kwargs)
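
Example #6 above contains a real call to this helper; the hypothetical invocation below is only for comparison (path and ownership are made up).

# Hypothetical usage: give the Couchbase data directory to the couchbase
# user and group, recursing into subdirectories and suppressing errors.
chown('/var/lib/couchbase', 'couchbase', 'couchbase',
      recursive=True, force=True, as_root=True)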
Example #8
 def _get_user(self, username, hostname):
     """Return a single user matching the criteria."""
     user = models.MySQLUser()
     try:
         user.name = username  # Could possibly throw a BadRequest here.
     except ValueError as ve:
         LOG.exception(_("Error Getting user information"))
         raise exception.BadRequest(_("Username %(user)s is not valid"
                                      ": %(reason)s") %
                                    {'user': username, 'reason': ve.message}
                                    )
     with self.local_sql_client(self.mysql_app.get_engine()) as client:
         q = sql_query.Query()
         q.columns = ['User', 'Host', 'Password']
         q.tables = ['mysql.user']
         q.where = ["Host != 'localhost'",
                    "User = '******'" % username,
                    "Host = '%s'" % hostname]
         q.order = ['User', 'Host']
         t = text(str(q))
         result = client.execute(t).fetchall()
         LOG.debug("Getting user information %s." % result)
         if len(result) != 1:
             return None
         found_user = result[0]
         user.password = found_user['Password']
         user.host = found_user['Host']
         self._associate_dbs(user)
         return user
Example #9
def clear_expired_password():
    """
    Some MySQL installations generate a random root password and save it
    in /root/.mysql_secret. This password is expired and must be changed
    by a client that supports expired passwords.
    """
    LOG.debug("Removing expired password.")
    secret_file = "/root/.mysql_secret"
    try:
        out, err = utils.execute("cat", secret_file,
                                 run_as_root=True, root_helper="sudo")
    except exception.ProcessExecutionError:
        LOG.exception(_("/root/.mysql_secret does not exist."))
        return
    m = re.match('# The random password set for the root user at .*: (.*)',
                 out)
    if m:
        try:
            out, err = utils.execute("mysqladmin", "-p%s" % m.group(1),
                                     "password", "", run_as_root=True,
                                     root_helper="sudo")
        except exception.ProcessExecutionError:
            LOG.exception(_("Cannot change mysql password."))
            return
        operating_system.remove(secret_file, force=True, as_root=True)
        LOG.debug("Expired password removed.")
Example #10
 def module_apply(self, context, modules=None):
     LOG.info(_("Applying modules."))
     results = []
     for module_data in modules:
         module = module_data['module']
         id = module.get('id', None)
         module_type = module.get('type', None)
         name = module.get('name', None)
         tenant = module.get('tenant', None)
         datastore = module.get('datastore', None)
         ds_version = module.get('datastore_version', None)
         contents = module.get('contents', None)
         md5 = module.get('md5', None)
         auto_apply = module.get('auto_apply', True)
         visible = module.get('visible', True)
         if not name:
             raise AttributeError(_("Module name not specified"))
         if not contents:
             raise AttributeError(_("Module contents not specified"))
         driver = self.module_driver_manager.get_driver(module_type)
         if not driver:
             raise exception.ModuleTypeNotFound(
                 _("No driver implemented for module type '%s'") %
                 module_type)
         result = module_manager.ModuleManager.apply_module(
             driver, module_type, name, tenant, datastore, ds_version,
             contents, id, md5, auto_apply, visible)
         results.append(result)
     LOG.info(_("Returning list of modules: %s") % results)
     return results
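
A hedged sketch of the modules payload this method iterates over; every field value below is illustrative rather than taken from a real deployment.

# Hypothetical payload: one entry per module, each wrapped under 'module'.
modules = [{
    'module': {
        'id': 'a1b2c3', 'type': 'ping', 'name': 'my-module',
        'tenant': 'tenant-1', 'datastore': 'mysql',
        'datastore_version': '5.7', 'contents': 'message=hi',
        'md5': 'not-a-real-md5', 'auto_apply': False, 'visible': True,
    }
}]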
Example #11
    def action(self, req, body, tenant_id, id):
        LOG.info("req : '%s'\n\n" % req)
        LOG.info("Committing an ACTION against instance %s for tenant '%s'" % (id, tenant_id))
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        _actions = {
            "stop": self._action_stop,
            "reboot": self._action_reboot,
            "migrate": self._action_migrate,
            "reset-task-status": self._action_reset_task_status,
        }
        selected_action = None
        for key in body:
            if key in _actions:
                if selected_action is not None:
                    msg = _("Only one action can be specified per request.")
                    raise exception.BadRequest(msg)
                selected_action = _actions[key]
            else:
                msg = _("Invalid instance action: %s") % key
                raise exception.BadRequest(msg)

        if selected_action:
            return selected_action(context, instance, body)
        else:
            raise exception.BadRequest(_("Invalid request body."))
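
The dispatcher above accepts exactly one recognized action key per request; the request bodies below are illustrative sketches of what it accepts and rejects.

# Hypothetical request bodies for the dispatcher above.
valid_body = {'reboot': {}}             # exactly one known action
too_many = {'stop': {}, 'reboot': {}}   # rejected: two actions at once
unknown = {'resize': {}}                # rejected: key not in _actions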
Example #12
    def grow_cluster(self, context, cluster_id, new_instance_ids):

        def _grow_cluster():
            LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for new cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]

            existing_instances = [Instance.load(context, instance_id)
                                  for instance_id
                                  in instance_ids
                                  if instance_id not in new_instance_ids]

            existing_guests = [self.get_guest(i) for i in existing_instances]
            new_guests = [self.get_guest(i) for i in new_insts]
            all_guests = new_guests + existing_guests

            authorized_users_without_password = ['root', 'dbadmin']
            new_ips = [self.get_ip(instance) for instance in new_insts]

            for user in authorized_users_without_password:
                pub_key = [guest.get_public_keys(user) for guest in all_guests]
                for guest in all_guests:
                    guest.authorize_public_keys(user, pub_key)

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    LOG.debug("Found 'master' instance, calling grow on guest")
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    self.get_guest(master_instance).grow_cluster(new_ips)
                    break

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)

        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error growing cluster %s.") % cluster_id)
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
Example #13
        def _shrink_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            all_instance_ids = [db_instance.id for db_instance in db_instances]

            remove_instances = [Instance.load(context, instance_id)
                                for instance_id in instance_ids]

            left_instances = [Instance.load(context, instance_id)
                              for instance_id
                              in all_instance_ids
                              if instance_id not in instance_ids]

            remove_member_ips = [self.get_ip(instance)
                                 for instance in remove_instances]

            k = VerticaCluster.k_safety(len(left_instances))

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    if self.get_ip(master_instance) in remove_member_ips:
                        raise RuntimeError(_("Cannot remove master instance!"))
                    LOG.debug(_("Marking cluster k-safety: %s") % k)
                    self.get_guest(master_instance).mark_design_ksafe(k)
                    self.get_guest(master_instance).shrink_cluster(
                        remove_member_ips)
                    break

            for r in remove_instances:
                Instance.delete(r)
Example #14
    def delete_user(self, user):
        LOG.debug("Delete a given user.")
        db2_user = models.DatastoreUser.deserialize(user)
        db2_user.check_delete()
        userName = db2_user.name
        user_dbs = db2_user.databases
        LOG.debug("For user %s, databases to be deleted = %r." % (
            userName, user_dbs))

        if len(user_dbs) == 0:
            databases = self.list_access(db2_user.name, None)
        else:
            databases = user_dbs

        LOG.debug("databases for user = %r." % databases)
        for database in databases:
            mydb = models.DatastoreSchema.deserialize(database)
            try:
                run_command(system.REVOKE_USER_ACCESS % {
                    'dbname': mydb.name,
                    'login': userName})
                LOG.debug("Revoked access for user:%s on database:%s." % (
                    userName, mydb.name))
            except exception.ProcessExecutionError as pe:
                LOG.debug("Error occurred while revoking access to %s." %
                          mydb.name)
                pass
            try:
                utils.execute_with_timeout(system.DELETE_USER_COMMAND % {
                    'login': db2_user.name.lower()}, shell=True)
            except exception.ProcessExecutionError as pe:
                LOG.exception(_(
                    "There was an error while deleting user: %s.") % pe)
                raise exception.GuestError(original_message=_(
                    "Unable to delete user: %s.") % userName)
Example #15
    def write_config(
        self,
        config_contents,
        execute_function=utils.execute_with_timeout,
        mkstemp_function=tempfile.mkstemp,
        unlink_function=os.unlink,
    ):

        # first securely create a temp file. mkstemp() will set
        # os.O_EXCL on the open() call, and we get a file with
        # permissions of 600 by default.
        (conf_fd, conf_path) = mkstemp_function()

        LOG.debug("Storing temporary configuration at %s." % conf_path)

        # write config and close the file, delete it if there is an
        # error. only unlink if there is a problem. In normal course,
        # we move the file.
        try:
            os.write(conf_fd, config_contents)
            execute_function("sudo", "mv", conf_path, system.CASSANDRA_CONF)
            # TODO(denis_makogon): figure out the dynamic way to discover
            # configs owner since it can cause errors if there is
            # no cassandra user in operating system
            execute_function("sudo", "chown", "cassandra:cassandra", system.CASSANDRA_CONF)
            execute_function("sudo", "chmod", "a+r", system.CASSANDRA_CONF)
        except Exception:
            LOG.exception(_("Exception generating Cassandra configuration %s.") % conf_path)
            unlink_function(conf_path)
            raise
        finally:
            os.close(conf_fd)

        LOG.info(_("Wrote new Cassandra configuration."))
Example #16
 def index(self, req, tenant_id):
     """Return all storage devices."""
     LOG.info(_("req : '%s'\n\n") % req)
     LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
     context = req.environ[wsgi.CONTEXT_KEY]
     storages = models.StorageDevices.load(context)
     return wsgi.Result(views.StoragesView(storages).data(), 200)
Example #17
    def create_user(self, users):
        LOG.debug("Creating user(s) for accessing DB2 database(s).")
        try:
            for item in users:
                user = models.DatastoreUser.deserialize(item)
                user.check_create()
                try:
                    LOG.debug("Creating OS user: %s." % user.name)
                    utils.execute_with_timeout(
                        system.CREATE_USER_COMMAND % {
                            'login': user.name,
                            'passwd': user.password}, shell=True)
                except exception.ProcessExecutionError as pe:
                    LOG.exception(_("Error creating user: %s.") % user.name)
                    continue

                for database in user.databases:
                    mydb = models.DatastoreSchema.deserialize(database)
                    try:
                        LOG.debug("Granting user: %s access to database: %s."
                                  % (user.name, mydb.name))
                        run_command(system.GRANT_USER_ACCESS % {
                            'dbname': mydb.name, 'login': user.name})
                    except exception.ProcessExecutionError as pe:
                        LOG.debug(
                            "Error granting user: %s access to database: %s."
                            % (user.name, mydb.name))
                        LOG.debug(pe)
                        pass
        except exception.ProcessExecutionError as pe:
            LOG.exception(_("An error occurred creating users: %s.") %
                          pe.message)
            pass
Example #18
 def prepare(self, context, packages, databases, memory_mb, users,
             device_path=None, mount_point=None, backup_info=None,
             config_contents=None, root_password=None, overrides=None,
             cluster_config=None, snapshot=None):
     """
     This is called when the trove instance first comes online.
     It is the first rpc message passed from the task manager.
     prepare handles all the base configuration of the redis instance.
     """
     try:
         app = RedisApp(RedisAppStatus.get())
         RedisAppStatus.get().begin_install()
         if device_path:
             device = volume.VolumeDevice(device_path)
             # unmount if device is already mounted
             device.unmount_device(device_path)
             device.format()
             device.mount(mount_point)
             operating_system.chown(mount_point, 'redis', 'redis',
                                    as_root=True)
             LOG.debug('Mounted the volume.')
         app.install_if_needed(packages)
         LOG.info(_('Writing redis configuration.'))
         app.write_config(config_contents)
         app.restart()
         LOG.info(_('Redis instance has been setup and configured.'))
     except Exception:
         LOG.exception(_("Error setting up Redis instance."))
         app.status.set_status(rd_instance.ServiceStatuses.FAILED)
         raise RuntimeError("prepare call has failed.")
Example #19
def copy(source, destination, force=False, preserve=False, recursive=True,
         **kwargs):
    """Copy a given file or directory to another location.
    Copy does NOT attempt to preserve ownership, permissions and timestamps
    unless the 'preserve' option is enabled.

    :seealso: _execute_shell_cmd for valid optional keyword arguments.

    :param source:          Path to the source location.
    :type source:           string

    :param destination:     Path to the destination location.
    :type destination:      string

    :param force:           If an existing destination file cannot be
                            opened, remove it and try again.
    :type force:            boolean

    :param preserve:        Preserve mode, ownership and timestamps.
    :type preserve:         boolean

    :param recursive:       Copy directories recursively.
    :type recursive:        boolean

    :raises:                :class:`UnprocessableEntity` if source or
                            destination not given.
    """

    if not source:
        raise exception.UnprocessableEntity(_("Missing source path."))
    elif not destination:
        raise exception.UnprocessableEntity(_("Missing destination path."))

    options = (('f', force), ('p', preserve), ('R', recursive))
    _execute_shell_cmd('cp', options, source, destination, **kwargs)
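
Example #28 below shows a real call to this helper with preserve=True; the invocation here is a minimal hypothetical one for reference.

# Hypothetical usage: copy a configuration directory recursively, keeping
# mode, ownership and timestamps, and running the command via sudo.
copy('/etc/cassandra', '/mnt/backup/cassandra',
     preserve=True, recursive=True, as_root=True)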
Example #20
def execute_with_timeout(*args, **kwargs):
    time = kwargs.pop('timeout', 30)
    log_output_on_error = kwargs.pop('log_output_on_error', False)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except exception.ProcessExecutionError as e:
        if log_output_on_error:
            LOG.error(
                _("Command '%(cmd)s' failed. %(description)s "
                  "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
                  "stdout: %(stdout)s") %
                {'cmd': e.cmd, 'description': e.description or '',
                 'exit_code': e.exit_code, 'stderr': e.stderr,
                 'stdout': e.stdout})
        raise
    except Timeout as t:
        if t is not timeout:
            LOG.error(_("Got a timeout but not the one expected."))
            raise
        else:
            msg = (_("Time out after waiting "
                     "%(time)s seconds when running proc: %(args)s"
                     " %(kwargs)s.") % {'time': time, 'args': args,
                                        'kwargs': kwargs})
            LOG.error(msg)
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
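
A hedged usage sketch; the command and the 120 second limit are illustrative, and execute is assumed to return a (stdout, stderr) pair, which is how callers elsewhere in this set unpack it.

# Hypothetical usage: restart a service with a hard 120 second limit and
# log its output if the command fails.
stdout, stderr = execute_with_timeout(
    'sudo', 'service', 'mysql', 'restart',
    timeout=120, log_output_on_error=True)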
Example #21
    def action(self, req, body, tenant_id, id):
        LOG.info(_("Committing an ACTION against a database "
                   "instance %(id)s for tenant '%(tenant_id)s'\n"
                   "req : '%(req)s'\n\n") % {
                       "tenant_id": tenant_id, "req": req, "id": id})
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        _actions = {
            'stop': self._action_stop,
            'reboot': self._action_reboot,
            'migrate': self._action_migrate,
            'reset-task-status': self._action_reset_task_status
        }
        selected_action = None
        for key in body:
            if key in _actions:
                if selected_action is not None:
                    msg = _("Only one action can be specified per request.")
                    raise exception.BadRequest(msg)
                selected_action = _actions[key]
            else:
                msg = _("Invalid instance action: %s") % key
                raise exception.BadRequest(msg)

        if selected_action:
            return selected_action(context, instance, req, body)
        else:
            raise exception.BadRequest(_("Invalid request body."))
Example #22
 def index(self, req, tenant_id, detailed=False):
     """Return all hosts."""
     LOG.info(_("req : '%s'\n\n") % req)
     LOG.info(_("Indexing a host for tenant '%s'") % tenant_id)
     context = req.environ[wsgi.CONTEXT_KEY]
     hosts = models.SimpleHost.load_all(context)
     return wsgi.Result(views.HostsView(hosts).data(), 200)
Example #23
    def stop_db_service(self, service_candidates, timeout,
                        disable_on_boot=False, update_db=False):
        """Stop the database service and wait for the database to shutdown.

        :param service_candidates:   List of possible system service names.
        :type service_candidates:    list

        :param timeout:              Wait timeout in seconds.
        :type timeout:               integer

        :param disable_on_boot:      Disable service auto-start.
                                     The auto-start setting will be updated
                                     only if the service command succeeds.
        :type disable_on_boot:       boolean

        :param update_db:            Suppress the Trove instance heartbeat.
        :type update_db:             boolean

        :raises:              :class:`RuntimeError` on failure.
        """
        LOG.info(_("Stopping database service."))
        operating_system.stop_service(service_candidates)

        LOG.debug("Waiting for database to shutdown.")
        if not self._wait_for_database_service_status(
                instance.ServiceStatuses.SHUTDOWN, timeout, update_db):
            raise RuntimeError(_("Database failed to stop."))

        LOG.info(_("Database has stopped successfully."))

        if disable_on_boot:
            LOG.info(_("Disable service auto-start on boot."))
            operating_system.disable_service_on_boot(service_candidates)
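
A hedged sketch of how a guest-agent manager might call the helper above; the app object, the service names and the timeout value are assumptions.

# Hypothetical usage: stop whichever service name is present, wait up to
# ten minutes for SHUTDOWN, and keep the service from starting on reboot.
app.stop_db_service(['postgresql', 'postgresql-9.6'],
                    timeout=600, disable_on_boot=True, update_db=False)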
Example #24
 def _enable_db_on_boot(self):
     LOG.info(_("Enabling MongoDB on boot."))
     try:
         mongo_service = self._get_service()
         utils.execute_with_timeout(mongo_service["cmd_enable"], shell=True)
     except KeyError:
         raise RuntimeError(_("MongoDB service is not discovered."))
Example #25
    def start_mysql(self, update_db=False):
        LOG.info(_("Starting MySQL."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so

        self._enable_mysql_on_boot()

        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
        except KeyError:
            raise RuntimeError("Service is not discovered.")
        except exception.ProcessExecutionError:
            # it seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. we're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. so regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check it's status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MySQL failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing stalled MySQL start command."))
                # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MySQL!")
Example #26
 def _disable_mysql_on_boot(self):
     try:
         utils.execute_with_timeout(self.mysql_service['cmd_disable'],
                                    shell=True)
     except KeyError:
         LOG.exception(_("Error disabling MySQL start on boot."))
         raise RuntimeError(_("Service is not discovered."))
Example #27
    def _wait_for_slave_status(self, status, client, max_time):

        def verify_slave_status():
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()
            if actual_status:
                return actual_status[1].upper() == status.upper()
            # The slave_running status is no longer available in MySql 5.7
            # Need to query the performance_schema instead.
            LOG.debug("slave_running global status doesn't exist, checking "
                      "service_state in performance_schema instead.")
            q = sql_query.Query()
            q.columns = ["a.service_state", "c.service_state"]
            q.tables = ["performance_schema.replication_applier_status a",
                        "performance_schema.replication_connection_status c"]
            q.where = ["a.channel_name = ''", "c.channel_name = ''"]
            t = text(str(q))
            actual_status = client.execute(t).first()
            if (actual_status and actual_status[0].upper() == 'ON' and
                    actual_status[1].upper() == 'ON'):
                actual_status_str = 'ON'
            else:
                actual_status_str = 'OFF'
            return actual_status_str == status.upper()

        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status, sleep_time=3,
                             time_out=max_time)
            LOG.info(_("Replication is now %s.") % status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(), 'max': max_time})
Example #28
 def _run_pre_backup(self):
     try:
         for cmd in self.pre_backup_commands:
             utils.execute_with_timeout(*cmd)
         root = service.CouchbaseRootAccess()
         pw = root.get_password()
         self._save_buckets_config(pw)
         with open(OUTFILE, "r") as f:
             out = f.read()
             if out != "[]":
                 d = json.loads(out)
                 all_memcached = True
                 for i in range(len(d)):
                     bucket_type = d[i]["bucketType"]
                     if bucket_type != "memcached":
                         all_memcached = False
                         break
                 if not all_memcached:
                     self._backup(pw)
                 else:
                     LOG.info(_("All buckets are memcached.  "
                                "Skipping backup."))
         operating_system.move(OUTFILE, system.COUCHBASE_DUMP_DIR)
         if pw != "password":
             # Not default password, backup generated root password
             operating_system.copy(system.pwd_file,
                                   system.COUCHBASE_DUMP_DIR,
                                   preserve=True, as_root=True)
     except exception.ProcessExecutionError:
         LOG.exception(_("Error during pre-backup phase."))
         raise
Example #29
 def _get_user(self, username, hostname):
     """Return a single user matching the criteria."""
     user = None
     try:
         # Could possibly throw a ValueError here.
         user = models.MySQLUser(name=username)
         user.check_reserved()
         if username == ADMIN_USER_NAME and hostname == ADMIN_HOST:
             raise ValueError(
                 "User %s@%s is reserved." % (ADMIN_USER_NAME, ADMIN_HOST))
     except ValueError as ve:
         LOG.exception(_("Error Getting user information"))
         err_msg = encodeutils.exception_to_unicode(ve)
         raise exception.BadRequest(_("Username %(user)s is not valid"
                                      ": %(reason)s") %
                                    {'user': username, 'reason': err_msg}
                                    )
     with self.local_sql_client(self.mysql_app.get_engine()) as client:
         q = sql_query.Query()
         q.columns = ['User', 'Host']
         q.tables = ['mysql.user']
         q.where = ["Host != 'localhost'",
                    "User = '******'" % username,
                    "Host = '%s'" % hostname]
         q.order = ['User', 'Host']
         t = text(str(q))
         result = client.execute(t).fetchall()
         LOG.debug("Getting user information %s." % result)
         if len(result) != 1:
             return None
         found_user = result[0]
         user.host = found_user['Host']
         self._associate_dbs(user)
         return user
Example #30
 def action(self, req, body, tenant_id, id):
     LOG.debug(("Committing Action Against Cluster for "
                "Tenant '%(tenant_id)s'\n"
                "req : '%(req)s'\n\nid : '%(id)s'\n\n") %
               {"req": req, "id": id, "tenant_id": tenant_id})
     if not body:
         raise exception.BadRequest(_("Invalid request body."))
     context = req.environ[wsgi.CONTEXT_KEY]
     cluster = models.Cluster.load(context, id)
     manager = cluster.datastore_version.manager
     api_strategy = strategy.load_api_strategy(manager)
     _actions = api_strategy.cluster_controller_actions
     selected_action = None
     for key in body:
         if key in _actions:
             selected_action = _actions[key]
             break
     else:
         message = _("No action '%(action)s' supplied "
                     "by strategy for manager '%(manager)s'") % (
                         {'action': key, 'manager': manager})
         raise exception.TroveError(message)
     cluster = selected_action(cluster, body)
     if cluster:
         view = views.load_view(cluster, req=req, load_servers=False)
         wsgi_result = wsgi.Result(view.data(), 202)
     else:
         wsgi_result = wsgi.Result(None, 202)
     return wsgi_result
Example #31
    def prepare(self,
                context,
                packages,
                databases,
                memory_mb,
                users,
                device_path=None,
                mount_point=None,
                backup_info=None,
                config_contents=None,
                root_password=None,
                overrides=None,
                cluster_config=None):
        """Makes ready DBAAS on a Guest container."""

        LOG.debug("Preparing MongoDB instance.")

        self.status.begin_install()
        self.app.install_if_needed(packages)
        self.app.stop_db()
        self.app.clear_storage()
        mount_point = system.MONGODB_MOUNT_POINT
        if device_path:
            device = volume.VolumeDevice(device_path)
            # unmount if device is already mounted
            device.unmount_device(device_path)
            device.format()
            if os.path.exists(system.MONGODB_MOUNT_POINT):
                device.migrate_data(mount_point)
            device.mount(mount_point)
            operating_system.update_owner('mongodb', 'mongodb', mount_point)

            LOG.debug("Mounted the volume %(path)s as %(mount)s." % {
                'path': device_path,
                "mount": mount_point
            })

        conf_changes = self.get_config_changes(cluster_config, mount_point)
        config_contents = self.app.update_config_contents(
            config_contents, conf_changes)
        if cluster_config is None:
            self.app.start_db_with_conf_changes(config_contents)
        else:
            if cluster_config['instance_type'] == "query_router":
                self.app.reset_configuration(
                    {'config_contents': config_contents})
                self.app.write_mongos_upstart()
                self.app.status.is_query_router = True
                # don't start mongos until add_config_servers is invoked

            elif cluster_config['instance_type'] == "config_server":
                self.app.status.is_config_server = True
                self.app.start_db_with_conf_changes(config_contents)

            elif cluster_config['instance_type'] == "member":
                self.app.start_db_with_conf_changes(config_contents)

            else:
                LOG.error(
                    _("Bad cluster configuration; instance type "
                      "given as %s.") % cluster_config['instance_type'])
                self.status.set_status(ds_instance.ServiceStatuses.FAILED)
                return

            self.status.set_status(ds_instance.ServiceStatuses.BUILD_PENDING)
        LOG.info(_('Completed setup of MongoDB database instance.'))
Example #32
class DnsRecordNotFound(NotFound):

    message = _("DnsRecord with name= %(name)s not found.")
Example #33
 def reset_configuration(self, configuration):
     LOG.info(_("Resetting configuration."))
     config_contents = configuration['config_contents']
     self.configuration_manager.save_configuration(config_contents)
Example #34
 def _disable_redis_on_boot(self):
     """
     Disables redis on boot.
     """
     LOG.info(_("Disabling Redis on boot."))
     operating_system.disable_service_on_boot(system.SERVICE_CANDIDATES)
Example #35
 def _enable_redis_on_boot(self):
     """
     Enables redis on boot.
     """
     LOG.info(_('Enabling Redis on boot.'))
     operating_system.enable_service_on_boot(system.SERVICE_CANDIDATES)
Example #36
 def clear_storage(self, mount_point):
     LOG.debug("Clearing storage at %s." % mount_point)
     try:
         operating_system.remove(mount_point, force=True, as_root=True)
     except exception.ProcessExecutionError:
         LOG.exception(_("Error clearing storage."))
Example #37
 def _configure_as_query_router(self):
     LOG.info(_("Configuring instance as a cluster query router."))
     self.is_query_router = True
     self.configuration_manager = self.mongos_configuration_manager
     self._configure_network(MONGODB_PORT)
Example #38
    def grow_cluster(self, context, cluster_id, new_instance_ids):
        def _grow_cluster():
            LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for new cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            new_insts = [
                Instance.load(context, instance_id)
                for instance_id in new_instance_ids
            ]

            existing_instances = [
                Instance.load(context, instance_id)
                for instance_id in instance_ids
                if instance_id not in new_instance_ids
            ]

            existing_guests = [self.get_guest(i) for i in existing_instances]
            new_guests = [self.get_guest(i) for i in new_insts]
            all_guests = new_guests + existing_guests

            authorized_users_without_password = ['root', 'dbadmin']
            new_ips = [self.get_ip(instance) for instance in new_insts]

            for user in authorized_users_without_password:
                pub_key = [guest.get_public_keys(user) for guest in all_guests]
                for guest in all_guests:
                    guest.authorize_public_keys(user, pub_key)

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    LOG.debug("Found 'master' instance, calling grow on guest")
                    master_instance = Instance.load(context, db_instance.id)
                    self.get_guest(master_instance).grow_cluster(new_ips)
                    break

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)

        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error growing cluster %s.") % cluster_id)
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
Example #39
        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %s: %s" %
                      (cluster_id, instance_ids))

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            instances = [
                Instance.load(context, instance_id)
                for instance_id in instance_ids
            ]

            # filter query routers in instances into a new list: query_routers
            query_routers = [
                instance for instance in instances
                if instance.type == 'query_router'
            ]
            LOG.debug("query routers: %s" %
                      [instance.id for instance in query_routers])
            # filter config servers in instances into new list: config_servers
            config_servers = [
                instance for instance in instances
                if instance.type == 'config_server'
            ]
            LOG.debug("config servers: %s" %
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [
                instance for instance in instances if instance.type == 'member'
            ]
            LOG.debug("members: %s" % [instance.id for instance in members])

            # for config_server in config_servers, append ip/hostname to
            # "config_server_hosts", then
            # peel off the replica-set name and ip/hostname from 'x'
            config_server_ips = [
                self.get_ip(instance) for instance in config_servers
            ]
            LOG.debug("config server ips: %s" % config_server_ips)

            LOG.debug("calling add_config_servers on query_routers")
            try:
                for query_router in query_routers:
                    (self.get_guest(query_router).add_config_servers(
                        config_server_ips))
            except Exception:
                LOG.exception(_("error adding config servers"))
                self.update_statuses_on_failure(cluster_id)
                return

            if not self._create_replica_set(members, cluster_id):
                return

            replica_set_name = "rs1"
            if not self._create_shard(query_routers, replica_set_name, members,
                                      cluster_id):
                return
            # call to start checking status
            for instance in instances:
                self.get_guest(instance).cluster_complete()
Example #40
    def create_cluster(self, context, cluster_id):
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [
                Instance.load(context, instance_id)
                for instance_id in instance_ids
            ]

            member_ips = [self.get_ip(instance) for instance in instances]
            guests = [self.get_guest(instance) for instance in instances]

            # Users to be configured for password-less SSH.
            authorized_users_without_password = ['root', 'dbadmin']

            # Configuring password-less SSH for cluster members.
            # Strategy for setting up SSH:
            # get public keys for user from member-instances in cluster,
            # combine them, finally push it back to all instances,
            # and member instances add them to authorized keys.
            LOG.debug("Configuring password-less SSH on cluster members.")
            try:
                for user in authorized_users_without_password:
                    pub_key = [guest.get_public_keys(user) for guest in guests]
                    for guest in guests:
                        guest.authorize_public_keys(user, pub_key)

                LOG.debug("Installing cluster with members: %s." % member_ips)
                for db_instance in db_instances:
                    if db_instance['type'] == 'master':
                        master_instance = Instance.load(
                            context, db_instance.id)
                        self.get_guest(master_instance).install_cluster(
                            member_ips)
                        break

                LOG.debug("Finalizing cluster configuration.")
                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Example #41
    def grow_cluster(self, context, cluster_id, instance_ids):
        LOG.debug("begin grow_cluster for MongoDB cluster %s", cluster_id)

        def _grow_cluster():
            new_instances = [db_instance for db_instance in self.db_instances
                             if db_instance.id in instance_ids]
            new_members = [db_instance for db_instance in new_instances
                           if db_instance.type == 'member']
            new_query_routers = [db_instance for db_instance in new_instances
                                 if db_instance.type == 'query_router']
            instances = []
            if new_members:
                shard_ids = set([db_instance.shard_id for db_instance
                                 in new_members])
                query_router_id = self._get_running_query_router_id()
                if not query_router_id:
                    return
                for shard_id in shard_ids:
                    LOG.debug('growing cluster by adding shard %(shard_id)s '
                              'on query router %(router_id)s',
                              {'shard_id': shard_id,
                               'router_id': query_router_id})
                    member_ids = [db_instance.id for db_instance in new_members
                                  if db_instance.shard_id == shard_id]
                    if not self._all_instances_ready(
                        member_ids, cluster_id, shard_id
                    ):
                        return
                    members = [Instance.load(context, member_id)
                               for member_id in member_ids]
                    query_router = Instance.load(context, query_router_id)
                    if not self._create_shard(query_router, members):
                        return
                    instances.extend(members)
            if new_query_routers:
                query_router_ids = [db_instance.id for db_instance
                                    in new_query_routers]
                config_servers_ids = [db_instance.id for db_instance
                                      in self.db_instances
                                      if db_instance.type == 'config_server']
                LOG.debug('growing cluster by adding query routers '
                          '%(router)s, with config servers %(server)s',
                          {'router': query_router_ids,
                           'server': config_servers_ids})
                if not self._all_instances_ready(
                    query_router_ids, cluster_id
                ):
                    return
                query_routers = [Instance.load(context, instance_id)
                                 for instance_id in query_router_ids]
                config_servers_ips = [
                    self.get_ip(Instance.load(context, config_server_id))
                    for config_server_id in config_servers_ids
                ]
                if not self._add_query_routers(
                        query_routers, config_servers_ips,
                        admin_password=self.get_cluster_admin_password(context)
                ):
                    return
                instances.extend(query_routers)
            for instance in instances:
                self.get_guest(instance).cluster_complete()

        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("end grow_cluster for MongoDB cluster %s", self.id)
Example #42
        def _create_resources():
            # parse the ID from the Ref
            instance_id = utils.get_id_from_href(instance)

            # verify that the instance exists and can perform actions
            from trove.instance.models import Instance
            instance_model = Instance.load(context, instance_id)
            instance_model.validate_can_perform_action()
            cls.validate_can_perform_action(instance_model, 'backup_create')
            cls.verify_swift_auth_token(context)
            if instance_model.cluster_id is not None:
                raise exception.ClusterInstanceOperationNotSupported()

            ds = instance_model.datastore
            ds_version = instance_model.datastore_version
            parent = None
            last_backup_id = None
            if parent_id:
                # Look up the parent info or fail early if not found or if
                # the user does not have access to the parent.
                _parent = cls.get_by_id(context, parent_id)
                parent = {
                    'location': _parent.location,
                    'checksum': _parent.checksum,
                }
            elif incremental:
                _parent = Backup.get_last_completed(context, instance_id)
                if _parent:
                    parent = {
                        'location': _parent.location,
                        'checksum': _parent.checksum
                    }
                    last_backup_id = _parent.id
            try:
                db_info = DBBackup.create(name=name,
                                          description=description,
                                          tenant_id=context.tenant,
                                          state=BackupState.NEW,
                                          instance_id=instance_id,
                                          parent_id=parent_id
                                          or last_backup_id,
                                          datastore_version_id=ds_version.id,
                                          deleted=False)
            except exception.InvalidModelError as ex:
                LOG.exception(
                    _("Unable to create backup record for "
                      "instance: %s"), instance_id)
                raise exception.BackupCreationError(str(ex))

            backup_info = {
                'id': db_info.id,
                'name': name,
                'description': description,
                'instance_id': instance_id,
                'backup_type': db_info.backup_type,
                'checksum': db_info.checksum,
                'parent': parent,
                'datastore': ds.name,
                'datastore_version': ds_version.name,
            }
            api.API(context).create_backup(backup_info, instance_id)
            return db_info
Example #43
 def _get_item(key, dictList):
     for item in dictList:
         if key == item.get('name'):
             return item
     raise exception.UnprocessableEntity(
         message=_("%s is not a supported configuration parameter.") % key)
Example #44
    def create_cluster(self, context, cluster_id):
        LOG.debug("begin create_cluster for id: %s", cluster_id)

        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s",
                      {'cluster_id': cluster_id, 'instance_ids': instance_ids})

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("all instances in cluster %s ready.", cluster_id)

            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # filter query routers in instances into a new list: query_routers
            query_routers = [instance for instance in instances if
                             instance.type == 'query_router']
            LOG.debug("query routers: %s",
                      [instance.id for instance in query_routers])
            # filter config servers in instances into new list: config_servers
            config_servers = [instance for instance in instances if
                              instance.type == 'config_server']
            LOG.debug("config servers: %s",
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [instance for instance in instances if
                       instance.type == 'member']
            LOG.debug("members: %s",
                      [instance.id for instance in members])

            # collect the ip/hostname of each config server so the query
            # routers can be pointed at them
            config_server_ips = [self.get_ip(instance)
                                 for instance in config_servers]
            LOG.debug("config server ips: %s", config_server_ips)

            if not self._add_query_routers(query_routers,
                                           config_server_ips):
                return

            if not self._create_shard(query_routers[0], members):
                return

            # notify each guest that the cluster is complete so it can
            # finalize and report its status
            for instance in instances:
                self.get_guest(instance).cluster_complete()

        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("end create_cluster for id: %s", cluster_id)
Example #45
    def prepare(self,
                context,
                packages,
                databases,
                memory_mb,
                users,
                device_path=None,
                mount_point=None,
                backup_info=None,
                config_contents=None,
                root_password=None,
                overrides=None,
                cluster_config=None,
                snapshot=None):
        """Makes ready DBAAS on a Guest container."""

        LOG.debug("Preparing MongoDB instance.")

        self.app.status.begin_install()
        self.app.install_if_needed(packages)
        self.app.wait_for_start()
        self.app.stop_db()
        self.app.clear_storage()
        mount_point = system.MONGODB_MOUNT_POINT
        if device_path:
            device = volume.VolumeDevice(device_path)
            # unmount if device is already mounted
            device.unmount_device(device_path)
            device.format()
            if os.path.exists(system.MONGODB_MOUNT_POINT):
                device.migrate_data(mount_point)
            device.mount(mount_point)
            operating_system.chown(mount_point,
                                   system.MONGO_USER,
                                   system.MONGO_USER,
                                   as_root=True)

            LOG.debug("Mounted the volume %(path)s as %(mount)s." % {
                'path': device_path,
                "mount": mount_point
            })

        if config_contents:
            # Save resolved configuration template first.
            self.app.configuration_manager.save_configuration(config_contents)

        # Apply guestagent specific configuration changes.
        self.app.apply_initial_guestagent_configuration(
            cluster_config, mount_point)

        if not cluster_config:
            # Create the Trove admin user.
            self.app.secure()

        # Don't start mongos until add_config_servers is invoked,
        # don't start members as they should already be running.
        if not (self.app.is_query_router or self.app.is_cluster_member):
            self.app.start_db(update_db=True)

        if not cluster_config and backup_info:
            self._perform_restore(backup_info, context, mount_point, self.app)
            if service.MongoDBAdmin().is_root_enabled():
                self.app.status.report_root(context, 'root')

        if not cluster_config and root_password:
            LOG.debug('Root password provided. Enabling root.')
            service.MongoDBAdmin().enable_root(root_password)

        if not cluster_config:
            if databases:
                self.create_database(context, databases)
            if users:
                self.create_user(context, users)

        if cluster_config:
            self.app.status.set_status(
                ds_instance.ServiceStatuses.BUILD_PENDING)
        else:
            self.app.complete_install_or_restart()

        LOG.info(_('Completed setup of MongoDB database instance.'))
Example #46
    def get_latest_txn_id(self, context):
        LOG.info(_("Retrieving latest repl offset."))
        # return self._get_repl_offset()
        return None
Example #47
class DatabaseNotFound(NotFound):

    message = _("Database %(uuid)s cannot be found on the instance.")
Example #48
    def _validate_configuration(values, datastore_version, config_rules):
        LOG.info(_("Validating configuration values"))

        # create rules dictionary based on parameter name
        rules_lookup = {}
        for item in config_rules:
            rules_lookup[item.name.lower()] = item

        # checking if there are any rules for the datastore
        if not rules_lookup:
            output = {"version": datastore_version.name,
                      "name": datastore_version.datastore_name}
            msg = _("Configuration groups are not supported for this "
                    "datastore: %(name)s %(version)s") % output
            raise exception.UnprocessableEntity(message=msg)

        for k, v in values.items():
            key = k.lower()
            # parameter name validation
            if key not in rules_lookup:
                output = {"key": k,
                          "version": datastore_version.name,
                          "name": datastore_version.datastore_name}
                msg = _("The configuration parameter %(key)s is not "
                        "supported for this datastore: "
                        "%(name)s %(version)s.") % output
                raise exception.UnprocessableEntity(message=msg)

            rule = rules_lookup[key]

            # type checking
            value_type = rule.data_type

            if not isinstance(v, ConfigurationsController._find_type(
                    value_type)):
                output = {"key": k, "type": value_type}
                msg = _("The value provided for the configuration "
                        "parameter %(key)s is not of type %(type)s.") % output
                raise exception.UnprocessableEntity(message=msg)

            # integer min/max checking
            if isinstance(v, (int, long)) and not isinstance(v, bool):
                if rule.min_size is not None:
                    try:
                        min_value = int(rule.min_size)
                    except ValueError:
                        raise exception.TroveError(_(
                            "Invalid or unsupported min value defined in the "
                            "configuration-parameters configuration file. "
                            "Expected integer."))
                    if v < min_value:
                        output = {"key": k, "min": min_value}
                        message = _(
                            "The value for the configuration parameter "
                            "%(key)s is less than the minimum allowed: "
                            "%(min)s") % output
                        raise exception.UnprocessableEntity(message=message)

                if rule.max_size is not None:
                    try:
                        max_value = int(rule.max_size)
                    except ValueError:
                        raise exception.TroveError(_(
                            "Invalid or unsupported max value defined in the "
                            "configuration-parameters configuration file. "
                            "Expected integer."))
                    if v > max_value:
                        output = {"key": k, "max": max_value}
                        message = _(
                            "The value for the configuration parameter "
                            "%(key)s is greater than the maximum "
                            "allowed: %(max)s") % output
                        raise exception.UnprocessableEntity(message=message)
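A hedged sketch of how this validator might be driven; the values dictionary and the rules are invented stand-ins for what the datastore version's configuration-parameters definitions normally supply:

# Hypothetical inputs; in Trove the rules are loaded for the datastore
# version rather than hand-built like this.
values = {'max_connections': 150, 'autocommit': True}
config_rules = load_configuration_rules(datastore_version)  # hypothetical helper
ConfigurationsController._validate_configuration(
    values, datastore_version, config_rules)

A value of the wrong type, or an integer outside a rule's min/max bounds, raises UnprocessableEntity as shown above.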
Example #49
class FlavorNotFound(TroveError):

    message = _("Resource %(uuid)s cannot be found.")
Example #50
class ComputeInstanceNotFound(NotFound):

    internal_message = _("Cannot find compute instance %(server_id)s for "
                         "instance %(instance_id)s.")

    message = _("Resource %(instance_id)s can not be retrieved.")
Example #51
class CapabilityNotFound(NotFound):

    message = _("Capability '%(capability)s' cannot be found.")
Example #52
class UserNotFound(NotFound):

    message = _("User %(uuid)s cannot be found on the instance.")
Example #53
class DatastoreVersionAlreadyExists(BadRequest):

    message = _("A datastore version with the name '%(name)s' already exists.")
Example #54
class CapabilityDisabled(TroveError):

    message = _("Capability '%(capability)s' is disabled.")
Example #55
class BackupTooLarge(TroveError):
    message = _("Backup is too large for given flavor or volume. "
                "Backup size: %(backup_size)s GBs. "
                "Available size: %(disk_size)s GBs.")
Example #56
class InvalidRPCConnectionReuse(TroveError):

    message = _("Invalid RPC Connection Reuse.")
Example #57
class TroveOperationAuthError(TroveError):
    message = _("Operation not allowed for tenant %(tenant_id)s.")
Example #58
class ImageNotFound(NotFound):

    message = _("Image %(uuid)s cannot be found.")
Example #59
class ClusterOperationNotSupported(TroveError):

    message = _("The '%(operation)s' operation is not supported for cluster.")
Example #60
class ClusterDatastoreNotSupported(TroveError):
    message = _("Clusters not supported for "
                "%(datastore)s-%(datastore_version)s.")