def remove_datastore_configuration_parameters(datastore, datastore_version):
    """Delete every configuration parameter registered for a datastore version.

    Looks up the (possibly inactive) datastore version, then removes each
    associated DatastoreConfigurationParameters row.
    """
    get_db_api().configure_db(CONF)
    ds, ds_version = dstore_models.get_datastore_version(
        type=datastore, version=datastore_version, return_inactive=True)
    for parameter in DatastoreConfigurationParameters.load_parameters(
            ds_version.id):
        parameter.delete()
def initialize(extra_opts=None, pre_logging=None):
    """Bootstrap configuration, logging, RPC and the Trove database.

    :param extra_opts: optional extra CLI options to register on the config.
    :param pre_logging: optional callable invoked with the config object
        before logging is set up.
    :returns: the populated config object.
    """
    # Import only the modules necessary to initialize logging and determine if
    # debug_utils are enabled.
    import sys
    from oslo_log import log as logging
    from trove.common import cfg
    from trove.common import debug_utils

    conf = cfg.CONF
    if extra_opts:
        conf.register_cli_opts(extra_opts)
    cfg.parse_args(sys.argv)
    if pre_logging:
        pre_logging(conf)
    logging.setup(conf, None)
    debug_utils.setup()

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(conf)

    # Initialize Trove database.
    from trove.db import get_db_api
    get_db_api().configure_db(conf)

    return conf  # May be used by other scripts
def launch_services():
    """Configure the database and run the conductor RPC service until exit."""
    get_db_api().configure_db(CONF)
    server = rpc_service.RpcService(
        manager='trove.conductor.manager.Manager',
        topic=CONF.conductor_queue)
    launcher = openstack_service.launch(
        server, workers=CONF.trove_conductor_workers)
    launcher.wait()
def setUp(self):
    """Point the DB API at the test MySQL database and build the manager."""
    super(Test_Manager, self).setUp()
    db_options = {
        "sql_connection": "mysql://*****:*****@192.168.16.70/trove",
    }
    get_db_api().configure_db(db_options)
    self.manager = manager.KSC_Manager()
def load_datastore_configuration_parameters(datastore, datastore_version,
                                            config_file):
    """Register the configuration parameters listed in ``config_file`` for
    the given datastore version.

    The file is expected to be JSON with a top-level
    "configuration-parameters" list.
    """
    get_db_api().configure_db(CONF)
    ds, ds_v = dstore_models.get_datastore_version(
        type=datastore, version=datastore_version)
    with open(config_file) as f:
        parameters = json.load(f)["configuration-parameters"]
    for param in parameters:
        create_or_update_datastore_configuration_parameter(
            param["name"],
            ds_v.id,
            param["restart_required"],
            param["type"],
            param.get("max"),
            param.get("min"),
        )
def main():
    """Entry point: configure the DB and serve the Trove API over WSGI."""
    cfg.parse_args(sys.argv)
    logging.setup(None)
    get_db_api().configure_db(CONF)
    paste_file = CONF.find_file(CONF.api_paste_config)
    server = wsgi.launch('trove', CONF.bind_port or 8779, paste_file,
                         workers=CONF.trove_api_workers)
    server.wait()
def main():
    """Entry point: set up debug support, configure the DB, serve the API."""
    cfg.parse_args(sys.argv)
    logging.setup(None)
    debug_utils.setup()
    get_db_api().configure_db(CONF)
    paste_file = CONF.find_file(CONF.api_paste_config)
    wsgi.launch('trove',
                CONF.bind_port or 8779,
                paste_file,
                workers=CONF.trove_api_workers).wait()
def initialize_rdl_config(config_file):
    """Initialize config, logging and the Trove DB for integration tests.

    :param config_file: path of the config file to load.
    :raises SystemExit: when the database cannot be configured.
    """
    from trove.common import cfg
    from trove.openstack.common import log
    from trove.db import get_db_api

    conf = cfg.CONF
    cfg.parse_args(['int_tests'], default_config_files=[config_file])
    log.setup(None)
    try:
        get_db_api().configure_db(conf)
        conf_file = conf.find_file(conf.api_paste_config)
    except RuntimeError as error:
        import traceback
        # Bug fix: use the print() function. The old "print expr" statement
        # is a SyntaxError on Python 3 (and the parenthesized form is also
        # valid on Python 2), matching the sibling implementation that
        # already uses print().
        print(traceback.format_exc())
        sys.exit("ERROR: %s" % error)
def initialize_rdl_config(config_file):
    """Parse the int_tests config file, set up logging and the Trove DB.

    Exits the process with an error message if DB configuration fails.
    """
    from oslo_log import log
    from trove.common import cfg
    from trove.db import get_db_api

    conf = cfg.CONF
    cfg.parse_args(['int_tests'], default_config_files=[config_file])
    log.setup(conf, None)
    try:
        get_db_api().configure_db(conf)
        conf_file = conf.find_file(conf.api_paste_config)
    except RuntimeError as err:
        import traceback
        print(traceback.format_exc())
        sys.exit("ERROR: %s" % err)
def save(self):
    """Validate, stamp ``updated_at``, log and persist this model.

    :raises exception.InvalidModelError: when validation fails.
    """
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated_at'] = utils.utcnow()
    model_name = self.__class__.__name__
    LOG.debug(_("Saving %s: %s") % (model_name, self.__dict__))
    return get_db_api().save(self)
def save(self):
    """Log this model's state and persist it via the DB API."""
    details = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug(_("Saving %(name)s: %(dict)s") % details)
    return get_db_api().save(self)
def save(self):
    """Validate, refresh ``updated_at``, then persist this model.

    :raises exception.InvalidModelError: when validation fails.
    """
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated_at'] = utils.utcnow()
    state = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug("Saving %(name)s: %(dict)s" % state)
    return get_db_api().save(self)
def delete(self):
    """Log this model's state, then delete it through the DB API."""
    info = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug(_("Deleting %(name)s: %(dict)s") % info)
    return get_db_api().delete(self)
def save(self):
    """Validate and persist this model, stamping ``updated_at`` first.

    :raises exception.InvalidModelError: when validation fails.
    """
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated_at'] = utils.utcnow()
    name = self.__class__.__name__
    LOG.debug(_("Saving %s: %s") % (name, self.__dict__))
    return get_db_api().save(self)
def load_datastore_configuration_parameters(datastore, datastore_version,
                                            config_file):
    """Load configuration parameters from a JSON file and register them
    against the given (possibly inactive) datastore version.
    """
    get_db_api().configure_db(CONF)
    ds, ds_v = dstore_models.get_datastore_version(
        type=datastore, version=datastore_version, return_inactive=True)
    with open(config_file) as f:
        config = json.load(f)
    for param in config['configuration-parameters']:
        create_or_update_datastore_configuration_parameter(
            param['name'], ds_v.id, param['restart_required'],
            param['type'], param.get('max'), param.get('min'))
def init_db():
    """Sync the Trove schema and configure the SQLAlchemy session."""
    from trove.common import cfg
    from trove.db import get_db_api
    from trove.db.sqlalchemy import session

    conf = cfg.CONF
    api = get_db_api()
    api.db_sync(conf)
    session.configure_db(conf)
def init_db():
    """Set up the database exactly once; safe under concurrent callers.

    The module-level LOCK plus the DB_SETUP flag guarantee the sync and
    session configuration run a single time.
    """
    global DB_SETUP
    with LOCK:
        if DB_SETUP:
            return
        api = get_db_api()
        api.db_sync(CONF)
        session.configure_db(CONF)
        DB_SETUP = True
def load_datastore_configuration_parameters(datastore, datastore_version,
                                            config_file):
    """Register every parameter listed in ``config_file`` for the datastore
    version (inactive versions are looked up too).
    """
    get_db_api().configure_db(CONF)
    _, version = dstore_models.get_datastore_version(
        type=datastore, version=datastore_version, return_inactive=True)
    with open(config_file) as f:
        parameters = json.load(f)['configuration-parameters']
    for p in parameters:
        create_or_update_datastore_configuration_parameter(
            p['name'], version.id, p['restart_required'], p['type'],
            p.get('max'), p.get('min'))
def initialize(extra_opts=None, pre_logging=None):
    """Bootstrap eventlet, config, logging, RPC and the Trove database.

    :param extra_opts: optional extra CLI options to register on the config.
    :param pre_logging: optional callable invoked with the config object
        before logging is set up.
    :returns: the populated config object.
    """
    # Initialize localization support (the underscore character).
    # NOTE(review): gettext.install() dropped the ``unicode`` argument on
    # Python 3 -- this call only works under Python 2; confirm the intended
    # interpreter before porting.
    import gettext
    gettext.install('trove', unicode=1)

    # Apply whole eventlet.monkey_patch excluding 'thread' module.
    # Decision for 'thread' module patching will be made
    # after debug_utils is set up.
    import eventlet
    eventlet.monkey_patch(all=True, thread=False)

    # Import only the modules necessary to initialize logging and determine if
    # debug_utils are enabled.
    import sys
    from oslo_log import log as logging
    from trove.common import cfg
    from trove.common import debug_utils

    conf = cfg.CONF
    if extra_opts:
        conf.register_cli_opts(extra_opts)
    cfg.parse_args(sys.argv)
    if pre_logging:
        pre_logging(conf)
    logging.setup(conf, None)
    debug_utils.setup()

    # Patch 'thread' module if debug is disabled.
    if not debug_utils.enabled():
        eventlet.monkey_patch(thread=True)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(conf)

    # Initialize Trove database.
    from trove.db import get_db_api
    get_db_api().configure_db(conf)

    return conf  # May be used by other scripts
def get_backup_child(cls, backup_id):
    """Return the first completed auto-backup whose parent is ``backup_id``.

    :returns: the matching DBBackup, or None when no child exists.
    """
    children = (db.get_db_api()
                .find_all(models.DBBackup, deleted=False,
                          type=Type.AUTOBACKUP)
                .filter(and_(models.DBBackup.parent_id == backup_id))
                .filter(and_(models.DBBackup.state ==
                             models.BackupState.COMPLETED))
                .all())
    return children[0] if children else None
def clean_db():
    """Wipe the test database, but only when pointed at the sqlite test
    fixture (guards against clobbering a real database).
    """
    from trove.common import cfg
    from trove.db import get_db_api
    from trove.db.sqlalchemy import session

    conf = cfg.CONF
    if conf.sql_connection == "sqlite:///trove_test.sqlite":
        get_db_api().clean_db()
        session.clean_db()
def initialize_database():
    """Recreate the Trove database from scratch and seed datastore data."""
    from trove.db import get_db_api
    from trove.db.sqlalchemy import session

    api = get_db_api()
    api.drop_db(CONF)  # Destroys the database, if it exists.
    api.db_sync(CONF)
    session.configure_db(CONF)
    datastore_init()
    api.configure_db(CONF)
def save(self):
    """Validate then persist this model, logging its state.

    :raises exception.InvalidModelError: when validation fails.
    """
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    state = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug(_("Saving %(name)s: %(dict)s") % state)
    return get_db_api().save(self)
def get_last_full_backup(cls, group_id):
    """Return the most recent completed *full* auto-backup of the group.

    :returns: the newest matching DBBackup, or None when the group has no
        completed full backup.
    """
    # ``parent_id == None`` (not ``is None``) is deliberate: this builds a
    # SQLAlchemy column expression, not a Python identity test.
    full_backups = (db.get_db_api()
                    .find_all(models.DBBackup, group_id=group_id,
                              deleted=False, type=Type.AUTOBACKUP)
                    .filter(models.DBBackup.parent_id == None)  # noqa: E711
                    .filter(and_(models.DBBackup.state ==
                                 models.BackupState.COMPLETED))
                    .order_by("created desc")
                    .all())
    LOG.debug("full_backup_list: %s", full_backups)
    return full_backups[0] if full_backups else None
def startup(topic):
    """Parse args, set up logging/debug, then run the RPC service on topic."""
    cfg.parse_args(sys.argv)
    logging.setup(None)
    debug_utils.setup()

    # Patch 'thread' module if debug is disabled
    if not debug_utils.enabled():
        eventlet.monkey_patch(thread=True)

    from trove.common.rpc import service as rpc_service
    from trove.openstack.common import service as openstack_service
    from trove.db import get_db_api

    get_db_api().configure_db(CONF)
    server = rpc_service.RpcService(manager=CONF.taskmanager_manager,
                                    topic=topic)
    openstack_service.launch(server).wait()
def list_groups_in_window(cls, context, from_window, to_window,
                          deleted=False):
    """Return auto-backup groups scheduled within the given window.

    :param context: request context (unused here; kept for interface).
    :param from_window: inclusive lower bound on ``autobackup_at``.
    :param to_window: inclusive upper bound on ``autobackup_at``.
    :param deleted: include soft-deleted rows when True.
    """
    # The previous ``try: ... except Exception as e: raise e`` wrapper was a
    # no-op that only destroyed the traceback (Python 2 semantics); let
    # exceptions propagate naturally instead.
    return (db.get_db_api()
            .find_all(DBAutoBackup, deleted=deleted)
            .filter(and_(DBAutoBackup.autobackup_at >= from_window,
                         DBAutoBackup.autobackup_at <= to_window))
            .all())
def initialize_database():
    """Drop, recreate and configure the DB, seeding the fake mysql image."""
    from trove.db import get_db_api
    from trove.instance import models
    from trove.db.sqlalchemy import session

    api = get_db_api()
    api.drop_db(CONF)  # Destroys the database, if it exists.
    api.db_sync(CONF)
    session.configure_db(CONF)
    # Adds the image for mysql (needed to make most calls work).
    models.ServiceImage.create(service_name="mysql", image_id="fake")
    api.configure_db(CONF)
def initialize(extra_opts=None, pre_logging=None):
    """Bootstrap eventlet, config, logging and the Trove database.

    :param extra_opts: optional extra CLI options to register on the config.
    :param pre_logging: optional callable invoked with the config object
        before logging is set up.
    :returns: the populated config object.
    """
    # Initialize localization support (the underscore character).
    # NOTE(review): gettext.install(..., unicode=1) is Python 2 only; the
    # ``unicode`` argument was removed in Python 3 -- confirm before porting.
    import gettext
    gettext.install('trove', unicode=1)

    # Apply whole eventlet.monkey_patch excluding 'thread' module.
    # Decision for 'thread' module patching will be made
    # after debug_utils is set up.
    import eventlet
    eventlet.monkey_patch(all=True, thread=False)

    # Import only the modules necessary to initialize logging and determine if
    # debug_utils are enabled.
    import sys
    from trove.common import cfg
    from trove.common import debug_utils
    from trove.openstack.common import log as logging

    conf = cfg.CONF
    if extra_opts:
        conf.register_cli_opts(extra_opts)
    cfg.parse_args(sys.argv)
    if pre_logging:
        pre_logging(conf)

    # Fore. 2014/7/3. krds patch. Imported here for its side effects; it
    # must run before logging setup for everything to work correctly.
    from trove.patch import patch

    logging.setup(None)
    debug_utils.setup()

    # Patch 'thread' module if debug is disabled
    if not debug_utils.enabled():
        eventlet.monkey_patch(thread=True)

    # Initialize Trove database.
    from trove.db import get_db_api
    get_db_api().configure_db(conf)

    return conf  # May be used by other scripts
def init_db():
    """Set up the Trove database once; subsequent calls are no-ops."""
    global DB_SETUP
    if DB_SETUP:
        return

    from trove.common import cfg
    from trove.db import get_db_api
    from trove.db.sqlalchemy import session

    conf = cfg.CONF
    api = get_db_api()
    api.db_sync(conf)
    session.configure_db(conf)
    DB_SETUP = True
def create_or_update_datastore_configuration_parameter(name,
                                                       datastore_version_id,
                                                       restart_required,
                                                       data_type, max_size,
                                                       min_size):
    """Upsert a datastore configuration parameter.

    Updates the existing (possibly soft-deleted) parameter of that name for
    the version, or inserts a fresh row when none exists.
    """
    get_db_api().configure_db(CONF)
    version = dstore_models.DatastoreVersion.load_by_uuid(
        datastore_version_id)
    try:
        param = DatastoreConfigurationParameters.load_parameter_by_name(
            datastore_version_id, name, show_deleted=True)
        param.restart_required = restart_required
        param.max_size = max_size
        param.min_size = min_size
        param.data_type = data_type
        get_db_api().save(param)
    except exception.NotFound:
        param = DBDatastoreConfigurationParameters(
            id=utils.generate_uuid(),
            name=name,
            datastore_version_id=version.id,
            restart_required=restart_required,
            data_type=data_type,
            max_size=max_size,
            min_size=min_size,
            deleted=False,
        )
        get_db_api().save(param)
def get_backup_parent(cls, context, backup_id):
    """Return the completed parent of the given auto-backup, or None.

    None is also returned when the backup itself cannot be found.
    """
    try:
        backup = models.DBBackup.find_by(context=context, id=backup_id,
                                         deleted=False,
                                         type=Type.AUTOBACKUP)
    except Exception as e:
        LOG.warn("get_backup_parent exception:%s, backup_id:%s",
                 e, backup_id)
        return None
    if not backup:
        return None
    parent = (db.get_db_api()
              .find_all(models.DBBackup, deleted=False,
                        type=Type.AUTOBACKUP)
              .filter(and_(models.DBBackup.id == backup.parent_id))
              .filter(and_(models.DBBackup.state ==
                           models.BackupState.COMPLETED))
              .first())
    LOG.debug("backup_parent: %s", parent)
    return parent
def list_autobackup_expire_at(cls, context, expire_time, deleted=False):
    """Return auto-backups whose non-zero ``expire_at`` is <= expire_time.

    Backups already in DELETE_FAILED state are excluded.

    :param context: request context (unused here; kept for interface).
    :param expire_time: cutoff timestamp; coerced to int before querying.
    :param deleted: include soft-deleted rows when True.
    """
    try:
        LOG.debug("list_autobackup_expire_at expire_time:%s", expire_time)
        expire_time = int(expire_time)
        # ``or []`` replaces the redundant "if list and len(list) > 0"
        # dance; .all() already returns a list.
        backup_list_expired = (
            db.get_db_api()
            .find_all(models.DBBackup, deleted=deleted,
                      type=Type.AUTOBACKUP)
            .filter(and_(models.DBBackup.expire_at <= expire_time))
            .filter(and_(models.DBBackup.expire_at != 0))
            .filter(and_(models.DBBackup.state !=
                         models.BackupState.DELETE_FAILED))
            .all()) or []
    except Exception as e:
        LOG.error("list_autobackup_expire_at exception: %s", e)
        # Bare raise preserves the original traceback ("raise e" did not
        # under Python 2).
        raise
    LOG.debug("backup_list_expired %s, expire_time:%s",
              backup_list_expired, expire_time)
    return backup_list_expired
def _check_instances_with_running_tasks(self):
    """Finds Trove instances with running tasks.

    Such instances need to communicate with the Trove control plane to
    report status. This may rise issues if Trove services are unavailable,
    e.g. Trove guest agent may be left in a failed state due to
    communication issues.
    """
    db.get_db_api().configure_db(cfg.CONF)
    busy_count = (DBInstance.query()
                  .filter(DBInstance.task_status != InstanceTasks.NONE)
                  .filter_by(deleted=False)
                  .count())
    if busy_count:
        return upgradecheck.Result(
            upgradecheck.Code.WARNING,
            _("Instances with running tasks exist."))
    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def save(self):
    """Log this model's contents and persist it via the DB API."""
    payload = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug("Saving %(name)s: %(dict)s" % payload)
    return get_db_api().save(self)
def get_by(cls, **kwargs):
    """Find a single record matching the processed query conditions."""
    conditions = cls._process_conditions(kwargs)
    return get_db_api().find_by(cls, **conditions)
def __init__(self):
    """Store the configured DB API handle for later use."""
    self.db_api = get_db_api()
from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.remote import create_nova_client
from trove.common import utils
from trove.db import get_db_api
from trove.db import models as dbmodels
from trove.flavor.models import Flavor as flavor_model
from trove.volume_types.models import VolumeType as volume_type_model

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Module-level handle to the configured DB API implementation.
db_api = get_db_api()


def persisted_models():
    """Map logical model names to the DB classes persisted by this module."""
    return {
        'datastore': DBDatastore,
        'capabilities': DBCapabilities,
        'datastore_version': DBDatastoreVersion,
        'capability_overrides': DBCapabilityOverrides,
        'datastore_version_metadata': DBDatastoreVersionMetadata
    }


class DBDatastore(dbmodels.DatabaseModelBase):
    # Columns exposed by this model.
    _data_fields = ['id', 'name', 'default_version_id']
def save(self):
    """Persist this model via the configured DB API."""
    return get_db_api().save(self)
def load(cls, instance_id, method_name):
    """Return the record matching (instance_id, method_name)."""
    return get_db_api().find_by(cls, instance_id=instance_id,
                                method_name=method_name)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common import cfg from trove.common import exception from trove.common import utils from trove.db import models as dbmodels from trove.db import get_db_api from trove.openstack.common import log as logging LOG = logging.getLogger(__name__) CONF = cfg.CONF db_api = get_db_api() def persisted_models(): return { 'datastore': DBDatastore, 'capabilities': DBCapabilities, 'datastore_version': DBDatastoreVersion, 'capability_overrides': DBCapabilityOverrides, } class DBDatastore(dbmodels.DatabaseModelBase): _data_fields = ['id', 'name', 'default_version_id']
def load(cls, context, instance_id):
    """Fetch the history record whose id equals ``instance_id``.

    ``context`` is accepted for interface compatibility but is not used
    in the lookup.
    """
    return get_db_api().find_by(cls, id=instance_id)
def db_api(self):
    """Return the configured database API implementation."""
    return get_db_api()
def save(self):
    """Stamp ``updated_at`` with the current time and persist this model."""
    self['updated_at'] = utils.utcnow()
    api = get_db_api()
    return api.save(self)
def query(cls):
    """Return a base query for this model class via the DB API.

    NOTE(review): relies on the private ``_base_query`` of the DB API --
    presumably a SQLAlchemy query; confirm against the API implementation.
    """
    return get_db_api()._base_query(cls)
def delete(self):
    """Log this model's state, then delete it through the DB API."""
    info = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug("Deleting %(name)s: %(dict)s" % info)
    return get_db_api().delete(self)
def save(self):
    """Log and persist this model via the DB API."""
    model_name = self.__class__.__name__
    LOG.debug(_("Saving %s: %s") % (model_name, self.__dict__))
    return get_db_api().save(self)