Example #1
    def setUp(self):
        util.init_db()

        self.datastore = datastore_models.DBDatastore.create(
            id=str(uuid.uuid4()),
            name='name',
            default_version_id=str(uuid.uuid4()))

        self.datastore_version = datastore_models.DBDatastoreVersion.create(
            id=self.datastore.default_version_id,
            name='name',
            image_id=str(uuid.uuid4()),
            packages=str(uuid.uuid4()),
            datastore_id=self.datastore.id,
            manager='mysql',
            active=1)

        self.master = DBInstance(
            InstanceTasks.NONE,
            id=str(uuid.uuid4()),
            name="TestMasterInstance",
            datastore_version_id=self.datastore_version.id)
        self.master.set_task_status(InstanceTasks.NONE)
        self.master.save()
        self.master_status = InstanceServiceStatus(ServiceStatuses.RUNNING,
                                                   id=str(uuid.uuid4()),
                                                   instance_id=self.master.id)
        self.master_status.save()

        self.safe_nova_client = models.create_nova_client
        models.create_nova_client = nova.fake_create_nova_client
        super(TestReplication, self).setUp()
Example #2
 def test_add_shard(self, mock_find_all, mock_load_any_instance,
                    mock_check_quotas, mock_instance_create, mock_update_db,
                    mock_task_api_load, mock_load_by_uuid):
     self.cluster.db_info.task_status = ClusterTasks.NONE
     (mock_find_all.return_value.all.return_value) = [
         DBInstance(InstanceTasks.NONE,
                    name="TestInstance1",
                    shard_id="1",
                    id='1',
                    datastore_version_id='1'),
         DBInstance(InstanceTasks.NONE,
                    name="TestInstance2",
                    shard_id="1",
                    id='2',
                    datastore_version_id='1'),
         DBInstance(InstanceTasks.NONE,
                    name="TestInstance3",
                    shard_id="1",
                    id='3',
                    datastore_version_id='1')
     ]
     mock_datastore_version = Mock()
     mock_datastore_version.manager = 'mongodb'
     mock_load_by_uuid.return_value = mock_datastore_version
     mock_task_api = Mock()
     mock_task_api.mongodb_add_shard_cluster.return_value = None
     mock_task_api_load.return_value = mock_task_api
     self.cluster.add_shard()
     mock_update_db.assert_called_with(
         task_status=ClusterTasks.ADDING_SHARD)
     mock_task_api.mongodb_add_shard_cluster.assert_called()
Example #3
        def _add_shard_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               shard_id=shard_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in shard %s: %s" % (shard_id, instance_ids))
            if not self._all_instances_ready(instance_ids, cluster_id,
                                             shard_id):
                return

            members = [
                Instance.load(context, instance_id)
                for instance_id in instance_ids
            ]

            if not self._create_replica_set(members, cluster_id, shard_id):
                return

            db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                                   type='query_router',
                                                   deleted=False).all()
            query_routers = [
                Instance.load(context, db_query_router.id)
                for db_query_router in db_query_routers
            ]

            if not self._create_shard(query_routers, replica_set_name, members,
                                      cluster_id, shard_id):
                return

            for member in members:
                self.get_guest(member).cluster_complete()
Example #4
    def test_show_deleted_mgmt_instances(self):
        args = {'deleted': 0, 'cluster_id': None}
        db_infos_active = DBInstance.find_all(**args)
        args = {'deleted': 1, 'cluster_id': None}
        db_infos_deleted = DBInstance.find_all(**args)
        args = {'cluster_id': None}
        # db_infos_all = DBInstance.find_all(**args)

        # TODO(SlickNik) Fix this assert to work reliably in the gate.
        # This fails intermittently when the unit tests run in parallel.
        # self.assertTrue(db_infos_all.count() ==
        #                 db_infos_active.count() +
        #                 db_infos_deleted.count())

        with patch.object(self.context, 'is_admin', return_value=True):
            deleted_instance = db_infos_deleted.all()[0]
            active_instance = db_infos_active.all()[0]

            instance = DBInstance.find_by(context=self.context,
                                          id=active_instance.id)
            self.assertEqual(active_instance.id, instance.id)

            self.assertRaises(
                exception.ModelNotFoundError,
                DBInstance.find_by,
                context=self.context,
                id=deleted_instance.id,
                deleted=False)

            instance = DBInstance.find_by(context=self.context,
                                          id=deleted_instance.id,
                                          deleted=True)
            self.assertEqual(deleted_instance.id, instance.id)
Example #5
        def _add_shard_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False,
                                               shard_id=shard_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in shard %(shard_id)s: %(instance_ids)s",
                      {'shard_id': shard_id, 'instance_ids': instance_ids})
            if not self._all_instances_ready(instance_ids, cluster_id,
                                             shard_id):
                return

            members = [Instance.load(context, instance_id)
                       for instance_id in instance_ids]

            db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                                   type='query_router',
                                                   deleted=False).all()
            query_routers = [Instance.load(context, db_query_router.id)
                             for db_query_router in db_query_routers]

            if not self._create_shard(query_routers[0], members):
                return

            for member in members:
                self.get_guest(member).cluster_complete()
Example #6
 def setUp(self):
     super(SimpleInstanceTest, self).setUp()
     self.context = trove_testtools.TroveTestContext(self, is_admin=True)
     db_info = DBInstance(InstanceTasks.BUILDING, name="TestInstance")
     self.instance = SimpleInstance(None,
                                    db_info,
                                    InstanceServiceStatus(
                                        ServiceStatuses.BUILDING),
                                    ds_version=Mock(),
                                    ds=Mock(),
                                    locality='affinity')
     self.instance.context = self.context
     db_info.addresses = {
         "private": [{
             "addr": "123.123.123.123"
         }],
         "internal": [{
             "addr": "10.123.123.123"
         }],
         "public": [{
             "addr": "15.123.123.123"
         }]
     }
     self.orig_conf = CONF.network_label_regex
     self.orig_ip_regex = CONF.ip_regex
     self.orig_black_list_regex = CONF.black_list_regex
Example #7
        def _add_shard_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               shard_id=shard_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in shard %s: %s" % (shard_id,
                                                     instance_ids))
            if not self._all_instances_ready(instance_ids, cluster_id,
                                             shard_id):
                return

            members = [Instance.load(context, instance_id)
                       for instance_id in instance_ids]

            db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                                   type='query_router',
                                                   deleted=False).all()
            query_routers = [Instance.load(context, db_query_router.id)
                             for db_query_router in db_query_routers]

            if not self._create_shard(query_routers[0], members):
                return

            for member in members:
                self.get_guest(member).cluster_complete()
Example #8
    def test_show_deleted_mgmt_instances(self):
        args = {'deleted': 0, 'cluster_id': None}
        db_infos_active = DBInstance.find_all(**args)
        args = {'deleted': 1, 'cluster_id': None}
        db_infos_deleted = DBInstance.find_all(**args)
        args = {'cluster_id': None}
        # db_infos_all = DBInstance.find_all(**args)

        # TODO(SlickNik) Fix this assert to work reliably in the gate.
        # This fails intermittently when the unit tests run in parallel.
        # self.assertTrue(db_infos_all.count() ==
        #                 db_infos_active.count() +
        #                 db_infos_deleted.count())

        with patch.object(self.context, 'is_admin', return_value=True):
            deleted_instance = db_infos_deleted.all()[0]
            active_instance = db_infos_active.all()[0]

            instance = DBInstance.find_by(context=self.context,
                                          id=active_instance.id)
            self.assertEqual(active_instance.id, instance.id)

            self.assertRaises(exception.ModelNotFoundError,
                              DBInstance.find_by,
                              context=self.context,
                              id=deleted_instance.id,
                              deleted=False)

            instance = DBInstance.find_by(context=self.context,
                                          id=deleted_instance.id,
                                          deleted=True)
            self.assertEqual(deleted_instance.id, instance.id)
Example #9
 def setUp(self):
     super(SimpleInstanceTest, self).setUp()
     self.context = trove_testtools.TroveTestContext(self, is_admin=True)
     db_info = DBInstance(InstanceTasks.BUILDING, name="TestInstance")
     self.instance = SimpleInstance(None,
                                    db_info,
                                    InstanceServiceStatus(
                                        ServiceStatuses.BUILDING),
                                    ds_version=Mock(),
                                    ds=Mock(),
                                    locality='affinity')
     self.instance.context = self.context
     db_info.addresses = [
         {
             'type': 'private',
             'address': '123.123.123.123'
         },
         {
             'type': 'private',
             'address': '10.123.123.123'
         },
         {
             'type': 'public',
             'address': '15.123.123.123'
         },
     ]
     self.orig_ip_regex = CONF.ip_regex
     self.orig_black_list_regex = CONF.black_list_regex
Example #10
def _run_query(start_date, end_date):
    created_filters = [DBInstance.created < end_date,
                       DBInstance.deleted == 0]
    created_columns = [DBInstance.created.label('timestamp'),
                       literal_column("0").label('deleted'),
                       DBDatastoreVersion.id.label('dsvid')]
    deleted_filters = [DBInstance.created < end_date,
                       DBInstance.deleted_at >= start_date,
                       DBInstance.deleted == 1]
    deleted_columns = [DBInstance.deleted_at.label('timestamp'),
                       literal_column("1").label('deleted'),
                       DBDatastoreVersion.id.label('dsvid')]

    query1 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*created_columns)
    query1 = query1.filter(*created_filters)

    query2 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*created_columns)
    query2 = query2.filter(*deleted_filters)

    query3 = DBInstance.query().\
        join(DBDatastoreVersion).\
        add_columns(*deleted_columns)
    query3 = query3.filter(*deleted_filters)

    union_query = query1.union(query2, query3).\
        order_by(text('anon_1.timestamp'))

    return union_query.all()
Example #11
 def setUp(self):
     super(MongoDbClusterTasksTest, self).setUp()
     self.cluster_id = "1232"
     self.cluster_name = "Cluster-1234"
     self.tenant_id = "6789"
     self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
                                 id=self.cluster_id,
                                 created=str(datetime.date),
                                 updated=str(datetime.date),
                                 name=self.cluster_name,
                                 task_id=ClusterTaskStatus.NONE._code,
                                 tenant_id=self.tenant_id,
                                 datastore_version_id="1",
                                 deleted=False)
     self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
                               compute_instance_id="compute-1",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-1",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               shard_id="shard-1",
                               type="member")
     self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
                               compute_instance_id="compute-2",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-2",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               shard_id="shard-1",
                               type="member")
     self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="mongos",
                               compute_instance_id="compute-3",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-3",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               shard_id="shard-1",
                               type="query_router")
     self.dbinst4 = DBInstance(InstanceTasks.NONE, id="4",
                               name="configserver",
                               compute_instance_id="compute-4",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-4",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               shard_id="shard-1",
                               type="config_server")
     mock_ds1 = Mock()
     mock_ds1.name = 'mongodb'
     mock_dv1 = Mock()
     mock_dv1.name = '2.0.4'
     self.clustertasks = ClusterTasks(Mock(),
                                      self.db_cluster,
                                      datastore=mock_ds1,
                                      datastore_version=mock_dv1)
Example #12
 def _get_cluster_instance_id(self, tenant_id, cluster_id):
     args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
     cluster_instances = DBInstance.find_all(**args).all()
     instance_ids = [db_instance.id for db_instance in cluster_instances]
     args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type':
             'master'}
     master_instance = DBInstance.find_by(**args)
     master_instance_id = master_instance.id
     return master_instance_id, instance_ids
Example #13
 def _get_cluster_instance_id(self, tenant_id, cluster_id):
     args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
     cluster_instances = DBInstance.find_all(**args).all()
     instance_ids = [db_instance.id for db_instance in cluster_instances]
     args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type':
             'master'}
     master_instance = DBInstance.find_by(**args)
     master_instance_id = master_instance.id
     return master_instance_id, instance_ids
Example #14
 def setUp(self):
     super(SimpleInstanceTest, self).setUp()
     db_info = DBInstance(InstanceTasks.BUILDING, name="TestInstance")
     self.instance = SimpleInstance(None, db_info, "BUILD",
                                    ds_version=Mock(), ds=Mock())
     db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
                          "internal": [{"addr": "10.123.123.123"}],
                          "public": [{"addr": "15.123.123.123"}]}
     self.orig_conf = CONF.network_label_regex
     self.orig_ip_regex = CONF.ip_regex
Example #15
 def setUp(self):
     super(SimpleInstanceTest, self).setUp()
     db_info = DBInstance(InstanceTasks.BUILDING, name="TestInstance")
     self.instance = SimpleInstance(None, db_info,
                                    InstanceServiceStatus(
                                        ServiceStatuses.BUILDING),
                                    ds_version=Mock(), ds=Mock())
     db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
                          "internal": [{"addr": "10.123.123.123"}],
                          "public": [{"addr": "15.123.123.123"}]}
     self.orig_conf = CONF.network_label_regex
     self.orig_ip_regex = CONF.ip_regex
Example #16
 def setUp(self):
     super(PXCClusterTasksTest, self).setUp()
     util.init_db()
     self.cluster_id = "1232"
     self.cluster_name = "Cluster-1234"
     self.tenant_id = "6789"
     self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
                                 id=self.cluster_id,
                                 created=str(datetime.date),
                                 updated=str(datetime.date),
                                 name=self.cluster_name,
                                 task_id=ClusterTaskStatus.NONE._code,
                                 tenant_id=self.tenant_id,
                                 datastore_version_id="1",
                                 deleted=False)
     self.dbinst1 = DBInstance(InstanceTasks.NONE,
                               id="1",
                               name="member1",
                               compute_instance_id="compute-1",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-1",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               type="member")
     self.dbinst2 = DBInstance(InstanceTasks.NONE,
                               id="2",
                               name="member2",
                               compute_instance_id="compute-2",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-2",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               type="member")
     self.dbinst3 = DBInstance(InstanceTasks.NONE,
                               id="3",
                               name="member3",
                               compute_instance_id="compute-3",
                               task_id=InstanceTasks.NONE._code,
                               task_description=InstanceTasks.NONE._db_text,
                               volume_id="volume-3",
                               datastore_version_id="1",
                               cluster_id=self.cluster_id,
                               type="member")
     mock_ds1 = Mock()
     mock_ds1.name = 'pxc'
     mock_dv1 = Mock()
     mock_dv1.name = '7.1'
     self.clustertasks = ClusterTasks(Mock(),
                                      self.db_cluster,
                                      datastore=mock_ds1,
                                      datastore_version=mock_dv1)
Example #17
    def update_statuses_on_failure(self, cluster_id, shard_id=None):

        if CONF.update_status_on_fail:
            if shard_id:
                db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                                   shard_id=shard_id).all()
            else:
                db_instances = DBInstance.find_all(cluster_id=cluster_id).all()

            for db_instance in db_instances:
                db_instance.set_task_status(
                    InstanceTasks.BUILDING_ERROR_SERVER)
                db_instance.save()
Example #18
    def update_statuses_on_failure(self, cluster_id, shard_id=None):

        if CONF.update_status_on_fail:
            if shard_id:
                db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                                   shard_id=shard_id).all()
            else:
                db_instances = DBInstance.find_all(
                    cluster_id=cluster_id).all()

            for db_instance in db_instances:
                db_instance.set_task_status(
                    InstanceTasks.BUILDING_ERROR_SERVER)
                db_instance.save()
Example #19
    def test_upgrade(self, task_upgrade):
        instance_model = DBInstance(
            InstanceTasks.NONE,
            id=str(uuid.uuid4()),
            name="TestUpgradeInstance",
            datastore_version_id=self.datastore_version1.id)
        instance_model.set_task_status(InstanceTasks.NONE)
        instance_model.save()
        instance_status = InstanceServiceStatus(ServiceStatuses.RUNNING,
                                                id=str(uuid.uuid4()),
                                                instance_id=instance_model.id)
        instance_status.save()
        self.assertIsNotNone(instance_model)
        instance = models.load_instance(models.Instance, self.context,
                                        instance_model.id)

        try:
            instance.upgrade(self.datastore_version2)

            self.assertEqual(self.datastore_version2.id,
                             instance.db_info.datastore_version_id)
            self.assertEqual(InstanceTasks.UPGRADING,
                             instance.db_info.task_status)
            self.assertTrue(task_upgrade.called)
        finally:
            instance_status.delete()
            instance_model.delete()
Example #20
 def setUp(self):
     super(SimpleInstanceTest, self).setUp()
     db_info = DBInstance(
         InstanceTasks.BUILDING, name="TestInstance")
     self.instance = SimpleInstance(
         None, db_info, InstanceServiceStatus(
             ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock(),
         locality='affinity')
     db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
                          "internal": [{"addr": "10.123.123.123"}],
                          "public": [{"addr": "15.123.123.123"}]}
     self.orig_conf = CONF.network_label_regex
     self.orig_ip_regex = CONF.ip_regex
     self.orig_black_list_regex = CONF.black_list_regex
Example #21
 def build_db_instance(self, status, task_status=InstanceTasks.NONE):
     version = datastore_models.DBDatastoreVersion.get_by(name='5.5')
     instance = DBInstance(InstanceTasks.NONE,
                           name='test_name',
                           id=str(uuid.uuid4()),
                           flavor_id='flavor_1',
                            datastore_version_id=version.id,
                            compute_instance_id='compute_id_1',
                            server_id='server_id_1',
                            tenant_id='tenant_id_1',
                            server_status=(rd_instance.ServiceStatuses
                                           .BUILDING.api_status),
                           deleted=False)
     instance.save()
     service_status = InstanceServiceStatus(
         rd_instance.ServiceStatuses.RUNNING,
         id=str(uuid.uuid4()),
         instance_id=instance.id,
     )
     service_status.save()
     instance.set_task_status(task_status)
     instance.server_status = status
     instance.save()
     return instance, service_status
Example #22
 def _init(self):
     self.mock = mox.Mox()
     self.instance_id = 500
     context = TroveContext()
     self.db_info = DBInstance.create(name="instance",
                                      flavor_id=OLD_FLAVOR_ID,
                                      tenant_id=999,
                                      volume_size=None,
                                      service_type='mysql',
                                      task_status=InstanceTasks.RESIZING)
     self.server = self.mock.CreateMock(Server)
     self.instance = models.BuiltInstanceTasks(context,
                                               self.db_info,
                                               self.server,
                                               service_status="ACTIVE")
     self.instance.server.flavor = {'id': OLD_FLAVOR_ID}
     self.guest = self.mock.CreateMock(guest.API)
     self.instance._guest = self.guest
     self.instance._refresh_compute_server_info = lambda: None
     self.instance._refresh_compute_service_status = lambda: None
     self.mock.StubOutWithMock(self.instance, 'update_db')
     self.mock.StubOutWithMock(self.instance,
                               '_set_service_status_to_paused')
     self.poll_until_mocked = False
     self.action = None
Example #23
        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s",
                      {'cluster_id': cluster_id, 'instance_ids': instance_ids})

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("all instances in cluster %s ready.", cluster_id)

            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # filter tidb_server in instances into a new list: tidb_server
            tidb_server = [instance for instance in instances if
                           instance.type == 'tidb_server']
            LOG.debug("tidb_server: %s",
                      [instance.id for instance in tidb_server])
            # filter pd_server in instances into new list: pd_server
            pd_server = [instance for instance in instances if
                         instance.type == 'pd_server']
            LOG.debug("pd_server: %s",
                      [instance.id for instance in pd_server])
            # filter tikv into a new list: tikv
            tikv = [instance for instance in instances if
                    instance.type == 'tikv']
            LOG.debug("tikv: %s",
                      [instance.id for instance in tikv])
Example #24
 def _init(self):
     self.mock = mox.Mox()
     self.instance_id = 500
     context = TroveContext()
     self.db_info = DBInstance.create(
         name="instance",
         flavor_id=OLD_FLAVOR_ID,
         tenant_id=999,
         volume_size=None,
         service_type='mysql',
         task_status=InstanceTasks.RESIZING)
     self.server = self.mock.CreateMock(Server)
     self.instance = models.BuiltInstanceTasks(context,
                                               self.db_info,
                                               self.server,
                                               service_status="ACTIVE")
     self.instance.server.flavor = {'id': OLD_FLAVOR_ID}
     self.guest = self.mock.CreateMock(guest.API)
     self.instance._guest = self.guest
     self.instance._refresh_compute_server_info = lambda: None
     self.instance._refresh_compute_service_status = lambda: None
     self.mock.StubOutWithMock(self.instance, 'update_db')
     self.mock.StubOutWithMock(self.instance,
                               '_set_service_status_to_paused')
     self.poll_until_mocked = False
     self.action = None
Example #25
 def _init(self):
     self.instance_id = 500
     context = trove_testtools.TroveTestContext(self)
     self.db_info = DBInstance.create(
         name="instance",
         flavor_id=OLD_FLAVOR_ID,
         tenant_id=999,
         volume_size=None,
         datastore_version_id=test_config.dbaas_datastore_version_id,
         task_status=InstanceTasks.RESIZING)
     self.server = mock.MagicMock(spec=Server)
     self.instance = models.BuiltInstanceTasks(
         context,
         self.db_info,
         self.server,
         datastore_status=InstanceServiceStatus.create(
             instance_id=self.db_info.id,
             status=srvstatus.ServiceStatuses.RUNNING))
     self.instance.server.flavor = {'id': OLD_FLAVOR_ID}
     self.guest = mock.MagicMock(spec=guest.API)
     self.instance._guest = self.guest
     self.instance.refresh_compute_server_info = lambda: None
     self.instance._refresh_datastore_status = lambda: None
     self.instance.update_db = mock.Mock()
     self.instance.set_datastore_status_to_paused = mock.Mock()
     self.poll_until_side_effects = []
     self.action = None
Example #26
    def prepare(self,
                memory_mb,
                packages,
                databases,
                users,
                device_path=None,
                mount_point=None,
                backup_info=None,
                config_contents=None,
                root_password=None):
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        from trove.guestagent.models import AgentHeartBeat
        LOG.debug("users... %s" % users)
        LOG.debug("databases... %s" % databases)
        instance_name = DBInstance.find_by(id=self.id).name
        self.create_user(users)
        self.create_database(databases)

        def update_db():
            status = InstanceServiceStatus.find_by(instance_id=self.id)
            if instance_name.endswith('GUEST_ERROR'):
                status.status = rd_instance.ServiceStatuses.FAILED
            else:
                status.status = rd_instance.ServiceStatuses.RUNNING
            status.save()
            AgentHeartBeat.create(instance_id=self.id)

        eventlet.spawn_after(1.0, update_db)
Example #27
    def update_statuses_on_time_out(self):

        if CONF.update_status_on_fail:
            # Updating service status
            service = InstanceServiceStatus.find_by(instance_id=self.id)
            service.set_status(ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT)
            service.save()
            LOG.error(_("Service status: %(status)s") %
                      {'status': ServiceStatuses.
                       FAILED_TIMEOUT_GUESTAGENT.api_status})
            LOG.error(_("Service error description: %(desc)s") %
                      {'desc': ServiceStatuses.
                       FAILED_TIMEOUT_GUESTAGENT.description})
            # Updating instance status
            db_info = DBInstance.find_by(name=self.name)
            db_info.set_task_status(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA)
            db_info.save()
            LOG.error(_("Trove instance status: %(action)s") %
                      {'action': InstanceTasks.
                       BUILDING_ERROR_TIMEOUT_GA.action})
            LOG.error(_("Trove instance status description: %(text)s") %
                      {'text': InstanceTasks.
                       BUILDING_ERROR_TIMEOUT_GA.db_text})
Example #28
 def test_create_replica_from_replica(self):
     self.replica_datastore_version = Mock(spec=datastore_models.DBDatastoreVersion)
     self.replica_datastore_version.id = "UUID"
     self.replica_datastore_version.manager = "mysql"
     self.replica_info = DBInstance(
         InstanceTasks.NONE,
         id="UUID",
         name="TestInstance",
         datastore_version_id=self.replica_datastore_version.id,
         slave_of_id=self.master.id,
     )
     self.replica_info.save()
     self.assertRaises(
         exception.Forbidden,
         Instance.create,
         None,
         "name",
         2,
         "UUID",
         [],
         [],
         None,
         self.datastore_version,
         1,
         None,
         slave_of_id=self.replica_info.id,
     )
Example #29
        def _grow_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            cluster_head = next(Instance.load(context, db_inst.id)
                                for db_inst in db_instances
                                if db_inst.id not in new_instance_ids)
            if not cluster_head:
                raise TroveError("Unable to determine existing Redis cluster "
                                 "member")

            (cluster_head_ip, cluster_head_port) = (
                self.get_guest(cluster_head).get_node_ip())

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]
            new_guests = [self.get_guest(inst) for inst in new_insts]

            # Connect nodes to the cluster head
            for guest in new_guests:
                guest.cluster_meet(cluster_head_ip, cluster_head_port)

            for guest in new_guests:
                guest.cluster_complete()
Example #30
    def setUp(self):
        util.init_db()

        self.datastore = datastore_models.DBDatastore.create(
            id=str(uuid.uuid4()), name="name", default_version_id=str(uuid.uuid4())
        )

        self.datastore_version = datastore_models.DBDatastoreVersion.create(
            id=self.datastore.default_version_id,
            name="name",
            image_id=str(uuid.uuid4()),
            packages=str(uuid.uuid4()),
            datastore_id=self.datastore.id,
            manager="mysql",
            active=1,
        )

        self.master = DBInstance(
            InstanceTasks.NONE,
            id=str(uuid.uuid4()),
            name="TestMasterInstance",
            datastore_version_id=self.datastore_version.id,
        )
        self.master.set_task_status(InstanceTasks.NONE)
        self.master.save()
        self.master_status = InstanceServiceStatus(
            ServiceStatuses.RUNNING, id=str(uuid.uuid4()), instance_id=self.master.id
        )
        self.master_status.save()

        self.safe_nova_client = models.create_nova_client
        models.create_nova_client = nova.fake_create_nova_client
        super(TestReplication, self).setUp()
Example #31
 def _init(self):
     self.mock = mox.Mox()
     self.instance_id = 500
     context = TroveContext()
     self.db_info = DBInstance.create(
         name="instance",
         flavor_id=OLD_FLAVOR_ID,
         tenant_id=999,
         volume_size=None,
         datastore_version_id=test_config.dbaas_datastore_version_id,
         task_status=InstanceTasks.RESIZING)
     self.server = self.mock.CreateMock(Server)
     self.instance = models.BuiltInstanceTasks(
         context,
         self.db_info,
         self.server,
         datastore_status=InstanceServiceStatus.create(
             instance_id=self.db_info.id,
             status=rd_instance.ServiceStatuses.RUNNING))
     self.instance.server.flavor = {'id': OLD_FLAVOR_ID}
     self.guest = self.mock.CreateMock(guest.API)
     self.instance._guest = self.guest
     self.instance.refresh_compute_server_info = lambda: None
     self.instance._refresh_datastore_status = lambda: None
     self.mock.StubOutWithMock(self.instance, 'update_db')
     self.mock.StubOutWithMock(self.instance,
                               'set_datastore_status_to_paused')
     self.poll_until_mocked = False
     self.action = None
Example #32
File: api.py Project: no2a/trove
    def shrink(self, instances):
        """Removes instances from a cluster."""
        LOG.debug("Shrinking cluster %s.", self.id)

        self.validate_cluster_available()
        removal_instances = [
            Instance.load(self.context, inst_id) for inst_id in instances
        ]
        db_instances = DBInstance.find_all(cluster_id=self.db_info.id,
                                           deleted=False).all()
        if len(db_instances) - len(removal_instances) < 1:
            raise exception.ClusterShrinkMustNotLeaveClusterEmpty()

        self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
        try:
            task_api.load(self.context,
                          self.ds_version.manager).shrink_cluster(
                              self.db_info.id,
                              [instance.id for instance in removal_instances])
        except Exception:
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise

        return self.__class__(self.context, self.db_info, self.ds,
                              self.ds_version)
Example #33
        def _shrink_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            all_instance_ids = [db_instance.id for db_instance in db_instances]

            remove_instances = [Instance.load(context, instance_id)
                                for instance_id in instance_ids]

            left_instances = [Instance.load(context, instance_id)
                              for instance_id
                              in all_instance_ids
                              if instance_id not in instance_ids]

            remove_member_ips = [self.get_ip(instance)
                                 for instance in remove_instances]

            k = VerticaCluster.k_safety(len(left_instances))

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    if self.get_ip(master_instance) in remove_member_ips:
                        raise RuntimeError(_("Cannot remove master instance!"))
                    LOG.debug(_("Marking cluster k-safety: %s") % k)
                    self.get_guest(master_instance).mark_design_ksafe(k)
                    self.get_guest(master_instance).shrink_cluster(
                        remove_member_ips)
                    break

            for r in remove_instances:
                Instance.delete(r)
Example #34
 def test_create_replica_from_replica(self):
     self.replica_datastore_version = Mock(
         spec=datastore_models.DBDatastoreVersion)
     self.replica_datastore_version.id = "UUID"
     self.replica_datastore_version.manager = 'mysql'
     self.replica_info = DBInstance(
         InstanceTasks.NONE,
         id="UUID",
         name="TestInstance",
         datastore_version_id=self.replica_datastore_version.id,
         slave_of_id=self.master.id)
     self.replica_info.save()
     self.assertRaises(exception.Forbidden, Instance.create,
                       None, 'name', 2, "UUID", [], [], None,
                       self.datastore_version, 1,
                       None, slave_of_id=self.replica_info.id)
Example #35
    def prepare(
        self,
        memory_mb,
        databases,
        users,
        device_path=None,
        mount_point=None,
        backup_id=None,
        config_contents=None,
        root_password=None,
    ):
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        from trove.guestagent.models import AgentHeartBeat

        LOG.debug("users... %s" % users)
        LOG.debug("databases... %s" % databases)
        instance_name = DBInstance.find_by(id=self.id).name
        self.create_user(users)
        self.create_database(databases)

        def update_db():
            status = InstanceServiceStatus.find_by(instance_id=self.id)
            if instance_name.endswith("GUEST_ERROR"):
                status.status = rd_instance.ServiceStatuses.FAILED
            else:
                status.status = rd_instance.ServiceStatuses.RUNNING
            status.save()
            AgentHeartBeat.create(instance_id=self.id)

        eventlet.spawn_after(1.0, update_db)
Example #36
 def _get_cluster_instance_id(self, tenant_id, cluster_id):
     instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
     args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type':
             'master'}
     master_instance = DBInstance.find_by(**args)
     master_instance_id = master_instance.id
     return master_instance_id, instance_ids
Example #37
    def prepare(self,
                memory_mb,
                packages,
                databases,
                users,
                device_path=None,
                mount_point=None,
                backup_info=None,
                config_contents=None,
                root_password=None,
                overrides=None,
                cluster_config=None,
                snapshot=None,
                modules=None):
        from trove.guestagent.models import AgentHeartBeat
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        LOG.debug("users... %s", users)
        LOG.debug("databases... %s", databases)
        instance_name = DBInstance.find_by(id=self.id).name
        self.create_user(users)
        self.create_database(databases)
        self.overrides = overrides or {}

        def update_db():
            status = InstanceServiceStatus.find_by(instance_id=self.id)
            if instance_name.endswith('GUEST_ERROR'):
                status.status = srvstatus.ServiceStatuses.FAILED
            else:
                status.status = srvstatus.ServiceStatuses.HEALTHY
            status.save()
            AgentHeartBeat.create(instance_id=self.id)

        eventlet.spawn_after(3.5, update_db)
Example #38
        def _grow_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            cluster_head = next(
                Instance.load(context, db_inst.id) for db_inst in db_instances
                if db_inst.id not in new_instance_ids)
            if not cluster_head:
                raise TroveError(
                    _("Unable to determine existing Redis cluster"
                      " member"))

            (cluster_head_ip,
             cluster_head_port) = (self.get_guest(cluster_head).get_node_ip())

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            new_insts = [
                Instance.load(context, instance_id)
                for instance_id in new_instance_ids
            ]
            new_guests = [self.get_guest(inst) for inst in new_insts]

            # Connect nodes to the cluster head
            for guest in new_guests:
                guest.cluster_meet(cluster_head_ip, cluster_head_port)

            for guest in new_guests:
                guest.cluster_complete()
Example #39
 def _init(self):
     self.mock = mox.Mox()
     self.instance_id = 500
     context = trove_testtools.TroveTestContext(self)
     self.db_info = DBInstance.create(
         name="instance",
         flavor_id=OLD_FLAVOR_ID,
         tenant_id=999,
         volume_size=None,
         datastore_version_id=test_config.dbaas_datastore_version_id,
         task_status=InstanceTasks.RESIZING)
     self.server = self.mock.CreateMock(Server)
     self.instance = models.BuiltInstanceTasks(
         context,
         self.db_info,
         self.server,
         datastore_status=InstanceServiceStatus.create(
             instance_id=self.db_info.id,
             status=rd_instance.ServiceStatuses.RUNNING))
     self.instance.server.flavor = {'id': OLD_FLAVOR_ID}
     self.guest = self.mock.CreateMock(guest.API)
     self.instance._guest = self.guest
     self.instance.refresh_compute_server_info = lambda: None
     self.instance._refresh_datastore_status = lambda: None
     self.mock.StubOutWithMock(self.instance, 'update_db')
     self.mock.StubOutWithMock(self.instance,
                               'set_datastore_status_to_paused')
     self.poll_until_mocked = False
     self.action = None
Example #40
        def _shrink_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            all_instance_ids = [db_instance.id for db_instance in db_instances]

            remove_instances = [Instance.load(context, instance_id)
                                for instance_id in instance_ids]

            left_instances = [Instance.load(context, instance_id)
                              for instance_id
                              in all_instance_ids
                              if instance_id not in instance_ids]

            remove_member_ips = [self.get_ip(instance)
                                 for instance in remove_instances]

            k = VerticaCluster.k_safety(len(left_instances))

            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    if self.get_ip(master_instance) in remove_member_ips:
                        raise RuntimeError(_("Cannot remove master instance!"))
                    LOG.debug("Marking cluster k-safety: %s", k)
                    self.get_guest(master_instance).mark_design_ksafe(k)
                    self.get_guest(master_instance).shrink_cluster(
                        remove_member_ips)
                    break

            for r in remove_instances:
                Instance.delete(r)
Example #41
    def update_statuses_on_time_out(self):

        if CONF.update_status_on_fail:
            # Updating service status
            service = InstanceServiceStatus.find_by(instance_id=self.id)
            service.set_status(ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT)
            service.save()
            LOG.error(
                _("Service status: %(status)s") % {
                    'status':
                    ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT.api_status
                })
            LOG.error(
                _("Service error description: %(desc)s") % {
                    'desc':
                    ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT.description
                })
            # Updating instance status
            db_info = DBInstance.find_by(name=self.name)
            db_info.set_task_status(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA)
            db_info.save()
            LOG.error(
                _("Trove instance status: %(action)s") %
                {'action': InstanceTasks.BUILDING_ERROR_TIMEOUT_GA.action})
            LOG.error(
                _("Trove instance status description: %(text)s") %
                {'text': InstanceTasks.BUILDING_ERROR_TIMEOUT_GA.db_text})
Example #42
 def all_instances_marked_deleted():
     non_deleted_instances = DBInstance.find_all(
         cluster_id=cluster_id, deleted=False).all()
     non_deleted_ids = [db_instance.id for db_instance
                        in non_deleted_instances]
     return not bool(
         set(instance_ids).intersection(set(non_deleted_ids))
     )
Example #43
 def _find_cluster_node_ids(self, tenant_id, cluster_id):
     args = {
         'tenant_id': tenant_id,
         'cluster_id': cluster_id,
         'deleted': False
     }
     cluster_instances = DBInstance.find_all(**args).all()
     return [db_instance.id for db_instance in cluster_instances]
Example #44
 def load(context, id):
     client = create_nova_client(context)
     account = client.accounts.get_instances(id)
     db_infos = DBInstance.find_all(tenant_id=id, deleted=False)
     servers = [Server(server) for server in account.servers]
     instances = MgmtInstances.load_status_from_existing(
         context, db_infos, servers)
     return Account(id, instances)
Example #45
 def load(context, id):
     client = create_nova_client(context)
     account = client.accounts.get_instances(id)
     db_infos = DBInstance.find_all(tenant_id=id, deleted=False)
     servers = [Server(server) for server in account.servers]
     instances = MgmtInstances.load_status_from_existing(context, db_infos,
                                                         servers)
     return Account(id, instances)
Example #46
 def _find_query_router_ids(self, tenant_id, cluster_id):
     args = {
         'tenant_id': tenant_id,
         'cluster_id': cluster_id,
         'type': 'query_router'
     }
     query_router_instances = DBInstance.find_all(**args).all()
     return [db_instance.id for db_instance in query_router_instances]
Example #47
 def all_instances_marked_deleted():
     non_deleted_instances = DBInstance.find_all(
         cluster_id=cluster_id, deleted=False).all()
     non_deleted_ids = [db_instance.id for db_instance
                        in non_deleted_instances]
     return not bool(
         set(instance_ids).intersection(set(non_deleted_ids))
     )
Example #48
    def test_upgrade(self, task_upgrade):
        instance_model = DBInstance(
            InstanceTasks.NONE,
            id=str(uuid.uuid4()),
            name="TestUpgradeInstance",
            datastore_version_id=self.datastore_version1.id)
        instance_model.set_task_status(InstanceTasks.NONE)
        instance_model.save()
        instance_status = InstanceServiceStatus(
            ServiceStatuses.RUNNING,
            id=str(uuid.uuid4()),
            instance_id=instance_model.id)
        instance_status.save()
        self.assertIsNotNone(instance_model)
        instance = models.load_instance(models.Instance, self.context,
                                        instance_model.id)

        try:
            instance.upgrade(self.datastore_version2)

            self.assertEqual(self.datastore_version2.id,
                             instance.db_info.datastore_version_id)
            self.assertEqual(InstanceTasks.UPGRADING,
                             instance.db_info.task_status)
            self.assertTrue(task_upgrade.called)
        finally:
            instance_status.delete()
            instance_model.delete()
Example #49
        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %s: %s" % (cluster_id,
                                                       instance_ids))

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # filter query routers in instances into a new list: query_routers
            query_routers = [instance for instance in instances if
                             instance.type == 'query_router']
            LOG.debug("query routers: %s" %
                      [instance.id for instance in query_routers])
            # filter config servers in instances into new list: config_servers
            config_servers = [instance for instance in instances if
                              instance.type == 'config_server']
            LOG.debug("config servers: %s" %
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [instance for instance in instances if
                       instance.type == 'member']
            LOG.debug("members: %s" %
                      [instance.id for instance in members])

            # for config_server in config_servers, append ip/hostname to
            # "config_server_hosts", then
            # peel off the replica-set name and ip/hostname from 'x'
            config_server_ips = [self.get_ip(instance)
                                 for instance in config_servers]
            LOG.debug("config server ips: %s" % config_server_ips)

            LOG.debug("calling add_config_servers on query_routers")
            try:
                for query_router in query_routers:
                    (self.get_guest(query_router)
                     .add_config_servers(config_server_ips))
            except Exception:
                LOG.exception(_("error adding config servers"))
                self.update_statuses_on_failure(cluster_id)
                return

            if not self._create_replica_set(members, cluster_id):
                return

            replica_set_name = "rs1"
            if not self._create_shard(query_routers, replica_set_name,
                                      members, cluster_id):
                return
            # call to start checking status
            for instance in instances:
                self.get_guest(instance).cluster_complete()
Example #50
        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %s: %s" % (cluster_id,
                                                       instance_ids))

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # filter query routers in instances into a new list: query_routers
            query_routers = [instance for instance in instances if
                             instance.type == 'query_router']
            LOG.debug("query routers: %s" %
                      [instance.id for instance in query_routers])
            # filter config servers in instances into new list: config_servers
            config_servers = [instance for instance in instances if
                              instance.type == 'config_server']
            LOG.debug("config servers: %s" %
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [instance for instance in instances if
                       instance.type == 'member']
            LOG.debug("members: %s" %
                      [instance.id for instance in members])

            # for config_server in config_servers, append ip/hostname to
            # "config_server_hosts", then
            # peel off the replica-set name and ip/hostname from 'x'
            config_server_ips = [self.get_ip(instance)
                                 for instance in config_servers]
            LOG.debug("config server ips: %s" % config_server_ips)

            LOG.debug("calling add_config_servers on query_routers")
            try:
                for query_router in query_routers:
                    (self.get_guest(query_router)
                     .add_config_servers(config_server_ips))
            except Exception:
                LOG.exception(_("error adding config servers"))
                self.update_statuses_on_failure(cluster_id)
                return

            if not self._create_replica_set(members, cluster_id):
                return

            replica_set_name = "rs1"
            if not self._create_shard(query_routers, replica_set_name,
                                      members, cluster_id):
                return
            # call to start checking status
            for instance in instances:
                self.get_guest(instance).cluster_complete()
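In the Trove cluster task managers, nested helpers like _create_cluster are typically invoked under an eventlet timeout guard. The sketch below shows that surrounding pattern only as an illustration: Timeout here is eventlet.timeout.Timeout, and the cluster_usage_timeout option and reset_task call are assumptions based on that pattern, not part of this example.

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            # Only swallow the timeout started above; re-raise anything else.
            if t is not timeout:
                raise
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()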
Ejemplo n.º 51
0
    def setUp(self):
        util.init_db()
        self.context = trove_testtools.TroveTestContext(self, is_admin=True)
        self.datastore = datastore_models.DBDatastore.create(
            id=str(uuid.uuid4()),
            name='redis' + str(uuid.uuid4()),
        )
        self.datastore_version = (datastore_models.DBDatastoreVersion.create(
            id=str(uuid.uuid4()),
            datastore_id=self.datastore.id,
            name="3.2" + str(uuid.uuid4()),
            manager="redis",
            image_id="image_id",
            packages="",
            active=True))
        self.tenant_id = "UUID"
        self.single_db_info = DBInstance.create(
            id="redis-single",
            name="redis-single",
            flavor_id=1,
            datastore_version_id=self.datastore_version.id,
            tenant_id=self.tenant_id,
            volume_size=None,
            task_status=InstanceTasks.NONE)
        self.master_db_info = DBInstance.create(
            id="redis-master",
            name="redis-master",
            flavor_id=1,
            datastore_version_id=self.datastore_version.id,
            tenant_id=self.tenant_id,
            volume_size=None,
            task_status=InstanceTasks.NONE)
        self.slave_db_info = DBInstance.create(
            id="redis-slave",
            name="redis-slave",
            flavor_id=1,
            datastore_version_id=self.datastore_version.id,
            tenant_id=self.tenant_id,
            volume_size=None,
            task_status=InstanceTasks.NONE,
            slave_of_id=self.master_db_info.id)

        super(TestRedisRootController, self).setUp()
        self.controller = RedisRootController()
Ejemplo n.º 52
0
    def update_statuses_on_failure(self, cluster_id):

        if CONF.update_status_on_fail:
            db_instances = DBInstance.find_all(
                cluster_id=cluster_id, deleted=False).all()

            for db_instance in db_instances:
                db_instance.set_task_status(
                    InstanceTasks.BUILDING_ERROR_SERVER)
                db_instance.save()
Ejemplo n.º 53
0
def check_resize(group_id):
    """Check server status for every instance in the group that is still
    resizing, and return those instances."""
    resize = InstanceTasks.RESIZING
    instances = DBInstance.find_all(group_id=group_id,
                                    task_id=resize.code())
    insts = []
    for inst in instances:
        insts.append(inst)
        check_server_status(inst.id)
    return insts
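A minimal polling sketch built on check_resize, assuming trove.common.utils.poll_until as used in the later examples; the wait_for_resize name and the timeout value are illustrative only.

from trove.common import utils


def wait_for_resize(group_id, timeout=300):
    # check_resize returns the instances still in RESIZING, so an empty
    # result means every server in the group has finished resizing.
    utils.poll_until(lambda: not check_resize(group_id),
                     sleep_time=5,
                     time_out=timeout)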
    
    
Ejemplo n.º 54
0
    def get_manager(self, tenant_id, target_id):
        args = {'id': target_id, 'tenant_id': tenant_id}
        is_cluster = False
        try:
            db_info = DBInstance.find_by(**args)
        except exception.ModelNotFoundError:
            is_cluster = True
            db_info = DBCluster.find_by(**args)

        ds_version = (datastore_models.DatastoreVersion.
                      load_by_uuid(db_info.datastore_version_id))
        ds_manager = ds_version.manager
        return (ds_manager, is_cluster)
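A brief usage sketch for get_manager; controller, tenant_id and target_id are assumed to be in scope, and the log messages are illustrative only.

manager, is_cluster = controller.get_manager(tenant_id, target_id)
if is_cluster:
    LOG.debug("target %s is a %s cluster" % (target_id, manager))
else:
    LOG.debug("target %s is a single %s instance" % (target_id, manager))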
Ejemplo n.º 55
0
    def setUp(self):
        util.init_db()
        self.replica_datastore_version = Mock(spec=DBDatastoreVersion)
        self.replica_datastore_version.id = "UUID"
        self.replica_datastore_version.manager = 'mysql'
        self.root_info = DBInstance(
            InstanceTasks.NONE,
            id="Another_instance",
            name="TestInstance",
            datastore_version_id=self.replica_datastore_version.id)
        self.root_info.save()
        self.replica_info = DBInstance(
            InstanceTasks.NONE,
            id="UUID",
            name="TestInstance",
            datastore_version_id=self.replica_datastore_version.id,
            slave_of_id="Another_instance")
        self.replica_info.save()
        self.safe_nova = models.create_nova_client
        models.create_nova_client = nova.fake_create_nova_client

        super(TestReplication, self).setUp()
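The matching tearDown is not shown in this snippet; a minimal sketch would restore the real nova client factory saved above and defer the rest to the base class.

    def tearDown(self):
        # Put back the client factory that setUp replaced with the fake.
        models.create_nova_client = self.safe_nova
        super(TestReplication, self).tearDown()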
Ejemplo n.º 56
0
    @classmethod
    def load(cls):
        # TODO(pdmars): This should probably be changed to a more generic
        # database filter query if one is added, however, this should suffice
        # for now.
        db_infos = DBInstance.find_all(deleted=False)
        tenant_ids_for_instances = [db_info.tenant_id for db_info in db_infos]
        tenant_ids = set(tenant_ids_for_instances)
        LOG.debug("All tenants with instances: %s" % tenant_ids)
        accounts = []
        for tenant_id in tenant_ids:
            num_instances = tenant_ids_for_instances.count(tenant_id)
            accounts.append({'id': tenant_id, 'num_instances': num_instances})
        return cls(accounts)
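The per-tenant counting above is equivalent to a collections.Counter aggregation; a small self-contained sketch with illustrative tenant ids:

from collections import Counter

# Mirrors the loop above: one summary entry per tenant with its instance count.
tenant_ids_for_instances = ['t1', 't2', 't1', 't3', 't1']
accounts = [{'id': tenant_id, 'num_instances': count}
            for tenant_id, count in Counter(tenant_ids_for_instances).items()]
# e.g. [{'id': 't1', 'num_instances': 3}, {'id': 't2', 'num_instances': 1},
#       {'id': 't3', 'num_instances': 1}]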
Ejemplo n.º 57
0
    def _create_instance(self):
        self.context = TroveContext(is_admin=True)
        self.tenant_id = 999
        self.db_info = DBInstance.create(
            name="instance",
            flavor_id=1,
            tenant_id=self.tenant_id,
            volume_size=None,
            task_status=InstanceTasks.NONE)
        self.server = self.mock.CreateMock(Server)
        self.instance = imodels.Instance(self.context,
                                         self.db_info,
                                         self.server,
                                         service_status="ACTIVE")
Ejemplo n.º 58
0
        def _create_cluster():

            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %s: %s" % (cluster_id,
                                                       instance_ids))

            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("all instances in cluster %s ready." % cluster_id)

            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # filter query routers in instances into a new list: query_routers
            query_routers = [instance for instance in instances if
                             instance.type == 'query_router']
            LOG.debug("query routers: %s" %
                      [instance.id for instance in query_routers])
            # filter config servers into a new list: config_servers
            config_servers = [instance for instance in instances if
                              instance.type == 'config_server']
            LOG.debug("config servers: %s" %
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [instance for instance in instances if
                       instance.type == 'member']
            LOG.debug("members: %s" %
                      [instance.id for instance in members])

            # Collect the ip/hostname of each config server so they can be
            # handed to the query routers below.
            config_server_ips = [self.get_ip(instance)
                                 for instance in config_servers]
            LOG.debug("config server ips: %s" % config_server_ips)

            if not self._add_query_routers(query_routers,
                                           config_server_ips):
                return

            if not self._create_shard(query_routers[0], members):
                return

            # call to start checking status
            for instance in instances:
                self.get_guest(instance).cluster_complete()
Ejemplo n.º 59
0
        def _shrink_cluster():
            removal_instances = [Instance.load(context, instance_id)
                                 for instance_id in removal_instance_ids]
            for instance in removal_instances:
                Instance.delete(instance)

            # wait for instances to be deleted
            def all_instances_marked_deleted():
                non_deleted_instances = DBInstance.find_all(
                    cluster_id=cluster_id, deleted=False).all()
                non_deleted_ids = [db_instance.id for db_instance
                                   in non_deleted_instances]
                return not bool(
                    set(removal_instance_ids).intersection(
                        set(non_deleted_ids))
                )
            try:
                LOG.info(_("Deleting instances (%s)") % removal_instance_ids)
                utils.poll_until(all_instances_marked_deleted,
                                 sleep_time=2,
                                 time_out=CONF.cluster_delete_time_out)
            except PollTimeOut:
                LOG.error(_("timeout for instances to be marked as deleted."))
                return

            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            leftover_instances = [Instance.load(context, db_inst.id)
                                  for db_inst in db_instances
                                  if db_inst.id not in removal_instance_ids]
            leftover_cluster_ips = [self.get_ip(instance) for instance in
                                    leftover_instances]

            # Get config changes for left over instances
            rnd_cluster_guest = self.get_guest(leftover_instances[0])
            cluster_context = rnd_cluster_guest.get_cluster_context()

            # apply the new config to all leftover instances
            for instance in leftover_instances:
                guest = self.get_guest(instance)
                # render the conf.d/cluster.cnf configuration
                cluster_configuration = self._render_cluster_config(
                    context,
                    instance,
                    ",".join(leftover_cluster_ips),
                    cluster_context['cluster_name'],
                    cluster_context['replication_user'])
                guest.write_cluster_configuration_overrides(
                    cluster_configuration)
Ejemplo n.º 60
0
        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            member_ips = [self.get_ip(instance) for instance in instances]
            guests = [self.get_guest(instance) for instance in instances]

            # Users to be configured for password-less SSH.
            authorized_users_without_password = ['root', 'dbadmin']

            # Configuring password-less SSH for cluster members.
            # Strategy for setting up SSH:
            # get public keys for user from member-instances in cluster,
            # combine them, finally push it back to all instances,
            # and member instances add them to authorized keys.
            LOG.debug("Configuring password-less SSH on cluster members.")
            try:
                for user in authorized_users_without_password:
                    pub_key = [guest.get_public_keys(user) for guest in guests]
                    for guest in guests:
                        guest.authorize_public_keys(user, pub_key)

                LOG.debug("Installing cluster with members: %s." % member_ips)
                for db_instance in db_instances:
                    if db_instance['type'] == 'master':
                        master_instance = Instance.load(context,
                                                        db_instance.id)
                        self.get_guest(master_instance).install_cluster(
                            member_ips)
                        break

                LOG.debug("Finalizing cluster configuration.")
                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)