Example #1
    def test_static_result(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        self.assertEqual(obj.bar, 'bar')
        result = obj.marco()
        self.assertEqual(result, 'polo')
        self.assertRemotes()
Example #2
    def start(self):
        super(FpgaAgentManager, self).start()

        if os.path.isfile('/etc/sysinv/sysinv.conf'):
            LOG.info('sysinv-fpga-agent started')
        else:
            LOG.info('No config file for sysinv-fpga-agent found.')
            raise exception.ConfigNotFound(message="Unable to find sysinv config file!")

        # Wait for puppet to log in to the local docker registry
        wait_for_docker_login()

        # Trigger reset of N3000 FPGAs.  This is needed because the PCI address
        # changes on the first reset after boot.
        reset_n3000_fpgas()

        # Wait around until someone else updates the platform.conf file
        # with our host UUID.
        self.wait_for_host_uuid()

        context = ctx.get_admin_context()

        # Collect updated PCI device information for N3000 FPGAs
        # and send it to sysinv-conductor
        self.fpga_pci_update(context)

        # Collect FPGA inventory and report to conductor.
        self.report_fpga_inventory(context)
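
A hedged sketch of the wait_for_host_uuid() step called above; the platform.conf path, key name, and polling interval are assumptions, not taken from the real agent code.

    def wait_for_host_uuid(self):
        # Poll platform.conf (path and key assumed) until our host UUID shows up.
        while True:
            with open('/etc/platform/platform.conf') as f:
                for line in f:
                    if line.startswith('UUID='):
                        return line.split('=', 1)[1].strip()
            time.sleep(10)  # 'time' assumed to be imported at module scope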
Example #3
    def _test_rpcapi(self, method, rpc_method, **kwargs):
        ctxt = context.get_admin_context()
        rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')

        default_rpc_api_version = '1.0'
        expected_retval = 'hello world' if method == 'call' else None
        expected_version = kwargs.pop('version', default_rpc_api_version)
        expected_msg = rpcapi.make_msg(method, **kwargs)

        expected_msg['version'] = expected_version

        expected_topic = 'fake-topic'

        self.fake_args = None
        self.fake_kwargs = None

        def _fake_rpc_method(*args, **kwargs):
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval

        with mock.patch.object(rpc, rpc_method) as mock_method:
            mock_method.side_effect = _fake_rpc_method
            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, expected_retval)
            expected_args = [ctxt, expected_topic, expected_msg]
            for arg, expected_arg in zip(self.fake_args, expected_args):
                self.assertEqual(arg, expected_arg)
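
A sketch of how the helper above is typically invoked; the method name is taken from Example #16's use of the conductor API, but this exact test is illustrative rather than copied from the real test suite.

    def test_docker_registry_image_list(self):
        # 'call' is used because the RPC returns a value (see Example #16).
        self._test_rpcapi('docker_registry_image_list', 'call')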
Example #4
    def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device):
        disk_node = idisk_dict.get('device_path')

        utils.disk_wipe(disk_node)
        utils.execute('parted', disk_node, 'mklabel', 'gpt')

        if is_cinder_device:
            LOG.debug("Removing .node_cinder_lvm_config_complete_file")
            try:
                os.remove(constants.NODE_CINDER_LVM_CONFIG_COMPLETE_FILE)
            except OSError:
                LOG.error(".node_cinder_lvm_config_complete_file not present.")
                pass

        # On SX ensure wipe succeeds before DB is updated.
        # Flag file is used to mark wiping in progress.
        try:
            os.remove(constants.DISK_WIPE_IN_PROGRESS_FLAG)
        except OSError:
            # it's ok if file is not present.
            pass

        # We need to send the updated info about the host disks back to
        # the conductor.
        idisk_update = self.idisk_get()
        ctxt = context.get_admin_context()
        rpcapi = conductor_rpcapi.ConductorAPI(
            topic=conductor_rpcapi.MANAGER_TOPIC)
        rpcapi.idisk_update_by_ihost(ctxt,
                                     host_uuid,
                                     idisk_update)
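
A minimal usage sketch for the method above; the device path and the call site are illustrative only.

    idisk = {'device_path': '/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0'}
    self.disk_format_gpt(host_uuid, idisk, is_cinder_device=False)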
Example #5
    def test_static_result(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        self.assertEqual(obj.bar, 'bar')
        result = obj.marco()  # pylint: disable=no-value-for-parameter
        self.assertEqual(result, 'polo')
        self.assertRemotes()
Example #6
    def test_updates(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        self.assertEqual(obj.foo, 1)
        obj.update_test()  # pylint: disable=no-value-for-parameter
        self.assertEqual(obj.bar, 'updated')
        self.assertRemotes()
Example #7
    def test_orphaned_object(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        obj._context = None
        self.assertRaises(exception.OrphanedObjectError,
                          obj.update_test)
        self.assertRemotes()
Example #8
    def test_updates(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        self.assertEqual(obj.foo, 1)
        obj.update_test()
        self.assertEqual(obj.bar, 'updated')
        self.assertRemotes()
Example #9
    def run(self):
        while not self.stop:
            try:
                # Set timeout to check self.stop periodically
                (node_id, params) = QUEUE.get(block=True,
                                              timeout=self.queue_timeout)
            except Queue.Empty:
                pass
            else:
                # Requests come here from BareMetalDeploy.post()
                LOG.info(
                    _('start deployment for node %(node_id)s, '
                      'params %(params)s') % {
                          'node_id': node_id,
                          'params': params
                      })
                context = sysinv_context.get_admin_context()
                try:
                    db.bm_node_update(context, node_id,
                                      {'task_state': states.DEPLOYING})
                    deploy(**params)
                except Exception:
                    LOG.error(_('deployment to node %s failed') % node_id)
                    db.bm_node_update(context, node_id,
                                      {'task_state': states.DEPLOYFAIL})
                else:
                    LOG.info(_('deployment to node %s done') % node_id)
                    db.bm_node_update(context, node_id,
                                      {'task_state': states.DEPLOYDONE})
Example #10
    def setUp(self):
        super(ManagerTestCase, self).setUp()
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.system = utils.create_test_isystem()
        self.load = utils.create_test_load()
Example #11
    def setUp(self):
        super(AppOperatorTestCase, self).setUp()

        # Set up objects for testing
        self.app_operator = kube_app.AppOperator(dbapi.get_instance())
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.temp_dir = self.useFixture(fixtures.TempDir())
Example #12
    def test_changed_2(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        obj.foo = 123
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        obj.save(ctxt)
        self.assertEqual(obj.obj_what_changed(), set([]))
        self.assertEqual(obj.foo, 123)
        self.assertRemotes()
Example #13
    def test_object_serialization(self):
        ser = base.SysinvObjectSerializer()
        ctxt = context.get_admin_context()
        obj = MyObj()
        primitive = ser.serialize_entity(ctxt, obj)
        self.assertTrue('sysinv_object.name' in primitive)
        obj2 = ser.deserialize_entity(ctxt, primitive)
        self.assertTrue(isinstance(obj2, MyObj))
        self.assertEqual(ctxt, obj2._context)
Example #14
    def test_changed_4(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        obj.bar = 'something'
        self.assertEqual(obj.obj_what_changed(), set(['bar']))
        obj.modify_save_modify(ctxt)
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        self.assertEqual(obj.foo, 42)
        self.assertEqual(obj.bar, 'meow')
        self.assertRemotes()
Example #15
    def test_changed_3(self):
        ctxt = context.get_admin_context()
        obj = MyObj.get(ctxt)
        obj.foo = 123
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        obj.refresh(ctxt)
        self.assertEqual(obj.obj_what_changed(), set([]))
        self.assertEqual(obj.foo, 321)
        self.assertEqual(obj.bar, 'refreshed')
        self.assertRemotes()
Example #16
def local_registry_list(filename, included_apps, include_all_apps=False):
    """ Save the list of images present in the local registry
    to a file in yaml format.

    :param filename: name of the file to save to.
    :param included_apps: list of applications whose images are saved to the yaml file.
    :param include_all_apps: if True, the saved image list includes the images of all
                             apps, regardless of the included_apps list.
    """

    ctxt = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)

    # Save local registry images tags
    image_name_tag_list = []
    temp_image_name_list = rpcapi.docker_registry_image_list(ctxt)
    if not temp_image_name_list:
        raise Exception("Image list could not be retrieved "
                        "from local registry")

    for temp_image_name in temp_image_name_list:
        image_name = temp_image_name.get('name', None)
        if image_name:
            temp_image_tags = rpcapi.docker_registry_image_tags(
                ctxt, image_name)

            for image_name_tag in temp_image_tags:
                image_tag = image_name_tag.get('tag', None)
                if image_tag:
                    image_name_tag_list.append("%s:%s" %
                                               (image_name, image_tag))

    # Retrieve the images used by apps that should be excluded from yaml file
    excluded_images = []
    if not include_all_apps:
        apps_images = rpcapi.docker_get_apps_images(ctxt).items()
        if not apps_images:
            raise Exception("Apps image list could not be retrieved")
        for app, images in apps_images:
            if included_apps is None or app not in included_apps:
                excluded_images.extend(images)

    # Exclude apps images
    image_name_tag_list = list(set(image_name_tag_list) - set(excluded_images))

    data = {}
    data.update({'images': image_name_tag_list})

    try:
        with open(filename, 'w') as outfile:
            yaml.safe_dump(data, outfile, default_flow_style=False)
    except Exception as e:
        LOG.error("Error with local_registry_list: %s", e)
        sys.exit(1)
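
A minimal usage sketch; the output file name and application name below are placeholders.

# Save registry images, excluding those owned by apps other than 'my-app'.
local_registry_list('/tmp/local_registry_images.yml', ['my-app'])

# Save every image in the local registry, regardless of application.
local_registry_list('/tmp/local_registry_images.yml', None,
                    include_all_apps=True)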
Example #17
    def setUp(self):
        super(StorageTierDependentTCs, self).setUp()
        self.mock_set_crushmap = self.set_crushmap_patcher.start()
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.system = dbutils.create_test_isystem()
        self.load = dbutils.create_test_load()
        self.host_index = -1
        self.mon_index = -1
Example #18
    def setUp(self):
        super(UpdateCephCluster, self).setUp()
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.system = utils.create_test_isystem()
        self.load = utils.create_test_load()
        self.host_index = -1

        self.mock_upgrade_downgrade_kube_components = \
            self.upgrade_downgrade_kube_components_patcher.start()
Example #19
    def post(self, environ, start_response):
        LOG.info(_("post: environ=%s") % environ)
        inpt = environ['wsgi.input']
        length = int(environ.get('CONTENT_LENGTH', 0))

        x = inpt.read(length)
        q = dict(cgi.parse_qsl(x))
        try:
            node_id = q['i']
            deploy_key = q['k']
            address = q['a']
            port = q.get('p', '3260')
            iqn = q['n']
            lun = q.get('l', '1')
            err_msg = q.get('e')
        except KeyError as e:
            start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return "parameter '%s' is not defined" % e

        if err_msg:
            LOG.error(_('Deploy agent error message: %s'), err_msg)

        context = sysinv_context.get_admin_context()
        d = db.bm_node_get(context, node_id)

        if d['deploy_key'] != deploy_key:
            start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return 'key does not match'

        params = {
            'address': address,
            'port': port,
            'iqn': iqn,
            'lun': lun,
            'image_path': d['image_path'],
            'pxe_config_path': d['pxe_config_path'],
            'root_mb': int(d['root_mb']),
            'swap_mb': int(d['swap_mb']),
        }
        # Restart worker, if needed
        if not self.worker.isAlive():
            self.worker = Worker()
            self.worker.start()
        LOG.info(
            _("request is queued: node %(node_id)s, params %(params)s") % {
                'node_id': node_id,
                'params': params
            })
        QUEUE.put((node_id, params))
        # Requests go to Worker.run()
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ''
Example #20
def refresh_helm_repo_information():
    """Refresh the helm chart repository information.

    Ensure that the local repository information maintained in key user home
    directories is updated. Run this after application uploads.

    This handles scenarios where an upload occurs on the active controller
    followed by a swact. The newly activated controller needs to make sure that
    the local repository cache reflects any changes.
    """
    LOG.debug("refresh_helm_repo_information: sending command to agent(s)")
    rpcapi = agent_rpcapi.AgentAPI()
    rpcapi.refresh_helm_repo_information(context.get_admin_context())
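
A minimal usage sketch, assuming a hypothetical call site in the application upload path; the hook name is illustrative.

# Hypothetical hook: after an application upload completes on the active
# controller, ask the agents to refresh their local helm repo caches.
def _on_app_upload_complete():
    refresh_helm_repo_information()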
Example #21
    def setUp(self):
        super(TestHealth, self).setUp()

        # Mock the patching API
        self.mock_patch_query_hosts_result = None

        def mock_patch_query_hosts(token, timeout, region_name):
            return self.mock_patch_query_hosts_result

        self.mocked_patch_query_hosts = mock.patch(
            'sysinv.api.controllers.v1.patch_api.patch_query_hosts',
            mock_patch_query_hosts)
        self.mocked_patch_query_hosts.start()
        self.addCleanup(self.mocked_patch_query_hosts.stop)

        # Mock the KubeOperator
        self.kube_get_nodes_result = None

        def mock_kube_get_nodes(obj):
            return self.kube_get_nodes_result

        self.mocked_kube_get_nodes = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_nodes',
            mock_kube_get_nodes)
        self.mocked_kube_get_nodes.start()
        self.addCleanup(self.mocked_kube_get_nodes.stop)

        self.kube_get_control_plane_pod_ready_status_result = None

        def mock_kube_get_control_plane_pod_ready_status(obj):
            return self.kube_get_control_plane_pod_ready_status_result

        self.mocked_kube_get_control_plane_pod_ready_status = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.'
            'kube_get_control_plane_pod_ready_status',
            mock_kube_get_control_plane_pod_ready_status)
        self.mocked_kube_get_control_plane_pod_ready_status.start()
        self.addCleanup(
            self.mocked_kube_get_control_plane_pod_ready_status.stop)

        # Mock the fm API
        p = mock.patch('sysinv.common.health.fmclient')
        self.mock_fm_client_alarm_list = p.start()
        self.addCleanup(p.stop)

        # Set up objects for testing
        self.context = context.get_admin_context()
        self.health = health.Health(self.dbapi)

        # Set up results
        self.setup_result()
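
A sketch of how a test might consume the mocks wired up in this setUp(); the seeded payloads and the Health API exercised are assumptions for illustration, not copied from the real test suite.

    def test_health_query_sketch(self):
        # Seed the return values consumed by the mocks wired up in setUp().
        self.mock_patch_query_hosts_result = {'data': []}
        self.kube_get_nodes_result = []
        self.kube_get_control_plane_pod_ready_status_result = {}
        # The exact Health call used here is an assumption; it would now see
        # the seeded patching and kubernetes responses instead of real APIs.
        self.health.get_system_health(self.context)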
Example #22
    def test_object_serialization_iterables(self):
        ser = base.SysinvObjectSerializer()
        ctxt = context.get_admin_context()
        obj = MyObj()
        for iterable in (list, tuple, set):
            thing = iterable([obj])
            primitive = ser.serialize_entity(ctxt, thing)
            self.assertEqual(1, len(primitive))
            for item in primitive:
                self.assertFalse(isinstance(item, base.SysinvObject))
            thing2 = ser.deserialize_entity(ctxt, primitive)
            self.assertEqual(1, len(thing2))
            for item in thing2:
                self.assertTrue(isinstance(item, MyObj))
Example #23
def notify(context, message):
    """Sends a notification via RPC"""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority', CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            LOG.exception(
                _("Could not send notification to %(topic)s. "
                  "Payload=%(message)s"), locals())
Example #24
    def setUp(self):
        super(AppOperatorTestCase, self).setUp()

        # Manager holds apps_metadata dict
        self.service = manager.ConductorManager('test-host', 'test-topic')

        # Set up objects for testing
        self.helm_operator = helm.HelmOperator(dbapi.get_instance())
        self.app_operator = kube_app.AppOperator(dbapi.get_instance(),
                                                 self.helm_operator,
                                                 self.service.apps_metadata)
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.temp_dir = self.useFixture(fixtures.TempDir())
Example #25
    def setUp(self):
        super(RestoreTestCase, self).setUp()

        # Set up objects for testing
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.valid_restore_states = [
            constants.RESTORE_PROGRESS_ALREADY_COMPLETED,
            constants.RESTORE_PROGRESS_STARTED,
            constants.RESTORE_PROGRESS_ALREADY_IN_PROGRESS,
            constants.RESTORE_PROGRESS_NOT_IN_PROGRESS,
            constants.RESTORE_PROGRESS_IN_PROGRESS,
            constants.RESTORE_PROGRESS_COMPLETED
        ]
Example #26
    def setUp(self):
        super(UpdateCephCluster, self).setUp()
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.system = utils.create_test_isystem()
        self.load = utils.create_test_load()
        self.host_index = -1

        self.mock_upgrade_downgrade_kube_components = \
            self.upgrade_downgrade_kube_components_patcher.start()
        self.mock_fix_crushmap = self.fix_crushmap_patcher.start()
        self.mock_fix_crushmap.return_value = True

        self.service._sx_to_dx_post_migration_actions = mock.Mock()
Example #27
def add_lease(mac, ip_address):
    """Called when a new lease is created."""

    ctxt = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)

    cid = None
    cid = os.getenv('DNSMASQ_CLIENT_ID')

    tags = None
    tags = os.getenv('DNSMASQ_TAGS')

    if tags is not None:
        # TODO: Maybe this shouldn't be synchronous - if this hangs, we could
        # cause dnsmasq to get stuck...
        rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid)
Example #28
    def setUp(self):
        super(StorageTierDependentTCs, self).setUp()
        self.mock_set_crushmap = self.set_crushmap_patcher.start()
        self.set_monitors_status_patcher = self.set_monitors_status_patcher.start()
        self.set_monitors_status_patcher.return_value = \
            [3, 2, ['controller-0', 'controller-1', 'storage-0']]
        self.set_is_initial_config_patcher = self.set_is_initial_config_patcher.start()
        self.set_is_initial_config_patcher.return_value = True
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()
        self.context = context.get_admin_context()
        self.dbapi = dbapi.get_instance()
        self.system = dbutils.create_test_isystem()
        self.load = dbutils.create_test_load()
        self.host_index = -1
        self.mon_index = -1

        self.mock_upgrade_downgrade_kube_components = self.upgrade_downgrade_kube_components_patcher.start()
Example #29
def send_notification(operation, success):
    if operation not in VALID_NOTIFICATION_VALUES:
        LOG.error("Invalid notification '{}'.".format(operation))
        sys.exit(2)
    ctx = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)
    ok, app = rpcapi.backup_restore_lifecycle_actions(ctx, operation, success)
    if not ok:
        if app is not None:
            LOG.error("Operation '{}' was aborted by '{}' appliction.".format(
                operation, app))
            sys.stderr.write(app)
            sys.exit(1)
        else:
            LOG.error(
                "Error while performing operation '{}'.".format(operation))
            sys.exit(2)
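
A hypothetical invocation of the function above; 'backup' stands in for one of the VALID_NOTIFICATION_VALUES entries, which are not shown in this excerpt.

    send_notification('backup', True)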
Example #30
def old_lease(mac, ip_address):
    """Called when an old lease is recognized."""

    # This happens when a node is rebooted, but it can also happen if the
    # node was deleted and then rebooted, so we need to re-add in that case.

    ctxt = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)

    cid = None
    cid = os.getenv('DNSMASQ_CLIENT_ID')

    tags = None
    tags = os.getenv('DNSMASQ_TAGS')

    if tags is not None:
        # TODO: Maybe this shouldn't be synchronous - if this hangs, we could
        # cause dnsmasq to get stuck...
        rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid)
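
add_lease() and old_lease() above are typically invoked from a dnsmasq --dhcp-script handler. A minimal dispatcher sketch follows; the argument layout matches dnsmasq's documented action/MAC/IP convention, while the surrounding module (imports, entry-point wiring) is assumed.

import sys

def main():
    # dnsmasq calls the lease script as: <script> <action> <mac> <ip> [<hostname>]
    action, mac, ip_address = sys.argv[1:4]
    if action == 'add':
        add_lease(mac, ip_address)
    elif action == 'old':
        old_lease(mac, ip_address)
    # 'del' events are ignored in this sketch.

if __name__ == '__main__':
    main()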