Example #1
0
def _configure_block_devices():
    """Configure block devices, either from Juju storage or as a local block
    device configured in the config.

    No-op unless the 'volume' service is enabled.  Merges devices named in
    the 'block-device' config option (a whitespace-separated list) with any
    attached Juju 'block-devices' storage locations, then passes the
    combined (possibly empty) list to configure_lvm_storage().
    """
    if service_enabled('volume'):
        block_devices = []
        # first see if a specified block device is configured
        conf = config()
        if conf['block-device'] not in [None, 'None', 'none']:
            # 'block-device' may name several devices, whitespace-separated
            block_devices.extend(conf['block-device'].split())
        # now see if there are any Juju storage devices configured
        storage_ids = storage_list('block-devices')
        storage_devs = [storage_get('location', s) for s in storage_ids]
        # add them into the block_devices:
        block_devices.extend(storage_devs)
        if block_devices:
            status_set('maintenance', 'Checking configuration of lvm storage')
        # Note that there may be None now, and remove-missing is set to true,
        # so we still have to run the function regardless of whether
        # block_devices is an empty list or not.
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])
Example #2
0
    def test_configure_lvm_storage_different_vg_ignore(self, extend_lvm,
                                                       reduce_lvm,
                                                       clean_storage):
        """With overwrite and remove-missing both off, devices that already
        belong to another volume group are left entirely alone.
        """
        dev_list = ['/dev/fakevbd', '/dev/fakevdc']
        # Both devices are existing PVs; only fakevbd is in the target VG.
        self.is_lvm_physical_volume.side_effect = (
            {'/dev/fakevbd': True, '/dev/fakevdc': True}.__getitem__)
        self.list_lvm_volume_group.side_effect = (
            {'/dev/fakevbd': 'test', '/dev/fakevdc': 'another'}.__getitem__)
        self.ensure_loopback_device.side_effect = lambda path, size: path
        cinder_utils.configure_lvm_storage(dev_list, 'test', False, False)
        # No storage operation of any kind may have happened.
        untouched = (clean_storage,
                     self.create_lvm_physical_volume,
                     reduce_lvm,
                     extend_lvm,
                     self.create_lvm_volume_group)
        for mocked in untouched:
            self.assertFalse(mocked.called)
    def test_configure_lvm_storage_different_vg(self, extend_lvm, reduce_lvm,
                                                clean_storage, lvm_exists):
        """With overwrite=True, a PV belonging to another VG ('another') is
        cleaned, re-created as a PV, and used to extend the target VG.
        """
        # Both devices are already LVM physical volumes.
        def pv_lookup(device):
            devices = {
                '/dev/fakevbd': True,
                '/dev/fakevdc': True
            }
            return devices[device]

        # fakevbd is already in the target VG; fakevdc sits in 'another'.
        def vg_lookup(device):
            devices = {
                '/dev/fakevbd': 'test',
                '/dev/fakevdc': 'another'
            }
            return devices[device]
        devices = ['/dev/fakevbd', '/dev/fakevdc']
        self.is_lvm_physical_volume.side_effect = pv_lookup
        self.list_lvm_volume_group.side_effect = vg_lookup
        lvm_exists.return_value = False
        # Loopback specs pass through unchanged (these are real devices).
        self.ensure_loopback_device.side_effect = lambda x, y: x
        cinder_utils.configure_lvm_storage(devices, 'test', True, True)
        # Only the foreign device is reclaimed into 'test'.
        clean_storage.assert_called_with('/dev/fakevdc')
        self.create_lvm_physical_volume.assert_called_with('/dev/fakevdc')
        reduce_lvm.assert_called_with('test')
        extend_lvm.assert_called_with('test', '/dev/fakevdc')
        self.assertFalse(self.create_lvm_volume_group.called)
Example #4
0
    def test_configure_lvm_storage_different_vg(self, list_thin_pools,
                                                extend_lvm, reduce_lvm,
                                                clean_storage, lvm_exists):
        """With overwrite=True, a PV belonging to another VG ('another') is
        cleaned, re-created as a PV, and used to extend the target VG.
        """
        # Both devices are already LVM physical volumes.
        def pv_lookup(device):
            devices = {
                '/dev/fakevbd': True,
                '/dev/fakevdc': True
            }
            return devices[device]

        # fakevbd is already in the target VG; fakevdc sits in 'another'.
        def vg_lookup(device):
            devices = {
                '/dev/fakevbd': 'test',
                '/dev/fakevdc': 'another'
            }
            return devices[device]
        devices = ['/dev/fakevbd', '/dev/fakevdc']
        # Devices are unmounted real block devices.
        self.is_device_mounted.return_value = False
        self.is_block_device.return_value = True
        self.is_lvm_physical_volume.side_effect = pv_lookup
        self.list_lvm_volume_group.side_effect = vg_lookup
        lvm_exists.return_value = False
        # Loopback specs pass through unchanged (these are real devices).
        self.ensure_loopback_device.side_effect = lambda x, y: x
        cinder_utils.configure_lvm_storage(devices, 'test', True, True)
        # Only the foreign device is reclaimed into 'test'.
        clean_storage.assert_called_with('/dev/fakevdc')
        self.create_lvm_physical_volume.assert_called_with('/dev/fakevdc')
        reduce_lvm.assert_called_with('test')
        extend_lvm.assert_called_with('test', '/dev/fakevdc')
        self.assertFalse(self.create_lvm_volume_group.called)
Example #5
0
 def test_configure_lvm_storage_unused_dev(self, extend_lv_by_dev,
                                           list_thin_pools,
                                           extend_lvm, reduce_lvm,
                                           clean_storage, has_part):
     """Unused block devices are wiped, initialised as PVs, joined to the
     VG, and any existing thin pool is grown by the newly added device.
     """
     devices = ['/dev/fakevbd', '/dev/fakevdc']
     # Devices are unmounted block devices with no PV and no partitions.
     self.is_device_mounted.return_value = False
     self.is_lvm_physical_volume.return_value = False
     self.is_block_device.return_value = True
     has_part.return_value = False
     # Loopback specs pass through unchanged (these are real devices).
     self.ensure_loopback_device.side_effect = lambda x, y: x
     list_thin_pools.return_value = ['vg/thinpool']
     cinder_utils.configure_lvm_storage(devices, 'test', False, True)
     clean_storage.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_physical_volume.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
     # The existing thin pool must be extended onto the second device.
     extend_lv_by_dev.assert_called_once_with('vg/thinpool',
                                              '/dev/fakevdc')
    def test_configure_lvm_storage_different_vg_ignore(self, extend_lvm,
                                                       reduce_lvm,
                                                       clean_storage):
        """Devices owned by a foreign VG stay untouched when neither
        overwrite nor remove-missing is requested.
        """
        pv_state = {'/dev/fakevbd': True, '/dev/fakevdc': True}
        vg_state = {'/dev/fakevbd': 'test', '/dev/fakevdc': 'another'}
        self.is_lvm_physical_volume.side_effect = pv_state.__getitem__
        self.list_lvm_volume_group.side_effect = vg_state.__getitem__
        self.ensure_loopback_device.side_effect = lambda p, s: p
        cinder_utils.configure_lvm_storage(
            ['/dev/fakevbd', '/dev/fakevdc'], 'test', False, False)
        # Nothing may be wiped, created, reduced or extended.
        self.assertFalse(clean_storage.called)
        self.assertFalse(self.create_lvm_physical_volume.called)
        self.assertFalse(reduce_lvm.called)
        self.assertFalse(extend_lvm.called)
        self.assertFalse(self.create_lvm_volume_group.called)
    def test_configure_lvm_storage_existing_vg(self, extend_lvm, reduce_lvm,
                                               clean_storage, lvm_exists):
        """A device that is not yet a PV is cleaned, initialised and used to
        extend the already-populated target VG (no new VG is created).
        """
        # fakevbd is already a PV; fakevdc is a fresh device.
        def pv_lookup(device):
            devices = {
                '/dev/fakevbd': True,
                '/dev/fakevdc': False
            }
            return devices[device]

        # fakevbd already sits in the target VG; fakevdc is in no VG.
        def vg_lookup(device):
            devices = {
                '/dev/fakevbd': 'test',
                '/dev/fakevdc': None
            }
            return devices[device]
        devices = ['/dev/fakevbd', '/dev/fakevdc']
        lvm_exists.return_value = False
        self.is_lvm_physical_volume.side_effect = pv_lookup
        self.list_lvm_volume_group.side_effect = vg_lookup
        cinder_utils.configure_lvm_storage(devices, 'test', True, True)
        # Only the new device is prepared and added.
        clean_storage.assert_has_calls(
            [call('/dev/fakevdc')]
        )
        self.create_lvm_physical_volume.assert_has_calls(
            [call('/dev/fakevdc')]
        )
        reduce_lvm.assert_called_with('test')
        extend_lvm.assert_called_with('test', '/dev/fakevdc')
        self.assertFalse(self.create_lvm_volume_group.called)
Example #8
0
    def test_configure_lvm_storage_existing_vg(self, extend_lvm, reduce_lvm,
                                               clean_storage, lvm_exists):
        """A fresh (non-PV) device is prepared and added to the existing
        target VG; no new volume group is created.
        """
        dev_list = ['/dev/fakevbd', '/dev/fakevdc']
        lvm_exists.return_value = False
        # fakevbd is already a PV in the target VG; fakevdc is untouched.
        self.is_lvm_physical_volume.side_effect = (
            {'/dev/fakevbd': True, '/dev/fakevdc': False}.__getitem__)
        self.list_lvm_volume_group.side_effect = (
            {'/dev/fakevbd': 'test', '/dev/fakevdc': None}.__getitem__)
        cinder_utils.configure_lvm_storage(dev_list, 'test', True, True)
        # Only the fresh device is wiped, initialised and added.
        clean_storage.assert_has_calls([call('/dev/fakevdc')])
        self.create_lvm_physical_volume.assert_has_calls(
            [call('/dev/fakevdc')])
        reduce_lvm.assert_called_with('test')
        extend_lvm.assert_called_with('test', '/dev/fakevdc')
        self.assertFalse(self.create_lvm_volume_group.called)
Example #9
0
 def test_configure_lvm_storage_force_removemissing(self, reduce_lvm):
     """When remove_missing_force is set, reduce_lvm receives --force."""
     cinder_utils.configure_lvm_storage(
         ['/dev/fakevbd'], 'test', False, True, remove_missing_force=True)
     reduce_lvm.assert_called_with('test', extra_args=['--force'])
Example #10
0
def config_changed():
    """Handle a change of charm configuration.

    Re-applies ipv6 setup, ephemeral unmounting and LVM storage
    configuration, performs a Git or OpenStack package upgrade when
    requested, then rewrites config files and re-fires the cluster/ha
    relation hooks so peers pick up the new settings.
    """
    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    # Unmount the configured ephemeral mountpoint if it is mounted.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        status_set('maintenance', 'Configuring lvm storage')
        # 'block-device' is a whitespace-separated list of device paths.
        block_devices = conf['block-device'].split()
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])

    # Git-based installs take precedence over package upgrades.
    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not in a conf file, so restart_on_change
    # cannot detect it; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    # Re-fire peer/HA relation hooks so they see the new configuration.
    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
Example #11
0
def config_changed():
    """Handle a change of charm configuration.

    Re-applies ipv6 setup, ephemeral unmounting and LVM storage
    configuration, performs a Git or OpenStack package upgrade when
    requested, then rewrites config files and re-fires the cluster/ha
    relation hooks so peers pick up the new settings.
    """
    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    # Unmount the configured ephemeral mountpoint if it is mounted.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        status_set('maintenance', 'Configuring lvm storage')
        # 'block-device' is a whitespace-separated list of device paths.
        block_devices = conf['block-device'].split()
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])

    # Git-based installs take precedence over package upgrades.
    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not in a conf file, so restart_on_change
    # cannot detect it; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    # Re-fire peer/HA relation hooks so they see the new configuration.
    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
Example #12
0
 def test_configure_lvm_storage_unused_dev(self, extend_lvm, reduce_lvm,
                                           clean_storage, has_part):
     """Partition-free non-PV devices are wiped, initialised and added to
     the volume group; the first device creates the group."""
     dev_list = ['/dev/fakevbd', '/dev/fakevdc']
     has_part.return_value = False
     self.is_lvm_physical_volume.return_value = False
     cinder_utils.configure_lvm_storage(dev_list, 'test', False, True)
     both = [call('/dev/fakevbd'), call('/dev/fakevdc')]
     clean_storage.assert_has_calls(both)
     self.create_lvm_physical_volume.assert_has_calls(both)
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
Example #13
0
 def test_configure_lvm_storage_loopback(self, extend_lvm, reduce_lvm,
                                         clean_storage,
                                         ensure_non_existent):
     """A 'path|size' spec is turned into a loopback device which becomes
     the sole backing PV for the volume group.
     """
     devices = ['/mnt/loop0|10']
     self.ensure_loopback_device.return_value = '/dev/loop0'
     self.is_lvm_physical_volume.return_value = False
     cinder_utils.configure_lvm_storage(devices, 'test', True, True)
     clean_storage.assert_called_with('/dev/loop0')
     # The spec must be split into backing file path and size.
     self.ensure_loopback_device.assert_called_with('/mnt/loop0', '10')
     self.create_lvm_physical_volume.assert_called_with('/dev/loop0')
     self.create_lvm_volume_group.assert_called_with('test', '/dev/loop0')
     reduce_lvm.assert_called_with('test')
     # Only one device, so the VG is never extended.
     self.assertFalse(extend_lvm.called)
     ensure_non_existent.assert_called_with('test')
 def test_configure_lvm_storage(self, extend_lvm, reduce_lvm, clean_storage,
                                ensure_non_existent):
     """With overwrite on, every device is cleaned and turned into a PV;
     the first seeds the VG and the rest extend it."""
     dev_list = ['/dev/fakevbd', '/dev/fakevdc']
     self.ensure_loopback_device.side_effect = lambda path, size: path
     self.is_lvm_physical_volume.return_value = False
     cinder_utils.configure_lvm_storage(dev_list, 'test', True, True)
     both = [call('/dev/fakevbd'), call('/dev/fakevdc')]
     clean_storage.assert_has_calls(both)
     self.create_lvm_physical_volume.assert_has_calls(both)
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
     ensure_non_existent.assert_called_with('test')
 def test_configure_lvm_storage_loopback(self, extend_lvm, reduce_lvm,
                                         clean_storage,
                                         ensure_non_existent):
     """A 'path|size' spec is turned into a loopback device which becomes
     the sole backing PV for the volume group.
     """
     devices = ['/mnt/loop0|10']
     self.ensure_loopback_device.return_value = '/dev/loop0'
     self.is_lvm_physical_volume.return_value = False
     cinder_utils.configure_lvm_storage(devices, 'test', True, True)
     clean_storage.assert_called_with('/dev/loop0')
     # The spec must be split into backing file path and size.
     self.ensure_loopback_device.assert_called_with('/mnt/loop0', '10')
     self.create_lvm_physical_volume.assert_called_with('/dev/loop0')
     self.create_lvm_volume_group.assert_called_with('test', '/dev/loop0')
     reduce_lvm.assert_called_with('test')
     # Only one device, so the VG is never extended.
     self.assertFalse(extend_lvm.called)
     ensure_non_existent.assert_called_with('test')
 def test_configure_lvm_storage_unused_dev(self, extend_lvm, reduce_lvm,
                                           clean_storage, has_part):
     """Devices without a PV or partition table are wiped, initialised and
     joined to the volume group; the first one creates the group."""
     dev_list = ['/dev/fakevbd', '/dev/fakevdc']
     has_part.return_value = False
     self.is_lvm_physical_volume.return_value = False
     cinder_utils.configure_lvm_storage(dev_list, 'test', False, True)
     both = [call('/dev/fakevbd'), call('/dev/fakevdc')]
     clean_storage.assert_has_calls(both)
     self.create_lvm_physical_volume.assert_has_calls(both)
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
 def test_configure_lvm_storage(self, extend_lvm, reduce_lvm,
                                clean_storage, ensure_non_existent):
     """With overwrite on, every device is cleaned and turned into a PV;
     the first seeds the VG and the rest extend it.
     """
     devices = ['/dev/fakevbd', '/dev/fakevdc']
     self.is_lvm_physical_volume.return_value = False
     # Loopback specs pass through unchanged (these are real devices).
     self.ensure_loopback_device.side_effect = lambda x, y: x
     cinder_utils.configure_lvm_storage(devices, 'test', True, True)
     clean_storage.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_physical_volume.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
     ensure_non_existent.assert_called_with('test')
 def test_configure_lvm_storage_unused_dev(self, extend_lv_by_dev,
                                           list_thin_pools,
                                           extend_lvm, reduce_lvm,
                                           clean_storage, has_part):
     """Unused devices are wiped, initialised as PVs, joined to the VG, and
     any existing thin pool is grown by the newly added device.
     """
     devices = ['/dev/fakevbd', '/dev/fakevdc']
     # Devices carry neither a PV nor a partition table.
     self.is_lvm_physical_volume.return_value = False
     has_part.return_value = False
     # Loopback specs pass through unchanged (these are real devices).
     self.ensure_loopback_device.side_effect = lambda x, y: x
     list_thin_pools.return_value = ['vg/thinpool']
     cinder_utils.configure_lvm_storage(devices, 'test', False, True)
     clean_storage.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_physical_volume.assert_has_calls(
         [call('/dev/fakevbd'),
          call('/dev/fakevdc')]
     )
     self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
     reduce_lvm.assert_called_with('test')
     extend_lvm.assert_called_with('test', '/dev/fakevdc')
     # The existing thin pool must be extended onto the second device.
     extend_lv_by_dev.assert_called_once_with('vg/thinpool',
                                              '/dev/fakevdc')
Example #19
0
 def test_configure_lvm_storage_unforced_remove_default(self, reduce_lvm):
     """By default reduce_lvm is invoked without any extra (force) args."""
     dev_list = ['/dev/fakevbd']
     cinder_utils.configure_lvm_storage(dev_list, 'test', False, True)
     reduce_lvm.assert_called_with('test')
Example #20
0
 def test_configure_lvm_storage_used_dev(self, reduce_lvm, has_part):
     """A device that carries a partition table is left untouched; only
     the volume group is reduced.
     """
     devices = ['/dev/fakevbd', '/dev/fakevdc']
     self.is_lvm_physical_volume.return_value = False
     # Devices look in use: they have a partition table.
     has_part.return_value = True
     cinder_utils.configure_lvm_storage(devices, 'test', False, True)
     reduce_lvm.assert_called_with('test')
 def test_configure_lvm_storage_force_removemissing(self, reduce_lvm):
     """It forces remove missing when asked to."""
     devices = ['/dev/fakevbd']
     # remove_missing_force must translate into extra_args=['--force'].
     cinder_utils.configure_lvm_storage(
         devices, 'test', False, True, remove_missing_force=True)
     reduce_lvm.assert_called_with('test', extra_args=['--force'])
 def test_configure_lvm_storage_unforced_remove_default(self, reduce_lvm):
     """Missing-PV removal is not forced unless explicitly requested."""
     cinder_utils.configure_lvm_storage(['/dev/fakevbd'], 'test',
                                        False, True)
     reduce_lvm.assert_called_with('test')
 def test_configure_lvm_storage_used_dev(self, reduce_lvm, has_part):
     """Devices carrying a partition table are skipped; only the VG
     reduction runs."""
     self.is_lvm_physical_volume.return_value = False
     has_part.return_value = True
     cinder_utils.configure_lvm_storage(['/dev/fakevbd', '/dev/fakevdc'],
                                        'test', False, True)
     reduce_lvm.assert_called_with('test')
def config_changed():
    """Handle a change of charm configuration.

    Skipped entirely while the unit is paused.  Re-applies ipv6 setup,
    ephemeral unmounting and LVM storage configuration, performs an
    OpenStack package upgrade when available, then rewrites config files
    and re-fires the cluster/ha/identity-service relation hooks.
    """
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    # Unmount the configured ephemeral mountpoint if it is mounted.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        status_set('maintenance', 'Configuring lvm storage')
        # 'block-device' is a whitespace-separated list of device paths.
        block_devices = conf['block-device'].split()
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not in a conf file, so restart_on_change
    # cannot detect it; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    # Re-fire peer/HA relation hooks so they see the new configuration.
    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    # NOTE(jamespage):
    # ensure any new volume endpoints are created. Note that this
    # is normally done after an openstack series upgrade, but this
    # was not performed historically so always execute to ensure
    # any upgrades where this step was missed are fixed.
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
Example #25
0
def config_changed():
    """Handle a change of charm configuration.

    Skipped entirely while the unit is paused.  Re-applies ipv6 setup,
    ephemeral unmounting and LVM storage configuration, performs an
    OpenStack package upgrade when available, rewrites config files,
    re-fires the cluster/ha/identity-service relation hooks, and finally
    applies any policy overrides.
    """
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    # Unmount the configured ephemeral mountpoint if it is mounted.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        status_set('maintenance', 'Configuring lvm storage')
        # 'block-device' is a whitespace-separated list of device paths.
        block_devices = conf['block-device'].split()
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not in a conf file, so restart_on_change
    # cannot detect it; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    # Re-fire peer/HA relation hooks so they see the new configuration.
    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    # NOTE(jamespage):
    # ensure any new volume endpoints are created. Note that this
    # is normally done after an openstack series upgrade, but this
    # was not performed historically so always execute to ensure
    # any upgrades where this step was missed are fixed.
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)

    # call the policy overrides handler which will install any policy
    # overrides
    maybe_do_policyd_overrides_on_config_changed(
        os_release('cinder-common'),
        'cinder',
        restart_handler=lambda: service_restart('cinder-api'))