def storage_changed():
    """Handle a change on the storage relation.

    Only the leader unit can update and distribute rings so if we are not
    the leader we ignore this event and wait for a resync request from the
    leader.

    Returns None early if we are not the leader, the host IP cannot be
    determined, or the remote unit has not yet provided all required
    settings.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Not the leader - deferring storage relation change to leader "
            "unit.", level=DEBUG)
        return

    # Fixed log typo: "changed -processing" -> "changed - processing".
    log("Storage relation changed - processing", level=DEBUG)
    host_ip = get_host_ip()
    if not host_ip:
        log("No host ip found in storage relation - deferring storage "
            "relation", level=WARNING)
        return

    update_rsync_acls()

    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }

    # Bail out until the remote unit has published every required key.
    if None in node_settings.values():
        missing = [k for k, v in node_settings.items() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing={})".format(', '.join(missing)), level=INFO)
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    update_rings(nodes)

    if not openstack.is_unit_paused_set():
        # Restart proxy here in case no config changes made (so
        # restart_on_change() ineffective).
        service_restart('swift-proxy')
def storage_changed():
    """Handle a change on the storage relation.

    Only the leader unit can update and distribute rings so if we are not
    the leader we ignore this event and wait for a resync request from the
    leader.

    Returns None early if we are not the leader, the host IP cannot be
    determined, or the remote unit has not yet provided all required
    settings.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Not the leader - ignoring storage relation until leader ready.",
            level=DEBUG)
        return

    log("Leader established, updating ring builders", level=INFO)
    host_ip = get_host_ip()
    if not host_ip:
        log("No host ip found in storage relation - deferring storage "
            "relation", level=WARNING)
        return

    update_rsync_acls()

    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }

    # Use values()/items() rather than the Python-2-only
    # itervalues()/iteritems() so this works on both Python 2 and 3.
    if None in node_settings.values():
        missing = [k for k, v in node_settings.items() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing=%s)" % (', '.join(missing)), level=INFO)
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    update_rings(nodes)

    if not is_paused():
        # Restart proxy here in case no config changes made (so
        # pause_aware_restart_on_change() ineffective).
        service_restart('swift-proxy')
def config_changed():
    """Handle config-changed.

    On the leader, (re)initialise any missing ring builders and update the
    rings; on all units, (re)write configs, manage https/ports/nrpe, run an
    OpenStack upgrade if one is available and not action-managed, and
    re-fire dependent relations so they pick up the new config.
    """
    if is_elected_leader(SWIFT_HA_RES):
        log("Leader established, generating ring builders", level=INFO)

        # initialize new storage rings.
        for ring, path in SWIFT_RINGS.items():
            if not os.path.exists(path):
                initialize_ring(path,
                                config('partition-power'),
                                determine_replicas(ring),
                                config('min-hours'))

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        setup_ipv6()

    configure_https()
    open_port(config('bind-port'))
    update_nrpe_config()

    # Determine whether or not we should do an upgrade.
    if not config('action-managed-upgrade') and \
            openstack.openstack_upgrade_available('swift'):
        # Set the status *before* running the upgrade so the workload
        # status is accurate while the (potentially long) upgrade runs.
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(CONFIGS)

    if not leader_get('swift-proxy-rings-consumer'):
        status_set('maintenance', 'Updating and (maybe) balancing rings')
        update_rings(min_part_hours=config('min-hours'),
                     replicas=config('replicas'))

    if not config('disable-ring-balance') and is_elected_leader(SWIFT_HA_RES):
        # Try ring balance. If rings are balanced, no sync will occur.
        balance_rings()

    for r_id in relation_ids('identity-service'):
        keystone_joined(relid=r_id)

    for r_id in relation_ids('cluster'):
        cluster_joined(relation_id=r_id)

    for r_id in relation_ids('object-store'):
        object_store_joined(relation_id=r_id)

    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)

    try_initialize_swauth()

    # call the policy overrides handler which will install any policy
    # overrides
    policyd.maybe_do_policyd_overrides(openstack.os_release('swift-proxy'),
                                       'swift')
def test_update_rings(self, mock_set_min_hours, mock_get_min_hours,
                      mock_is_elected_leader, mock_path_exists, mock_log,
                      mock_balance_rings, mock_get_rings_checksum,
                      mock_get_builders_checksum, mock_update_www_rings,
                      mock_previously_synced):
    """Exercise each early-exit of update_rings(), then the happy paths."""
    # Identical checksums on both sides so no ring sync is attempted.
    mock_get_rings_checksum.return_value = None
    mock_get_builders_checksum.return_value = None
    mock_previously_synced.return_value = True

    # Blocker 1: not the leader -> nothing happens.
    mock_is_elected_leader.return_value = False
    swift_utils.update_rings()
    self.assertFalse(mock_balance_rings.called)

    # Blocker 2: leader, but ring files absent -> bail before min-hours.
    mock_is_elected_leader.return_value = True
    mock_path_exists.return_value = False
    swift_utils.update_rings()
    self.assertFalse(mock_get_min_hours.called)
    self.assertFalse(mock_balance_rings.called)

    # Blocker 3: min-part-hours already at the requested value -> no-op.
    mock_path_exists.return_value = True
    mock_get_min_hours.return_value = 10
    swift_utils.update_rings(min_part_hours=10)
    self.assertTrue(mock_get_min_hours.called)
    self.assertFalse(mock_set_min_hours.called)
    self.assertFalse(mock_balance_rings.called)
    mock_get_min_hours.reset_mock()

    # Happy path: value differs -> it is updated and rings rebalanced.
    mock_get_min_hours.return_value = 0
    swift_utils.update_rings(min_part_hours=10)
    self.assertTrue(mock_get_min_hours.called)
    self.assertTrue(mock_set_min_hours.called)
    self.assertTrue(mock_balance_rings.called)
    mock_balance_rings.reset_mock()

    # Happy path with rebalance disabled -> value updated, no balance.
    swift_utils.update_rings(min_part_hours=10, rebalance=False)
    self.assertTrue(mock_get_min_hours.called)
    self.assertTrue(mock_set_min_hours.called)
    self.assertFalse(mock_balance_rings.called)
def config_changed():
    """Handle config-changed.

    On the leader, (re)initialise any missing ring builders; on all units,
    (re)write configs, manage https/ports/nrpe, run an OpenStack upgrade if
    available and not action-managed, update/balance rings and re-fire the
    identity-service and object-store relations.
    """
    if is_elected_leader(SWIFT_HA_RES):
        log("Leader established, generating ring builders", level=INFO)

        # initialize new storage rings.
        # values() (not the Python-2-only itervalues()) so this also runs
        # under Python 3.
        for path in SWIFT_RINGS.values():
            if not os.path.exists(path):
                initialize_ring(path,
                                config('partition-power'),
                                config('replicas'),
                                config('min-hours'))

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        setup_ipv6()

    configure_https()
    open_port(config('bind-port'))
    update_nrpe_config()

    # Determine whether or not we should do an upgrade.
    if not config('action-managed-upgrade') and \
            openstack.openstack_upgrade_available('python-swift'):
        # Set the status *before* running the upgrade so the workload
        # status is accurate while the (potentially long) upgrade runs.
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(CONFIGS)

    status_set('maintenance', 'Updating and balancing rings')
    update_rings(min_part_hours=config('min-hours'))

    if not config('disable-ring-balance') and is_elected_leader(SWIFT_HA_RES):
        # Try ring balance. If rings are balanced, no sync will occur.
        balance_rings()

    for r_id in relation_ids('identity-service'):
        keystone_joined(relid=r_id)

    for r_id in relation_ids('object-store'):
        object_store_joined(relation_id=r_id)
def test_update_rings(self, mock_set_min_hours, mock_get_min_hours,
                      mock_is_elected_leader, mock_path_exists, mock_log,
                      mock_balance_rings, mock_get_rings_checksum,
                      mock_get_builders_checksum, mock_update_www_rings,
                      mock_previously_synced):
    """Walk update_rings() through each early-exit, then the happy path."""
    # Identical checksums on both sides so no ring sync is attempted.
    mock_get_rings_checksum.return_value = None
    mock_get_builders_checksum.return_value = None
    mock_previously_synced.return_value = True

    # Blocker 1: not the leader -> nothing happens.
    mock_is_elected_leader.return_value = False
    swift_utils.update_rings()
    self.assertFalse(mock_balance_rings.called)

    # Blocker 2: leader, but ring files absent -> bail before min-hours.
    mock_is_elected_leader.return_value = True
    mock_path_exists.return_value = False
    swift_utils.update_rings()
    self.assertFalse(mock_get_min_hours.called)
    self.assertFalse(mock_balance_rings.called)

    # Blocker 3: min-part-hours already at the requested value -> no-op.
    mock_path_exists.return_value = True
    mock_get_min_hours.return_value = 10
    swift_utils.update_rings(min_part_hours=10)
    self.assertTrue(mock_get_min_hours.called)
    self.assertFalse(mock_set_min_hours.called)
    self.assertFalse(mock_balance_rings.called)
    mock_get_min_hours.reset_mock()

    # Happy path: value differs -> it is updated and rings rebalanced.
    mock_path_exists.return_value = True
    mock_get_min_hours.return_value = 0
    swift_utils.update_rings(min_part_hours=10)
    self.assertTrue(mock_get_min_hours.called)
    self.assertTrue(mock_set_min_hours.called)
    self.assertTrue(mock_balance_rings.called)
def test_update_rings_multiple_devs(self, mock_is_leader_elected,
                                    mock_exists_in_ring, mock_add_to_ring,
                                    mock_balance_rings,
                                    mock_previously_synced):
    """Every device is checked against every ring; re-adding is a no-op."""
    # note that this test does not (and neither did its predecessor) test
    # the 'min_part_hours is non None' part of update_rings()
    devices = ['sdb', 'sdc']
    base_settings = {
        'object_port': 6000,
        'container_port': 6001,
        'account_port': 6002,
        'zone': 1,
        'ip': '1.2.3.4',
    }
    nodes = [dict(base_settings, device=dev) for dev in devices]

    mock_is_leader_elected.return_value = True
    mock_previously_synced.return_value = True
    mock_exists_in_ring.side_effect = lambda *args: False

    swift_utils.update_rings(nodes)

    # One existence check per (device, ring) pair, devices outermost,
    # rings in account/container/object order.
    expected = []
    for dev in devices:
        for ring in ('account', 'container', 'object'):
            builder = os.path.join(swift_utils.SWIFT_CONF_DIR,
                                   '%s.builder' % ring)
            expected.append(mock.call(builder,
                                      dict(base_settings, device=dev)))
    mock_exists_in_ring.assert_has_calls(expected)
    mock_balance_rings.assert_called_once_with()
    mock_add_to_ring.assert_called()

    # try re-adding, assert add_to_ring was not called
    mock_add_to_ring.reset_mock()
    mock_exists_in_ring.side_effect = lambda *args: True
    swift_utils.update_rings(nodes)
    mock_add_to_ring.assert_not_called()
def test_update_rings_multiple_devs(self, mock_is_elected_leader,
                                    mock_log, mock_balance_rings,
                                    mock_get_rings_checksum,
                                    mock_get_builders_checksum,
                                    mock_update_www_rings,
                                    mock_initialize_ring,
                                    mock_load_builder,
                                    mock_previously_synced):
    """All devices end up in every ring; re-adding is a no-op."""
    # To avoid the need for swift.common.ring library, mock a basic
    # rings dictionary, keyed by path.
    # Each ring has enough logic to hold a dictionary with a single 'devs'
    # key, which stores the list of passed dev(s) by add_dev().
    #
    # If swift (actual) ring representation diverges (see _load_builder),
    # this mock will need to be adapted.
    mock_rings = {}

    def mock_load_builder_fn(path):
        class mock_ring(object):
            def __init__(self, path):
                self.path = path

            def to_dict(self):
                return mock_rings[self.path]

            def add_dev(self, dev):
                mock_rings[self.path]['devs'].append(dev)

        return mock_ring(path)

    def mock_initialize_ring_fn(path, *args):
        mock_rings.setdefault(path, {'devs': []})

    mock_is_elected_leader.return_value = True
    mock_load_builder.side_effect = mock_load_builder_fn
    mock_initialize_ring.side_effect = mock_initialize_ring_fn

    init_ring_paths(tempfile.mkdtemp())
    devices = ['sdb', 'sdc']
    node_settings = {
        'object_port': 6000,
        'container_port': 6001,
        'account_port': 6002,
        'zone': 1,
        'ip': '1.2.3.4',
    }
    # values() (not the Python-2-only itervalues()) so this also runs
    # under Python 3.
    for path in swift_utils.SWIFT_RINGS.values():
        swift_utils.initialize_ring(path, 8, 3, 0)

    # verify all devices added to each ring
    nodes = []
    for dev in devices:
        node = {k: v for k, v in node_settings.items()}
        node['device'] = dev
        nodes.append(node)

    swift_utils.update_rings(nodes)
    for path in swift_utils.SWIFT_RINGS.values():
        devs = swift_utils._load_builder(path).to_dict()['devs']
        added_devices = [dev['device'] for dev in devs]
        self.assertEqual(devices, added_devices)

    # try re-adding, assert add_to_ring was not called
    with mock.patch('lib.swift_utils.add_to_ring') as mock_add_to_ring:
        swift_utils.update_rings(nodes)
        self.assertFalse(mock_add_to_ring.called)
def test_update_rings_multiple_devs(self, mock_is_leader_elected,
                                    mock_exists_in_ring, mock_add_to_ring,
                                    mock_balance_rings,
                                    mock_previously_synced):
    """Each device is checked against all three rings; re-adds are no-ops."""
    # note that this test does not (and neither did its predecessor) test
    # the 'min_part_hours is non None' part of update_rings()
    settings = {
        'object_port': 6000,
        'container_port': 6001,
        'account_port': 6002,
        'zone': 1,
        'ip': '1.2.3.4',
    }
    devices = ['sdb', 'sdc']
    nodes = []
    for device in devices:
        entry = settings.copy()
        entry['device'] = device
        nodes.append(entry)

    mock_is_leader_elected.return_value = True
    mock_previously_synced.return_value = True
    mock_exists_in_ring.side_effect = lambda *args: False

    swift_utils.update_rings(nodes)

    # Expected existence checks: devices outermost, rings in
    # account/container/object order for each device.
    rings = ['account.builder', 'container.builder', 'object.builder']
    expected = [
        mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR, ring),
                  dict(settings, device=device))
        for device in devices
        for ring in rings
    ]
    mock_exists_in_ring.assert_has_calls(expected)
    mock_balance_rings.assert_called_once_with()
    mock_add_to_ring.assert_called()

    # try re-adding, assert add_to_ring was not called
    mock_add_to_ring.reset_mock()
    mock_exists_in_ring.side_effect = lambda *args: True
    swift_utils.update_rings(nodes)
    mock_add_to_ring.assert_not_called()
def test_update_rings_multiple_devs(self, mock_is_elected_leader,
                                    mock_log, mock_balance_rings,
                                    mock_get_rings_checksum,
                                    mock_get_builders_checksum,
                                    mock_update_www_rings,
                                    mock_get_broker_token,
                                    mock_initialize_ring,
                                    mock_load_builder,
                                    ):
    """All devices end up in every ring; re-adding is a no-op."""
    # To avoid the need for swift.common.ring library, mock a basic
    # rings dictionary, keyed by path.
    # Each ring has enough logic to hold a dictionary with a single 'devs'
    # key, which stores the list of passed dev(s) by add_dev().
    #
    # If swift (actual) ring representation diverges (see _load_builder),
    # this mock will need to be adapted.
    mock_rings = {}

    def mock_load_builder_fn(path):
        class mock_ring(object):
            def __init__(self, path):
                self.path = path

            def to_dict(self):
                return mock_rings[self.path]

            def add_dev(self, dev):
                mock_rings[self.path]['devs'].append(dev)

        return mock_ring(path)

    def mock_initialize_ring_fn(path, *args):
        mock_rings.setdefault(path, {'devs': []})

    mock_load_builder.side_effect = mock_load_builder_fn
    mock_initialize_ring.side_effect = mock_initialize_ring_fn

    init_ring_paths(tempfile.mkdtemp())
    devices = ['sdb', 'sdc']
    node_settings = {
        'object_port': 6000,
        'container_port': 6001,
        'account_port': 6002,
        'zone': 1,
        'ip': '1.2.3.4',
    }
    # values() (not the Python-2-only itervalues()) so this also runs
    # under Python 3.
    for path in swift_utils.SWIFT_RINGS.values():
        swift_utils.initialize_ring(path, 8, 3, 0)

    # verify all devices added to each ring
    nodes = []
    for dev in devices:
        node = {k: v for k, v in node_settings.items()}
        node['device'] = dev
        nodes.append(node)

    swift_utils.update_rings(nodes)
    for path in swift_utils.SWIFT_RINGS.values():
        devs = swift_utils._load_builder(path).to_dict()['devs']
        added_devices = [dev['device'] for dev in devs]
        self.assertEqual(devices, added_devices)

    # try re-adding, assert add_to_ring was not called
    with mock.patch('lib.swift_utils.add_to_ring') as mock_add_to_ring:
        swift_utils.update_rings(nodes)
        self.assertFalse(mock_add_to_ring.called)