def storage_changed():
    """Handle a change on the swift-storage relation.

    Reads the storage node's connection details from relation data,
    registers each of its devices in every swift ring, and triggers a
    ring rebalance when enough nodes/zones are present.

    Returns None early (without side effects beyond a log message) when
    the relation data is not yet complete.
    """
    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': openstack.get_host_ip(relation_get('private-address')),
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }
    if None in node_settings.itervalues():
        log('storage_changed: Relation not ready.')
        return None

    # Ports and zone arrive as strings over the relation; the ring code
    # needs ints.
    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # allow for multiple devs per unit, passed along as a : separated list
    device = relation_get('device')
    if device is None:
        # BUGFIX: 'device' may be unset early in the relation lifecycle;
        # previously this called None.split(':') and raised AttributeError.
        # Treat it like any other missing relation key.
        log('storage_changed: Relation not ready.')
        return None

    for dev in device.split(':'):
        node_settings['device'] = dev
        for ring in SWIFT_RINGS.itervalues():
            if not exists_in_ring(ring, node_settings):
                add_to_ring(ring, node_settings)

    if should_balance(list(SWIFT_RINGS.itervalues())):
        balance_rings()
def balance_rings():
    """Rebalance every swift ring and, if anything changed, publish the
    new ring files and notify storage peers.

    When no ring actually changed this is a no-op. Otherwise the fresh
    ``*.ring.gz`` files are copied into the webroot so storage nodes can
    fetch them over HTTP; if this unit is the (eligible) leader it also
    broadcasts the rings URL on every swift-storage relation, and the
    proxy service is restarted to pick up the new rings.
    """
    rebalanced = False
    for ring_path in SWIFT_RINGS.itervalues():
        if balance_ring(ring_path):
            log('Balanced ring %s' % ring_path)
            rebalanced = True

    if not rebalanced:
        # Nothing changed; skip publication and restart entirely.
        return

    # Expose the freshly balanced ring files via the webroot.
    for name in SWIFT_RINGS:
        fname = '%s.ring.gz' % name
        src = os.path.join(SWIFT_CONF_DIR, fname)
        dst = os.path.join(WWW_DIR, fname)
        shutil.copyfile(src, dst)

    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        trigger = uuid.uuid4()
        # Advertise the VIP when clustered, otherwise this unit's address.
        hostname = (config('vip') if cluster.is_clustered()
                    else unit_get('private-address'))
        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)

    service_restart('swift-proxy')