Example #1
def cluster_changed():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    rabbit.synchronize_service_credentials()

    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
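    # Only the peer with the higher unit number proceeds with cluster setup;
    # note the unit numbers are strings here, so the comparison is lexicographic.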
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_changed: Relation lesser.')
        return

    cookie = utils.relation_get('cookie')
    if cookie is None:
        utils.juju_log('INFO', 'cluster_changed: cookie not yet set.')
        return

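    # Every member of a RabbitMQ cluster must share the same Erlang cookie,
    # so adopt the peer's cookie if ours differs.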
    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
    else:
        utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
        rabbit.service('stop')
        with open(rabbit.COOKIE_PATH, 'wb') as out:
            out.write(cookie)
        rabbit.service('start')

    # cluster with other nodes
    rabbit.cluster_with()
def cluster_changed():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    rabbit.synchronize_service_credentials()

    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_changed: Relation lesser.')
        return

    cookie = utils.relation_get('cookie')
    if cookie is None:
        utils.juju_log('INFO',
                       'cluster_changed: cookie not yet set.')
        return

    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
    else:
        utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
        rabbit.service('stop')
        with open(rabbit.COOKIE_PATH, 'wb') as out:
            out.write(cookie)
        rabbit.service('start')

    # cluster with other nodes
    rabbit.cluster_with()
def cluster_changed():
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '\
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_changed: Relation lesser.')
        return

    remote_host = utils.relation_get('host')
    cookie = utils.relation_get('cookie')
    if None in [remote_host, cookie]:
        utils.juju_log('INFO',
                       'cluster_changed: remote_host|cookie not yet set.')
        return

    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
        return

    utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
    rabbit.service('stop')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie)
    rabbit.service('start')
    rabbit.cluster_with(remote_host)
def cluster_joined():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no > r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation greater.')
        return
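    # The lower-numbered unit publishes its Erlang cookie and hostname on the
    # relation so peers can adopt them in the corresponding -changed hook.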
    rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
    if not os.path.isfile(rabbit.COOKIE_PATH):
        utils.juju_log('ERROR', 'erlang cookie missing from %s' %
                       rabbit.COOKIE_PATH)
        return
    cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()

    # add parent host to the relation
    local_hostname = subprocess.check_output(['hostname']).strip()
    utils.relation_set(cookie=cookie, host=local_hostname)
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        sys.exit(0)

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth)

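    # Only the elected leader sets up and mounts the shared RBD device; other
    # units stop rabbitmq-server, since only the active node may use the storage.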
    if cluster.eligible_leader('res_rabbitmq_vip'):
        rbd_img = utils.config_get('rbd-name')
        rbd_size = utils.config_get('rbd-size')
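        # rbd-size is a string such as '5G'; strip the suffix and convert to MB.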
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'])
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO', '*ha* relation exists. Triggering ha_joined()')
        ha_joined()
    else:
        utils.juju_log('INFO', '*ha* relation does not exist.')
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Example #6
def cluster_joined():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no > r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation greater.')
        return
    rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
    if not os.path.isfile(rabbit.COOKIE_PATH):
        utils.juju_log('ERROR',
                       'erlang cookie missing from %s' % rabbit.COOKIE_PATH)
        return
    cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()

    # add parent host to the relation
    local_hostname = subprocess.check_output(['hostname']).strip()
    utils.relation_set(cookie=cookie, host=local_hostname)
Example #7
def ha_relation_joined():
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')

    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)

    # Starting configuring resources.
    init_services = {'res_mysqld': 'mysql'}

    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       '*ceph* relation does not exist. '
                       'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')

        block_storage = 'ceph'

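        # Pacemaker resources for the hacluster charm to manage: the RBD map,
        # its filesystem, the VIP and the mysql daemon itself.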
        resources = {
            'res_mysql_rbd': 'ocf:ceph:rbd',
            'res_mysql_fs': 'ocf:heartbeat:Filesystem',
            'res_mysql_vip': 'ocf:heartbeat:IPaddr2',
            'res_mysqld': 'upstart:mysql'}

        rbd_name = utils.config_get('rbd-name')
        resource_params = {
            'res_mysql_rbd': 'params name="%s" pool="%s" user="%s" '
                             'secret="%s"' %
                             (rbd_name, POOL_NAME,
                              SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
            'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                            'fstype="ext4" op start start-delay="10s"' %
                            (POOL_NAME, rbd_name, DATA_SRC_DST),
            'res_mysql_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                             (vip, vip_cidr, vip_iface),
            'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"'}

        groups = {
            'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld'}

        for rel_id in utils.relation_ids('ha'):
            utils.relation_set(rid=rel_id,
                               block_storage=block_storage,
                               corosync_bindiface=corosync_bindiface,
                               corosync_mcastport=corosync_mcastport,
                               resources=resources,
                               resource_params=resource_params,
                               init_services=init_services,
                               groups=groups)
Example #8
def ha_relation_joined():
    vip = utils.config_get("vip")
    vip_iface = utils.config_get("vip_iface")
    vip_cidr = utils.config_get("vip_cidr")
    corosync_bindiface = utils.config_get("ha-bindiface")
    corosync_mcastport = utils.config_get("ha-mcastport")

    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log("WARNING", "Insufficient VIP information to configure cluster")
        sys.exit(1)

    # Starting configuring resources.
    init_services = {"res_mysqld": "mysql"}

    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made("ceph", "auth"):
        utils.juju_log("INFO", "*ceph* relation does not exist. " "Not sending *ha* relation data yet")
        return
    else:
        utils.juju_log("INFO", "*ceph* relation exists. Sending *ha* relation data")

        block_storage = "ceph"

        resources = {
            "res_mysql_rbd": "ocf:ceph:rbd",
            "res_mysql_fs": "ocf:heartbeat:Filesystem",
            "res_mysql_vip": "ocf:heartbeat:IPaddr2",
            "res_mysqld": "upstart:mysql",
        }

        rbd_name = utils.config_get("rbd-name")
        resource_params = {
            "res_mysql_rbd": 'params name="%s" pool="%s" user="******" '
            'secret="%s"' % (rbd_name, POOL_NAME, SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
            "res_mysql_fs": 'params device="/dev/rbd/%s/%s" directory="%s" '
            'fstype="ext4" op start start-delay="10s"' % (POOL_NAME, rbd_name, DATA_SRC_DST),
            "res_mysql_vip": 'params ip="%s" cidr_netmask="%s" nic="%s"' % (vip, vip_cidr, vip_iface),
            "res_mysqld": 'op start start-delay="5s" op monitor interval="5s"',
        }

        groups = {"grp_mysql": "res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld"}

        for rel_id in utils.relation_ids("ha"):
            utils.relation_set(
                rid=rel_id,
                block_storage=block_storage,
                corosync_bindiface=corosync_bindiface,
                corosync_mcastport=corosync_mcastport,
                resources=resources,
                resource_params=resource_params,
                init_services=init_services,
                groups=groups,
            )
def cluster_departed():
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_departed: Relation lesser.')
        return
    rabbit.break_cluster()
Example #10
def cluster_departed():
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_departed: Relation lesser.')
        return
    rabbit.break_cluster()
def amqp_changed(relation_id=None, remote_unit=None):
    if not cluster.eligible_leader('res_rabbitmq_vip'):
        msg = 'amqp_changed(): Deferring amqp_changed to eligible_leader.'
        utils.juju_log('INFO', msg)
        return

    relation_settings = {}
    settings = hookenv.relation_get(rid=relation_id, unit=remote_unit)

    singleset = set([
        'username',
        'vhost'
        ])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            utils.juju_log('INFO', 'amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(username=settings['username'],
                                                       vhost=settings['vhost'])
    else:
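        # Settings may arrive prefixed per client ('<prefix>_username', ...);
        # regroup them by prefix and create credentials for each complete set.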
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        relation_settings = {}
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings['_'.join([amqp, 'password'])] = configure_amqp(queues[amqp]['username'],
                                                                                 queues[amqp]['vhost'])

    relation_settings['hostname'] = utils.unit_get('private-address')

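    # Advertise clustering state to clients; when hacluster manages the VIP,
    # that is the address clients should connect to.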
    if cluster.is_clustered():
        relation_settings['clustered'] = 'true'
        if utils.is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = utils.config_get('vip')

    if relation_id:
        relation_settings['rid'] = relation_id
    utils.relation_set(**relation_settings)

    # sync new creds to all peers
    rabbit.synchronize_service_credentials()
Example #12
def amqp_changed(relation_id=None, remote_unit=None):
    if not cluster.eligible_leader('res_rabbitmq_vip'):
        msg = 'amqp_changed(): Deferring amqp_changed to eligible_leader.'
        utils.juju_log('INFO', msg)
        return

    relation_settings = {}
    settings = hookenv.relation_get(rid=relation_id, unit=remote_unit)

    singleset = set(['username', 'vhost'])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            utils.juju_log('INFO', 'amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(
            username=settings['username'], vhost=settings['vhost'])
    else:
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        relation_settings = {}
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings['_'.join([amqp,
                                            'password'])] = configure_amqp(
                                                queues[amqp]['username'],
                                                queues[amqp]['vhost'])

    relation_settings['hostname'] = utils.unit_get('private-address')

    if cluster.is_clustered():
        relation_settings['clustered'] = 'true'
        if utils.is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = utils.config_get('vip')

    if relation_id:
        relation_settings['rid'] = relation_id
    utils.relation_set(**relation_settings)

    # sync new creds to all peers
    rabbit.synchronize_service_credentials()
Example #13
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME,
                   key=key,
                   auth=auth,
                   use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
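        # block-size is configured in GB; convert to the MB value that
        # ensure_ceph_storage expects.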
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME,
                                 pool=POOL_NAME,
                                 rbd_img=rbd_img,
                                 sizemb=sizemb,
                                 fstype='ext4',
                                 mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Stopping MySQL
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', '*ha* relation exists. Making sure the ha'
            ' relation data is sent.')
        ha_relation_joined()
        return

    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
def cluster_joined():
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '\
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no > r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation greater.')
        return
    rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
    if not os.path.isfile(rabbit.COOKIE_PATH):
        utils.juju_log('ERROR', 'erlang cookie missing from %s' %
                       rabbit.COOKIE_PATH)
        return
    cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
    local_hostname = subprocess.check_output(['hostname']).strip()
    utils.relation_set(cookie=cookie, host=local_hostname)
Example #15
def ceph_changed():
    utils.juju_log("INFO", "Start Ceph Relation Changed")
    auth = utils.relation_get("auth")
    key = utils.relation_get("key")
    use_syslog = utils.relation_get("use_syslog")
    if None in [auth, key]:
        utils.juju_log("INFO", "Missing key or auth in relation")
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth, use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        sizemb = int(utils.config_get("block-size")) * 1024
        rbd_img = utils.config_get("rbd-name")
        blk_device = "/dev/rbd/%s/%s" % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get("ceph-osd-replication-count")
        ceph.ensure_ceph_storage(
            service=SERVICE_NAME,
            pool=POOL_NAME,
            rbd_img=rbd_img,
            sizemb=sizemb,
            fstype="ext4",
            mount_point=DATA_SRC_DST,
            blk_device=blk_device,
            system_services=["mysql"],
            rbd_pool_replicas=rbd_pool_rep_count,
        )
    else:
        utils.juju_log("INFO", "This is not the peer leader. Not configuring RBD.")
        # Stopping MySQL
        if utils.running("mysql"):
            utils.juju_log("INFO", "Stopping MySQL...")
            utils.stop("mysql")

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made("ha"):
        utils.juju_log("INFO", "*ha* relation exists. Making sure the ha" " relation data is sent.")
        ha_relation_joined()
        return

    utils.juju_log("INFO", "Finish Ceph Relation Changed")
Example #16
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Stopping MySQL
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       '*ha* relation exists. Making sure the ha'
                       ' relation data is sent.')
        ha_relation_joined()
        return

    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Example #17
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        sys.exit(0)

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth)

    if cluster.eligible_leader('res_rabbitmq_vip'):
        rbd_img = utils.config_get('rbd-name')
        rbd_size = utils.config_get('rbd-size')
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME,
                                 pool=POOL_NAME,
                                 rbd_img=rbd_img,
                                 sizemb=sizemb,
                                 fstype='ext4',
                                 mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO', '*ha* relation exists. Triggering ha_joined()')
        ha_joined()
    else:
        utils.juju_log('INFO', '*ha* relation does not exist.')
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Example #18
def ha_relation_joined():
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')

    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)

    # Starting configuring resources.
    init_services = {
        'res_mysqld': 'mysql',
    }

    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log(
            'INFO', '*ceph* relation does not exist. '
            'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')

        block_storage = 'ceph'

        resources = {
            'res_mysql_rbd': 'ocf:ceph:rbd',
            'res_mysql_fs': 'ocf:heartbeat:Filesystem',
            'res_mysql_vip': 'ocf:heartbeat:IPaddr2',
            'res_mysqld': 'upstart:mysql',
        }

        rbd_name = utils.config_get('rbd-name')
        resource_params = {
            'res_mysql_rbd': 'params name="%s" pool="%s" user="%s" '
                             'secret="%s"' % \
                             (rbd_name, POOL_NAME,
                              SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
            'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                            'fstype="ext4" op start start-delay="10s"' % \
                            (POOL_NAME, rbd_name, DATA_SRC_DST),
            'res_mysql_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                             (vip, vip_cidr, vip_iface),
            'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"',
            }

        groups = {
            'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld',
        }

        for rel_id in utils.relation_ids('ha'):
            utils.relation_set(rid=rel_id,
                               block_storage=block_storage,
                               corosync_bindiface=corosync_bindiface,
                               corosync_mcastport=corosync_mcastport,
                               resources=resources,
                               resource_params=resource_params,
                               init_services=init_services,
                               groups=groups)
Example #19
def ha_joined():
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')

    if None in [
            corosync_bindiface, corosync_mcastport, vip, vip_iface, vip_cidr,
            rbd_name
    ]:
        utils.juju_log(
            'ERROR', 'Insufficient configuration data to '
            'configure hacluster.')
        sys.exit(1)

    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO', 'ha_joined: No ceph relation yet, deferring.')
        return

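    # Use a host-independent node name so the Mnesia data on shared storage
    # stays valid on whichever machine the resource group fails over to.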
    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)

    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport

    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }

    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd':
        'params name="%s" pool="%s" user="%s" '
        'secret="%s"' %
        (rbd_name, POOL_NAME, SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs':
        'params device="/dev/rbd/%s/%s" directory="%s" '
        'fstype="ext4" op start start-delay="10s"' %
        (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip':
        'params ip="%s" cidr_netmask="%s" nic="%s"' %
        (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server':
        'op start start-delay="5s" '
        'op monitor interval="5s"',
    }

    relation_settings['groups'] = {
        'grp_rabbitmq':
        'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
        'res_rabbitmq-server',
    }

    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)

    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)
def ha_joined():
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')

    if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
                vip_cidr, rbd_name]:
        utils.juju_log('ERROR', 'Insufficient configuration data to '
                       'configure hacluster.')
        sys.exit(1)

    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       'ha_joined: No ceph relation yet, deferring.')
        return

    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)

    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport

    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }

    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
                            'secret="%s"' %
                            (rbd_name, POOL_NAME,
                             SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                           'fstype="ext4" op start start-delay="10s"' %
                           (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                            (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server': 'op start start-delay="5s" '
                               'op monitor interval="5s"',
    }

    relation_settings['groups'] = {
        'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
                        'res_rabbitmq-server',
    }

    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)

    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)