if down:
    log('down: find a new master!', 'info')
    try:
        # ask our own Redis instance who its master was
        master = r.info()['master_host']
        log("master: {0}".format(master), 'info')
        if cluster.exists(master):
            # the old master is still registered in the cluster: inherit its master
            grandmaster = cluster.get_master(master)
            log("{0} = cluster.get_master({1})".format(grandmaster, master), 'info')
            # and make sure the master doesn't participate anymore
            cluster.incarcerate_node(master)
            log("cluster.incarcerate_node({0})".format(master), 'info')
        else:
            grandmaster = cluster.get_master(node)
            log("{0} = cluster.get_master({1})".format(grandmaster, node), 'info')
    except Exception:
        # no 'master_host' in the INFO output: this node was never a slave
        log('we never were a slave', 'info')
        grandmaster = None
    if grandmaster is None:
        # there is no master left to follow: promote ourselves
        r53_zone.update_record(cluster.name(), endpoint)
        log("r53_zone.update_record({0}, {1})".format(cluster.name(), endpoint), 'info')
        host.set_master()
        log("host.set_master()", 'info')
    else:
        # follow the master of our (former) master
        host.set_master(grandmaster)
        log("host.set_master({0})".format(grandmaster), 'info')
else:
    log("master is up (and running)", 'info')
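The down flag and the Redis connection r used above are set up elsewhere in the script. As a hedged illustration only, a check along the following lines could produce that flag with redis-py (the client that provides the r.info() call above); the function name master_is_down and its parameters are illustrative and not part of the original code.

    import redis

    def master_is_down(redis_host, redis_port=6379):
        # connect to the local Redis instance and inspect its replication info
        r = redis.StrictRedis(host=redis_host, port=redis_port)
        info = r.info()
        # a slave reports 'master_link_status'; 'down' means the replication
        # link to its master is broken
        return info.get('role') == 'slave' and info.get('master_link_status') == 'down'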
def log(message, logging='info'):
    events.log(node, component, message, logging)

if __name__ == '__main__':
    log('joining the cluster', 'info')

    log('adding the node to the cluster', 'info')
    # now we are ready to be (added to) the cluster
    cluster.add_node(node, endpoint)

    log('creating a Route53 record', 'info')
    r53_zone.create_record(node, endpoint)

    log('setting the tag', 'info')
    ec2.set_tag(node)

    log('getting the master of the node', 'info')
    master = cluster.get_master(node)

    # if we don't have a master, we ARE the master
    if master is None:
        log('setting the main Route53 record for the cluster', 'info')
        r53_zone.update_record(cluster.name(), endpoint)
        # and make sure we 'run' correctly (no-slave, well-monitored)
        log('set the host to run as master', 'info')
        host.set_master()
    else:
        # attach to the master (and start watching its availability)
        log('set the host to run as slave of {0}'.format(master), 'info')
        host.set_master(master)

    log('joined the cluster', 'info')