def scale_osds(count):
    print "Up-scaling Ceph cluster (OSDs)"
    start_time = time.time()

    # Connect to the ComodIT API
    client = Client(config.endpoint, config.username, config.password)
    env = client.get_environment(config.organization, 'Cluster')

    latest_id = get_latest_id('Object Store ', env)
    if latest_id < 0:
        raise Exception("No OSD found")

    conf_app = [{"name": "Ceph Configuration", "settings": {}}]

    # Provision one new host per additional OSD
    osd_hosts = []
    for i in xrange(latest_id + 1, latest_id + count + 1):
        osd = create_host(env, 'Object Store ' + str(i), config.platform, config.distribution, conf_app)
        print "Deploying Object Store " + str(i)
        osd.provision()
        osd_hosts.append(osd)

    print "Waiting for all hosts to be deployed..."
    for h in osd_hosts:
        h.wait_for_state(Host.State.READY, config.time_out)

    # Collect the IP address and short hostname of each new host
    osd_ips = []
    osd_names = []
    for h in osd_hosts:
        osd_ips.append(h.get_instance().wait_for_property("ip.eth0", config.time_out))
        osd_names.append(get_short_hostname(h.get_instance().wait_for_property("hostname", config.time_out)))

    for i in xrange(0, len(osd_ips)):
        print "OSD %i has IP %s and hostname %s" % (latest_id + i + 1, osd_ips[i], osd_names[i])

    print "Configure cluster..."
    # Register the new OSDs in the environment-level "osds" setting
    next_id = latest_id + 1
    osds = env.get_setting("osds").value
    for name in osd_names:
        osds.append({"id": str(next_id), "host": name})
        next_id += 1
    env.settings().update("osds", osds)
    time.sleep(3)

    print "Installing OSD(s)..."
    i = 0
    next_id = latest_id + 1
    for h in osd_hosts:
        h.install("Ceph Object Store", {"osd_id": str(next_id), "osd_hostname": osd_names[i]})
        next_id += 1
        i += 1
    time.sleep(3)

    for h in osd_hosts:
        h.wait_for_pending_changes()

    total_time = time.time() - start_time
    print "Up-scaling time: " + str(total_time)
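scale_osds() relies on a few helpers defined elsewhere in the script, such as get_latest_id(), get_short_hostname() and create_host(). As a rough sketch of what the first two could look like (env.get_hosts() and the host's name attribute are assumptions about the ComodIT client, not confirmed API):

def get_short_hostname(hostname):
    # "objectstore0.example.com" -> "objectstore0"
    return hostname.split('.')[0]

def get_latest_id(prefix, env):
    # Highest numeric suffix among hosts named "<prefix><n>", or -1 if none.
    # NOTE: env.get_hosts() and h.name are assumed here; adapt to the
    # actual ComodIT client calls used in the full script.
    latest = -1
    for h in env.get_hosts():
        if h.name.startswith(prefix):
            try:
                latest = max(latest, int(h.name[len(prefix):]))
            except ValueError:
                pass
    return latest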
def downscale_osds(count):
    print "Down-scaling Ceph cluster (OSDs)"
    start_time = time.time()

    # Connect to the ComodIT API
    client = Client(config.endpoint, config.username, config.password)
    env = client.get_environment(config.organization, 'Cluster')

    latest_id = get_latest_id('Object Store ', env)
    if latest_id < 0:
        raise Exception("No OSD found")
    if latest_id - count + 1 <= 1:
        raise Exception("Cannot down-scale to less than 2 OSDs")

    # Mark the targeted OSDs as 'out' so Ceph rebalances data off them
    osd_hosts = []
    for i in xrange(latest_id - count + 1, latest_id + 1):
        osd = env.get_host('Object Store ' + str(i))
        print "Bringing Object Store %i out of cluster..." % i
        osd.settings().create("status", "out")
        osd_hosts.append(osd)

    for h in osd_hosts:
        h.wait_for_pending_changes()

    print "Configure cluster..."
    # Drop the decommissioned OSDs from the "osds" setting; rebuild the
    # list rather than deleting entries while iterating over it
    osds = env.get_setting("osds").value
    osds = [osd for osd in osds
            if not (latest_id - count + 1 <= int(osd["id"]) <= latest_id)]
    env.settings().update("osds", osds)
    time.sleep(3)

    print "Deleting OSD(s)..."
    for h in osd_hosts:
        h.get_instance().delete()
        h.delete()

    total_time = time.time() - start_time
    print "Down-scaling time: " + str(total_time)
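Both scale_osds() and downscale_osds() manipulate the same environment-wide "osds" setting, a JSON list with one entry per OSD that is presumably consumed by the "Ceph Configuration" application when rendering the cluster's configuration. With illustrative values (hostnames are made up), it looks like this:

# Example content of the "osds" environment setting for a 3-OSD cluster
osds = [
    {"id": "0", "host": "objectstore0"},
    {"id": "1", "host": "objectstore1"},
    {"id": "2", "host": "objectstore2"},
]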
def downscale_mons(count):
    print "Down-scaling Ceph cluster (monitors)"
    start_time = time.time()

    # Connect to the ComodIT API
    client = Client(config.endpoint, config.username, config.password)
    env = client.get_environment(config.organization, 'Cluster')

    latest_id = get_latest_id('Monitor ', env)
    if latest_id < 0:
        raise Exception("No monitor found")
    if latest_id - count + 1 <= 2:
        raise Exception("Cannot down-scale to less than 3 monitors")

    # Mark the targeted monitors as 'down' before removing them
    mon_hosts = []
    for i in xrange(latest_id - count + 1, latest_id + 1):
        mon = env.get_host('Monitor ' + str(i))
        print "Bringing Monitor %i out of cluster..." % i
        mon.settings().create("status", "down")
        mon_hosts.append(mon)

    for h in mon_hosts:
        h.wait_for_pending_changes()

    print "Configure cluster..."
    # Drop the decommissioned monitors from the "monitors" setting;
    # rebuild the list rather than deleting entries while iterating
    monitors = env.get_setting("monitors").value
    monitors = [mon for mon in monitors
                if not (latest_id - count + 1 <= int(mon["id"]) <= latest_id)]
    env.settings().update("monitors", monitors)
    time.sleep(3)

    print "Deleting monitor(s)..."
    for h in mon_hosts:
        h.get_instance().delete()
        h.delete()

    total_time = time.time() - start_time
    print "Down-scaling time: " + str(total_time)
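The "monitors" setting mirrors the "osds" one but additionally carries each monitor's address, which is why scale_mons() below records an addr for every new host. Again with illustrative values:

# Example content of the "monitors" environment setting (made-up IPs)
monitors = [
    {"id": "0", "host": "monitor0", "addr": "10.0.0.10:6789"},
    {"id": "1", "host": "monitor1", "addr": "10.0.0.11:6789"},
    {"id": "2", "host": "monitor2", "addr": "10.0.0.12:6789"},
]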
def scale_mons(count):
    print "Up-scaling Ceph cluster (monitors)"
    start_time = time.time()

    # Connect to the ComodIT API
    client = Client(config.endpoint, config.username, config.password)
    env = client.get_environment(config.organization, "Cluster")

    latest_id = get_latest_id("Monitor ", env)
    if latest_id < 0:
        raise Exception("No monitor found")

    conf_app = [{"name": "Ceph Configuration", "settings": {}}]

    # Provision one new host per additional monitor
    mon_hosts = []
    for i in xrange(latest_id + 1, latest_id + count + 1):
        mon = create_host(env, "Monitor " + str(i), config.platform, config.distribution, conf_app)
        print "Deploying Monitor " + str(i)
        mon.provision()
        mon_hosts.append(mon)

    print "Waiting for all hosts to be deployed..."
    for h in mon_hosts:
        h.wait_for_state(Host.State.READY, config.time_out)

    # Collect IP address, short hostname and monitor address of each host
    mon_ips = []
    mon_names = []
    mon_addrs = []
    for h in mon_hosts:
        ip = h.get_instance().wait_for_property("ip.eth0", config.time_out)
        mon_ips.append(ip)
        mon_names.append(get_short_hostname(h.get_instance().wait_for_property("hostname", config.time_out)))
        mon_addrs.append(ip + ":6789")  # 6789 is Ceph's default monitor port

    for i in xrange(0, len(mon_addrs)):
        print "Monitor %i has address %s and hostname %s" % (latest_id + i + 1, mon_addrs[i], mon_names[i])

    print "Configure cluster..."
    # Register the new monitors in the environment-level "monitors" setting
    next_id = latest_id + 1
    monitors = env.get_setting("monitors").value
    for i in xrange(0, len(mon_names)):
        monitors.append({"id": str(next_id), "host": mon_names[i], "addr": mon_addrs[i]})
        next_id += 1
    env.settings().update("monitors", monitors)
    time.sleep(3)

    print "Installing monitor(s)..."
    next_id = latest_id + 1
    for h in mon_hosts:
        h.install("Ceph Monitor", {"mon_id": str(next_id)})
        next_id += 1
    time.sleep(3)

    for h in mon_hosts:
        h.wait_for_pending_changes()

    total_time = time.time() - start_time
    print "Up-scaling time: " + str(total_time)

    # Monitors reach quorum by majority vote, so an even count risks ties
    if (latest_id + count + 1) % 2 == 0:
        print
        print "WARNING: you do not have an odd number of monitors (-> potential quorum problems)"
        print
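The four functions are not wired to a command line above; a minimal entry point could look like the following sketch (the argument handling is hypothetical, not part of the original script):

if __name__ == "__main__":
    import sys

    # Hypothetical usage:
    #   python scale.py osd up 2    -> add two OSDs
    #   python scale.py mon down 1  -> remove one monitor
    kind, direction, n = sys.argv[1], sys.argv[2], int(sys.argv[3])
    actions = {
        ("osd", "up"): scale_osds,
        ("osd", "down"): downscale_osds,
        ("mon", "up"): scale_mons,
        ("mon", "down"): downscale_mons,
    }
    actions[(kind, direction)](n)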