def main(reactor, configFile):
    """Install the Volume Hub catalog agents across the cluster.

    Kicks off the hosted get-volumehub install script on the control node
    and on every agent node concurrently, then waits for every SSH command
    to finish.  Twisted generator: ``yield``s on a gathered Deferred.
    """
    configurator = Configurator(configFile=configFile)
    control_ip = configurator.config["control_node"]
    # The install script authenticates via the TOKEN environment variable;
    # the TARGET variable prepended below selects the role to install.
    install_command = (
        'TOKEN="%s" '
        """sh -c 'curl -H "cache-control: max-age=0" -ssL https://get-volumehub.clusterhq.com/ |sh'"""
        % (environ["TOKEN"], ))
    pending = [
        configurator.runSSHAsync(
            control_ip, "TARGET=control-service " + install_command)
    ]
    for index, node in enumerate(configurator.config["agent_nodes"]):
        # Only the first agent node gets RUN_FLOCKER_AGENT_HERE=1 --
        # presumably so exactly one node runs the flocker agent; confirm
        # against the get-volumehub install script.
        flag = "RUN_FLOCKER_AGENT_HERE=1 " if index == 0 else ""
        pending.append(
            configurator.runSSHAsync(
                node["public"],
                flag + "TARGET=agent-node " + install_command))
    log("Installing volume hub catalog agents...")
    yield gatherResults(pending)
    log("Done!")
def __init__(self, img_path):
    """Store the image path and derive the matching .cfg output path."""
    self._img_path = img_path
    self._img = None
    # "<dir>/name.ext" -> "name" -> "<cfg_path>name.cfg"
    base_name = img_path.split('/')[-1].split('.')[0]
    self._cfg_path = cfg_path + base_name + ".cfg"
    self._block_data = []  # collected BlockData elements
    self._configurator = Configurator()
def main(reactor, *args): c = Configurator(configFile=sys.argv[1]) # Run flocker-diagnostics deferreds = [] log("Running Flocker-diagnostics on agent nodes.") for node in c.config["agent_nodes"]: d = c.runSSHAsync(node["public"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") d.addCallback(report_completion, public_ip=node["public"], message=" * Ran diagnostics on agent node.") deferreds.append(d) d = c.runSSHAsync(c.config["control_node"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Ran diagnostics on control node.") deferreds.append(d) yield gatherResults(deferreds) # Let flocker diagnostics run time.sleep(5) # Gather flocker-diagnostics deferreds = [] log("Gathering Flocker-diagnostics on agent nodes.") for node in c.config["agent_nodes"]: d = c.scp("./", node["public"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) d.addCallback(report_completion, public_ip=node["public"], message=" * Gathering diagnostics on agent node.") deferreds.append(d) d = c.scp("./", c.config["control_node"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Gathering diagnostics on control node.") deferreds.append(d) yield gatherResults(deferreds)
def main(): c = Configurator(configFile=sys.argv[1]) if not c.config["users"]: print "no users!" else: node_mapping = yaml.load(open("node_mapping.yml")) prefix = ("curl -s --cacert $PWD/cluster.crt --cert $PWD/%(user)s.crt " "--key $PWD/%(user)s.key" % dict(user=c.config["users"][0], )) url = "https://%(control_node)s:4523/v1" % dict( control_node=c.config["control_node"], ) header = ' --header "Content-type: application/json"' print "\nThis should create a volume on a node:" print "NODE_UUID=" + node_mapping.values()[0] print prefix + header + """ -XPOST -d '{"primary": "'${NODE_UUID}'", "maximum_size": 107374182400, "metadata": {"name": "mongodb_data"}}' """, print url + "/configuration/datasets| jq ." print "\nThen record the dataset_id (you'll need it later)..." print "DATASET_ID=..." print "\nWait for your dataset to show up..." print prefix + " " + url + "/state/datasets | jq ." print "\nThen create a container with the volume" print prefix + header + """ -XPOST -d '{"node_uuid": "'${NODE_UUID}'", "name": "mongodb", """, print """"image": "clusterhq/mongodb:latest", "ports": [{"internal": 27017, "external": 27017}], """, print """"volumes": [{"dataset_id": "'${DATASET_ID}'", "mountpoint": "/data/db"}]}' """ + url + "/configuration/containers | jq ." print "\nThen wait for the container to show up..." print prefix + " " + url + "/state/containers | jq ." print "\nNow move the container to another machine, and the dataset will follow!" print "NODE_UUID_2=" + node_mapping.values()[1] print prefix + header + """ -XPOST -d '{"node_uuid": "'${NODE_UUID_2}'"}' """ + url + "/configuration/containers/mongodb | jq ."
class BlockExtractor(object):
    """Extract red "block" regions from a template image.

    Thresholds the image in HSV space for red hues, then records the
    bounding box of each resulting contour as a BlockData element.
    """

    def __init__(self, img_path):
        self._img_path = img_path
        self._img = None
        # "<dir>/name.ext" -> "<cfg_path>name.cfg"
        self._cfg_path = cfg_path + img_path.split('/')[-1].split(
            '.')[0] + ".cfg"
        self._block_data = list()  # list of BlockData elements
        self._configurator = Configurator()

    def _get_image(self):
        """Open input image and set format to HSV."""
        self._img = cv.imread(self._img_path)
        ASSERT(self._img.size != 0,
               "Image not loaded. Path: %s" % self._img_path)
        hsv_img = cv.cvtColor(self._img, cv.COLOR_BGR2HSV)
        return hsv_img

    def _white_blocks(self, hsv_image):
        """Get data blocks from input image.

        Returns a black image with white contours representing the blocks.
        """
        # Red wraps around the HSV hue axis, hence two ranges combined.
        lower_mask = cv.inRange(hsv_image, BlockData.lower_red[0],
                                BlockData.lower_red[1])
        upper_mask = cv.inRange(hsv_image, BlockData.upper_red[0],
                                BlockData.upper_red[1])
        bw_img = lower_mask + upper_mask
        return bw_img

    def _get_wb(self, bw_img):
        """Identify white blocks and save their coordinates."""
        # NOTE: three return values is the OpenCV 3.x findContours API.
        _, contours, _ = cv.findContours(bw_img, cv.RETR_EXTERNAL,
                                         cv.CHAIN_APPROX_NONE)
        ASSERT(len(contours) != 0, "No contours found. Check template image.")
        for contour in contours:
            x, y, w, h = cv.boundingRect(contour)
            self._block_data.append(BlockData(x, y, w, h))

    def get_blocks(self):
        """Functionality wrapper that returns a list of BlockData objects."""
        hsv_img = self._get_image()
        bw_img = self._white_blocks(hsv_img)
        self._get_wb(bw_img)
        return self._block_data

    def get_cfg(self):
        """Write the configurator's key:value pairs to the .cfg file.

        Fix: use a plain loop instead of a side-effecting list
        comprehension, and close the file even if a write fails.
        """
        out_cfg = OPEN(self._cfg_path, "w")
        try:
            for key, value in self._configurator.get_dict().items():
                out_cfg.write(key + ":" + value + "\n")
        finally:
            out_cfg.close()
def main(): c = Configurator(configFile=sys.argv[1]) for node in c.config["agent_nodes"]: public_ip = node["public"] if c.config["os"] == "ubuntu": c.runSSH(public_ip, """apt-get -y install apt-transport-https software-properties-common add-apt-repository -y ppa:james-page/docker add-apt-repository -y 'deb https://clusterhq-archive.s3.amazonaws.com/ubuntu-testing/14.04/$(ARCH) /' apt-get update apt-get -y --force-yes install clusterhq-flocker-node """) elif c.config["os"] == "centos": c.runSSH(public_ip, """if selinuxenabled; then setenforce 0; fi test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config yum install -y https://s3.amazonaws.com/clusterhq-archive/centos/clusterhq-release$(rpm -E %dist).noarch.rpm yum install -y clusterhq-flocker-node """) print "Installed clusterhq-flocker-node on all nodes" print "To configure and deploy the cluster:" print "flocker-config cluster.yml"
def main(reactor, configFile):
    """Install the Volume Hub catalog agents on every node of the cluster.

    Runs the hosted get-volumehub install script on the control node and
    on each agent node concurrently, then waits for all installs to
    finish.  Twisted generator: ``yield``s on a gathered Deferred.
    """
    c = Configurator(configFile=configFile)
    control_ip = c.config["control_node"]
    # The install script authenticates via the TOKEN environment variable;
    # the TARGET variable prepended below selects the role to install.
    install_command = ('TOKEN="%s" '
                       """sh -c 'curl -H "cache-control: max-age=0" -ssL https://get-volumehub.clusterhq.com/ |sh'"""
                       % (environ["TOKEN"],))
    deferreds = [c.runSSHAsync(control_ip,
                               "TARGET=control-service " + install_command)]
    first = True
    for node in c.config["agent_nodes"]:
        if first:
            # Only the first agent node gets RUN_FLOCKER_AGENT_HERE=1 --
            # presumably so exactly one node runs the flocker agent;
            # confirm against the get-volumehub install script.
            run_flocker_agent_here = "RUN_FLOCKER_AGENT_HERE=1 "
        else:
            run_flocker_agent_here = ""
        deferreds.append(c.runSSHAsync(node["public"],
                                       run_flocker_agent_here +
                                       "TARGET=agent-node " +
                                       install_command))
        first = False
    log("Installing volume hub catalog agents...")
    # Wait for the control-node install and all agent installs together.
    yield gatherResults(deferreds)
    log("Done!")
def main():
    """Install clusterhq-flocker-node on every agent node via SSH.

    Expects the path to a cluster.yml as the first CLI argument; the node
    OS ("ubuntu" or "centos") is read from the config.
    """
    c = Configurator(configFile=sys.argv[1])
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        if c.config["os"] == "ubuntu":
            # Ubuntu 14.04: add the docker PPA plus the ClusterHQ testing
            # repository, then install the flocker node package.
            c.runSSH(
                public_ip,
                """apt-get -y install apt-transport-https software-properties-common
add-apt-repository -y ppa:james-page/docker
add-apt-repository -y 'deb https://clusterhq-archive.s3.amazonaws.com/ubuntu-testing/14.04/$(ARCH) /'
apt-get update
apt-get -y --force-yes install clusterhq-flocker-node
""")
        elif c.config["os"] == "centos":
            # CentOS: switch SELinux off (setenforce now, plus persisted
            # config edit), then install from the ClusterHQ yum repository.
            c.runSSH(
                public_ip,
                """if selinuxenabled; then setenforce 0; fi
test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
yum install -y https://s3.amazonaws.com/clusterhq-archive/centos/clusterhq-release$(rpm -E %dist).noarch.rpm
yum install -y clusterhq-flocker-node
""")
    print "Installed clusterhq-flocker-node on all nodes"
    print "To configure and deploy the cluster:"
    print "flocker-config cluster.yml"
def main():
    """Replace the docker binary, generate plugin certs, and install the
    flocker docker plugin (from git, via pip) on every agent node.

    Driven by a cluster.yml path in sys.argv[1]; behaviour is tuned via
    the module-level ``settings`` dict (SKIP_DOCKER_BINARY,
    DOCKER_SERVICE_NAME, DOCKER_BINARY_URL, PLUGIN_REPO, PLUGIN_BRANCH,
    SKIP_INSTALL_PLUGIN).
    """
    c = Configurator(configFile=sys.argv[1])
    control_ip = c.config["control_node"]
    # download and replace the docker binary on each of the nodes
    for node in c.config["agent_nodes"]:
        # don't download a new docker for reasons only the user knows
        if settings["SKIP_DOCKER_BINARY"]:
            break
        public_ip = node["public"]
        print "Replacing docker binary on %s" % (public_ip, )
        # stop the docker service
        print "Stopping the docker service on %s - %s" \
            % (public_ip, settings['DOCKER_SERVICE_NAME'],)
        if c.config["os"] == "ubuntu":
            # upstart-style "stop"; "|| true" tolerates an already-stopped
            # service
            c.runSSHRaw(
                public_ip,
                "stop %s || true" % (settings['DOCKER_SERVICE_NAME'], ))
        elif c.config["os"] == "centos":
            c.runSSHRaw(
                public_ip, "systemctl stop %s.service || true" %
                (settings['DOCKER_SERVICE_NAME'], ))
        # download the latest docker binary
        print "Downloading the latest docker binary on %s - %s" \
            % (public_ip, settings['DOCKER_BINARY_URL'],)
        c.runSSHRaw(
            public_ip,
            "wget -O /usr/bin/docker %s" % (settings['DOCKER_BINARY_URL'], ))
        if c.config["os"] == "ubuntu":
            # newer versions of docker insist on AUFS on ubuntu, probably
            # for good reason.
            # NOTE(review): the single quotes make the whole apt-get
            # command one shell word -- looks suspicious; verify this runs.
            c.runSSHRaw(
                public_ip, "DEBIAN_FRONTEND=noninteractive "
                "'apt-get install -y linux-image-extra-$(uname -r)'")
        # start the docker service
        print "Starting the docker service on %s" % (public_ip, )
        if c.config["os"] == "ubuntu":
            c.runSSHRaw(public_ip,
                        "start %s" % (settings['DOCKER_SERVICE_NAME'], ))
        elif c.config["os"] == "centos":
            c.runSSHRaw(
                public_ip, "systemctl start %s.service" %
                (settings['DOCKER_SERVICE_NAME'], ))
    print "Generating plugin certs"
    # generate and upload plugin.crt and plugin.key for each node
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # use the node IP to name the local files
        # so they do not overwrite each other
        c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip, ))
        print "Generated plugin certs for", public_ip
        # upload the .crt and .key
        for ext in ("crt", "key"):
            c.scp("%s-plugin.%s" % (
                public_ip,
                ext,
            ), public_ip, "/etc/flocker/plugin.%s" % (ext, ))
        print "Uploaded plugin certs for", public_ip
    print "Installing flocker plugin"
    # loop each agent and get the plugin installed/running
    # clone the plugin and configure an upstart/systemd unit for it to run
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        # the full api path to the control service
        controlservice = 'https://%s:4523/v1' % (control_ip, )
        # perhaps the user has pre-compiled images with the plugin
        # downloaded and installed
        if not settings["SKIP_INSTALL_PLUGIN"]:
            if c.config["os"] == "ubuntu":
                # build dependencies for the pip install below
                print c.runSSHRaw(
                    public_ip, "apt-get install -y "
                    "python-pip python-dev build-essential "
                    "libssl-dev libffi-dev")
            elif c.config["os"] == "centos":
                print c.runSSHRaw(
                    public_ip, "yum install -y "
                    "python-pip python-devel "
                    "gcc libffi-devel python-devel openssl-devel")
            # pip install the plugin
            print c.runSSHRaw(
                public_ip, "pip install git+%s@%s" % (
                    settings['PLUGIN_REPO'],
                    settings['PLUGIN_BRANCH'],
                ))
        else:
            print "Skipping installing plugin: %r" % (
                settings["SKIP_INSTALL_PLUGIN"], )
        # ensure that the /usr/share/docker/plugins
        # folder exists
        print "Creating the /usr/share/docker/plugins folder"
        c.runSSHRaw(public_ip, "mkdir -p /usr/share/docker/plugins")
        # configure an upstart job that runs the bash script
        if c.config["os"] == "ubuntu":
            print "Writing flocker-docker-plugin upstart job to %s" % (
                public_ip, )
            c.runSSH(
                public_ip, """cat <<EOF > /etc/init/flocker-docker-plugin.conf
# flocker-plugin - flocker-docker-plugin job file
description "Flocker Plugin service"
author "ClusterHQ <*****@*****.**>"
respawn
env FLOCKER_CONTROL_SERVICE_BASE_URL=%s
env MY_NETWORK_IDENTITY=%s
exec /usr/local/bin/flocker-docker-plugin
EOF
service flocker-docker-plugin restart
""" % (
                    controlservice,
                    private_ip,
                ))
        # configure a systemd job that runs the bash script
        elif c.config["os"] == "centos":
            print "Writing flocker-docker-plugin systemd job to %s" % (
                public_ip, )
            c.runSSH(
                public_ip, """# writing flocker-docker-plugin systemd
cat <<EOF > /etc/systemd/system/flocker-docker-plugin.service
[Unit]
Description=flocker-plugin - flocker-docker-plugin job file
[Service]
Environment=FLOCKER_CONTROL_SERVICE_BASE_URL=%s
Environment=MY_NETWORK_IDENTITY=%s
ExecStart=/usr/local/bin/flocker-docker-plugin
[Install]
WantedBy=multi-user.target
EOF
systemctl enable flocker-docker-plugin.service
systemctl start flocker-docker-plugin.service
""" % (
                    controlservice,
                    private_ip,
                ))
def main(reactor, configFile):
    """Bootstrap a flocker cluster from a parsed cluster config.

    Waits for all nodes to boot, enables root SSH, installs docker and
    clusterhq-flocker-node per OS, and (for the zfs backend on ubuntu)
    sets up a ZFS pool plus inter-node SSH keys.  Twisted generator:
    ``yield``s on gathered Deferreds.
    """
    c = Configurator(configFile=configFile)
    # Check that key file is accessible. If it isn't, give an error that
    # doesn't include the container-wrapping `/host/` to avoid confusing the
    # user.
    if not FilePath(c.get_container_facing_key_path()).exists():
        raise UsageError(
            "Private key specified in private_key_path in config does not exist at: %s"
            % (c.get_user_facing_key_path(),))
    # Permit root access
    # NOTE(review): usernames are redacted in this copy; an OS value other
    # than the three handled leaves `user` unbound (NameError below).
    if c.config["os"] == "coreos":
        user = "******"
    elif c.config["os"] == "ubuntu":
        user = "******"
    elif c.config["os"] == "centos":
        user = "******"
    # Gather IPs of all nodes
    nodes = c.config["agent_nodes"]
    node_public_ips = [n["public"] for n in nodes]
    node_public_ips.append(c.config["control_node"])
    # Wait for all nodes to boot
    yield gatherResults([verify_socket(ip, 22, timeout=600)
                         for ip in node_public_ips])
    # Enable root access
    cmd1 = "sudo mkdir -p /root/.ssh"
    cmd2 = "sudo cp /home/%s/.ssh/authorized_keys /root/.ssh/authorized_keys" % (user,)
    deferreds = []
    for public_ip in node_public_ips:
        d = c.runSSHAsync(public_ip, cmd1 + " && " + cmd2, username=user)
        d.addCallback(report_completion, public_ip=public_ip,
                      message="Enabled root login for")
        deferreds.append(d)
    yield gatherResults(deferreds)
    # Install flocker node software on all the nodes
    deferreds = []
    for public_ip in node_public_ips:
        if c.config["os"] == "ubuntu":
            log("Running install for", public_ip, "...")
            # \$(ARCH) keeps $(ARCH) literal for apt; $(lsb_release ...)
            # is expanded by the remote shell.
            default = "https://clusterhq-archive.s3.amazonaws.com/ubuntu/$(lsb_release --release --short)/\$(ARCH)"
            repo = os.environ.get("CUSTOM_REPO", default)
            if not repo:
                # CUSTOM_REPO set but empty also falls back to the default
                repo = default
            d = c.runSSHAsync(public_ip, """apt-get -y install apt-transport-https software-properties-common
add-apt-repository -y "deb %s /"
apt-get update
curl -sSL https://get.docker.com/ | sh
apt-get -y --force-yes install clusterhq-flocker-node
""" % (repo,))
            d.addCallback(report_completion, public_ip=public_ip)
            deferreds.append(d)
        elif c.config["os"] == "centos":
            default = "https://clusterhq-archive.s3.amazonaws.com/centos/clusterhq-release$(rpm -E %dist).noarch.rpm"
            repo = os.environ.get("CUSTOM_REPO", default)
            if not repo:
                repo = default
            d = c.runSSHAsync(public_ip, """if selinuxenabled; then setenforce 0; fi
yum update
curl -sSL https://get.docker.com/ | sh
service docker start
test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
yum install -y %s
yum install -y clusterhq-flocker-node
""" % (repo,))
            d.addCallback(report_completion, public_ip=public_ip)
            deferreds.append(d)
    yield gatherResults(deferreds)
    # if the dataset.backend is ZFS then install ZFS and mount a flocker pool
    # then create and distribute SSH keys amoungst the nodes
    if c.config["agent_config"]["dataset"]["backend"] == "zfs":
        # CentOS ZFS installation requires a restart
        # XXX todo - find out a way to handle a restart mid-script
        if c.config["os"] == "centos":
            log("Auto-install of ZFS on CentOS is not currently supported")
            sys.exit(1)
        if c.config["os"] == "coreos":
            log("Auto-install of ZFS on CoreOS is not currently supported")
            sys.exit(1)
        for node in c.config["agent_nodes"]:
            node_public_ip = node["public"]
            if c.config["os"] == "ubuntu":
                # a 10G sparse file backs the "flocker" zpool
                c.runSSH(node_public_ip, """echo installing-zfs
add-apt-repository -y ppa:zfs-native/stable
apt-get update
apt-get -y --force-yes install libc6-dev zfsutils
mkdir -p /var/opt/flocker
truncate --size 10G /var/opt/flocker/pool-vdev
zpool create flocker /var/opt/flocker/pool-vdev
""")
        """
        Loop over each node and generate SSH keys
        Then get the public key so we can distribute it to other nodes
        """
        for node in c.config["agent_nodes"]:
            node_public_ip = node["public"]
            log("Generating SSH Keys for %s" % (node_public_ip,))
            publicKey = c.runSSH(node_public_ip, """cat <<EOF > /tmp/genkeys.sh
#!/bin/bash
ssh-keygen -q -f /root/.ssh/id_rsa -N ""
EOF
bash /tmp/genkeys.sh
cat /root/.ssh/id_rsa.pub
rm /tmp/genkeys.sh
""")
            publicKey = publicKey.rstrip('\n')
            """
            Now we have the public key for the node we loop over all the
            other nodes and append it to /root/.ssh/authorized_keys
            """
            for othernode in c.config["agent_nodes"]:
                othernode_public_ip = othernode["public"]
                if othernode_public_ip != node_public_ip:
                    log("Copying %s key -> %s" % (
                        node_public_ip, othernode_public_ip,))
                    c.runSSH(othernode_public_ip, """cat <<EOF > /tmp/uploadkey.sh
#!/bin/bash
echo "%s" >> /root/.ssh/authorized_keys
EOF
bash /tmp/uploadkey.sh
rm /tmp/uploadkey.sh
""" % (publicKey,))
    log("Installed clusterhq-flocker-node on all nodes")
#!/usr/bin/env python # This script will use the correct repo to install packages for clusterhq-flocker-node import sys # Usage: deploy.py cluster.yml from utils import Configurator if __name__ == "__main__": c = Configurator(configFile=sys.argv[1]) for node in c.config["agent_nodes"]: public_ip = node["public"] if c.config["os"] == "ubuntu": c.runSSH(public_ip, """apt-get -y install apt-transport-https software-properties-common add-apt-repository -y ppa:james-page/docker add-apt-repository -y 'deb https://clusterhq-archive.s3.amazonaws.com/ubuntu-testing/14.04/$(ARCH) /' apt-get update apt-get -y --force-yes install clusterhq-flocker-node """) elif c.config["os"] == "centos": c.runSSH(public_ip, """if selinuxenabled; then setenforce 0; fi test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config yum install -y https://s3.amazonaws.com/clusterhq-archive/centos/clusterhq-release$(rpm -E %dist).noarch.rpm yum install -y clusterhq-flocker-node """) print "Installed clusterhq-flocker-node on all nodes" print "To configure and deploy the cluster:" print "./deploy.py cluster.yml"
def main(reactor, configFile):
    """Distribute plugin API certs and install the flocker docker plugin.

    Generates a per-node API certificate locally, uploads cert+key to
    every agent node, installs/starts the packaged plugin on ubuntu or
    centos, and on coreos echoes a `docker run` command for the
    containerised plugin.  Twisted generator: ``yield``s on gathered
    Deferreds.
    """
    c = Configurator(configFile=configFile)
    control_ip = c.config["control_node"]
    log("Generating plugin certs")
    # generate and upload plugin.crt and plugin.key for each node
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # use the node IP to name the local files
        # so they do not overwrite each other
        c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip, ))
        log("Generated plugin certs for", public_ip)

    def report_completion(result, public_ip, message="Completed plugin install for"):
        # Pass-through callback: log progress, keep the result in the chain.
        log(message, public_ip)
        return result

    deferreds = []
    log("Uploading plugin certs...")
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # upload the .crt and .key
        for ext in ("crt", "key"):
            d = c.scp("%s-plugin.%s" % (
                public_ip,
                ext,
            ), public_ip, "/etc/flocker/plugin.%s" % (ext, ), async=True)
            d.addCallback(report_completion, public_ip=public_ip,
                          message=" * Uploaded plugin cert for")
            deferreds.append(d)
    yield gatherResults(deferreds)
    log("Uploaded plugin certs")
    log("Installing flocker plugin")
    # loop each agent and get the plugin installed/running
    # clone the plugin and configure an upstart/systemd unit for it to run
    deferreds = []
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        log("Using %s => %s" % (public_ip, private_ip))
        # the full api path to the control service
        controlservice = 'https://%s:4523/v1' % (control_ip, )
        # perhaps the user has pre-compiled images with the plugin
        # downloaded and installed
        if not settings["SKIP_INSTALL_PLUGIN"]:
            if c.config["os"] == "ubuntu":
                log("Installing plugin for", public_ip, "...")
                d = c.runSSHAsync(
                    public_ip,
                    "apt-get install -y --force-yes clusterhq-flocker-docker-plugin && "
                    "service flocker-docker-plugin restart")
                d.addCallback(report_completion, public_ip=public_ip)
                deferreds.append(d)
            elif c.config["os"] == "centos":
                log("Installing plugin for", public_ip, "...")
                d = c.runSSHAsync(
                    public_ip,
                    "yum install -y clusterhq-flocker-docker-plugin && "
                    "systemctl enable flocker-docker-plugin && "
                    "systemctl start flocker-docker-plugin")
                d.addCallback(report_completion, public_ip=public_ip)
                deferreds.append(d)
        else:
            log("Skipping installing plugin: %r" %
                (settings["SKIP_INSTALL_PLUGIN"], ))
    yield gatherResults(deferreds)
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        # ensure that the /run/docker/plugins
        # folder exists
        log("Creating the /run/docker/plugins folder")
        c.runSSHRaw(public_ip, "mkdir -p /run/docker/plugins")
        if c.config["os"] == "coreos":
            # NOTE(review): `controlservice` here is the value left over
            # from the previous loop; it is constant across nodes so this
            # works, but defining it before the loops would be clearer.
            log("Starting flocker-docker-plugin as docker container on CoreOS on %s" % (public_ip, ))
            c.runSSH(
                public_ip, """echo docker run --restart=always -d --net=host --privileged \\
-e FLOCKER_CONTROL_SERVICE_BASE_URL=%s \\
-e MY_NETWORK_IDENTITY=%s \\
-v /etc/flocker:/etc/flocker \\
-v /run/docker:/run/docker \\
--name=flocker-docker-plugin \\
clusterhq/flocker-docker-plugin""" % (
                    controlservice,
                    private_ip,
                ))
    log("Done!")
def main(reactor, configFile): c = Configurator(configFile=configFile) control_ip = c.config["control_node"] log("Generating plugin certs") # generate and upload plugin.crt and plugin.key for each node for node in c.config["agent_nodes"]: public_ip = node["public"] # use the node IP to name the local files # so they do not overwrite each other c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,)) log("Generated plugin certs for", public_ip) def report_completion(result, public_ip, message="Completed plugin install for"): log(message, public_ip) return result deferreds = [] log("Uploading plugin certs...") for node in c.config["agent_nodes"]: public_ip = node["public"] # upload the .crt and .key for ext in ("crt", "key"): d = c.scp("%s-plugin.%s" % (public_ip, ext,), public_ip, "/etc/flocker/plugin.%s" % (ext,), async=True) d.addCallback(report_completion, public_ip=public_ip, message=" * Uploaded plugin cert for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded plugin certs") log("Installing flocker plugin") # loop each agent and get the plugin installed/running # clone the plugin and configure an upstart/systemd unit for it to run deferreds = [] for node in c.config["agent_nodes"]: public_ip = node["public"] private_ip = node["private"] log("Using %s => %s" % (public_ip, private_ip)) # the full api path to the control service controlservice = 'https://%s:4523/v1' % (control_ip,) # perhaps the user has pre-compiled images with the plugin # downloaded and installed if not settings["SKIP_INSTALL_PLUGIN"]: if c.config["os"] == "ubuntu": log("Installing plugin for", public_ip, "...") d = c.runSSHAsync(public_ip, "apt-get install -y --force-yes clusterhq-flocker-docker-plugin && " "service flocker-docker-plugin restart") d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) elif c.config["os"] == "centos": log("Installing plugin for", public_ip, "...") d = c.runSSHAsync(public_ip, "yum install -y 
clusterhq-flocker-docker-plugin && " "systemctl enable flocker-docker-plugin && " "systemctl start flocker-docker-plugin") d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) else: log("Skipping installing plugin: %r" % (settings["SKIP_INSTALL_PLUGIN"],)) yield gatherResults(deferreds) for node in c.config["agent_nodes"]: public_ip = node["public"] private_ip = node["private"] # ensure that the /run/docker/plugins # folder exists log("Creating the /run/docker/plugins folder") c.runSSHRaw(public_ip, "mkdir -p /run/docker/plugins") if c.config["os"] == "coreos": log("Starting flocker-docker-plugin as docker container on CoreOS on %s" % (public_ip,)) c.runSSH(public_ip, """echo docker run --restart=always -d --net=host --privileged \\ -e FLOCKER_CONTROL_SERVICE_BASE_URL=%s \\ -e MY_NETWORK_IDENTITY=%s \\ -v /etc/flocker:/etc/flocker \\ -v /run/docker:/run/docker \\ --name=flocker-docker-plugin \\ clusterhq/flocker-docker-plugin""" % (controlservice, private_ip,)) log("Done!")
def main():
    """Replace the docker binary, generate plugin certs, and install the
    flocker docker plugin (from git, via pip) on every agent node.

    Driven by a cluster.yml path in sys.argv[1]; behaviour is tuned via
    the module-level ``settings`` dict (SKIP_DOCKER_BINARY,
    DOCKER_SERVICE_NAME, DOCKER_BINARY_URL, PLUGIN_REPO, PLUGIN_BRANCH,
    SKIP_INSTALL_PLUGIN).
    """
    c = Configurator(configFile=sys.argv[1])
    control_ip = c.config["control_node"]
    # download and replace the docker binary on each of the nodes
    for node in c.config["agent_nodes"]:
        # don't download a new docker for reasons only the user knows
        if settings["SKIP_DOCKER_BINARY"]:
            break
        public_ip = node["public"]
        print "Replacing docker binary on %s" % (public_ip,)
        # stop the docker service
        print "Stopping the docker service on %s - %s" \
            % (public_ip, settings['DOCKER_SERVICE_NAME'],)
        if c.config["os"] == "ubuntu":
            # upstart-style "stop"; "|| true" tolerates an already-stopped
            # service
            c.runSSHRaw(public_ip, "stop %s || true"
                        % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "centos":
            c.runSSHRaw(public_ip, "systemctl stop %s.service || true"
                        % (settings['DOCKER_SERVICE_NAME'],))
        # download the latest docker binary
        print "Downloading the latest docker binary on %s - %s" \
            % (public_ip, settings['DOCKER_BINARY_URL'],)
        c.runSSHRaw(public_ip, "wget -O /usr/bin/docker %s"
                    % (settings['DOCKER_BINARY_URL'],))
        if c.config["os"] == "ubuntu":
            # newer versions of docker insist on AUFS on ubuntu, probably
            # for good reason.
            # NOTE(review): the single quotes make the whole apt-get
            # command one shell word -- looks suspicious; verify this runs.
            c.runSSHRaw(public_ip,
                        "DEBIAN_FRONTEND=noninteractive "
                        "'apt-get install -y linux-image-extra-$(uname -r)'")
        # start the docker service
        print "Starting the docker service on %s" % (public_ip,)
        if c.config["os"] == "ubuntu":
            c.runSSHRaw(public_ip, "start %s"
                        % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "centos":
            c.runSSHRaw(public_ip, "systemctl start %s.service"
                        % (settings['DOCKER_SERVICE_NAME'],))
    print "Generating plugin certs"
    # generate and upload plugin.crt and plugin.key for each node
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # use the node IP to name the local files
        # so they do not overwrite each other
        c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,))
        print "Generated plugin certs for", public_ip
        # upload the .crt and .key
        for ext in ("crt", "key"):
            c.scp("%s-plugin.%s" % (public_ip, ext,), public_ip,
                  "/etc/flocker/plugin.%s" % (ext,))
        print "Uploaded plugin certs for", public_ip
    print "Installing flocker plugin"
    # loop each agent and get the plugin installed/running
    # clone the plugin and configure an upstart/systemd unit for it to run
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        # the full api path to the control service
        controlservice = 'https://%s:4523/v1' % (control_ip,)
        # perhaps the user has pre-compiled images with the plugin
        # downloaded and installed
        if not settings["SKIP_INSTALL_PLUGIN"]:
            if c.config["os"] == "ubuntu":
                # build dependencies for the pip install below
                print c.runSSHRaw(public_ip,
                                  "apt-get install -y "
                                  "python-pip python-dev build-essential "
                                  "libssl-dev libffi-dev")
            elif c.config["os"] == "centos":
                print c.runSSHRaw(public_ip,
                                  "yum install -y "
                                  "python-pip python-devel "
                                  "gcc libffi-devel python-devel openssl-devel")
            # pip install the plugin
            print c.runSSHRaw(public_ip,
                              "pip install git+%s@%s"
                              % (settings['PLUGIN_REPO'],
                                 settings['PLUGIN_BRANCH'],))
        else:
            print "Skipping installing plugin: %r" % (
                settings["SKIP_INSTALL_PLUGIN"],)
        # ensure that the /usr/share/docker/plugins
        # folder exists
        print "Creating the /usr/share/docker/plugins folder"
        c.runSSHRaw(public_ip, "mkdir -p /usr/share/docker/plugins")
        # configure an upstart job that runs the bash script
        if c.config["os"] == "ubuntu":
            print "Writing flocker-docker-plugin upstart job to %s" % (
                public_ip,)
            c.runSSH(public_ip, """cat <<EOF > /etc/init/flocker-docker-plugin.conf
# flocker-plugin - flocker-docker-plugin job file
description "Flocker Plugin service"
author "ClusterHQ <*****@*****.**>"
respawn
env FLOCKER_CONTROL_SERVICE_BASE_URL=%s
env MY_NETWORK_IDENTITY=%s
exec /usr/local/bin/flocker-docker-plugin
EOF
service flocker-docker-plugin restart
""" % (controlservice, private_ip,))
        # configure a systemd job that runs the bash script
        elif c.config["os"] == "centos":
            print "Writing flocker-docker-plugin systemd job to %s" % (
                public_ip,)
            c.runSSH(public_ip, """# writing flocker-docker-plugin systemd
cat <<EOF > /etc/systemd/system/flocker-docker-plugin.service
[Unit]
Description=flocker-plugin - flocker-docker-plugin job file
[Service]
Environment=FLOCKER_CONTROL_SERVICE_BASE_URL=%s
Environment=MY_NETWORK_IDENTITY=%s
ExecStart=/usr/local/bin/flocker-docker-plugin
[Install]
WantedBy=multi-user.target
EOF
systemctl enable flocker-docker-plugin.service
systemctl start flocker-docker-plugin.service
""" % (controlservice, private_ip,))
'PLUGIN_BRANCH': 'txflocker-env-vars' } # dict that holds our actual env vars once the overrides have been applied settings = {} # loop over each of the default vars and check to see if we have been # given an override in the environment for field in settings_defaults: value = os.environ.get(field) if value is None: value = settings_defaults[field] settings[field] = value if __name__ == "__main__": c = Configurator(configFile=sys.argv[1]) control_ip = c.config["control_node"] print "Generating plugin certs" # generate and upload plugin.crt and plugin.key for each node for node in c.config["agent_nodes"]: public_ip = node["public"] # use the node IP to name the local files # so they do not overwrite each other c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,)) print "Generated plugin certs for", public_ip # upload the .crt and .key for ext in ("crt", "key"): c.scp("%s-plugin.%s" % (public_ip, ext,), public_ip, "/etc/flocker/plugin.%s" % (ext,)) print "Uploaded plugin certs for", public_ip
#!/usr/bin/env python # This script will use the correct repo to install # packages for clusterhq-flocker-node import sys # Usage: deploy.py cluster.yml from utils import Configurator if __name__ == "__main__": c = Configurator(configFile=sys.argv[1]) for node in c.config["agent_nodes"]: public_ip = node["public"] if c.config["os"] == "ubuntu": c.runSSH(public_ip, """# install flocker apt-get -y install apt-transport-https software-properties-common add-apt-repository -y ppa:james-page/docker add-apt-repository -y 'deb https://clusterhq-archive.s3.amazonaws.com/ubuntu-testing/14.04/$(ARCH) /' # noqa apt-get update apt-get -y --force-yes install clusterhq-flocker-node """) elif c.config["os"] == "centos": c.runSSH(public_ip, """# install flocker if selinuxenabled; then setenforce 0; fi test -e /etc/selinux/config && \ sed --in-place='.preflocker' \ 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config yum install -y https://s3.amazonaws.com/clusterhq-archive/centos/clusterhq-release$(rpm -E %dist).noarch.rpm # noqa yum install -y clusterhq-flocker-node """)
def main(reactor, *args): c = Configurator(configFile=sys.argv[1]) c.run("flocker-ca initialize %s" % (c.config["cluster_name"], )) log("Initialized cluster CA.") c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"], )) log("Created control cert.") node_mapping = {} for node in c.config["agent_nodes"]: public_ip = node["public"] # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split( " ")[1] node_mapping[public_ip] = uuid log("Generated", uuid, "for", public_ip) for user in c.config["users"]: c.run("flocker-ca create-api-certificate %s" % (user, )) log("Created user key for", user) # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the # nodes. f = open("agent.yml", "w") yaml.dump(c.config["agent_config"], f) f.close() # Record the node mapping for later. f = open("node_mapping.yml", "w") yaml.dump(node_mapping, f) f.close() log("Making /etc/flocker directory on all nodes") deferreds = [] for node, uuid in node_mapping.iteritems(): deferreds.append(c.runSSHAsync(node, "mkdir -p /etc/flocker")) deferreds.append( c.runSSHAsync(c.config["control_node"], "mkdir -p /etc/flocker")) yield gatherResults(deferreds) log("Uploading keys to respective nodes:") deferreds = [] # Copy cluster cert, and control cert and key to control node. 
d = c.scp("cluster.crt", c.config["control_node"], "/etc/flocker/cluster.crt", async=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded cluster cert to") deferreds.append(d) for ext in ("crt", "key"): d = c.scp("control-%s.%s" % (c.config["control_node"], ext), c.config["control_node"], "/etc/flocker/control-service.%s" % (ext, ), async=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded control %s to" % (ext, )) deferreds.append(d) log(" * Uploaded control cert & key to control node.") # Copy cluster cert, and agent cert and key to agent nodes. deferreds = [] for node, uuid in node_mapping.iteritems(): d = c.scp("cluster.crt", node, "/etc/flocker/cluster.crt", async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded cluster cert to") deferreds.append(d) d = c.scp("agent.yml", node, "/etc/flocker/agent.yml", async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded agent.yml to") deferreds.append(d) for ext in ("crt", "key"): d = c.scp("%s.%s" % (uuid, ext), node, "/etc/flocker/node.%s" % (ext, ), async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded node %s to" % (ext, )) deferreds.append(d) yield gatherResults(deferreds) deferreds = [] for node, uuid in node_mapping.iteritems(): if c.config["os"] == "ubuntu": d = c.runSSHAsync( node, """echo "starting flocker-container-agent..." service flocker-container-agent start echo "starting flocker-dataset-agent..." 
service flocker-dataset-agent start """) elif c.config["os"] == "centos": d = c.runSSHAsync( node, """if selinuxenabled; then setenforce 0; fi systemctl enable docker.service systemctl start docker.service """) elif c.config["os"] == "coreos": d = c.runSSHAsync( node, """echo echo > /tmp/flocker-command-log docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\ -v /etc/flocker:/etc/flocker \\ -v /var/run/docker.sock:/var/run/docker.sock \\ --name=flocker-container-agent \\ clusterhq/flocker-container-agent docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\ -e DEBUG=1 \\ -v /tmp/flocker-command-log:/tmp/flocker-command-log \\ -v /flocker:/flocker -v /:/host -v /etc/flocker:/etc/flocker \\ -v /dev:/dev \\ --name=flocker-dataset-agent \\ clusterhq/flocker-dataset-agent """ % dict(early_docker_prefix=EARLY_DOCKER_PREFIX)) deferreds.append(d) if c.config["os"] == "ubuntu": d = c.runSSHAsync( c.config["control_node"], """cat <<EOF > /etc/init/flocker-control.override start on runlevel [2345] stop on runlevel [016] EOF echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services echo 'flocker-control-agent 4524/tcp # Flocker Control Agent port' >> /etc/services service flocker-control restart ufw allow flocker-control-api ufw allow flocker-control-agent """) elif c.config["os"] == "centos": d = c.runSSHAsync( c.config["control_node"], """systemctl enable flocker-control systemctl start flocker-control firewall-cmd --permanent --add-service flocker-control-api firewall-cmd --add-service flocker-control-api firewall-cmd --permanent --add-service flocker-control-agent firewall-cmd --add-service flocker-control-agent """) elif c.config["os"] == "coreos": d = c.runSSHAsync( c.config["control_node"], """echo docker %(early_docker_prefix)s run --name=flocker-control-volume -v /var/lib/flocker clusterhq/flocker-control-service true docker %(early_docker_prefix)s run --restart=always -d --net=host -v 
/etc/flocker:/etc/flocker --volumes-from=flocker-control-volume --name=flocker-control-service clusterhq/flocker-control-service""" % dict(early_docker_prefix=EARLY_DOCKER_PREFIX)) deferreds.append(d) yield gatherResults(deferreds) if c.config["os"] == "ubuntu": # XXX INSECURE, UNSUPPORTED, UNDOCUMENTED EXPERIMENTAL OPTION # Usage: `uft-flocker-config --ubuntu-aws --swarm`, I guess if len(sys.argv) > 2 and sys.argv[2] == "--swarm": # Install swarm deferreds = [] clusterid = c.runSSH(c.config["control_node"], """ docker run swarm create""").strip() log("Created Swarm ID") for node in c.config["agent_nodes"]: d = c.runSSHAsync( node['public'], """ service docker stop docker daemon -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 >> /tmp/dockerlogs 2>&1 & """) # Let daemon come up time.sleep(3) d = c.runSSHAsync( node['public'], """ docker run -d swarm join --addr=%s:2375 token://%s """ % (node['private'], clusterid)) log("Started Swarm Agent for %s" % node['public']) deferreds.append(d) d = c.runSSHAsync( c.config["control_node"], """ docker run -d -p 2357:2375 swarm manage token://%s """ % clusterid) log("Starting Swarm Master") deferreds.append(d) yield gatherResults(deferreds) log("Swarm Master is at tcp://%s:2357" % c.config["control_node"])
def main(): c = Configurator(configFile=sys.argv[1]) c.run("flocker-ca initialize %s" % (c.config["cluster_name"], )) print "Initialized cluster CA." c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"], )) print "Created control cert." node_mapping = {} for node in c.config["agent_nodes"]: public_ip = node["public"] # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split( " ")[1] node_mapping[public_ip] = uuid print "Generated", uuid, "for", public_ip for user in c.config["users"]: c.run("flocker-ca create-api-certificate %s" % (user, )) print "Created user key for", user print "Uploading keys to respective nodes:" # Copy cluster cert, and control cert and key to control node. c.runSSHRaw(c.config["control_node"], "mkdir -p /etc/flocker") c.scp("cluster.crt", c.config["control_node"], "/etc/flocker/cluster.crt") print " * Uploaded cluster cert to control node." for ext in ("crt", "key"): c.scp("control-%s.%s" % (c.config["control_node"], ext), c.config["control_node"], "/etc/flocker/control-service.%s" % (ext, )) print " * Uploaded control cert & key to control node." # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the # nodes. f = open("agent.yml", "w") agent_config = yaml.dump(c.config["agent_config"], f) f.close() # Record the node mapping for later. f = open("node_mapping.yml", "w") agent_config = yaml.dump(node_mapping, f) f.close() # Copy cluster cert, and agent cert and key to agent nodes. for node, uuid in node_mapping.iteritems(): c.runSSHRaw(node, "mkdir -p /etc/flocker") c.scp("cluster.crt", node, "/etc/flocker/cluster.crt") c.scp("agent.yml", node, "/etc/flocker/agent.yml") print " * Uploaded cluster cert to %s." % (node, ) for ext in ("crt", "key"): c.scp("%s.%s" % (uuid, ext), node, "/etc/flocker/node.%s" % (ext, )) print " * Uploaded node cert and key to %s." 
% (node, ) for node, uuid in node_mapping.iteritems(): if c.config["os"] == "ubuntu": c.runSSH( node, """apt-get -y install apt-transport-https software-properties-common service flocker-container-agent restart service flocker-dataset-agent restart """) elif c.config["os"] == "centos": c.runSSH( node, """if selinuxenabled; then setenforce 0; fi systemctl enable docker.service systemctl start docker.service """) if c.config["os"] == "ubuntu": c.runSSH( c.config["control_node"], """cat <<EOF > /etc/init/flocker-control.override start on runlevel [2345] stop on runlevel [016] EOF echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services echo 'flocker-control-agent 4524/tcp # Flocker Control Agent port' >> /etc/services service flocker-control restart ufw allow flocker-control-api ufw allow flocker-control-agent """) elif c.config["os"] == "centos": c.runSSH( c.config["control_node"], """systemctl enable flocker-control systemctl start flocker-control firewall-cmd --permanent --add-service flocker-control-api firewall-cmd --add-service flocker-control-api firewall-cmd --permanent --add-service flocker-control-agent firewall-cmd --add-service flocker-control-agent """) print "Configured and started control service, opened firewall." if c.config["users"]: print "\nYou should now be able to communicate with the control service, for example:\n" prefix = ("curl -s --cacert $PWD/cluster.crt --cert $PWD/%(user)s.crt " "--key $PWD/%(user)s.key" % dict(user=c.config["users"][0], )) url = "https://%(control_node)s:4523/v1" % dict( control_node=c.config["control_node"], ) header = ' --header "Content-type: application/json"' print "This should give you a list of your nodes:" print prefix + " " + url + "/state/nodes | jq ." print "Try running tutorial.py cluster.yml for more..."
def main(reactor, args): c = Configurator(configFile=sys.argv[1]) c.run("flocker-ca initialize %s" % (c.config["cluster_name"],)) log("Initialized cluster CA.") c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"],)) log("Created control cert.") node_mapping = {} for node in c.config["agent_nodes"]: public_ip = node["public"] # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split(" ")[1] node_mapping[public_ip] = uuid log("Generated", uuid, "for", public_ip) for user in c.config["users"]: c.run("flocker-ca create-api-certificate %s" % (user,)) log("Created user key for", user) # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the # nodes. f = open("agent.yml", "w") yaml.dump(c.config["agent_config"], f) f.close() # Record the node mapping for later. f = open("node_mapping.yml", "w") yaml.dump(node_mapping, f) f.close() log("Making /etc/flocker directory on all nodes") deferreds = [] for node, uuid in node_mapping.iteritems(): deferreds.append(c.runSSHAsync(node, "mkdir -p /etc/flocker")) deferreds.append(c.runSSHAsync(c.config["control_node"], "mkdir -p /etc/flocker")) yield gatherResults(deferreds) log("Uploading keys to respective nodes:") deferreds = [] # Copy cluster cert, and control cert and key to control node. d = c.scp("cluster.crt", c.config["control_node"], "/etc/flocker/cluster.crt", async=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded cluster cert to") deferreds.append(d) for ext in ("crt", "key"): d = c.scp("control-%s.%s" % (c.config["control_node"], ext), c.config["control_node"], "/etc/flocker/control-service.%s" % (ext,), async=True) d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded control %s to" % (ext,)) deferreds.append(d) log(" * Uploaded control cert & key to control node.") # Copy cluster cert, and agent cert and key to agent nodes. 
deferreds = [] for node, uuid in node_mapping.iteritems(): d = c.scp("cluster.crt", node, "/etc/flocker/cluster.crt", async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded cluster cert to") deferreds.append(d) d = c.scp("agent.yml", node, "/etc/flocker/agent.yml", async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded agent.yml to") deferreds.append(d) for ext in ("crt", "key"): d = c.scp("%s.%s" % (uuid, ext), node, "/etc/flocker/node.%s" % (ext,), async=True) d.addCallback(report_completion, public_ip=node, message=" * Uploaded node %s to" % (ext,)) deferreds.append(d) yield gatherResults(deferreds) deferreds = [] for node, uuid in node_mapping.iteritems(): if c.config["os"] == "ubuntu": d = c.runSSHAsync(node, """echo "starting flocker-container-agent..." service flocker-container-agent start echo "starting flocker-dataset-agent..." service flocker-dataset-agent start """) elif c.config["os"] == "centos": d = c.runSSHAsync(node, """if selinuxenabled; then setenforce 0; fi systemctl enable docker.service systemctl start docker.service """) elif c.config["os"] == "coreos": d = c.runSSHAsync(node, """echo echo > /tmp/flocker-command-log docker run --restart=always -d --net=host --privileged \\ -v /etc/flocker:/etc/flocker \\ -v /var/run/docker.sock:/var/run/docker.sock \\ --name=flocker-container-agent \\ clusterhq/flocker-container-agent docker run --restart=always -d --net=host --privileged \\ -e DEBUG=1 \\ -v /tmp/flocker-command-log:/tmp/flocker-command-log \\ -v /flocker:/flocker -v /:/host -v /etc/flocker:/etc/flocker \\ -v /dev:/dev \\ --name=flocker-dataset-agent \\ clusterhq/flocker-dataset-agent """) deferreds.append(d) if c.config["os"] == "ubuntu": d = c.runSSHAsync(c.config["control_node"], """cat <<EOF > /etc/init/flocker-control.override start on runlevel [2345] stop on runlevel [016] EOF echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services echo 'flocker-control-agent 
4524/tcp # Flocker Control Agent port' >> /etc/services service flocker-control restart ufw allow flocker-control-api ufw allow flocker-control-agent """) elif c.config["os"] == "centos": d = c.runSSHAsync(c.config["control_node"], """systemctl enable flocker-control systemctl start flocker-control firewall-cmd --permanent --add-service flocker-control-api firewall-cmd --add-service flocker-control-api firewall-cmd --permanent --add-service flocker-control-agent firewall-cmd --add-service flocker-control-agent """) elif c.config["os"] == "coreos": d = c.runSSHAsync(c.config["control_node"], """echo docker run --name=flocker-control-volume -v /var/lib/flocker clusterhq/flocker-control-service true docker run --restart=always -d --net=host -v /etc/flocker:/etc/flocker --volumes-from=flocker-control-volume --name=flocker-control-service clusterhq/flocker-control-service""") deferreds.append(d) yield gatherResults(deferreds)
def main(reactor, configFile): c = Configurator(configFile=configFile) if c.config["os"] == "ubuntu": user = "******" elif c.config["os"] == "centos": user = "******" elif c.config["os"] == "amazon": user = "******" # Gather IPs of all nodes nodes = c.config["agent_nodes"] node_public_ips = [n["public"] for n in nodes] node_public_ips.append(c.config["control_node"]) # Wait for all nodes to boot yield gatherResults( [verify_socket(ip, 22, timeout=600) for ip in node_public_ips]) log("Generating API certs") # generate and upload plugin.crt and plugin.key for each node for public_ip in node_public_ips: # use the node IP to name the local files # so they do not overwrite each other c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip, )) log("Generated api certs for", public_ip) deferreds = [] log("Uploading api certs...") for public_ip in node_public_ips: # upload the .crt and .key for ext in ("crt", "key"): d = c.scp("%s-plugin.%s" % ( public_ip, ext, ), public_ip, "/etc/flocker/api.%s" % (ext, ), async=True) d.addCallback(report_completion, public_ip=public_ip, message=" * Uploaded api cert for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded api certs") deferreds = [] for public_ip in node_public_ips: cmd = """echo cat > /etc/flocker/env << EOF FLOCKER_CONTROL_SERVICE_HOST=%s\n FLOCKER_CONTROL_SERVICE_PORT=4523\n FLOCKER_CONTROL_SERVICE_CA_FILE=/etc/flocker/cluster.crt\n FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE=/etc/flocker/api.key\n FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE=/etc/flocker/api.crt EOF""" % c.config['control_node'] d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Enabled flocker ENVs for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded Flocker ENV file.") deferreds = [] for public_ip in node_public_ips: cmd = """echo sed -i -e 's,/usr/bin/kubelet,/root/kubelet,g' /etc/systemd/system/kubelet.service; sed -i '/\[Service\]/aEnvironmentFile=/etc/flocker/env' 
/etc/systemd/system/kubelet.service """ d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Configured flocker ENVs for") deferreds.append(d) yield gatherResults(deferreds) log("Configured Flocker ENV file.") deferreds = [] for public_ip in node_public_ips: cmd = """echo wget https://storage.googleapis.com/kubernetes-release/release/v1.1.1/bin/linux/amd64/kubelet; chmod +x kubelet; systemctl daemon-reload; systemctl restart kubelet; """ d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Restarted Kubelet for") deferreds.append(d) yield gatherResults(deferreds) log("Restarted Kubelet") log("Completed")
def main(reactor, configFile): c = Configurator(configFile=configFile) if c.config["os"] == "ubuntu": user = "******" elif c.config["os"] == "centos": user = "******" elif c.config["os"] == "amazon": user = "******" # Gather IPs of all nodes nodes = c.config["agent_nodes"] node_public_ips = [n["public"] for n in nodes] node_public_ips.append(c.config["control_node"]) # Wait for all nodes to boot yield gatherResults([verify_socket(ip, 22, timeout=600) for ip in node_public_ips]) log("Generating API certs") # generate and upload plugin.crt and plugin.key for each node for public_ip in node_public_ips: # use the node IP to name the local files # so they do not overwrite each other c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,)) log("Generated api certs for", public_ip) deferreds = [] log("Uploading api certs...") for public_ip in node_public_ips: # upload the .crt and .key for ext in ("crt", "key"): d = c.scp("%s-plugin.%s" % (public_ip, ext,), public_ip, "/etc/flocker/api.%s" % (ext,), async=True) d.addCallback(report_completion, public_ip=public_ip, message=" * Uploaded api cert for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded api certs") deferreds = [] for public_ip in node_public_ips: cmd = """echo cat > /etc/flocker/env << EOF FLOCKER_CONTROL_SERVICE_HOST=%s\n FLOCKER_CONTROL_SERVICE_PORT=4523\n FLOCKER_CONTROL_SERVICE_CA_FILE=/etc/flocker/cluster.crt\n FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE=/etc/flocker/api.key\n FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE=/etc/flocker/api.crt EOF""" % c.config['control_node'] d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Enabled flocker ENVs for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded Flocker ENV file.") deferreds = [] for public_ip in node_public_ips: cmd = """echo sed -i -e 's,/usr/bin/kubelet,/root/kubelet,g' /etc/systemd/system/kubelet.service; sed -i '/\[Service\]/aEnvironmentFile=/etc/flocker/env' 
/etc/systemd/system/kubelet.service """ d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Configured flocker ENVs for") deferreds.append(d) yield gatherResults(deferreds) log("Configured Flocker ENV file.") deferreds = [] for public_ip in node_public_ips: cmd = """echo wget https://storage.googleapis.com/kubernetes-release/release/v1.1.1/bin/linux/amd64/kubelet; chmod +x kubelet; systemctl daemon-reload; systemctl restart kubelet; """ d = c.runSSHAsync(public_ip, cmd) d.addCallback(report_completion, public_ip=public_ip, message="Restarted Kubelet for") deferreds.append(d) yield gatherResults(deferreds) log("Restarted Kubelet") log("Completed")
def main(reactor, configFile): c = Configurator(configFile=configFile) # Check that key file is accessible. If it isn't, give an error that # doesn't include the container-wrapping `/host/` to avoid confusing the # user. if not FilePath(c.get_container_facing_key_path()).exists(): raise UsageError( "Private key specified in private_key_path in config does not exist at: %s" % (c.get_user_facing_key_path(), )) # Permit root access if c.config["os"] == "coreos": user = "******" elif c.config["os"] == "ubuntu": user = "******" elif c.config["os"] == "centos": user = "******" # Gather IPs of all nodes nodes = c.config["agent_nodes"] node_public_ips = [n["public"] for n in nodes] node_public_ips.append(c.config["control_node"]) # Wait for all nodes to boot yield gatherResults( [verify_socket(ip, 22, timeout=600) for ip in node_public_ips]) # Enable root access cmd1 = "sudo mkdir -p /root/.ssh" cmd2 = "sudo cp /home/%s/.ssh/authorized_keys /root/.ssh/authorized_keys" % ( user, ) deferreds = [] for public_ip in node_public_ips: d = c.runSSHAsync(public_ip, cmd1 + " && " + cmd2, username=user) d.addCallback(report_completion, public_ip=public_ip, message="Enabled root login for") deferreds.append(d) yield gatherResults(deferreds) # Install flocker node software on all the nodes deferreds = [] for public_ip in node_public_ips: if c.config["os"] == "ubuntu": log("Running install for", public_ip, "...") default = "https://clusterhq-archive.s3.amazonaws.com/ubuntu/$(lsb_release --release --short)/\$(ARCH)" repo = os.environ.get("CUSTOM_REPO", default) if not repo: repo = default d = c.runSSHAsync( public_ip, """apt-get -y install apt-transport-https software-properties-common add-apt-repository -y "deb %s /" apt-get update curl -sSL https://get.docker.com/ | sh apt-get -y --force-yes install clusterhq-flocker-node """ % (repo, )) d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) elif c.config["os"] == "centos": default = 
"https://clusterhq-archive.s3.amazonaws.com/centos/clusterhq-release$(rpm -E %dist).noarch.rpm" repo = os.environ.get("CUSTOM_REPO", default) if not repo: repo = default d = c.runSSHAsync( public_ip, """if selinuxenabled; then setenforce 0; fi yum update curl -sSL https://get.docker.com/ | sh service docker start test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config yum install -y %s yum install -y clusterhq-flocker-node """ % (repo, )) d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) yield gatherResults(deferreds) # if the dataset.backend is ZFS then install ZFS and mount a flocker pool # then create and distribute SSH keys amoungst the nodes if c.config["agent_config"]["dataset"]["backend"] == "zfs": # CentOS ZFS installation requires a restart # XXX todo - find out a way to handle a restart mid-script if c.config["os"] == "centos": log("Auto-install of ZFS on CentOS is not currently supported") sys.exit(1) if c.config["os"] == "coreos": log("Auto-install of ZFS on CoreOS is not currently supported") sys.exit(1) for node in c.config["agent_nodes"]: node_public_ip = node["public"] if c.config["os"] == "ubuntu": c.runSSH( node_public_ip, """echo installing-zfs add-apt-repository -y ppa:zfs-native/stable apt-get update apt-get -y --force-yes install libc6-dev zfsutils mkdir -p /var/opt/flocker truncate --size 10G /var/opt/flocker/pool-vdev zpool create flocker /var/opt/flocker/pool-vdev """) """ Loop over each node and generate SSH keys Then get the public key so we can distribute it to other nodes """ for node in c.config["agent_nodes"]: node_public_ip = node["public"] log("Generating SSH Keys for %s" % (node_public_ip, )) publicKey = c.runSSH( node_public_ip, """cat <<EOF > /tmp/genkeys.sh #!/bin/bash ssh-keygen -q -f /root/.ssh/id_rsa -N "" EOF bash /tmp/genkeys.sh cat /root/.ssh/id_rsa.pub rm /tmp/genkeys.sh """) publicKey = publicKey.rstrip('\n') """ Now we have the public 
key for the node we loop over all the other nodes and append it to /root/.ssh/authorized_keys """ for othernode in c.config["agent_nodes"]: othernode_public_ip = othernode["public"] if othernode_public_ip != node_public_ip: log("Copying %s key -> %s" % ( node_public_ip, othernode_public_ip, )) c.runSSH( othernode_public_ip, """cat <<EOF > /tmp/uploadkey.sh #!/bin/bash echo "%s" >> /root/.ssh/authorized_keys EOF bash /tmp/uploadkey.sh rm /tmp/uploadkey.sh """ % (publicKey, )) log("Installed clusterhq-flocker-node on all nodes")
def main(reactor, configFile): c = Configurator(configFile=configFile) control_ip = c.config["control_node"] # download and replace the docker binary on each of the nodes for node in c.config["agent_nodes"]: if c.config["os"] != "coreos": log( "Skipping installing new docker binary because we're on", "ubuntu/centos, assuming we installed a sufficiently recent one", "already.") break # only install new docker binary on coreos. XXX TODO coreos > 801.0.0 # doesn't need newer docker. if settings["SKIP_DOCKER_BINARY"] or c.config["os"] != "coreos": break public_ip = node["public"] log("Replacing docker binary on %s" % (public_ip, )) # stop the docker service log("Stopping the docker service on %s" % (public_ip, )) if c.config["os"] == "ubuntu": c.runSSHRaw( public_ip, "stop %s || true" % (settings['DOCKER_SERVICE_NAME'], )) elif c.config["os"] == "centos": c.runSSHRaw( public_ip, "systemctl stop %s.service || true" % (settings['DOCKER_SERVICE_NAME'], )) elif c.config["os"] == "coreos": c.runSSHRaw(public_ip, "systemctl stop docker.service || true") # download the latest docker binary if c.config["os"] == "coreos": log("Downloading the latest docker binary on %s - %s" \ % (public_ip, settings['DOCKER_BINARY_URL'],)) c.runSSHRaw(public_ip, "mkdir -p /root/bin") c.runSSHRaw( public_ip, "wget -qO /root/bin/docker %s" % (settings['DOCKER_BINARY_URL'], )) c.runSSHRaw(public_ip, "chmod +x /root/bin/docker") c.runSSHRaw(public_ip, "cp /usr/lib/coreos/dockerd /root/bin/dockerd") c.runSSHRaw( public_ip, "cp /usr/lib/systemd/system/docker.service /etc/systemd/system/" ) c.runSSHRaw( public_ip, "sed -i s@/usr/lib/coreos@/root/bin@g /etc/systemd/system/docker.service" ) c.runSSHRaw( public_ip, "sed -i \\'s@exec docker@exec /root/bin/docker@g\\' /root/bin/dockerd" ) c.runSSHRaw(public_ip, "systemctl daemon-reload") else: log("Downloading the latest docker binary on %s - %s" \ % (public_ip, settings['DOCKER_BINARY_URL'],)) c.runSSHRaw( public_ip, "wget -O /usr/bin/docker %s" % 
(settings['DOCKER_BINARY_URL'], )) # start the docker service log("Starting the docker service on %s" % (public_ip, )) if c.config["os"] == "ubuntu": c.runSSHRaw(public_ip, "start %s" % (settings['DOCKER_SERVICE_NAME'], )) elif c.config["os"] == "centos": c.runSSHRaw( public_ip, "systemctl start %s.service" % (settings['DOCKER_SERVICE_NAME'], )) elif c.config["os"] == "coreos": c.runSSHRaw(public_ip, "systemctl start docker.service") log("Generating plugin certs") # generate and upload plugin.crt and plugin.key for each node for node in c.config["agent_nodes"]: public_ip = node["public"] # use the node IP to name the local files # so they do not overwrite each other c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip, )) log("Generated plugin certs for", public_ip) def report_completion(result, public_ip, message="Completed plugin install for"): log(message, public_ip) return result deferreds = [] log("Uploading plugin certs...") for node in c.config["agent_nodes"]: public_ip = node["public"] # upload the .crt and .key for ext in ("crt", "key"): d = c.scp("%s-plugin.%s" % ( public_ip, ext, ), public_ip, "/etc/flocker/plugin.%s" % (ext, ), async=True) d.addCallback(report_completion, public_ip=public_ip, message=" * Uploaded plugin cert for") deferreds.append(d) yield gatherResults(deferreds) log("Uploaded plugin certs") log("Installing flocker plugin") # loop each agent and get the plugin installed/running # clone the plugin and configure an upstart/systemd unit for it to run deferreds = [] for node in c.config["agent_nodes"]: public_ip = node["public"] private_ip = node["private"] log("Using %s => %s" % (public_ip, private_ip)) # the full api path to the control service controlservice = 'https://%s:4523/v1' % (control_ip, ) # perhaps the user has pre-compiled images with the plugin # downloaded and installed if not settings["SKIP_INSTALL_PLUGIN"]: if c.config["os"] == "ubuntu": log("Installing plugin for", public_ip, "...") d = c.runSSHAsync( 
public_ip, "apt-get install -y --force-yes clusterhq-flocker-docker-plugin && " "service flocker-docker-plugin restart") d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) elif c.config["os"] == "centos": log("Installing plugin for", public_ip, "...") d = c.runSSHAsync( public_ip, "yum install -y clusterhq-flocker-docker-plugin && " "systemctl enable flocker-docker-plugin && " "systemctl start flocker-docker-plugin") d.addCallback(report_completion, public_ip=public_ip) deferreds.append(d) else: log("Skipping installing plugin: %r" % (settings["SKIP_INSTALL_PLUGIN"], )) yield gatherResults(deferreds) for node in c.config["agent_nodes"]: public_ip = node["public"] private_ip = node["private"] # ensure that the /run/docker/plugins # folder exists log("Creating the /run/docker/plugins folder") c.runSSHRaw(public_ip, "mkdir -p /run/docker/plugins") if c.config["os"] == "coreos": log("Starting flocker-docker-plugin as docker container on CoreOS on %s" % (public_ip, )) c.runSSH( public_ip, """echo /root/bin/docker run --restart=always -d --net=host --privileged \\ -e FLOCKER_CONTROL_SERVICE_BASE_URL=%s \\ -e MY_NETWORK_IDENTITY=%s \\ -v /etc/flocker:/etc/flocker \\ -v /run/docker:/run/docker \\ --name=flocker-docker-plugin \\ clusterhq/flocker-docker-plugin""" % ( controlservice, private_ip, )) log("Done!")
#!/usr/bin/env python # This script will generate some certificates using flocker-ca and upload them # to the servers specified in a cluster.yml import sys import yaml # Usage: deploy.py cluster.yml from utils import Configurator if __name__ == "__main__": c = Configurator(configFile=sys.argv[1]) c.run("flocker-ca initialize %s" % (c.config["cluster_name"],)) print "Initialized cluster CA." c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"],)) print "Created control cert." node_mapping = {} for node in c.config["agent_nodes"]: public_ip = node["public"] # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split(" ")[1] node_mapping[public_ip] = uuid print "Generated", uuid, "for", public_ip for user in c.config["users"]: c.run("flocker-ca create-api-certificate %s" % (user,)) print "Created user key for", user print "Uploading keys to respective nodes:" # Copy cluster cert, and control cert and key to control node. c.runSSHRaw(c.config["control_node"], "mkdir -p /etc/flocker")
def main(reactor, *args):
    """Generate flocker certificates, upload them to every node, then start
    the flocker agents and the control service over SSH.

    Twisted generator-based coroutine (run under a reactor): it yields
    gatherResults on lists of Deferreds so the per-node scp/SSH steps run
    concurrently.  Reads the cluster layout (cluster_name, control_node,
    agent_nodes, users, os, agent_config) from the cluster.yml named in
    sys.argv[1]; ``*args`` itself is unused.

    Side effects: writes agent.yml and node_mapping.yml to the current
    directory, plus the certificates flocker-ca creates.
    """
    c = Configurator(configFile=sys.argv[1])

    # --- local, sequential certificate generation ---
    c.run("flocker-ca initialize %s" % (c.config["cluster_name"],))
    log("Initialized cluster CA.")
    c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"],))
    log("Created control cert.")

    # Map each agent node's public IP to the UUID of its node certificate.
    node_mapping = {}
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # flocker-ca prints e.g.
        # "Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ..."
        # -- extract the UUID token from that message.
        uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split(" ")[1]
        node_mapping[public_ip] = uuid
        log("Generated", uuid, "for", public_ip)
    for user in c.config["users"]:
        c.run("flocker-ca create-api-certificate %s" % (user,))
        log("Created user key for", user)

    # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the
    # nodes.
    f = open("agent.yml", "w")
    yaml.dump(c.config["agent_config"], f)
    f.close()

    # Record the node mapping for later.
    f = open("node_mapping.yml", "w")
    yaml.dump(node_mapping, f)
    f.close()

    log("Making /etc/flocker directory on all nodes")
    deferreds = []
    for node, uuid in node_mapping.iteritems():
        deferreds.append(c.runSSHAsync(node, "mkdir -p /etc/flocker"))
    deferreds.append(c.runSSHAsync(c.config["control_node"],
                                   "mkdir -p /etc/flocker"))
    yield gatherResults(deferreds)

    log("Uploading keys to respective nodes:")
    deferreds = []
    # Copy cluster cert, and control cert and key to control node.
    d = c.scp("cluster.crt", c.config["control_node"],
              "/etc/flocker/cluster.crt", async=True)
    d.addCallback(report_completion, public_ip=c.config["control_node"],
                  message=" * Uploaded cluster cert to")
    deferreds.append(d)
    for ext in ("crt", "key"):
        d = c.scp("control-%s.%s" % (c.config["control_node"], ext),
                  c.config["control_node"],
                  "/etc/flocker/control-service.%s" % (ext,), async=True)
        d.addCallback(report_completion, public_ip=c.config["control_node"],
                      message=" * Uploaded control %s to" % (ext,))
        deferreds.append(d)
    log(" * Uploaded control cert & key to control node.")

    # Copy cluster cert, and agent cert and key to agent nodes.
    # NOTE(review): deferreds is re-bound here without a prior
    # yield gatherResults(deferreds), so the control-node uploads queued just
    # above are never awaited and any scp failure there is silently dropped
    # -- confirm whether that is intentional.
    deferreds = []
    for node, uuid in node_mapping.iteritems():
        d = c.scp("cluster.crt", node, "/etc/flocker/cluster.crt", async=True)
        d.addCallback(report_completion, public_ip=node,
                      message=" * Uploaded cluster cert to")
        deferreds.append(d)
        d = c.scp("agent.yml", node, "/etc/flocker/agent.yml", async=True)
        d.addCallback(report_completion, public_ip=node,
                      message=" * Uploaded agent.yml to")
        deferreds.append(d)
        for ext in ("crt", "key"):
            d = c.scp("%s.%s" % (uuid, ext), node,
                      "/etc/flocker/node.%s" % (ext,), async=True)
            d.addCallback(report_completion, public_ip=node,
                          message=" * Uploaded node %s to" % (ext,))
            deferreds.append(d)
    yield gatherResults(deferreds)

    # Start the agents on every node, concurrently.  Commands depend on the
    # target OS; on CoreOS the agents run as docker containers.
    deferreds = []
    for node, uuid in node_mapping.iteritems():
        if c.config["os"] == "ubuntu":
            d = c.runSSHAsync(node, """echo "starting flocker-container-agent..."
service flocker-container-agent start
echo "starting flocker-dataset-agent..."
service flocker-dataset-agent start
""")
        elif c.config["os"] == "centos":
            d = c.runSSHAsync(node, """if selinuxenabled; then setenforce 0; fi
systemctl enable docker.service
systemctl start docker.service
""")
        elif c.config["os"] == "coreos":
            d = c.runSSHAsync(node, """echo
echo > /tmp/flocker-command-log
docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\
-v /etc/flocker:/etc/flocker \\
-v /var/run/docker.sock:/var/run/docker.sock \\
--name=flocker-container-agent \\
clusterhq/flocker-container-agent
docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\
-e DEBUG=1 \\
-v /tmp/flocker-command-log:/tmp/flocker-command-log \\
-v /flocker:/flocker -v /:/host -v /etc/flocker:/etc/flocker \\
-v /dev:/dev \\
--name=flocker-dataset-agent \\
clusterhq/flocker-dataset-agent
""" % dict(early_docker_prefix=EARLY_DOCKER_PREFIX))
        deferreds.append(d)

    # Start the control service on the control node and open its ports
    # (4523 = control API, 4524 = control agent).
    if c.config["os"] == "ubuntu":
        d = c.runSSHAsync(c.config["control_node"], """cat <<EOF > /etc/init/flocker-control.override
start on runlevel [2345]
stop on runlevel [016]
EOF
echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services
echo 'flocker-control-agent 4524/tcp # Flocker Control Agent port' >> /etc/services
service flocker-control restart
ufw allow flocker-control-api
ufw allow flocker-control-agent
""")
    elif c.config["os"] == "centos":
        d = c.runSSHAsync(c.config["control_node"], """systemctl enable flocker-control
systemctl start flocker-control
firewall-cmd --permanent --add-service flocker-control-api
firewall-cmd --add-service flocker-control-api
firewall-cmd --permanent --add-service flocker-control-agent
firewall-cmd --add-service flocker-control-agent
""")
    elif c.config["os"] == "coreos":
        d = c.runSSHAsync(c.config["control_node"], """echo
docker %(early_docker_prefix)s run --name=flocker-control-volume -v /var/lib/flocker clusterhq/flocker-control-service true
docker %(early_docker_prefix)s run --restart=always -d --net=host -v /etc/flocker:/etc/flocker --volumes-from=flocker-control-volume --name=flocker-control-service clusterhq/flocker-control-service""" % dict(early_docker_prefix=EARLY_DOCKER_PREFIX))
    deferreds.append(d)
    yield gatherResults(deferreds)

    if c.config["os"] == "ubuntu":
        # XXX INSECURE, UNSUPPORTED, UNDOCUMENTED EXPERIMENTAL OPTION
        # Usage: `uft-flocker-config --ubuntu-aws --swarm`, I guess
        if len(sys.argv) > 2 and sys.argv[2] == "--swarm":
            # Install swarm
            deferreds = []
            # Create the swarm token synchronously on the control node.
            clusterid = c.runSSH(c.config["control_node"],
                                 """ docker run swarm create""").strip()
            log("Created Swarm ID")
            for node in c.config["agent_nodes"]:
                # Re-launch the docker daemon listening on tcp/2375.
                # NOTE(review): this Deferred is overwritten below before it
                # is added to deferreds, so the daemon restart is effectively
                # fire-and-forget -- confirm this is intended.
                d = c.runSSHAsync(node['public'], """ service docker stop
docker daemon -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 >> /tmp/dockerlogs 2>&1 &
""")
                # Let daemon come up
                # NOTE(review): blocking sleep on the reactor thread.
                time.sleep(3)
                d = c.runSSHAsync(node['public'], """ docker run -d swarm join --addr=%s:2375 token://%s
""" % (node['private'], clusterid))
                log("Started Swarm Agent for %s" % node['public'])
                deferreds.append(d)
            d = c.runSSHAsync(c.config["control_node"], """ docker run -d -p 2357:2375 swarm manage token://%s
""" % clusterid)
            log("Starting Swarm Master")
            deferreds.append(d)
            yield gatherResults(deferreds)
            log("Swarm Master is at tcp://%s:2357" % c.config["control_node"])
def main(): c = Configurator(configFile=sys.argv[1]) c.run("flocker-ca initialize %s" % (c.config["cluster_name"],)) print "Initialized cluster CA." c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"],)) print "Created control cert." node_mapping = {} for node in c.config["agent_nodes"]: public_ip = node["public"] # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split(" ")[1] node_mapping[public_ip] = uuid print "Generated", uuid, "for", public_ip for user in c.config["users"]: c.run("flocker-ca create-api-certificate %s" % (user,)) print "Created user key for", user print "Uploading keys to respective nodes:" # Copy cluster cert, and control cert and key to control node. c.runSSHRaw(c.config["control_node"], "mkdir -p /etc/flocker") c.scp("cluster.crt", c.config["control_node"], "/etc/flocker/cluster.crt") print " * Uploaded cluster cert to control node." for ext in ("crt", "key"): c.scp("control-%s.%s" % (c.config["control_node"], ext), c.config["control_node"], "/etc/flocker/control-service.%s" % (ext,)) print " * Uploaded control cert & key to control node." # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the # nodes. f = open("agent.yml", "w") agent_config = yaml.dump(c.config["agent_config"], f) f.close() # Record the node mapping for later. f = open("node_mapping.yml", "w") agent_config = yaml.dump(node_mapping, f) f.close() # Copy cluster cert, and agent cert and key to agent nodes. for node, uuid in node_mapping.iteritems(): c.runSSHRaw(node, "mkdir -p /etc/flocker") c.scp("cluster.crt", node, "/etc/flocker/cluster.crt") c.scp("agent.yml", node, "/etc/flocker/agent.yml") print " * Uploaded cluster cert to %s." % (node,) for ext in ("crt", "key"): c.scp("%s.%s" % (uuid, ext), node, "/etc/flocker/node.%s" % (ext,)) print " * Uploaded node cert and key to %s." 
% (node,) for node, uuid in node_mapping.iteritems(): if c.config["os"] == "ubuntu": c.runSSH(node, """apt-get -y install apt-transport-https software-properties-common service flocker-container-agent restart service flocker-dataset-agent restart """) elif c.config["os"] == "centos": c.runSSH(node, """if selinuxenabled; then setenforce 0; fi systemctl enable docker.service systemctl start docker.service """) if c.config["os"] == "ubuntu": c.runSSH(c.config["control_node"], """cat <<EOF > /etc/init/flocker-control.override start on runlevel [2345] stop on runlevel [016] EOF echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services echo 'flocker-control-agent 4524/tcp # Flocker Control Agent port' >> /etc/services service flocker-control restart ufw allow flocker-control-api ufw allow flocker-control-agent """) elif c.config["os"] == "centos": c.runSSH(c.config["control_node"], """systemctl enable flocker-control systemctl start flocker-control firewall-cmd --permanent --add-service flocker-control-api firewall-cmd --add-service flocker-control-api firewall-cmd --permanent --add-service flocker-control-agent firewall-cmd --add-service flocker-control-agent """) print "Configured and started control service, opened firewall." if c.config["users"]: print "\nYou should now be able to communicate with the control service, for example:\n" prefix = ("curl -s --cacert $PWD/cluster.crt --cert $PWD/%(user)s.crt " "--key $PWD/%(user)s.key" % dict(user=c.config["users"][0],)) url = "https://%(control_node)s:4523/v1" % dict(control_node=c.config["control_node"],) header = ' --header "Content-type: application/json"' print "This should give you a list of your nodes:" print prefix + " " + url + "/state/nodes | jq ." print "Try running tutorial.py cluster.yml for more..."
def main(reactor, configFile):
    """Install and start the flocker docker plugin on every agent node.

    Twisted generator-based coroutine (run under a reactor): per-node cert
    uploads and package installs are issued concurrently and awaited with
    gatherResults.  Steps:

      1. On CoreOS only, replace the stock docker binary with the one at
         settings['DOCKER_BINARY_URL'].
      2. Generate one plugin API certificate per agent node.
      3. Upload plugin.crt / plugin.key to /etc/flocker on each node.
      4. Install and start flocker-docker-plugin (apt/yum, or as a docker
         container on CoreOS).
    """
    c = Configurator(configFile=configFile)
    control_ip = c.config["control_node"]

    # download and replace the docker binary on each of the nodes
    for node in c.config["agent_nodes"]:
        if c.config["os"] != "coreos":
            # Non-CoreOS clusters keep their packaged docker; leave the whole
            # loop (the "os" setting is cluster-wide, not per-node).
            log("Skipping installing new docker binary because we're on",
                "ubuntu/centos, assuming we installed a sufficiently recent one",
                "already.")
            break
        # only install new docker binary on coreos. XXX TODO coreos > 801.0.0
        # doesn't need newer docker.
        # NOTE(review): the second disjunct is always False here (non-coreos
        # already broke above), so this reduces to SKIP_DOCKER_BINARY.
        if settings["SKIP_DOCKER_BINARY"] or c.config["os"] != "coreos":
            break
        public_ip = node["public"]
        log("Replacing docker binary on %s" % (public_ip,))

        # stop the docker service
        # NOTE(review): the ubuntu/centos branches below look unreachable --
        # the loop breaks earlier unless os == "coreos"; confirm before
        # relying on them.
        log("Stopping the docker service on %s" % (public_ip,))
        if c.config["os"] == "ubuntu":
            c.runSSHRaw(public_ip, "stop %s || true"
                        % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "centos":
            c.runSSHRaw(public_ip, "systemctl stop %s.service || true"
                        % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "coreos":
            c.runSSHRaw(public_ip, "systemctl stop docker.service || true")

        # download the latest docker binary
        if c.config["os"] == "coreos":
            log("Downloading the latest docker binary on %s - %s" \
                % (public_ip, settings['DOCKER_BINARY_URL'],))
            # Install into /root/bin and repoint the systemd unit there,
            # then rewrite dockerd's wrapper to exec the new binary.
            c.runSSHRaw(public_ip, "mkdir -p /root/bin")
            c.runSSHRaw(public_ip, "wget -qO /root/bin/docker %s"
                        % (settings['DOCKER_BINARY_URL'],))
            c.runSSHRaw(public_ip, "chmod +x /root/bin/docker")
            c.runSSHRaw(public_ip, "cp /usr/lib/coreos/dockerd /root/bin/dockerd")
            c.runSSHRaw(public_ip, "cp /usr/lib/systemd/system/docker.service /etc/systemd/system/")
            c.runSSHRaw(public_ip, "sed -i s@/usr/lib/coreos@/root/bin@g /etc/systemd/system/docker.service")
            c.runSSHRaw(public_ip, "sed -i \\'s@exec docker@exec /root/bin/docker@g\\' /root/bin/dockerd")
            c.runSSHRaw(public_ip, "systemctl daemon-reload")
        else:
            log("Downloading the latest docker binary on %s - %s" \
                % (public_ip, settings['DOCKER_BINARY_URL'],))
            c.runSSHRaw(public_ip, "wget -O /usr/bin/docker %s" %
                        (settings['DOCKER_BINARY_URL'],))

        # start the docker service
        log("Starting the docker service on %s" % (public_ip,))
        if c.config["os"] == "ubuntu":
            c.runSSHRaw(public_ip, "start %s" % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "centos":
            c.runSSHRaw(public_ip, "systemctl start %s.service"
                        % (settings['DOCKER_SERVICE_NAME'],))
        elif c.config["os"] == "coreos":
            c.runSSHRaw(public_ip, "systemctl start docker.service")

    log("Generating plugin certs")
    # generate and upload plugin.crt and plugin.key for each node
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # use the node IP to name the local files
        # so they do not overwrite each other
        c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,))
        log("Generated plugin certs for", public_ip)

    def report_completion(result, public_ip, message="Completed plugin install for"):
        # Pass-through Deferred callback: log progress, forward the result.
        log(message, public_ip)
        return result

    deferreds = []
    log("Uploading plugin certs...")
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        # upload the .crt and .key
        for ext in ("crt", "key"):
            d = c.scp("%s-plugin.%s" % (public_ip, ext,), public_ip,
                      "/etc/flocker/plugin.%s" % (ext,), async=True)
            d.addCallback(report_completion, public_ip=public_ip,
                          message=" * Uploaded plugin cert for")
            deferreds.append(d)
    yield gatherResults(deferreds)
    log("Uploaded plugin certs")

    log("Installing flocker plugin")
    # loop each agent and get the plugin installed/running
    # clone the plugin and configure an upstart/systemd unit for it to run
    deferreds = []
    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        log("Using %s => %s" % (public_ip, private_ip))
        # the full api path to the control service
        controlservice = 'https://%s:4523/v1' % (control_ip,)
        # perhaps the user has pre-compiled images with the plugin
        # downloaded and installed
        if not settings["SKIP_INSTALL_PLUGIN"]:
            if c.config["os"] == "ubuntu":
                log("Installing plugin for", public_ip, "...")
                d = c.runSSHAsync(public_ip,
                    "apt-get install -y --force-yes clusterhq-flocker-docker-plugin && "
                    "service flocker-docker-plugin restart")
                d.addCallback(report_completion, public_ip=public_ip)
                deferreds.append(d)
            elif c.config["os"] == "centos":
                log("Installing plugin for", public_ip, "...")
                d = c.runSSHAsync(public_ip,
                    "yum install -y clusterhq-flocker-docker-plugin && "
                    "systemctl enable flocker-docker-plugin && "
                    "systemctl start flocker-docker-plugin")
                d.addCallback(report_completion, public_ip=public_ip)
                deferreds.append(d)
        else:
            log("Skipping installing plugin: %r"
                % (settings["SKIP_INSTALL_PLUGIN"],))
    yield gatherResults(deferreds)

    for node in c.config["agent_nodes"]:
        public_ip = node["public"]
        private_ip = node["private"]
        # ensure that the /run/docker/plugins
        # folder exists
        log("Creating the /run/docker/plugins folder")
        c.runSSHRaw(public_ip, "mkdir -p /run/docker/plugins")
        if c.config["os"] == "coreos":
            log("Starting flocker-docker-plugin as docker container on CoreOS on %s" % (public_ip,))
            c.runSSH(public_ip, """echo
/root/bin/docker run --restart=always -d --net=host --privileged \\
-e FLOCKER_CONTROL_SERVICE_BASE_URL=%s \\
-e MY_NETWORK_IDENTITY=%s \\
-v /etc/flocker:/etc/flocker \\
-v /run/docker:/run/docker \\
--name=flocker-docker-plugin \\
clusterhq/flocker-docker-plugin""" % (controlservice, private_ip,))
    log("Done!")