class Installer(object): def __init__(self, cli=None): self.network = DHCPClient() self.fabric = DockerFabric(bootstrap=True) self.mongo = MongoInitializer() self.mongo.fabric = self.fabric self.mongo.template_dir = DEFAULT_TEMPLATE_DIR + '/mongo/' self.cli = cli def get_ferry_account(self): """ Read in the remote Ferry DB account information. Used for registering applications. """ with open(ferry.install.DEFAULT_DOCKER_LOGIN, 'r') as f: args = yaml.load(f) args = args['ferry'] if all(k in args for k in ("user","key","server")): return args['user'], args['key'], args['server'] return None, None, None def create_signature(self, request, key): """ Generated a signed request. """ return hmac.new(key, request, hashlib.sha256).hexdigest() def store_app(self, app, ext, content): """ Store the application in the global directory. """ try: # We may need to create the parent directory # if this is the first time an application from this user # is downloaded. file_name = os.path.join(DEFAULT_FERRY_APPS, app + ext) os.makedirs(os.path.dirname(file_name)) with open(file_name, "w") as f: f.write(content) return file_name except IOError as e: logging.error(e) return None except OSError as os: logging.error(os) return None def _clean_rules(self): self.network.clean_rules() def _reset_ssh_key(self, root): """ Reset the temporary ssh key. This function should only be called from the server. """ keydir, tmp = self.cli._read_key_dir(root=root) # Only reset temporary keys. User-defined key directories # shouldn't be touched. if keydir != DEFAULT_KEY_DIR and tmp == "tmp": shutil.rmtree(keydir) # Mark that we are using the default package keys if root: global GLOBAL_ROOT_DIR GLOBAL_ROOT_DIR = 'tmp://' + DEFAULT_KEY_DIR _touch_file(_get_key_dir(root=True, server=True), GLOBAL_ROOT_DIR, root=True) else: global GLOBAL_KEY_DIR GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR _touch_file(_get_key_dir(root=False, server=True), GLOBAL_KEY_DIR, root=True) def _process_ssh_key(self, options, root=False): """ Initialize the ssh key location. This method is used when starting the ferry server. """ if root: global GLOBAL_ROOT_DIR if options and '-k' in options: GLOBAL_ROOT_DIR = 'user://' + self.fetch_image_keys(options['-k'][0]) else: GLOBAL_ROOT_DIR = 'tmp://' + DEFAULT_KEY_DIR logging.warning("using key directory " + GLOBAL_ROOT_DIR) _touch_file(_get_key_dir(root=True, server=True), GLOBAL_ROOT_DIR, root=True) else: global GLOBAL_KEY_DIR if options and '-k' in options: GLOBAL_KEY_DIR = 'user://' + self.fetch_image_keys(options['-k'][0]) else: GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR logging.warning("using key directory " + GLOBAL_KEY_DIR) _touch_file(_get_key_dir(root=False, server=False), GLOBAL_KEY_DIR, root=False) def install(self, args, options): # Check if the host is actually 64-bit. If not raise a warning and quit. if not _supported_arch(): return 'Your architecture appears to be 32-bit.\nOnly 64-bit architectures are supported at the moment.' if not _supported_python(): return 'You appear to be running Python3.\nOnly Python2 is supported at the moment.' if not _supported_lxc(): return 'You appear to be running an older version of LXC.\nOnly versions > 0.7.5 are supported.' if not _has_ferry_user(): return 'You do not appear to have the \'docker\' group configured. Please create the \'docker\' group and try again.' # Create the various directories. 
try: if not os.path.isdir(DOCKER_DIR): os.makedirs(DOCKER_DIR) self._change_permission(DOCKER_DIR) except OSError as e: logging.error("Could not install Ferry.\n") logging.error(e.strerror) sys.exit(1) # Start the Ferry docker daemon. If it does not successfully # start, print out a msg. logging.warning("all prerequisites met...") start, msg = self._start_docker_daemon(options) if not start: logging.error('ferry docker daemon not started') return msg # Normally we don't want to build the Dockerfiles, # but sometimes we may for testing, etc. build = False if options and '-b' in options: build = True if options and '-u' in options: if len(options['-u']) > 0 and options['-u'][0] != True: logging.warning("performing select rebuild (%s)" % str(options['-u'])) self.build_from_list(options['-u'], DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build, recurse=False) else: logging.warning("performing forced rebuild") self.build_from_dir(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) else: # We want to be selective about which images # to rebuild. Useful if one image breaks, etc. to_build = self.check_images(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO) if len(to_build) > 0: logging.warning("performing select rebuild (%s)" % str(to_build)) self.build_from_list(to_build, DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) # Check that all the images were built. not_installed = self._check_all_images() if len(not_installed) == 0: return 'installed ferry' else: logging.error('images not built: ' + str(not_installed)) return 'Some images were not installed. Please type \'ferry install\' again.' def _check_all_images(self): not_installed = [] images = ['mongodb', 'ferry-base', 'hadoop-base', 'hadoop', 'hadoop-client', 'hive-metastore', 'gluster', 'openmpi', 'openmpi-client', 'cassandra', 'cassandra-client', 'titan', 'spark'] for i in images: if not self._check_image_installed("%s/%s" % (DEFAULT_DOCKER_REPO, i)): not_installed.append(i) return not_installed def _check_and_pull_image(self, image_name): if not self._check_image_installed(image_name): self._pull_image(image_name, on_client=False) return self._check_image_installed(image_name) def _check_image_installed(self, image_name): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return False else: return True def _transfer_config(self, config_dirs): """ Transfer the configuration to the containers. """ for c in config_dirs: container = c[0] from_dir = c[1] to_dir = c[2] self.fabric.copy([container], from_dir, to_dir) def start_web(self, options=None, clean=False): start, msg = self._start_docker_daemon(options) if not clean and not start: # We are trying to start the web services but the Docker # daemon won't start. If we're cleaning, it's not a big deal. logging.error(msg) sys.exit(1) # Check if the user wants to use a specific key directory. self._process_ssh_key(options=options, root=True) # Check if the user-application directory exists. # If not, create it. try: if not os.path.isdir(DEFAULT_FERRY_APPS): os.makedirs(DEFAULT_FERRY_APPS) self._change_permission(DEFAULT_FERRY_APPS) except OSError as e: logging.error("Could not create application directory.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo directory exists yet. If not # go ahead and create it. 
try: if not os.path.isdir(DEFAULT_MONGO_DB): os.makedirs(DEFAULT_MONGO_DB) self._change_permission(DEFAULT_MONGO_DB) if not os.path.isdir(DEFAULT_MONGO_LOG): os.makedirs(DEFAULT_MONGO_LOG) self._change_permission(DEFAULT_MONGO_LOG) except OSError as e: logging.error("Could not start ferry servers.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo image is built. if not self._check_image_installed('%s/mongodb' % DEFAULT_DOCKER_REPO): logging.error("Could not start ferry servers.\n") logging.error("MongoDB images not found. Try executing 'ferry install'.") sys.exit(1) # Check if there are any other Mongo instances runnig. self._clean_web() # Copy over the ssh keys. self.cli._check_ssh_key(root=True, server=True) # Start the Mongo server. Create a new configuration and # manually start the container. keydir, _ = self.cli._read_key_dir(root=True) volumes = { DEFAULT_MONGO_LOG : self.mongo.container_log_dir, DEFAULT_MONGO_DB : self.mongo.container_data_dir } mongoplan = {'image':'ferry/mongodb', 'type':'ferry/mongodb', 'volumes':volumes, 'volume_user':DEFAULT_FERRY_OWNER, 'keys': { '/service/keys' : keydir }, 'ports':[], 'exposed':self.mongo.get_exposed_ports(1), 'hostname':'ferrydb', 'netenable':True, 'args': 'trust' } mongoconf = self.mongo.generate(1) mongoconf.uuid = 'fdb-' + str(uuid.uuid4()).split('-')[0] mongobox = self.fabric.alloc([mongoplan])[0] if not mongobox: logging.error("Could not start MongoDB image") sys.exit(1) ip = mongobox.internal_ip _touch_file('/tmp/mongodb.ip', ip, root=True) # Once the container is started, we'll need to copy over the # configuration files, and then manually send the 'start' command. s = { 'container':mongobox, 'data_dev':'eth0', 'data_ip':mongobox.internal_ip, 'manage_ip':mongobox.internal_ip, 'host_name':mongobox.host_name, 'type':mongobox.service_type, 'args':mongobox.args } config_dirs, entry_point = self.mongo.apply(mongoconf, [s]) self._transfer_config(config_dirs) self.mongo.start_service([mongobox], entry_point, self.fabric) # Set the MongoDB env. variable. my_env = os.environ.copy() my_env['MONGODB'] = ip # Sleep a little while to let Mongo start receiving. time.sleep(2) # Start the DHCP server logging.warning("starting dhcp server") cmd = 'gunicorn -t 3600 -b 127.0.0.1:5000 -w 1 ferry.ip.dhcp:app &' Popen(cmd, stdout=PIPE, shell=True, env=my_env) time.sleep(2) # Reserve the Mongo IP. self.network.reserve_ip(ip) # Start the Ferry HTTP servers logging.warning("starting http servers on port 4000 and mongo %s" % ip) cmd = 'gunicorn -e FERRY_HOME=%s -t 3600 -w 3 -b 127.0.0.1:4000 ferry.http.httpapi:app &' % FERRY_HOME Popen(cmd, stdout=PIPE, shell=True, env=my_env) def stop_web(self): # Shutdown the mongo instance if os.path.exists('/tmp/mongodb.ip'): f = open('/tmp/mongodb.ip', 'r') ip = f.read().strip() f.close() keydir, tmp = self.cli._read_key_dir(root=True) key = keydir + "/id_rsa" cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode stop' % (key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode halt' % (key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) os.remove('/tmp/mongodb.ip') # Kill all the gunicorn instances. 
logging.warning("stopping http servers") cmd = 'ps -eaf | grep httpapi | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) cmd = 'ps -eaf | grep ferry.ip.dhcp | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) def _clean_web(self): docker = DOCKER_CMD + ' -H=' + DOCKER_SOCK cmd = docker + ' ps | grep ferry/mongodb | awk \'{print $1}\' | xargs ' + docker + ' stop ' logging.warning("cleaning previous mongo resources") logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) child.stdout.read() child.stderr.read() def _copytree(self, src, dst): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d) else: shutil.copy2(s, d) def _change_permission(self, location): uid, gid = _get_ferry_user() os.chown(location, uid, gid) if os.path.isdir(location): os.chmod(location, 0774) for entry in os.listdir(location): self._change_permission(os.path.join(location, entry)) else: # Check if this file has a file extension. If not, # then assume it's a binary. s = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH if len(location.split(".")) == 1: s |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH os.chmod(location, s) """ Ask for the key directory. """ def fetch_image_keys(self, key_dir=None): if key_dir and os.path.exists(key_dir): return key_dir else: return DEFAULT_KEY_DIR """ Check if the dockerfiles are already built. """ def check_images(self, image_dir, repo): if self._docker_running(): build_images = [] for f in os.listdir(image_dir): dockerfile = image_dir + '/' + f + '/Dockerfile' image_names = self._check_dockerfile(dockerfile, repo) if len(image_names) > 0: build_images += image_names return build_images else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_list(self, to_build, image_dir, repo, build=False, recurse=True): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): logging.warning("transforming dockerfile") self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = '/tmp/dockerfiles/' + f + '/Dockerfile' images = self._get_image(dockerfile) intersection = [i for i in images if i in to_build] if len(intersection) > 0: image = images.pop(0) logging.warning("building image " + image) self._build_image(image, dockerfile, repo, built_images, recurse=recurse, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. # shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_dir(self, image_dir, repo, build=False): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = "/tmp/dockerfiles/" + f + "/Dockerfile" images = self._get_image(dockerfile) image = images.pop(0) self._build_image(image, dockerfile, repo, built_images, recurse=True, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. 
# shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") def _docker_running(self): return os.path.exists('/var/run/ferry.sock') def _check_dockerfile(self, dockerfile, repo): not_installed = [] images = self._get_image(dockerfile) for image in images: qualified = DEFAULT_DOCKER_REPO + '/' + image cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect ' + qualified + ' 2> /dev/null' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': not_installed.append(image) return not_installed def _transform_dockerfile(self, image_dir, f, repo): if not os.path.exists("/tmp/dockerfiles/" + f): shutil.copytree(image_dir + '/' + f, '/tmp/dockerfiles/' + f) out_file = "/tmp/dockerfiles/" + f + "/Dockerfile" out = open(out_file, "w+") uid, gid = _get_ferry_user() download_url = _get_download_url() changes = { "USER" : repo, "DOWNLOAD_URL" : download_url, "DOCKER" : gid } for line in open(image_dir + '/' + f + '/Dockerfile', "r"): s = Template(line).substitute(changes) out.write(s) out.close() def _build_image(self, image, f, repo, built_images, recurse=False, build=False): base = self._get_base(f) if recurse and base != "ubuntu:14.04": image_dir = os.path.dirname(os.path.dirname(f)) dockerfile = image_dir + '/' + base + '/Dockerfile' self._build_image(base, dockerfile, repo, built_images, recurse, build) if not image in built_images: if base == "ubuntu:14.04": self._pull_image(base) built_images[image] = True self._compile_image(image, repo, os.path.dirname(f), build) def _get_image(self, dockerfile): names = [] for l in open(dockerfile, 'r'): if l.strip() != '': s = l.split() if len(s) > 0: if s[0].upper() == 'NAME': names.append(s[1].strip()) return names def _get_base(self, dockerfile): base = None for l in open(dockerfile, 'r'): s = l.split() if len(s) > 0: if s[0].upper() == 'FROM': base = s[1].strip().split("/") return base[-1] return base def _continuous_print(self, process, on_client=True): while True: try: out = process.stdout.read(15) if out == '': break else: if on_client: sys.stdout.write(out) sys.stdout.flush() else: logging.warning("downloading image...") except IOError as e: logging.warning(e) try: errmsg = process.stderr.readline() if errmsg and errmsg != '': logging.warning(errmsg) else: logging.warning("downloaded image!") except IOError: pass def _pull_image(self, image, tag=None, on_client=True): if not tag: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s' % image else: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s:%s' % (image, tag) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child, on_client=on_client) # Now tag the image with the 'latest' tag. if tag and tag != 'latest': cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s:%s %s:%s' % (image, tag, image, 'latest') logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) def _compile_image(self, image, repo, image_dir, build=False): # Now build the image. if build: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' build --rm=true -t' + ' %s/%s %s' % (repo, image, image_dir) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child) # Now tag the image. cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s %s/%s:%s' % (repo, image, repo, image, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) else: # Just pull the image from the public repo. 
image_name = "%s/%s" % (repo, image) self._pull_image(image_name, tag=ferry.__version__) def _tag_images(self, image, repo, alternatives): for a in alternatives: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:%s %s/%s:%s' % (repo, image, ferry.__version__, repo, a, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:latest %s/%s:latest' % (repo, image, repo, a) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) def _clean_images(self): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' | grep none | awk \'{print $1}\' | xargs ' + DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' rmi' Popen(cmd, stdout=PIPE, shell=True) def _is_parent_dir(self, pdir, cdir): pdirs = pdir.split("/") cdirs = cdir.split("/") # Parent directory can never be longer than # the child directory. if len(pdirs) > len(cdirs): return False for i in range(0, len(pdirs)): # The parent directory shoudl always match # the child directory. Ignore the start and end # blank spaces caused by "split". if pdirs[i] != "" and pdirs[i] != cdirs[i]: return False return True def _is_running_btrfs(self): logging.warning("checking for btrfs") cmd = 'cat /etc/mtab | grep btrfs | awk \'{print $2}\'' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() != "": dirs = output.strip().split("\n") for d in dirs: if self._is_parent_dir(d, DOCKER_DIR): return True return False def _start_docker_daemon(self, options=None): # Check if the docker daemon is already running try: if not self._docker_running(): bflag = '' if self._is_running_btrfs(): logging.warning("using btrfs backend") bflag = ' -s btrfs' # Explicitly supply the DNS. if options and '-n' in options: logging.warning("using custom dns") dflag = '' for d in options['-n']: dflag += ' --dns %s' % d else: logging.warning("using public dns") dflag = ' --dns 8.8.8.8 --dns 8.8.4.4' # We need to fix this so that ICC is set to false. icc = ' --icc=true' cmd = 'nohup ' + DOCKER_CMD + ' -d' + ' -H=' + DOCKER_SOCK + ' -g=' + DOCKER_DIR + ' -p=' + DOCKER_PID + dflag + bflag + icc + ' 1>%s 2>&1 &' % DEFAULT_DOCKER_LOG logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) # Wait a second to let the docker daemon do its thing. time.sleep(2) return True, "Ferry daemon running on /var/run/ferry.sock" else: return False, "Ferry appears to be already running. If this is an error, please type \'ferry clean\' and try again." except OSError as e: logging.error("could not start docker daemon.\n") logging.error(e.strerror) sys.exit(1) def _stop_docker_daemon(self, force=False): if force or self._docker_running(): logging.warning("stopping docker daemon") cmd = 'pkill -f docker-ferry' Popen(cmd, stdout=PIPE, shell=True) try: os.remove('/var/run/ferry.sock') except OSError: pass def _get_gateway(self): cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'" netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() mask = map(int, netmask.split(".")) cidr = 1 if mask[3] == 0: cidr = 8 if mask[2] == 0: cidr *= 2 return "%s/%d" % (gw, 32 - cidr)
class LocalFabric(object):
    def __init__(self, bootstrap=False):
        self.name = "local"
        self.repo = 'public'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.docker_user = self.cli.docker_user
        self.inspector = DockerInspector(self.cli)
        self.bootstrap = bootstrap

        # The system returns information regarding
        # the instance types.
        self.system = System()

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network.
        if not bootstrap:
            self.network = DHCPClient(ferry.install._get_gateway())

    def _get_host(self):
        cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

    def get_data_dir(self):
        if 'FERRY_SCRATCH' in os.environ:
            scratch_dir = os.environ['FERRY_SCRATCH']
        else:
            scratch_dir = os.path.join(ferry.install._get_ferry_dir(server=True), 'scratch')

        if not os.path.isdir(scratch_dir):
            os.makedirs(scratch_dir)
        return scratch_dir

    def version(self):
        """
        Fetch the current docker version.
        """
        return self.cli.version()

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker.
        """
        return self.cli.get_fs_type()

    def quit(self):
        """
        Quit the local fabric.
        """
        logging.info("quitting local fabric")

    def restart(self, cluster_uuid, service_uuid, containers):
        """
        Restart the stopped containers.
        """
        new_containers = []
        for c in containers:
            container = self.cli.start(image=c.image,
                                       container=c.container,
                                       service_type=c.service_type,
                                       keydir=c.keydir,
                                       keyname=c.keyname,
                                       privatekey=c.privatekey,
                                       volumes=c.volumes,
                                       args=c.args,
                                       inspector=self.inspector)
            container.default_user = self.docker_user
            new_containers.append(container)

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)
        return new_containers

    def alloc(self, cluster_uuid, service_uuid, container_info, ctype):
        """
        Allocate several instances.
        """
        containers = []
        mounts = {}
        for c in container_info:
            # Get a new IP address for this container and construct
            # a default command.
            gw = ferry.install._get_gateway().split("/")[0]

            # Check if we should use the manual LXC option.
            if not 'netenable' in c:
                ip = self.network.assign_ip(c)
                lxc_opts = ["lxc.network.type = veth",
                            "lxc.network.ipv4 = %s/24" % ip,
                            "lxc.network.ipv4.gateway = %s" % gw,
                            "lxc.network.link = ferry0",
                            "lxc.network.name = eth0",
                            "lxc.network.flags = up"]

                # Check if we need to forward any ports.
                host_map = {}
                for p in c['ports']:
                    p = str(p)
                    s = p.split(":")
                    if len(s) > 1:
                        host = s[0]
                        dest = s[1]
                    else:
                        host = self.network.random_port()
                        dest = s[0]
                    host_map[dest] = [{'HostIp': '0.0.0.0',
                                       'HostPort': host}]
                    self.network.forward_rule('0.0.0.0/0', host, ip, dest)
                host_map_keys = host_map.keys()
            else:
                lxc_opts = None
                host_map = None
                host_map_keys = []

            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            if not 'default_cmd' in c:
                c['default_cmd'] = "/service/sbin/startnode init"
            container = self.cli.run(service_type=c['type'],
                                     image=c['image'],
                                     volumes=c['volumes'],
                                     keydir=c['keydir'],
                                     keyname=c['keyname'],
                                     privatekey=c['privatekey'],
                                     open_ports=host_map_keys,
                                     host_map=host_map,
                                     expose_group=c['exposed'],
                                     hostname=c['hostname'],
                                     default_cmd=c['default_cmd'],
                                     args=c['args'],
                                     lxc_opts=lxc_opts,
                                     inspector=self.inspector,
                                     background=False)
            if container:
                container.default_user = self.docker_user
                containers.append(container)

                if not 'netenable' in c:
                    container.internal_ip = ip
                    container.external_ip = ip
                    self.network.set_owner(ip, container.container)

                if 'name' in c:
                    container.name = c['name']

                if 'volume_user' in c:
                    mounts[container] = {'user': c['volume_user'],
                                         'vols': c['volumes'].items()}

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(3)

        # Check if we need to set the file permissions
        # for the mounted volumes.
        for c, i in mounts.items():
            for _, v in i['vols']:
                self.cmd([c], 'chown -R %s %s' % (i['user'], v))

        return containers

    def stop(self, cluster_uuid, service_uuid, containers):
        """
        Forceably stop the running containers
        """
        for c in containers:
            if type(c) is dict:
                self.cli.stop(c['container'])
            else:
                self.cli.stop(c.container)

    def remove(self, cluster_uuid, service_uuid, containers):
        """
        Remove the running instances
        """
        for c in containers:
            for p in c.ports.keys():
                self.network.delete_rule(c.internal_ip, p)
            self.network.free_ip(c.internal_ip)
            self.cli.remove(c.container)

    def snapshot(self, containers, cluster_uuid, num_snapshots):
        """
        Save/commit the running instances
        """
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image,
                                                      cluster_uuid,
                                                      c.host_name,
                                                      num_snapshots)
            snapshots.append({'image': snapshot_name,
                              'base': c.image,
                              'type': c.service_type,
                              'name': c.name,
                              'args': c.args,
                              'ports': c.ports})
            self.cli.commit(c, snapshot_name)
        return snapshots

    def push(self, image, registry=None):
        """
        Push an image to a remote registry.
        """
        return self.cli.push(image, registry)

    def pull(self, image):
        """
        Pull a remote image to the local registry.
        """
        return self.cli.pull(image)

    def halt(self, cluster_uuid, service_uuid, containers):
        """
        Safe stop the containers.
        """
        cmd = '/service/sbin/startnode halt'
        for c in containers:
            self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.privatekey, c.internal_ip, from_dir, to_dir, c.default_user)

    def copy_raw(self, key, ip, from_dir, to_dir, user):
        if key:
            opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
            scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir
            logging.warning(scp)
            robust_com(scp)

    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output.
        """
        all_output = {}
        for c in containers:
            output = self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user)
            if output.strip() != "":
                all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, key, ip, cmd, user):
        if key:
            ip = user + '@' + ip
            ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
            logging.warning(ssh)
            out, _, _ = robust_com(ssh)
            return out
        else:
            return ''

    def login(self):
        """
        Login to a remote registry. Use the login credentials
        found in the user's home directory.
        """
        config = ferry.install.read_ferry_config()
        args = config['docker']
        if all(k in args for k in ("user", "password", "email")):
            if 'server' in args:
                server = args['server']
            else:
                server = ''
            return self.cli.login(user=args['user'],
                                  password=args['password'],
                                  email=args['email'],
                                  registry=server)
        logging.error("Could not open login credentials " + ferry.install.DEFAULT_LOGIN_KEY)
        return False
class Installer(object): def __init__(self): self.network = DHCPClient() def _read_key_dir(self): f = open(ferry.install.DEFAULT_DOCKER_KEY, 'r') k = f.read().strip().split("://") return k[1], k[0] def _clean_rules(self): self.network.clean_rules() def _reset_ssh_key(self): keydir, tmp = self._read_key_dir() # Only reset temporary keys. User-defined key directories # shouldn't be touched. if tmp == "tmp": shutil.rmtree(keydir) # Mark that we are using the default package keys global GLOBAL_KEY_DIR GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR _touch_file(DEFAULT_DOCKER_KEY, GLOBAL_KEY_DIR, root=True) logging.warning("reset key directory " + GLOBAL_KEY_DIR) def _process_ssh_key(self, options): global GLOBAL_KEY_DIR if options and '-k' in options: GLOBAL_KEY_DIR = 'user://' + self.fetch_image_keys(options['-k'][0]) else: GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR logging.warning("using key directory " + GLOBAL_KEY_DIR) _touch_file(DEFAULT_DOCKER_KEY, GLOBAL_KEY_DIR, root=True) def install(self, args, options): # Check if the host is actually 64-bit. If not raise a warning and quit. if not _supported_arch(): return 'Your architecture appears to be 32-bit.\nOnly 64-bit architectures are supported at the moment.' if not _supported_python(): return 'You appear to be running Python3.\nOnly Python2 is supported at the moment.' if not _supported_lxc(): return 'You appear to be running an older version of LXC.\nOnly versions > 0.7.5 are supported.' if not _has_ferry_user(): return 'You do not appear to have the \'docker\' group configured. Please create the \'docker\' group and try again.' # Create the various directories. try: if not os.path.isdir(DOCKER_DIR): os.makedirs(DOCKER_DIR) self._change_permission(DOCKER_DIR) except OSError as e: logging.error("Could not install Ferry.\n") logging.error(e.explanation) sys.exit(1) # Start the Ferry docker daemon. If it does not successfully # start, print out a msg. logging.warning("all prerequisites met...") start, msg = self._start_docker_daemon(options) if not start: logging.error('ferry docker daemon not started') return msg # Normally we don't want to build the Dockerfiles, # but sometimes we may for testing, etc. build = False if options and '-b' in options: build = True if options and '-u' in options: if len(options['-u']) > 0 and options['-u'][0] != True: logging.warning("performing select rebuild (%s)" % str(options['-u'])) self.build_from_list(options['-u'], DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build, recurse=False) else: logging.warning("performing forced rebuild") self.build_from_dir(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) else: # We want to be selective about which images # to rebuild. Useful if one image breaks, etc. to_build = self.check_images(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO) if len(to_build) > 0: logging.warning("performing select rebuild (%s)" % str(to_build)) self.build_from_list(to_build, DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) # Check that all the images were built. not_installed = self._check_all_images() if len(not_installed) == 0: return 'installed ferry' else: logging.error('images not built: ' + str(not_installed)) return 'Some images were not installed. Please type \'ferry install\' again.' 
def _check_all_images(self): not_installed = [] images = ['mongodb', 'ferry-base', 'hadoop-base', 'hadoop', 'hadoop-client', 'hive-metastore', 'gluster', 'openmpi', 'cassandra', 'cassandra-client', 'titan'] for i in images: if not self._check_image_installed("%s/%s" % (DEFAULT_DOCKER_REPO, i)): not_installed.append(i) return not_installed def _check_and_pull_image(self, image_name): if not self._check_image_installed(image_name): self._pull_image(image_name, on_client=False) return self._check_image_installed(image_name) def _check_image_installed(self, image_name): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return False else: return True def start_web(self, options=None, clean=False): start, msg = self._start_docker_daemon(options) if not clean and not start: # We are trying to start the web services but the Docker # daemon won't start. If we're cleaning, it's not a big deal. logging.error(msg) sys.exit(1) # Check if the user wants to use a specific key directory. self._process_ssh_key(options) # Check if the Mongo directory exists yet. If not # go ahead and create it. try: if not os.path.isdir(DEFAULT_MONGO_DB): os.makedirs(DEFAULT_MONGO_DB) self._change_permission(DEFAULT_MONGO_DB) if not os.path.isdir(DEFAULT_MONGO_LOG): os.makedirs(DEFAULT_MONGO_LOG) self._change_permission(DEFAULT_MONGO_LOG) except OSError as e: logging.error("Could not start ferry servers.\n") logging.error(e.explanation) sys.exit(1) # Check if the Mongo image is built. if not self._check_image_installed('%s/mongodb' % DEFAULT_DOCKER_REPO): logging.error("Could not start ferry servers.\n") logging.error("MongoDB images not found. Try executing 'ferry install'.") sys.exit(1) # Check if there are any other Mongo instances runnig. self._clean_web() # Start the Mongo server. keydir, _ = self._read_key_dir() cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' run -d -v %s:%s -v %s:%s -v %s:%s %s/mongodb' % (keydir, '/service/keys', DEFAULT_MONGO_DB, '/service/data', DEFAULT_MONGO_LOG, '/service/logs', DEFAULT_DOCKER_REPO) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) output = child.stderr.read().strip() if re.compile('[/:\s\w]*Can\'t connect[\'\s\w]*').match(output): logging.error("Ferry docker daemon does not appear to be running") sys.exit(1) elif re.compile('Unable to find image[\'\s\w]*').match(output): logging.error("Ferry mongo image not present") sys.exit(1) # Need to get Mongo connection info and store in temp file. container = child.stdout.read().strip() cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s' % container logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() output_json = json.loads(output.strip()) ip = output_json[0]['NetworkSettings']['IPAddress'] _touch_file('/tmp/mongodb.ip', ip, root=True) # Set the MongoDB env. variable. my_env = os.environ.copy() my_env['MONGODB'] = ip # Sleep a little while to let Mongo start receiving. time.sleep(2) # Start the DHCP server logging.warning("starting dhcp server") cmd = 'gunicorn -t 3600 -b 127.0.0.1:5000 -w 1 ferry.ip.dhcp:app &' Popen(cmd, stdout=PIPE, shell=True, env=my_env) time.sleep(2) # Reserve the Mongo IP. 
self.network.reserve_ip(ip) # Start the Ferry HTTP servers logging.warning("starting http servers on port 4000 and mongo %s" % ip) cmd = 'gunicorn -e FERRY_HOME=%s -t 3600 -w 3 -b 127.0.0.1:4000 ferry.http.httpapi:app &' % FERRY_HOME Popen(cmd, stdout=PIPE, shell=True, env=my_env) def stop_web(self): # Shutdown the mongo instance if os.path.exists('/tmp/mongodb.ip'): f = open('/tmp/mongodb.ip', 'r') ip = f.read().strip() f.close() keydir, tmp = self._read_key_dir() key = keydir + "/id_rsa" cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/bin/mongodb stop' % (key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() os.remove('/tmp/mongodb.ip') # Kill all the gunicorn instances. logging.warning("stopping http servers") cmd = 'ps -eaf | grep httpapi | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) cmd = 'ps -eaf | grep ferry.ip.dhcp | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) def _clean_web(self): docker = DOCKER_CMD + ' -H=' + DOCKER_SOCK cmd = docker + ' ps | grep ferry/mongodb | awk \'{print $1}\' | xargs ' + docker + ' stop ' logging.warning("cleaning previous mongo resources") logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) child.stdout.read() child.stderr.read() def _copytree(self, src, dst): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d) else: shutil.copy2(s, d) def _change_permission(self, location): uid, gid = _get_ferry_user() os.chown(location, uid, gid) if os.path.isdir(location): os.chmod(location, 0774) for entry in os.listdir(location): self._change_permission(os.path.join(location, entry)) else: # Check if this file has a file extension. If not, # then assume it's a binary. s = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH if len(location.split(".")) == 1: s |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH os.chmod(location, s) """ Ask for the key directory. """ def fetch_image_keys(self, key_dir=None): if key_dir and os.path.exists(key_dir): return key_dir else: return DEFAULT_KEY_DIR """ Check if the dockerfiles are already built. """ def check_images(self, image_dir, repo): if self._docker_running(): build_images = [] for f in os.listdir(image_dir): dockerfile = image_dir + '/' + f + '/Dockerfile' image_names = self._check_dockerfile(dockerfile, repo) if len(image_names) > 0: build_images += image_names return build_images else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_list(self, to_build, image_dir, repo, build=False, recurse=True): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = '/tmp/dockerfiles/' + f + '/Dockerfile' images = self._get_image(dockerfile) intersection = [i for i in images if i in to_build] if len(intersection) > 0: image = images.pop(0) logging.warning("building image " + image) self._build_image(image, dockerfile, repo, built_images, recurse=recurse, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. 
shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_dir(self, image_dir, repo, build=False): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = "/tmp/dockerfiles/" + f + "/Dockerfile" images = self._get_image(dockerfile) image = images.pop(0) self._build_image(image, dockerfile, repo, built_images, recurse=True, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") def _docker_running(self): return os.path.exists('/var/run/ferry.sock') def _check_dockerfile(self, dockerfile, repo): not_installed = [] images = self._get_image(dockerfile) for image in images: qualified = DEFAULT_DOCKER_REPO + '/' + image cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect ' + qualified + ' 2> /dev/null' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': not_installed.append(image) return not_installed def _transform_dockerfile(self, image_dir, f, repo): if not os.path.exists("/tmp/dockerfiles/" + f): shutil.copytree(image_dir + '/' + f, '/tmp/dockerfiles/' + f) out_file = "/tmp/dockerfiles/" + f + "/Dockerfile" out = open(out_file, "w+") uid, gid = _get_ferry_user() changes = { "USER" : repo, "DOCKER" : gid } for line in open(image_dir + '/' + f + '/Dockerfile', "r"): s = Template(line).substitute(changes) out.write(s) out.close() def _build_image(self, image, f, repo, built_images, recurse=False, build=False): base = self._get_base(f) if recurse and base != "base": image_dir = os.path.dirname(os.path.dirname(f)) dockerfile = image_dir + '/' + base + '/Dockerfile' self._build_image(base, dockerfile, repo, built_images, recurse, build) if not image in built_images: if base == "base": self._pull_image(base, tag='latest') built_images[image] = True self._compile_image(image, repo, os.path.dirname(f), build) def _get_image(self, dockerfile): names = [] for l in open(dockerfile, 'r'): if l.strip() != '': s = l.split() if len(s) > 0: if s[0].upper() == 'NAME': names.append(s[1].strip()) return names def _get_base(self, dockerfile): base = None for l in open(dockerfile, 'r'): s = l.split() if len(s) > 0: if s[0].upper() == 'FROM': base = s[1].strip().split("/") return base[-1] return base def _continuous_print(self, process, on_client=True): while True: try: out = process.stdout.read(15) if out == '': break else: if on_client: sys.stdout.write(out) sys.stdout.flush() else: logging.warning("downloading image...") except IOError as e: logging.warning(e) try: errmsg = process.stderr.readline() if errmsg and errmsg != '': logging.warning(errmsg) else: logging.warning("downloaded image!") except IOError: pass def _pull_image(self, image, tag=None, on_client=True): if not tag: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s' % image else: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s:%s' % (image, tag) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child, on_client=on_client) # Now tag the image with the 'latest' tag. 
if tag and tag != 'latest': cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s:%s %s:%s' % (image, tag, image, 'latest') logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) def _compile_image(self, image, repo, image_dir, build=False): # Now build the image. if build: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' build --rm=true -t' + ' %s/%s %s' % (repo, image, image_dir) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child) # Now tag the image. cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s %s/%s:%s' % (repo, image, repo, image, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) else: # Just pull the image from the public repo. image_name = "%s/%s" % (repo, image) self._pull_image(image_name, tag=ferry.__version__) def _tag_images(self, image, repo, alternatives): for a in alternatives: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:%s %s/%s:%s' % (repo, image, ferry.__version__, repo, a, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:latest %s/%s:latest' % (repo, image, repo, a) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) def _clean_images(self): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' | grep none | awk \'{print $1}\' | xargs ' + DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' rmi' Popen(cmd, stdout=PIPE, shell=True) def _is_running_btrfs(self): logging.warning("checking for btrfs") cmd = 'cat /etc/mtab | grep %s | awk \'{print $3}\'' % DOCKER_DIR output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() return output.strip() == "btrfs" def _start_docker_daemon(self, options=None): # Check if the docker daemon is already running try: if not self._docker_running(): bflag = '' if self._is_running_btrfs(): logging.warning("using btrfs backend") bflag = ' -s btrfs' # Explicitly supply the DNS. if options and '-n' in options: logging.warning("using custom dns") dflag = '' for d in options['-n']: dflag += ' --dns %s' % d else: logging.warning("using public dns") dflag = ' --dns 8.8.8.8 --dns 8.8.4.4' # We need to fix this so that ICC is set to false. icc = ' --icc=true' cmd = 'nohup ' + DOCKER_CMD + ' -d' + ' -H=' + DOCKER_SOCK + ' -g=' + DOCKER_DIR + ' -p=' + DOCKER_PID + dflag + bflag + icc + ' 1>%s 2>&1 &' % DEFAULT_DOCKER_LOG logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) # Wait a second to let the docker daemon do its thing. time.sleep(2) return True, "Ferry daemon running on /var/run/ferry.sock" else: return False, "Ferry appears to be already running. If this is an error, please type \'ferry clean\' and try again." except OSError as e: logging.error("could not start docker daemon.\n") logging.error(e.explanation) sys.exit(1) def _stop_docker_daemon(self, force=False): if force or self._docker_running(): logging.warning("stopping docker daemon") cmd = 'pkill -f docker-ferry' Popen(cmd, stdout=PIPE, shell=True) try: os.remove('/var/run/ferry.sock') except OSError: pass def _get_gateway(self): cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'" netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() mask = map(int, netmask.split(".")) cidr = 1 if mask[3] == 0: cidr = 8 if mask[2] == 0: cidr *= 2 return "%s/%d" % (gw, 32 - cidr)
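# Illustrative sketch (not part of the original module): _get_image() and
# _get_base() above rely on a Ferry-specific convention in which each template
# Dockerfile carries "NAME <image>" lines alongside the usual
# "FROM <repo>/<base>" line. This hypothetical helper applies the same parse
# to an in-memory string instead of a file on disk.
def _parse_dockerfile_sketch(text):
    names = []
    base = None
    for l in text.splitlines():
        s = l.split()
        if len(s) > 1:
            if s[0].upper() == 'NAME':
                names.append(s[1].strip())
            elif s[0].upper() == 'FROM':
                base = s[1].strip().split("/")[-1]
    return names, base

# Example (hypothetical Dockerfile fragment):
#   _parse_dockerfile_sketch("NAME hadoop\nFROM ferry/hadoop-base\n")
#   returns (['hadoop'], 'hadoop-base')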
class DockerFabric(object): def __init__(self, bootstrap=False): self.repo = 'public' self.docker_user = '******' self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY) self.bootstrap = bootstrap # Bootstrap mode means that the DHCP network # isn't available yet, so we can't use the network. if not bootstrap: self.network = DHCPClient(self._get_gateway()) def _get_gateway(self): """ Get the gateway address in CIDR notation. This defines the range of IP addresses available to the containers. """ cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'" netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() mask = map(int, netmask.split(".")) cidr = 1 if mask[3] == 0: cidr = 8 if mask[2] == 0: cidr *= 2 return "%s/%d" % (gw, 32 - cidr) def _get_host(self): cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() def _read_key_dir(self): """ Read the location of the directory containing the keys used to communicate with the containers. """ keydir = ferry.install._get_key_dir(root=self.bootstrap, server=True) with open(keydir, 'r') as f: k = f.read().strip().split("://") return k[1], k[0] def version(self): """ Fetch the current docker version. """ return self.cli.version() def get_fs_type(self): """ Get the filesystem type associated with docker. """ return self.cli.get_fs_type() def restart(self, containers): """ Restart the stopped containers. """ new_containers = [] for c in containers: container = self.cli.start(c.container, c.service_type, c.keys, c.volumes, c.args) container.default_user = self.docker_user new_containers.append(container) # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(2) return new_containers def alloc(self, container_info): """ Allocate several instances. """ containers = [] mounts = {} for c in container_info: # Get a new IP address for this container and construct # a default command. gw = self._get_gateway().split("/")[0] # Check if we should use the manual LXC option. if not 'netenable' in c: ip = self.network.assign_ip(c) lxc_opts = ["lxc.network.type = veth", "lxc.network.ipv4 = %s/24" % ip, "lxc.network.ipv4.gateway = %s" % gw, "lxc.network.link = drydock0", "lxc.network.name = eth0", "lxc.network.flags = up"] # Check if we need to forward any ports. 
host_map = {} for p in c['ports']: p = str(p) s = p.split(":") if len(s) > 1: host = s[0] dest = s[1] else: host = self.network.random_port() dest = s[0] host_map[dest] = [{'HostIp' : '0.0.0.0', 'HostPort' : host}] self.network.forward_rule('0.0.0.0/0', host, ip, dest) host_map_keys = host_map.keys() else: lxc_opts = None host_map = None host_map_keys = [] # Start a container with a specific image, in daemon mode, # without TTY, and on a specific port c['default_cmd'] = "/service/sbin/startnode init" container = self.cli.run(service_type = c['type'], image = c['image'], volumes = c['volumes'], keys = c['keys'], open_ports = host_map_keys, host_map = host_map, expose_group = c['exposed'], hostname = c['hostname'], default_cmd = c['default_cmd'], args= c['args'], lxc_opts = lxc_opts) if container: container.default_user = self.docker_user containers.append(container) if not 'netenable' in c: container.internal_ip = ip self.network.set_owner(ip, container.container) if 'name' in c: container.name = c['name'] if 'volume_user' in c: mounts[container] = {'user':c['volume_user'], 'vols':c['volumes'].items()} # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(2) # Check if we need to set the file permissions # for the mounted volumes. for c, i in mounts.items(): for _, v in i['vols']: self.cmd([c], 'chown -R %s %s' % (i['user'], v)) return containers def stop(self, containers): """ Forceably stop the running containers """ for c in containers: self.cli.stop(c['container']) def remove(self, containers): """ Remove the running instances """ for c in containers: for p in c.ports.keys(): self.network.delete_rule(c.internal_ip, p) self.network.free_ip(c.internal_ip) self.cli.remove(c.container) def snapshot(self, containers, cluster_uuid, num_snapshots): """ Save/commit the running instances """ snapshots = [] for c in containers: snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, cluster_uuid, c.host_name, num_snapshots) snapshots.append( {'image' : snapshot_name, 'base' : c.image, 'type' : c.service_type, 'name' : c.name, 'args' : c.args, 'ports': c.ports} ) self.cli.commit(c, snapshot_name) return snapshots def deploy(self, containers, registry=None): """ Upload these containers to the specified registry. """ deployed = [] for c in containers: image_name = '%s-%s:DEPLOYED' % (c.image, c.host_name) deployed.append( {'image' : image_name, 'base' : c.image, 'type' : c.service_type, 'name' : c.name, 'args' : c.args, 'ports': c.ports} ) if not registry: self.cli.commit(c, image_name) else: self.cli.push(c.image, registry) return deployed def push(self, image, registry=None): """ Push an image to a remote registry. """ return self.cli.push(image, registry) def pull(self, image): """ Pull a remote image to the local registry. """ return self.cli.pull(image) def halt(self, containers): """ Safe stop the containers. 
""" cmd = '/service/sbin/startnode halt' for c in containers: self.cmd_raw(c.internal_ip, cmd) def copy(self, containers, from_dir, to_dir): """ Copy over the contents to each container """ for c in containers: self.copy_raw(c.internal_ip, from_dir, to_dir) def copy_raw(self, ip, from_dir, to_dir): keydir, _ = self._read_key_dir() opts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' key = '-i ' + keydir + '/id_rsa' scp = 'scp ' + opts + ' ' + key + ' -r ' + from_dir + ' ' + self.docker_user + '@' + ip + ':' + to_dir logging.warning(scp) output = Popen(scp, stdout=PIPE, shell=True).stdout.read() def cmd(self, containers, cmd): """ Run a command on all the containers and collect the output. """ all_output = {} for c in containers: output = self.cmd_raw(c.internal_ip, cmd) all_output[c.host_name] = output.strip() return all_output def cmd_raw(self, ip, cmd): keydir, _ = self._read_key_dir() key = keydir + '/id_rsa' ip = self.docker_user + '@' + ip ssh = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd logging.warning(ssh) output = Popen(ssh, stdout=PIPE, shell=True).stdout.read() return output def login(self): """ Login to a remote registry. Use the login credentials found in the user's home directory. """ with open(ferry.install.DEFAULT_DOCKER_LOGIN, 'r') as f: args = yaml.load(f) args = args['docker'] if all(k in args for k in ("user","password","email")): if 'server' in args: server = args['server'] else: server = '' return self.cli.login(user = args['user'], password = args['password'], email = args['email'], registry = server) logging.error("Could not open login credentials " + ferry.install.DEFAULT_LOGIN_KEY) return False
class Installer(object): def __init__(self, cli=None): self.network = DHCPClient() self.fabric = LocalFabric(bootstrap=True) self.mongo = MongoInitializer(self.fabric.system) self.mongo.fabric = self.fabric self.mongo.template_dir = DEFAULT_TEMPLATE_DIR + '/mongo/' self.cli = cli self.config = read_ferry_config() def get_ferry_account(self): """ Get the Ferry authentication information. """ if 'ferry' in self.config: args = self.config['ferry'] if all(k in args for k in ("user", "key", "server")): return args['user'], args['key'], args['server'] return None, None, None def _get_worker_info(self): """ Get information regarding how to start the remote HTTP API. """ args = self.config['web'] return int(args['workers']), args['bind'], args['port'] def create_signature(self, request, key): """ Generated a signed request. """ return hmac.new(key, request, hashlib.sha256).hexdigest() def store_app(self, app, ext, content): """ Store the application in the global directory. """ try: # We may need to create the parent directory # if this is the first time an application from this user # is downloaded. file_name = os.path.join(DEFAULT_FERRY_APPS, app + ext) os.makedirs(os.path.dirname(file_name)) with open(file_name, "w") as f: f.write(content) return file_name except IOError as e: logging.error(e) return None except OSError as os: logging.error(os) return None def _clean_rules(self): """ Get rid of all the forwarding rules. """ self.network.clean_rules() def _install_perform_build(self, options, num_tries): # Normally we don't want to build the Dockerfiles, # but sometimes we may for testing, etc. build = False if options and '-b' in options: build = True if options and '-u' in options: if len(options['-u']) > 0 and options['-u'][0] != True: logging.warning("performing select rebuild (%s)" % str(options['-u'])) self.build_from_list(options['-u'], DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build, recurse=False) else: logging.warning("performing forced rebuild") self.build_from_dir(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) else: # We want to be selective about which images # to rebuild. Useful if one image breaks, etc. to_build = self.check_images(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO) if len(to_build) > 0: logging.warning("performing select rebuild (%s)" % str(to_build)) self.build_from_list(to_build, DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) # Check that all the images were built. not_installed = self._check_all_images() if len(not_installed) == 0: return 'installed ferry' else: logging.error('images not built: ' + str(not_installed)) if num_tries == 0: return 'Some images were not installed. Please type \'ferry install\' again.' else: # Try building the images again. logging.info("retrying install (%d)" % num_tries) return self._install_perform_build(options, num_tries - 1) def install(self, args, options): # Check if the host is actually 64-bit. If not raise a warning and quit. if not _supported_arch(): return 'Your architecture appears to be 32-bit.\nOnly 64-bit architectures are supported at the moment.' if not _supported_python(): return 'You appear to be running Python3.\nOnly Python2 is supported at the moment.' if not _supported_lxc(): return 'You appear to be running an older version of LXC.\nOnly versions > 0.7.5 are supported.' if not _supported_docker(): return 'You appear to be running an older version of Docker.\nOnly versions > 1.2 are supported.' if not _has_ferry_user(): return 'You do not appear to have the \'docker\' group configured. 
Please create the \'docker\' group and try again.' # Create the various directories. try: if not os.path.isdir(DOCKER_DIR): os.makedirs(DOCKER_DIR) self._change_permission(DOCKER_DIR) except OSError as e: logging.error("Could not install Ferry.\n") logging.error(e.strerror) sys.exit(1) # Make sure that the Ferry keys have the correct # ownership & permission. self._check_and_change_ssh_keyperm() # Start the Ferry docker daemon. If it does not successfully # start, print out a msg. logging.warning("all prerequisites met...") start, msg = self._start_docker_daemon(options) if not start: logging.error('ferry docker daemon not started') return msg # Perform the actual build/download. Sometimes the download # may fail so give the user a chance to automatically retry # a few times before giving up. if options and '-r' in options: num_tries = int(options['-r']) else: num_tries = 0 return self._install_perform_build(options, num_tries) def _check_all_images(self): not_installed = [] images = [ 'mongodb', 'ferry-base', 'hadoop-base', 'hadoop', 'hadoop-client', 'hive-metastore', 'gluster', 'openmpi', 'openmpi-client', 'cassandra', 'cassandra-client', 'titan', 'spark' ] for i in images: if not self._check_image_installed("%s/%s" % (DEFAULT_DOCKER_REPO, i)): not_installed.append(i) return not_installed def _check_and_pull_image(self, image_name): if not self._check_image_installed(image_name): self._pull_image(image_name, on_client=False) return self._check_image_installed(image_name) def _check_image_installed(self, image_name): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return False else: return True def _transfer_config(self, config_dirs): """ Transfer the configuration to the containers. """ for c in config_dirs: container = c[0] from_dir = c[1] to_dir = c[2] self.fabric.copy([container], from_dir, to_dir) def _read_public_key(self, private_key): s = private_key.split("/") p = os.path.splitext(s[len(s) - 1])[0] return p def _check_and_change_ssh_keyperm(self): os.chmod(DEFAULT_SSH_KEY, 0600) uid, gid = _get_ferry_user() os.chown(DEFAULT_SSH_KEY, uid, gid) def start_web(self, options=None, clean=False): start, msg = self._start_docker_daemon(options) if not clean and not start: # We are trying to start the web services but the Docker # daemon won't start. If we're cleaning, it's not a big deal. logging.error(msg) sys.exit(1) # Check if the ssh key permission is properly set. self._check_and_change_ssh_keyperm() # Check if we're operating in naked mode. If so, # we just needed to start the docker daemon, so we're all done! if options and '-n' in options: return # Check if the user-application directory exists. # If not, create it. try: if not os.path.isdir(DEFAULT_FERRY_APPS): os.makedirs(DEFAULT_FERRY_APPS) self._change_permission(DEFAULT_FERRY_APPS) except OSError as e: logging.error("Could not create application directory.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo directory exists yet. If not # go ahead and create it. try: if not os.path.isdir(DEFAULT_MONGO_DB): os.makedirs(DEFAULT_MONGO_DB) self._change_permission(DEFAULT_MONGO_DB) if not os.path.isdir(DEFAULT_MONGO_LOG): os.makedirs(DEFAULT_MONGO_LOG) self._change_permission(DEFAULT_MONGO_LOG) except OSError as e: logging.error("Could not start ferry servers.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo image is built. 
if not self._check_image_installed('%s/mongodb' % DEFAULT_DOCKER_REPO): logging.error("Could not start ferry servers.\n") logging.error( "MongoDB images not found. Try executing 'ferry install'.") sys.exit(1) # Check if there are any other Mongo instances runnig. self._clean_web() # Start the Mongo server. Create a new configuration and # manually start the container. private_key = self.cli._get_ssh_key(options) volumes = { DEFAULT_MONGO_LOG: self.mongo.container_log_dir, DEFAULT_MONGO_DB: self.mongo.container_data_dir } mongoplan = { 'image': 'ferry/mongodb', 'type': 'ferry/mongodb', 'keydir': { '/service/keys': DEFAULT_KEY_DIR }, 'keyname': self._read_public_key(private_key), 'privatekey': private_key, 'volumes': volumes, 'volume_user': DEFAULT_FERRY_OWNER, 'ports': [], 'exposed': self.mongo.get_working_ports(1), 'internal': self.mongo.get_internal_ports(1), 'hostname': 'ferrydb', 'netenable': True, 'args': 'trust' } mongoconf = self.mongo.generate(1) mongoconf.uuid = 'fdb-' + str(uuid.uuid4()).split('-')[0] containers = self.fabric.alloc(mongoconf.uuid, mongoconf.uuid, [mongoplan], "MONGO") if containers and len(containers) > 0: mongobox = containers[0] else: logging.error("Could not start MongoDB image") sys.exit(1) ip = mongobox.internal_ip _touch_file('/tmp/ferry/mongodb.ip', ip, root=True) # Once the container is started, we'll need to copy over the # configuration files, and then manually send the 'start' command. s = { 'container': mongobox, 'data_dev': 'eth0', 'data_ip': mongobox.internal_ip, 'manage_ip': mongobox.internal_ip, 'host_name': mongobox.host_name, 'type': mongobox.service_type, 'args': mongobox.args } config_dirs, entry_point = self.mongo.apply(mongoconf, [s]) self._transfer_config(config_dirs) self.mongo.start_service([mongobox], entry_point, self.fabric) # Set the MongoDB env. variable. my_env = os.environ.copy() my_env['MONGODB'] = ip # Sleep a little while to let Mongo start receiving. time.sleep(2) # Start the DHCP server logging.warning("starting dhcp server") # cmd = 'gunicorn -t 3600 -b 127.0.0.1:5000 -w 1 ferry.ip.dhcp:app &' cmd = 'python %s/ip/dhcp.py 127.0.0.1 5000 &' % FERRY_HOME Popen(cmd, stdout=PIPE, shell=True, env=my_env) time.sleep(2) # Reserve the Mongo IP. self.network.reserve_ip(ip) # Start the Ferry HTTP server. Read in the web configuration # so that we know how many workers to start, etc. 
workers, bind, port = self._get_worker_info() logging.warning("starting API servers on (%s:%s) and (mongo:%s)" % (bind, port, ip)) # cmd = 'gunicorn -e FERRY_HOME=%s -t 3600 -w %d -b %s:%s ferry.http.httpapi:app &' % (FERRY_HOME, workers, bind, port) cmd = 'export FERRY_HOME=%s && python %s/http/httpapi.py %s %s &' % ( FERRY_HOME, FERRY_HOME, bind, port) Popen(cmd, shell=True, env=my_env) def _force_stop_web(self): logging.warning("stopping docker http servers") cmd = 'pkill -f gunicorn' Popen(cmd, stdout=PIPE, shell=True) def stop_web(self, key): # Shutdown the mongo instance if os.path.exists('/tmp/ferry/mongodb.ip'): f = open('/tmp/ferry/mongodb.ip', 'r') ip = f.read().strip() f.close() cmd = 'LC_ALL=C && ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode stop' % ( key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) cmd = 'LC_ALL=C && ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode halt' % ( key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) os.remove('/tmp/ferry/mongodb.ip') # Kill all the gunicorn instances. logging.warning("stopping http servers") cmd = 'ps -eaf | grep httpapi | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) cmd = 'ps -eaf | grep ferry.ip.dhcp | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) def _clean_web(self): docker = DOCKER_CMD + ' -H=' + DOCKER_SOCK cmd = docker + ' ps | grep ferry/mongodb | awk \'{print $1}\' | xargs ' + docker + ' stop ' logging.warning("cleaning previous mongo resources") logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) child.stdout.read() child.stderr.read() def _copytree(self, src, dst): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d) else: shutil.copy2(s, d) def _change_permission(self, location): uid, gid = _get_ferry_user() os.chown(location, uid, gid) if os.path.isdir(location): os.chmod(location, 0774) for entry in os.listdir(location): self._change_permission(os.path.join(location, entry)) else: # Check if this file has a file extension. If not, # then assume it's a binary. s = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH if len(location.split(".")) == 1: s |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH os.chmod(location, s) """ Check if the dockerfiles are already built. 
""" def check_images(self, image_dir, repo): if self._docker_running(): build_images = [] for f in os.listdir(image_dir): dockerfile = image_dir + '/' + f + '/Dockerfile' image_names = self._check_dockerfile(dockerfile, repo) if len(image_names) > 0: build_images += image_names return build_images else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_list(self, to_build, image_dir, repo, build=False, recurse=True): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): logging.warning("transforming dockerfile") self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = '/tmp/dockerfiles/' + f + '/Dockerfile' images = self._get_image(dockerfile) intersection = [i for i in images if i in to_build] if len(intersection) > 0: image = images.pop(0) logging.warning("building image " + image) self._build_image(image, dockerfile, repo, built_images, recurse=recurse, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_dir(self, image_dir, repo, build=False): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = "/tmp/dockerfiles/" + f + "/Dockerfile" images = self._get_image(dockerfile) image = images.pop(0) self._build_image(image, dockerfile, repo, built_images, recurse=True, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. 
# shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") def _docker_running(self): return os.path.exists('/var/run/ferry.sock') def _check_dockerimage(self, image, repo): qualified = repo + '/' + image cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect ' + qualified + ' 2> /dev/null' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return image else: return None def _check_dockerfile(self, dockerfile, repo): not_installed = [] images = self._get_image(dockerfile) for image in images: i = self._check_dockerimage(image, DEFAULT_DOCKER_REPO) if i: not_installed.append(image) return not_installed def _transform_dockerfile(self, image_dir, f, repo): if not os.path.exists("/tmp/dockerfiles/" + f): shutil.copytree(image_dir + '/' + f, '/tmp/dockerfiles/' + f) out_file = "/tmp/dockerfiles/" + f + "/Dockerfile" out = open(out_file, "w+") uid, gid = _get_ferry_user() download_url = _get_download_url() changes = {"USER": repo, "DOWNLOAD_URL": download_url, "DOCKER": gid} for line in open(image_dir + '/' + f + '/Dockerfile', "r"): s = Template(line).substitute(changes) out.write(s) out.close() def _build_image(self, image, f, repo, built_images, recurse=False, build=False): base = self._get_base(f) if recurse and base != "ubuntu:14.04": image_dir = os.path.dirname(os.path.dirname(f)) dockerfile = image_dir + '/' + base + '/Dockerfile' self._build_image(base, dockerfile, repo, built_images, recurse, build) if not image in built_images: if base == "ubuntu:14.04": self._pull_image(base) built_images[image] = True self._compile_image(image, repo, os.path.dirname(f), build) def _get_image(self, dockerfile): names = [] for l in open(dockerfile, 'r'): if l.strip() != '': s = l.split() if len(s) > 0: if s[0].upper() == 'NAME': names.append(s[1].strip()) return names def _get_base(self, dockerfile): base = None for l in open(dockerfile, 'r'): s = l.split() if len(s) > 0: if s[0].upper() == 'FROM': base = s[1].strip().split("/") return base[-1] return base def _continuous_print(self, process, on_client=True): while True: try: out = process.stdout.read(15) if out == '': break else: if on_client: sys.stdout.write(out) sys.stdout.flush() else: logging.warning("downloading image...") except IOError as e: logging.warning(e) try: errmsg = process.stderr.readline() if errmsg and errmsg != '': logging.warning(errmsg) else: logging.warning("downloaded image!") except IOError: pass def _pull_image(self, image, tag=None, on_client=True): if not tag: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s' % image else: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s:%s' % (image, tag) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child, on_client=on_client) # Now tag the image with the 'latest' tag. if tag and tag != 'latest': cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s:%s %s:%s' % ( image, tag, image, 'latest') logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) def _compile_image(self, image, repo, image_dir, build=False): # Now build the image. if build: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' build --rm=true -t' + ' %s/%s %s' % ( repo, image, image_dir) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child) # Now tag the image. 
cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s %s/%s:%s' % ( repo, image, repo, image, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) else: # Just pull the image from the public repo. image_name = "%s/%s" % (repo, image) self._pull_image(image_name, tag=ferry.__version__) def _tag_images(self, image, repo, alternatives): for a in alternatives: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:%s %s/%s:%s' % ( repo, image, ferry.__version__, repo, a, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:latest %s/%s:latest' % ( repo, image, repo, a) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) def _clean_images(self): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' | grep none | awk \'{print $1}\' | xargs ' + DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' rmi' Popen(cmd, stdout=PIPE, shell=True) def _is_parent_dir(self, pdir, cdir): pdirs = pdir.split("/") cdirs = cdir.split("/") # Parent directory can never be longer than # the child directory. if len(pdirs) > len(cdirs): return False for i in range(0, len(pdirs)): # The parent directory shoudl always match # the child directory. Ignore the start and end # blank spaces caused by "split". if pdirs[i] != "" and pdirs[i] != cdirs[i]: return False return True def _is_running_btrfs(self): logging.warning("checking for btrfs") cmd = 'cat /etc/mtab | grep btrfs | awk \'{print $2}\'' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() != "": dirs = output.strip().split("\n") for d in dirs: if self._is_parent_dir(d, DOCKER_DIR): return True return False def _start_docker_daemon(self, options=None): # Check if the Ferry bridge has been created. _create_bridge() # Check if the docker daemon is already running try: if not self._docker_running(): # Use the ferry0 bridge. nflag = ' -b ferry0' # Use the LXC backend. This backend option must be specified # for Docker versions greater than 0.9.0. _, ver = _get_docker_version() if ver > (0, 9, 0): lflag = ' -e lxc' else: lflag = '' # Figure out which storage backend to use. Right # now we only support BTRFS or DeviceMapper, since # AUFS seems to break on some occasions. if self._is_running_btrfs(): logging.warning("using btrfs backend") bflag = ' -s btrfs' else: logging.warning("using devmapper backend") bflag = ' -s devicemapper' # Explicitly supply the DNS. if options and '-d' in options: logging.warning("using custom dns") dflag = '' for d in options['-d']: dflag += ' --dns %s' % d else: logging.warning("using public dns") dflag = ' --dns 8.8.8.8 --dns 8.8.4.4' # We need to fix this so that ICC is set to false. icc = ' --icc=true' cmd = 'nohup ' + DOCKER_CMD + ' -d' + ' -H=' + DOCKER_SOCK + ' -g=' + DOCKER_DIR + ' -p=' + DOCKER_PID + nflag + dflag + lflag + bflag + icc + ' 1>%s 2>&1 &' % DEFAULT_DOCKER_LOG logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) # Wait a second to let the docker daemon do its thing. time.sleep(3) return True, "Ferry daemon running on /var/run/ferry.sock" else: return False, "Ferry appears to be already running. If this is an error, please type \'ferry clean\' and try again." 
        except OSError as e:
            logging.error("could not start docker daemon.\n")
            logging.error(e.strerror)
            sys.exit(1)

    def _stop_docker_daemon(self, force=False):
        if force or self._docker_running():
            logging.warning("stopping docker daemon")
            cmd = 'pkill -f ' + DOCKER_CMD
            Popen(cmd, stdout=PIPE, shell=True)
            try:
                os.remove('/var/run/ferry.sock')
            except OSError:
                pass
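For reference, the image check that install() and start_web() rely on reduces to shelling out to 'docker inspect' and treating an empty result as "not installed". A minimal standalone sketch of that check under Python 2 (the 'docker' binary and socket path below are illustrative stand-ins for the DOCKER_CMD and DOCKER_SOCK constants):

from subprocess import PIPE, Popen

def image_installed(image, docker_cmd='docker', sock='unix:///var/run/ferry.sock'):
    # 'docker inspect' prints '[]' (or nothing) for a missing image,
    # so any other output is treated as "installed".
    cmd = '%s -H=%s inspect %s 2> /dev/null' % (docker_cmd, sock, image)
    output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
    return output.strip() not in ('', '[]')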
class LocalFabric(object): def __init__(self, bootstrap=False): self.name = "local" self.repo = 'public' self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY) self.docker_user = self.cli.docker_user self.inspector = DockerInspector(self.cli) self.bootstrap = bootstrap # The system returns information regarding # the instance types. self.system = System() # Bootstrap mode means that the DHCP network # isn't available yet, so we can't use the network. if not bootstrap: self.network = DHCPClient(ferry.install._get_gateway()) def _get_host(self): cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() def get_data_dir(self): if 'FERRY_SCRATCH' in os.environ: scratch_dir = os.environ['FERRY_SCRATCH'] else: scratch_dir = os.path.join(ferry.install._get_ferry_dir(server=True), 'scratch') if not os.path.isdir(scratch_dir): os.makedirs(scratch_dir) return scratch_dir def installed_images(self): """ List all the installed Docker images. """ images = [] image_string = self.cli.images() for image in image_string.split(): image_name = image.strip() if image_name != "REPOSITORY" and image_name != "<none>": images.append(image_name) return images def version(self): """ Fetch the current docker version. """ return self.cli.version() def get_fs_type(self): """ Get the filesystem type associated with docker. """ return self.cli.get_fs_type() def quit(self): """ Quit the local fabric. """ logging.info("quitting local fabric") def restart(self, cluster_uuid, service_uuid, containers): """ Restart the stopped containers. """ new_containers = [] for c in containers: container = self.cli.start(image = c.image, container = c.container, service_type = c.service_type, keydir = c.keydir, keyname = c.keyname, privatekey = c.privatekey, volumes = c.volumes, args = c.args, inspector = self.inspector) container.default_user = self.docker_user new_containers.append(container) # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(2) return new_containers def alloc(self, cluster_uuid, service_uuid, container_info, ctype): """ Allocate several instances. """ containers = [] mounts = {} for c in container_info: # Get a new IP address for this container and construct # a default command. gw = ferry.install._get_gateway().split("/")[0] # Check if we should use the manual LXC option. if not 'netenable' in c: ip = self.network.assign_ip(c) lxc_opts = ["lxc.network.type = veth", "lxc.network.ipv4 = %s/24" % ip, "lxc.network.ipv4.gateway = %s" % gw, "lxc.network.link = ferry0", "lxc.network.name = eth0", "lxc.network.flags = up"] # Check if we need to forward any ports. 
host_map = {} for p in c['ports']: p = str(p) s = p.split(":") if len(s) > 1: host = s[0] dest = s[1] else: host = self.network.random_port() dest = s[0] host_map[dest] = [{'HostIp' : '0.0.0.0', 'HostPort' : host}] self.network.forward_rule('0.0.0.0/0', host, ip, dest) host_map_keys = host_map.keys() else: lxc_opts = None host_map = None host_map_keys = [] # Start a container with a specific image, in daemon mode, # without TTY, and on a specific port if not 'default_cmd' in c: c['default_cmd'] = "/service/sbin/startnode init" container = self.cli.run(service_type = c['type'], image = c['image'], volumes = c['volumes'], keydir = c['keydir'], keyname = c['keyname'], privatekey = c['privatekey'], open_ports = host_map_keys, host_map = host_map, expose_group = c['exposed'], hostname = c['hostname'], default_cmd = c['default_cmd'], args= c['args'], lxc_opts = lxc_opts, inspector = self.inspector, background = False) if container: container.default_user = self.docker_user containers.append(container) if not 'netenable' in c: container.internal_ip = ip container.external_ip = ip self.network.set_owner(ip, container.container) if 'name' in c: container.name = c['name'] if 'volume_user' in c: mounts[container] = {'user':c['volume_user'], 'vols':c['volumes'].items()} # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(3) # Check if we need to set the file permissions # for the mounted volumes. for c, i in mounts.items(): for _, v in i['vols']: self.cmd([c], 'chown -R %s %s' % (i['user'], v)) return containers def stop(self, cluster_uuid, service_uuid, containers): """ Forceably stop the running containers """ for c in containers: if type(c) is dict: self.cli.stop(c['container']) else: self.cli.stop(c.container) def remove(self, cluster_uuid, service_uuid, containers): """ Remove the running instances """ for c in containers: for p in c.ports.keys(): self.network.delete_rule(c.internal_ip, p) self.network.free_ip(c.internal_ip) self.cli.remove(c.container) def snapshot(self, containers, cluster_uuid, num_snapshots): """ Save/commit the running instances """ snapshots = [] for c in containers: snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, cluster_uuid, c.host_name, num_snapshots) snapshots.append( {'image' : snapshot_name, 'base' : c.image, 'type' : c.service_type, 'name' : c.name, 'args' : c.args, 'ports': c.ports} ) self.cli.commit(c, snapshot_name) return snapshots def push(self, image, registry=None): """ Push an image to a remote registry. """ return self.cli.push(image, registry) def pull(self, image): """ Pull a remote image to the local registry. """ return self.cli.pull(image) def halt(self, cluster_uuid, service_uuid, containers): """ Safe stop the containers. """ cmd = '/service/sbin/startnode halt' for c in containers: self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user) def copy(self, containers, from_dir, to_dir): """ Copy over the contents to each container """ for c in containers: self.copy_raw(c.privatekey, c.internal_ip, from_dir, to_dir, c.default_user) def copy_raw(self, key, ip, from_dir, to_dir, user): if key: opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir logging.warning(scp) robust_com(scp) def cmd(self, containers, cmd): """ Run a command on all the containers and collect the output. 
""" all_output = {} for c in containers: output = self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user) if output.strip() != "": all_output[c.host_name] = output.strip() return all_output def cmd_raw(self, key, ip, cmd, user): if key: ip = user + '@' + ip ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd logging.warning(ssh) out, _, _ = robust_com(ssh) return out else: return '' def login(self): """ Login to a remote registry. Use the login credentials found in the user's home directory. """ config = ferry.install.read_ferry_config() args = config['docker'] if all(k in args for k in ("user","password","email")): if 'server' in args: server = args['server'] else: server = '' return self.cli.login(user = args['user'], password = args['password'], email = args['email'], registry = server) logging.error("Could not open login credentials " + ferry.install.DEFAULT_LOGIN_KEY) return False
class Installer(object): def __init__(self, cli=None): self.network = DHCPClient() self.fabric = LocalFabric(bootstrap=True) self.mongo = MongoInitializer(self.fabric.system) self.mongo.fabric = self.fabric self.mongo.template_dir = DEFAULT_TEMPLATE_DIR + '/mongo/' self.cli = cli self.config = read_ferry_config() def get_ferry_account(self): """ Get the Ferry authentication information. """ if 'ferry' in self.config: args = self.config['ferry'] if all(k in args for k in ("user","key","server")): return args['user'], args['key'], args['server'] return None, None, None def _get_worker_info(self): """ Get information regarding how to start the remote HTTP API. """ args = self.config['web'] return int(args['workers']), args['bind'], args['port'] def create_signature(self, request, key): """ Generated a signed request. """ return hmac.new(key, request, hashlib.sha256).hexdigest() def store_app(self, app, ext, content): """ Store the application in the global directory. """ try: # We may need to create the parent directory # if this is the first time an application from this user # is downloaded. file_name = os.path.join(DEFAULT_FERRY_APPS, app + ext) os.makedirs(os.path.dirname(file_name)) with open(file_name, "w") as f: f.write(content) return file_name except IOError as e: logging.error(e) return None except OSError as os: logging.error(os) return None def _clean_rules(self): """ Get rid of all the forwarding rules. """ self.network.clean_rules() def _install_perform_build(self, options, num_tries): # Normally we don't want to build the Dockerfiles, # but sometimes we may for testing, etc. build = False if options and '-b' in options: build = True if options and '-u' in options: if len(options['-u']) > 0 and options['-u'][0] != True: logging.warning("performing select rebuild (%s)" % str(options['-u'])) self.build_from_list(options['-u'], DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build, recurse=False) else: logging.warning("performing forced rebuild") self.build_from_dir(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) else: # We want to be selective about which images # to rebuild. Useful if one image breaks, etc. to_build = self.check_images(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO) if len(to_build) > 0: logging.warning("performing select rebuild (%s)" % str(to_build)) self.build_from_list(to_build, DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build) # Check that all the images were built. not_installed = self._check_all_images() if len(not_installed) == 0: return 'installed ferry' else: logging.error('images not built: ' + str(not_installed)) if num_tries == 0: return 'Some images were not installed. Please type \'ferry install\' again.' else: # Try building the images again. logging.info("retrying install (%d)" % num_tries) return self._install_perform_build(options, num_tries - 1) def install(self, args, options): # Check if the host is actually 64-bit. If not raise a warning and quit. if not _supported_arch(): return 'Your architecture appears to be 32-bit.\nOnly 64-bit architectures are supported at the moment.' if not _supported_python(): return 'You appear to be running Python3.\nOnly Python2 is supported at the moment.' if not _supported_lxc(): return 'Either LXC is not installed or you are running an older version of LXC.\nOnly versions > 0.7.5 are supported.' if not _supported_docker(): return 'Either Docker is not installed or you are running an older version of Docker.\nOnly versions >= 1.2.0 are supported.' if not _supported_bridge(): return 'The brctl command was not found. 
Please install the network bridge utilities.' if not _has_ferry_user(): return 'You do not appear to have the \'docker\' group configured. Please create the \'docker\' group and try again.' # Create the various directories. try: if not os.path.isdir(DOCKER_DIR): os.makedirs(DOCKER_DIR) self._change_permission(DOCKER_DIR) except OSError as e: logging.error("Could not install Ferry.\n") logging.error(e.strerror) sys.exit(1) # Make sure that the Ferry keys have the correct # ownership & permission. self._check_and_change_ssh_keyperm() # Start the Ferry docker daemon. If it does not successfully # start, print out a msg. logging.warning("all prerequisites met...") start, msg = self._start_docker_daemon(options) if not start: logging.error('ferry docker daemon not started') return msg # Perform the actual build/download. Sometimes the download # may fail so give the user a chance to automatically retry # a few times before giving up. if options and '-r' in options: num_tries = int(options['-r']) else: num_tries = 0 return self._install_perform_build(options, num_tries) def _check_all_images(self): not_installed = [] images = ['mongodb', 'ferry-base', 'hadoop-base', 'hadoop', 'hadoop-client', 'hive-metastore', 'gluster', 'openmpi', 'openmpi-client', 'cassandra', 'cassandra-client', 'titan', 'spark'] for i in images: if not self._check_image_installed("%s/%s" % (DEFAULT_DOCKER_REPO, i)): not_installed.append(i) return not_installed def _check_and_pull_image(self, image_name): if not self._check_image_installed(image_name): self._pull_image(image_name, on_client=False) return self._check_image_installed(image_name) def _check_image_installed(self, image_name): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return False else: return True def _transfer_config(self, config_dirs): """ Transfer the configuration to the containers. """ for c in config_dirs: container = c[0] from_dir = c[1] to_dir = c[2] self.fabric.copy([container], from_dir, to_dir) def _read_public_key(self, private_key): s = private_key.split("/") p = os.path.splitext(s[len(s) - 1])[0] return p def _check_and_change_ssh_keyperm(self): os.chmod(DEFAULT_SSH_KEY, 0600) uid, gid = _get_ferry_user() os.chown(DEFAULT_SSH_KEY, uid, gid) def start_web(self, options=None, clean=False): start, msg = self._start_docker_daemon(options) if not clean and not start: # We are trying to start the web services but the Docker # daemon won't start. If we're cleaning, it's not a big deal. logging.error(msg) sys.exit(1) # Check if the ssh key permission is properly set. self._check_and_change_ssh_keyperm() # Check if we're operating in naked mode. If so, # we just needed to start the docker daemon, so we're all done! if options and '-n' in options: return # Check if the user-application directory exists. # If not, create it. try: if not os.path.isdir(DEFAULT_FERRY_APPS): os.makedirs(DEFAULT_FERRY_APPS) self._change_permission(DEFAULT_FERRY_APPS) except OSError as e: logging.error("Could not create application directory.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo directory exists yet. If not # go ahead and create it. 
try: if not os.path.isdir(DEFAULT_MONGO_DB): os.makedirs(DEFAULT_MONGO_DB) self._change_permission(DEFAULT_MONGO_DB) if not os.path.isdir(DEFAULT_MONGO_LOG): os.makedirs(DEFAULT_MONGO_LOG) self._change_permission(DEFAULT_MONGO_LOG) except OSError as e: logging.error("Could not start ferry servers.\n") logging.error(e.strerror) sys.exit(1) # Check if the Mongo image is built. if not self._check_image_installed('%s/mongodb' % DEFAULT_DOCKER_REPO): logging.error("Could not start ferry servers.\n") logging.error("MongoDB images not found. Try executing 'ferry install'.") sys.exit(1) # Check if there are any other Mongo instances runnig. self._clean_web() # Start the Mongo server. Create a new configuration and # manually start the container. private_key = self.cli._get_ssh_key(options) volumes = { DEFAULT_MONGO_LOG : self.mongo.container_log_dir, DEFAULT_MONGO_DB : self.mongo.container_data_dir } mongoplan = {'image':'ferry/mongodb', 'type':'ferry/mongodb', 'keydir': { '/service/keys' : DEFAULT_KEY_DIR }, 'keyname': self._read_public_key(private_key), 'privatekey': private_key, 'volumes':volumes, 'volume_user':DEFAULT_FERRY_OWNER, 'ports':[], 'exposed':self.mongo.get_working_ports(1), 'internal':self.mongo.get_internal_ports(1), 'hostname':'ferrydb', 'netenable':True, 'args': 'trust' } mongoconf = self.mongo.generate(1) mongoconf.uuid = 'fdb-' + str(uuid.uuid4()).split('-')[0] containers = self.fabric.alloc(mongoconf.uuid, mongoconf.uuid, [mongoplan], "MONGO") if containers and len(containers) > 0: mongobox = containers[0] else: logging.error("Could not start MongoDB image") sys.exit(1) ip = mongobox.internal_ip _touch_file('/tmp/ferry/mongodb.ip', ip, root=True) # Once the container is started, we'll need to copy over the # configuration files, and then manually send the 'start' command. s = { 'container':mongobox, 'data_dev':'eth0', 'data_ip':mongobox.internal_ip, 'manage_ip':mongobox.internal_ip, 'host_name':mongobox.host_name, 'type':mongobox.service_type, 'args':mongobox.args } config_dirs, entry_point = self.mongo.apply(mongoconf, [s]) self._transfer_config(config_dirs) self.mongo.start_service([mongobox], entry_point, self.fabric) # Set the MongoDB env. variable. my_env = os.environ.copy() my_env['MONGODB'] = ip # Sleep a little while to let Mongo start receiving. time.sleep(2) # Start the DHCP server logging.warning("starting dhcp server") # cmd = 'gunicorn -t 3600 -b 127.0.0.1:5000 -w 1 ferry.ip.dhcp:app &' cmd = 'python %s/ip/dhcp.py 127.0.0.1 5000 &' % FERRY_HOME Popen(cmd, stdout=PIPE, shell=True, env=my_env) time.sleep(2) # Reserve the Mongo IP. self.network.reserve_ip(ip) # Start the Ferry HTTP server. Read in the web configuration # so that we know how many workers to start, etc. 
workers, bind, port = self._get_worker_info() logging.warning("starting API servers on (%s:%s) and (mongo:%s)" % (bind, port, ip)) # cmd = 'gunicorn -e FERRY_HOME=%s -t 3600 -w %d -b %s:%s ferry.http.httpapi:app &' % (FERRY_HOME, workers, bind, port) cmd = 'export FERRY_HOME=%s && python %s/http/httpapi.py %s %s &' % (FERRY_HOME, FERRY_HOME, bind, port) Popen(cmd, shell=True, env=my_env) def _force_stop_web(self): logging.warning("stopping docker http servers") cmd = 'pkill -f gunicorn' Popen(cmd, stdout=PIPE, shell=True) def stop_web(self, key): # Shutdown the mongo instance if os.path.exists('/tmp/ferry/mongodb.ip'): f = open('/tmp/ferry/mongodb.ip', 'r') ip = f.read().strip() f.close() cmd = 'LC_ALL=C && ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode stop' % (key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) cmd = 'LC_ALL=C && ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode halt' % (key, ip) logging.warning(cmd) output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() logging.warning(output) os.remove('/tmp/ferry/mongodb.ip') # Kill all the gunicorn instances. logging.warning("stopping http servers") cmd = 'ps -eaf | grep httpapi | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) cmd = 'ps -eaf | grep ferry.ip.dhcp | awk \'{print $2}\' | xargs kill -15' Popen(cmd, stdout=PIPE, shell=True) def _clean_web(self): docker = DOCKER_CMD + ' -H=' + DOCKER_SOCK cmd = docker + ' ps | grep ferry/mongodb | awk \'{print $1}\' | xargs ' + docker + ' stop ' logging.warning("cleaning previous mongo resources") logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) child.stdout.read() child.stderr.read() def _copytree(self, src, dst): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d) else: shutil.copy2(s, d) def _change_permission(self, location): uid, gid = _get_ferry_user() os.chown(location, uid, gid) if os.path.isdir(location): os.chmod(location, 0774) for entry in os.listdir(location): self._change_permission(os.path.join(location, entry)) else: # Check if this file has a file extension. If not, # then assume it's a binary. s = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH if len(location.split(".")) == 1: s |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH os.chmod(location, s) """ Check if the dockerfiles are already built. 
""" def check_images(self, image_dir, repo): if self._docker_running(): build_images = [] for f in os.listdir(image_dir): dockerfile = image_dir + '/' + f + '/Dockerfile' image_names = self._check_dockerfile(dockerfile, repo) if len(image_names) > 0: build_images += image_names return build_images else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_list(self, to_build, image_dir, repo, build=False, recurse=True): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): logging.warning("transforming dockerfile") self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = '/tmp/dockerfiles/' + f + '/Dockerfile' images = self._get_image(dockerfile) intersection = [i for i in images if i in to_build] if len(intersection) > 0: image = images.pop(0) logging.warning("building image " + image) self._build_image(image, dockerfile, repo, built_images, recurse=recurse, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") """ Build the docker images """ def build_from_dir(self, image_dir, repo, build=False): if self._docker_running(): built_images = {} for f in os.listdir(image_dir): self._transform_dockerfile(image_dir, f, repo) for f in os.listdir("/tmp/dockerfiles/"): dockerfile = "/tmp/dockerfiles/" + f + "/Dockerfile" images = self._get_image(dockerfile) image = images.pop(0) self._build_image(image, dockerfile, repo, built_images, recurse=True, build=build) if len(images) > 0: logging.warning("tagging images " + image) self._tag_images(image, repo, images) # After building everything, get rid of the temp dir. 
# shutil.rmtree("/tmp/dockerfiles") else: logging.error("ferry daemon not started") def _docker_running(self): return os.path.exists('/var/run/ferry.sock') def _check_dockerimage(self, image, repo): qualified = repo + '/' + image cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect ' + qualified + ' 2> /dev/null' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() == '[]': return image else: return None def _check_dockerfile(self, dockerfile, repo): not_installed = [] images = self._get_image(dockerfile) for image in images: i = self._check_dockerimage(image, DEFAULT_DOCKER_REPO) if i: not_installed.append(image) return not_installed def _transform_dockerfile(self, image_dir, f, repo): if not os.path.exists("/tmp/dockerfiles/" + f): shutil.copytree(image_dir + '/' + f, '/tmp/dockerfiles/' + f) out_file = "/tmp/dockerfiles/" + f + "/Dockerfile" out = open(out_file, "w+") uid, gid = _get_ferry_user() download_url = _get_download_url() changes = { "USER" : repo, "DOWNLOAD_URL" : download_url, "DOCKER" : gid } for line in open(image_dir + '/' + f + '/Dockerfile', "r"): s = Template(line).substitute(changes) out.write(s) out.close() def _build_image(self, image, f, repo, built_images, recurse=False, build=False): base = self._get_base(f) if recurse and base != "ubuntu:14.04": image_dir = os.path.dirname(os.path.dirname(f)) dockerfile = image_dir + '/' + base + '/Dockerfile' self._build_image(base, dockerfile, repo, built_images, recurse, build) if not image in built_images: if base == "ubuntu:14.04": self._pull_image(base) built_images[image] = True self._compile_image(image, repo, os.path.dirname(f), build) def _get_image(self, dockerfile): names = [] for l in open(dockerfile, 'r'): if l.strip() != '': s = l.split() if len(s) > 0: if s[0].upper() == 'NAME': names.append(s[1].strip()) return names def _get_base(self, dockerfile): base = None for l in open(dockerfile, 'r'): s = l.split() if len(s) > 0: if s[0].upper() == 'FROM': base = s[1].strip().split("/") return base[-1] return base def _continuous_print(self, process, on_client=True): while True: try: out = process.stdout.read(15) if out == '': break else: if on_client: sys.stdout.write(out) sys.stdout.flush() else: logging.warning("downloading image...") except IOError as e: logging.warning(e) try: errmsg = process.stderr.readline() if errmsg and errmsg != '': logging.warning(errmsg) else: logging.warning("downloaded image!") except IOError: pass def _pull_image(self, image, tag=None, on_client=True): if not tag: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s' % image else: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s:%s' % (image, tag) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child, on_client=on_client) # Now tag the image with the 'latest' tag. if tag and tag != 'latest': cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s:%s %s:%s' % (image, tag, image, 'latest') logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) def _compile_image(self, image, repo, image_dir, build=False): # Now build the image. if build: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' build --rm=true -t' + ' %s/%s %s' % (repo, image, image_dir) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) self._continuous_print(child) # Now tag the image. 
cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s %s/%s:%s' % (repo, image, repo, image, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) else: # Just pull the image from the public repo. image_name = "%s/%s" % (repo, image) self._pull_image(image_name, tag=ferry.__version__) def _tag_images(self, image, repo, alternatives): for a in alternatives: cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:%s %s/%s:%s' % (repo, image, ferry.__version__, repo, a, ferry.__version__) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:latest %s/%s:latest' % (repo, image, repo, a) logging.warning(cmd) child = Popen(cmd, stdout=PIPE, shell=True) def _clean_images(self): cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' | grep none | awk \'{print $1}\' | xargs ' + DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' rmi' Popen(cmd, stdout=PIPE, shell=True) def _is_parent_dir(self, pdir, cdir): pdirs = pdir.split("/") cdirs = cdir.split("/") # Parent directory can never be longer than # the child directory. if len(pdirs) > len(cdirs): return False for i in range(0, len(pdirs)): # The parent directory shoudl always match # the child directory. Ignore the start and end # blank spaces caused by "split". if pdirs[i] != "" and pdirs[i] != cdirs[i]: return False return True def _is_running_btrfs(self): logging.warning("checking for btrfs") cmd = 'cat /etc/mtab | grep btrfs | awk \'{print $2}\'' output = Popen(cmd, stdout=PIPE, shell=True).stdout.read() if output.strip() != "": dirs = output.strip().split("\n") for d in dirs: if self._is_parent_dir(d, DOCKER_DIR): return True return False def _start_docker_daemon(self, options=None): # Check if the Ferry bridge has been created. _create_bridge() # Check if the docker daemon is already running try: if not self._docker_running(): # Use the ferry0 bridge. nflag = ' -b ferry0' # Use the LXC backend. This backend option must be specified # for Docker versions greater than 0.9.0. _, ver = _get_docker_version() if ver > (0, 9, 0): lflag = ' -e lxc' else: lflag = '' # Figure out which storage backend to use. Right # now we only support BTRFS or DeviceMapper, since # AUFS seems to break on some occasions. if self._is_running_btrfs(): logging.warning("using btrfs backend") bflag = ' -s btrfs' else: logging.warning("using devmapper backend") bflag = ' -s devicemapper' # Explicitly supply the DNS. if options and '-d' in options: logging.warning("using custom dns") dflag = '' for d in options['-d']: dflag += ' --dns %s' % d else: logging.warning("using public dns") dflag = ' --dns 8.8.8.8 --dns 8.8.4.4' # We need to fix this so that ICC is set to false. icc = ' --icc=true' cmd = 'nohup ' + DOCKER_CMD + ' -d' + ' -H=' + DOCKER_SOCK + ' -g=' + DOCKER_DIR + ' -p=' + DOCKER_PID + nflag + dflag + lflag + bflag + icc + ' 1>%s 2>&1 &' % DEFAULT_DOCKER_LOG logging.warning(cmd) Popen(cmd, stdout=PIPE, shell=True) # Wait a second to let the docker daemon do its thing. time.sleep(3) return True, "Ferry daemon running on /var/run/ferry.sock" else: return False, "Ferry appears to be already running. If this is an error, please type \'ferry clean\' and try again." 
        except OSError as e:
            logging.error("could not start docker daemon.\n")
            logging.error(e.strerror)
            sys.exit(1)

    def _stop_docker_daemon(self, force=False):
        if force or self._docker_running():
            logging.warning("stopping docker daemon")
            cmd = 'pkill -f ' + DOCKER_CMD
            Popen(cmd, stdout=PIPE, shell=True)
            try:
                os.remove('/var/run/ferry.sock')
            except OSError:
                pass
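Putting the branches of _start_docker_daemon() side by side, the daemon invocation reduces to a bridge, an exec driver, a storage backend, DNS servers, and the ICC flag. A hedged sketch of that flag assembly (the function and argument names are illustrative, not part of Ferry):

def docker_daemon_flags(custom_dns=None, use_btrfs=False, lxc_backend=True):
    flags = ' -b ferry0'                                  # use the ferry0 bridge
    if lxc_backend:
        flags += ' -e lxc'                                # LXC exec driver, Docker > 0.9.0
    flags += ' -s btrfs' if use_btrfs else ' -s devicemapper'
    if custom_dns:
        flags += ''.join(' --dns %s' % d for d in custom_dns)
    else:
        flags += ' --dns 8.8.8.8 --dns 8.8.4.4'           # fall back to public DNS
    return flags + ' --icc=true'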
class DockerFabric(object): def __init__(self): self.repo = 'public' self.docker_user = '******' self.cli = DockerCLI() self.network = DHCPClient(self._get_gateway()) def _get_gateway(self): cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'" netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() mask = map(int, netmask.split(".")) cidr = 1 if mask[3] == 0: cidr = 8 if mask[2] == 0: cidr *= 2 return "%s/%d" % (gw, 32 - cidr) def _get_host(self): cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip() """ Read the location of the directory containing the keys used to communicate with the containers. """ def _read_key_dir(self): f = open(ferry.install.DEFAULT_DOCKER_KEY, 'r') k = f.read().strip().split("://") return k[1], k[0] """ Fetch the current docker version. """ def version(self): return self.cli.version() """ Get the filesystem type associated with docker. """ def get_fs_type(self): return self.cli.get_fs_type() """ Restart the stopped containers. """ def restart(self, container_info): containers = [] for c in container_info: container = self.cli.start(c['container'], c['type'], c['keys'], c['volumes'], c['args']) container.default_user = self.docker_user containers.append(container) # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(2) return containers """ Allocate several instances. """ def alloc(self, container_info): containers = [] mounts = {} for c in container_info: # Get a new IP address for this container and construct # a default command. ip = self.network.assign_ip(c) gw = self._get_gateway().split("/")[0] lxc_opts = ["lxc.network.type = veth", "lxc.network.ipv4 = %s" % ip, "lxc.network.ipv4.gateway = %s" % gw, "lxc.network.link = drydock0", "lxc.network.name = eth0", "lxc.network.flags = up"] c['default_cmd'] = "/service/sbin/startnode init" # Check if we need to forward any ports. host_map = {} for p in c['ports']: p = str(p) s = p.split(":") if len(s) > 1: host = s[0] dest = s[1] else: host = self.network.random_port() dest = s[0] host_map[dest] = [{'HostIp' : '0.0.0.0', 'HostPort' : host}] self.network.forward_rule('0.0.0.0/0', host, ip, dest) # Start a container with a specific image, in daemon mode, # without TTY, and on a specific port container = self.cli.run(service_type = c['type'], image = c['image'], volumes = c['volumes'], keys = c['keys'], open_ports = host_map.keys(), host_map = host_map, expose_group = c['exposed'], hostname = c['hostname'], default_cmd = c['default_cmd'], args= c['args'], lxc_opts = lxc_opts) if container: container.default_user = self.docker_user container.internal_ip = ip containers.append(container) self.network.set_owner(ip, container.container) if 'name' in c: container.name = c['name'] if 'volume_user' in c: mounts[container] = {'user':c['volume_user'], 'vols':c['volumes'].items()} # We should wait for a second to let the ssh server start # on the containers (otherwise sometimes we get a connection refused) time.sleep(2) # Check if we need to set the file permissions # for the mounted volumes. 
for c, i in mounts.items(): for _, v in i['vols']: self.cmd([c], 'chown -R %s %s' % (i['user'], v)) return containers """ Stop the running instances """ def stop(self, containers): for c in containers: self.cli.stop(c.container) """ Remove the running instances """ def remove(self, containers): for c in containers: for p in c.ports.keys(): self.network.delete_rule(c.internal_ip, p) self.network.free_ip(c.internal_ip) self.cli.remove(c.container) """ Save/commit the running instances """ def snapshot(self, containers, cluster_uuid, num_snapshots): snapshots = [] for c in containers: snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, cluster_uuid, c.host_name, num_snapshots) snapshots.append( {'image' : snapshot_name, 'base' : c.image, 'type' : c.service_type, 'name' : c.name, 'args' : c.args, 'ports': c.ports} ) self.cli.commit(c, snapshot_name) return snapshots """ Upload these containers to the specified registry. """ def deploy(self, containers, registry=None): deployed = [] for c in containers: image_name = '%s-%s:DEPLOYED' % (c.image, c.host_name) deployed.append( {'image' : image_name, 'base' : c.image, 'type' : c.service_type, 'name' : c.name, 'args' : c.args, 'ports': c.ports} ) if not registry: self.cli.commit(c, image_name) else: self.cli.push(c, registry) return deployed """ Copy over the contents to each container """ def copy(self, containers, from_dir, to_dir): for c in containers: keydir, _ = self._read_key_dir() opts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' key = '-i ' + keydir + '/id_rsa' scp_cmd = 'scp ' + opts + ' ' + key + ' -r ' + from_dir + ' ' + self.docker_user + '@' + c.internal_ip + ':' + to_dir output = Popen(scp_cmd, stdout=PIPE, shell=True).stdout.read() """ Run a command on all the containers and collect the output. """ def cmd(self, containers, cmd): all_output = {} keydir, _ = self._read_key_dir() key = keydir + '/id_rsa' for c in containers: ip = self.docker_user + '@' + c.internal_ip ssh = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd logging.warning(ssh) output = Popen(ssh, stdout=PIPE, shell=True).stdout.read() all_output[c] = output.strip() return all_output
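Note that DockerFabric._get_gateway() only distinguishes /24 and /16 netmasks on the drydock0 bridge. If a general conversion were ever needed, counting the set bits of the dotted netmask covers any mask; a minimal sketch, not part of the original code:

def netmask_to_prefix(netmask):
    # '255.255.255.0' -> 24, '255.255.0.0' -> 16, '255.255.255.128' -> 25
    return sum(bin(int(octet)).count('1') for octet in netmask.split('.'))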