Example #1
class Installer(object):
    def __init__(self, cli=None):
        self.network = DHCPClient()
        self.fabric = DockerFabric(bootstrap=True)
        self.mongo = MongoInitializer()
        self.mongo.fabric = self.fabric
        self.mongo.template_dir = DEFAULT_TEMPLATE_DIR + '/mongo/'
        self.cli = cli

    def get_ferry_account(self):
        """
        Read in the remote Ferry DB account information. Used
        for registering applications. 
        """
        with open(ferry.install.DEFAULT_DOCKER_LOGIN, 'r') as f:
            args = yaml.safe_load(f)
            args = args['ferry']
            if all(k in args for k in ("user","key","server")):
                return args['user'], args['key'], args['server']
        return None, None, None

    def create_signature(self, request, key):
        """
        Generate a signed request.
        """
        return hmac.new(key, request, hashlib.sha256).hexdigest()
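
    # A hedged sketch of the verifying side of this scheme (hmac.compare_digest
    # is stdlib on Python >= 2.7.7; the names below are illustrative, not part
    # of Ferry):
    #
    #   expected = hmac.new(key, request, hashlib.sha256).hexdigest()
    #   if not hmac.compare_digest(expected, signature):
    #       raise ValueError("signature mismatch")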

    def store_app(self, app, ext, content):
        """
        Store the application in the global directory. 
        """
        try:
            # We may need to create the parent directory
            # if this is the first time an application from this user
            # is downloaded. 
            file_name = os.path.join(DEFAULT_FERRY_APPS, app + ext)
            os.makedirs(os.path.dirname(file_name))
            with open(file_name, "w") as f:
                f.write(content)
            return file_name
        except IOError as e:
            logging.error(e)
            return None
        except OSError as e:
            logging.error(e)
            return None
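
    # A note on the OSError handler above: on Python 2, os.makedirs raises
    # OSError when the directory already exists, so re-downloading an app
    # from the same user lands there. A sketch of a narrower guard
    # (illustrative, not in the original):
    #
    #   if not os.path.isdir(os.path.dirname(file_name)):
    #       os.makedirs(os.path.dirname(file_name))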

    def _clean_rules(self):
        self.network.clean_rules()

    def _reset_ssh_key(self, root):
        """
        Reset the temporary ssh key. This function should only be
        called from the server. 
        """
        keydir, tmp = self.cli._read_key_dir(root=root)

        # Only reset temporary keys. User-defined key directories
        # shouldn't be touched. 
        if keydir != DEFAULT_KEY_DIR and tmp == "tmp":
            shutil.rmtree(keydir)

        # Mark that we are using the default package keys
        if root:
            global GLOBAL_ROOT_DIR
            GLOBAL_ROOT_DIR = 'tmp://' + DEFAULT_KEY_DIR
            _touch_file(_get_key_dir(root=True, server=True), GLOBAL_ROOT_DIR, root=True)
        else:
            global GLOBAL_KEY_DIR
            GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR
            _touch_file(_get_key_dir(root=False, server=True), GLOBAL_KEY_DIR, root=True)
        
    def _process_ssh_key(self, options, root=False):
        """
        Initialize the ssh key location. This method is used
        when starting the ferry server. 
        """
        if root:
            global GLOBAL_ROOT_DIR
            if options and '-k' in options:
                GLOBAL_ROOT_DIR = 'user://' + self.fetch_image_keys(options['-k'][0])
            else:
                GLOBAL_ROOT_DIR = 'tmp://' + DEFAULT_KEY_DIR
                logging.warning("using key directory " + GLOBAL_ROOT_DIR)
                _touch_file(_get_key_dir(root=True, server=True), GLOBAL_ROOT_DIR, root=True)
        else:
            global GLOBAL_KEY_DIR
            if options and '-k' in options:
                GLOBAL_KEY_DIR = 'user://' + self.fetch_image_keys(options['-k'][0])
            else:
                GLOBAL_KEY_DIR = 'tmp://' + DEFAULT_KEY_DIR
                logging.warning("using key directory " + GLOBAL_KEY_DIR)
                _touch_file(_get_key_dir(root=False, server=False), GLOBAL_KEY_DIR, root=False)
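
    # The options argument appears to be a docopt-style dict mapping flags
    # to value lists. An illustrative call (the path is hypothetical and
    # assumed to exist, so fetch_image_keys returns it unchanged):
    #
    #   self._process_ssh_key({'-k': ['/home/user/keys']}, root=False)
    #   # -> GLOBAL_KEY_DIR == 'user:///home/user/keys'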

    def install(self, args, options):
        # Check if the host is actually 64-bit. If not, raise a warning and quit.
        if not _supported_arch():
            return 'Your architecture appears to be 32-bit.\nOnly 64-bit architectures are supported at the moment.'

        if not _supported_python():
            return 'You appear to be running Python 3.\nOnly Python 2 is supported at the moment.'

        if not _supported_lxc():
            return 'You appear to be running an older version of LXC.\nOnly versions > 0.7.5 are supported.'

        if not _has_ferry_user():
            return 'You do not appear to have the \'docker\' group configured. Please create the \'docker\' group and try again.'

        # Create the various directories.
        try:
            if not os.path.isdir(DOCKER_DIR):
                os.makedirs(DOCKER_DIR)
                self._change_permission(DOCKER_DIR)
        except OSError as e:
            logging.error("Could not install Ferry.\n") 
            logging.error(e.strerror)
            sys.exit(1)

        # Start the Ferry docker daemon. If it does not successfully
        # start, print out a msg. 
        logging.warning("all prerequisites met...")
        start, msg = self._start_docker_daemon(options)
        if not start:
            logging.error('ferry docker daemon not started')
            return msg

        # Normally we don't want to build the Dockerfiles,
        # but sometimes we may for testing, etc. 
        build = False
        if options and '-b' in options:
            build = True

        if options and '-u' in options:
            if len(options['-u']) > 0 and options['-u'][0] != True:
                logging.warning("performing select rebuild (%s)" % str(options['-u']))
                self.build_from_list(options['-u'], 
                                     DEFAULT_IMAGE_DIR,
                                     DEFAULT_DOCKER_REPO, build, recurse=False)
            else:
                logging.warning("performing forced rebuild")
                self.build_from_dir(DEFAULT_IMAGE_DIR, DEFAULT_DOCKER_REPO, build)
        else:
            # We want to be selective about which images
            # to rebuild. Useful if one image breaks, etc. 
            to_build = self.check_images(DEFAULT_IMAGE_DIR,
                                         DEFAULT_DOCKER_REPO)
            if len(to_build) > 0:
                logging.warning("performing select rebuild (%s)" % str(to_build))
                self.build_from_list(to_build, 
                                     DEFAULT_IMAGE_DIR,
                                     DEFAULT_DOCKER_REPO, build)

        # Check that all the images were built.
        not_installed = self._check_all_images()
        if len(not_installed) == 0:
            return 'installed ferry'
        else:
            logging.error('images not built: ' + str(not_installed))
            return 'Some images were not installed. Please type \'ferry install\' again.'

    def _check_all_images(self):
        not_installed = []
        images = ['mongodb', 'ferry-base', 'hadoop-base', 'hadoop', 'hadoop-client',
                  'hive-metastore', 'gluster', 'openmpi', 'openmpi-client', 'cassandra', 'cassandra-client', 
                  'titan', 'spark']
        for i in images:
            if not self._check_image_installed("%s/%s" % (DEFAULT_DOCKER_REPO, i)):
                not_installed.append(i)
        return not_installed

    def _check_and_pull_image(self, image_name):
        if not self._check_image_installed(image_name):
            self._pull_image(image_name, on_client=False)

        return self._check_image_installed(image_name)

    def _check_image_installed(self, image_name):
        cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name
        output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
        if output.strip() == '[]':
            return False
        else:
            return True
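
    # This check appears to rely on the behavior of older Docker releases,
    # where 'docker inspect' prints '[]' on stdout (and an error on stderr)
    # when the named image does not exist.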

    def _transfer_config(self, config_dirs):
        """
        Transfer the configuration to the containers. 
        """
        for c in config_dirs:
            container = c[0]
            from_dir = c[1]
            to_dir = c[2]
            self.fabric.copy([container], from_dir, to_dir)
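
    # Each entry in config_dirs is a (container, from_dir, to_dir) triple,
    # e.g. (paths illustrative):
    #
    #   config_dirs = [(mongobox, '/tmp/fdb-1a2b/mongo', '/service/conf')]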

    def start_web(self, options=None, clean=False):
        start, msg = self._start_docker_daemon(options)
        if not clean and not start:
            # We are trying to start the web services but the Docker
            # daemon won't start. If we're cleaning, it's not a big deal. 
            logging.error(msg) 
            sys.exit(1)

        # Check if the user wants to use a specific key directory. 
        self._process_ssh_key(options=options, root=True)
                               
        # Check if the user-application directory exists.
        # If not, create it. 
        try:
            if not os.path.isdir(DEFAULT_FERRY_APPS):
                os.makedirs(DEFAULT_FERRY_APPS)
                self._change_permission(DEFAULT_FERRY_APPS)
        except OSError as e:
            logging.error("Could not create application directory.\n") 
            logging.error(e.strerror)
            sys.exit(1)

        # Check if the Mongo directory exists yet. If not
        # go ahead and create it. 
        try:
            if not os.path.isdir(DEFAULT_MONGO_DB):
                os.makedirs(DEFAULT_MONGO_DB)
                self._change_permission(DEFAULT_MONGO_DB)
            if not os.path.isdir(DEFAULT_MONGO_LOG):
                os.makedirs(DEFAULT_MONGO_LOG)
                self._change_permission(DEFAULT_MONGO_LOG)
        except OSError as e:
            logging.error("Could not start ferry servers.\n") 
            logging.error(e.strerror)
            sys.exit(1)

        # Check if the Mongo image is built.
        if not self._check_image_installed('%s/mongodb' % DEFAULT_DOCKER_REPO):
            logging.error("Could not start ferry servers.\n") 
            logging.error("MongoDB images not found. Try executing 'ferry install'.")
            sys.exit(1)

        # Check if there are any other Mongo instances running.
        self._clean_web()

        # Copy over the ssh keys.
        self.cli._check_ssh_key(root=True, server=True)

        # Start the Mongo server. Create a new configuration and
        # manually start the container. 
        keydir, _ = self.cli._read_key_dir(root=True)
        volumes = { DEFAULT_MONGO_LOG : self.mongo.container_log_dir,
                    DEFAULT_MONGO_DB : self.mongo.container_data_dir }
        mongoplan = {'image':'ferry/mongodb',
                     'type':'ferry/mongodb', 
                     'volumes':volumes,
                     'volume_user':DEFAULT_FERRY_OWNER, 
                     'keys': { '/service/keys' : keydir }, 
                     'ports':[],
                     'exposed':self.mongo.get_exposed_ports(1), 
                     'hostname':'ferrydb',
                     'netenable':True, 
                     'args': 'trust'
                     }
        mongoconf = self.mongo.generate(1)
        mongoconf.uuid = 'fdb-' + str(uuid.uuid4()).split('-')[0]
        mongobox = self.fabric.alloc([mongoplan])[0]
        if not mongobox:
            logging.error("Could not start MongoDB image")
            sys.exit(1)

        ip = mongobox.internal_ip
        _touch_file('/tmp/mongodb.ip', ip, root=True)

        # Once the container is started, we'll need to copy over the
        # configuration files, and then manually send the 'start' command. 
        s = { 'container':mongobox,
              'data_dev':'eth0', 
              'data_ip':mongobox.internal_ip, 
              'manage_ip':mongobox.internal_ip,
              'host_name':mongobox.host_name,
              'type':mongobox.service_type,
              'args':mongobox.args }
        config_dirs, entry_point = self.mongo.apply(mongoconf, [s])
        self._transfer_config(config_dirs)
        self.mongo.start_service([mongobox], entry_point, self.fabric)

        # Set the MongoDB env. variable. 
        my_env = os.environ.copy()
        my_env['MONGODB'] = ip

        # Sleep a little while to let Mongo start accepting connections.
        time.sleep(2)

        # Start the DHCP server
        logging.warning("starting dhcp server")
        cmd = 'gunicorn -t 3600 -b 127.0.0.1:5000 -w 1 ferry.ip.dhcp:app &'
        Popen(cmd, stdout=PIPE, shell=True, env=my_env)
        time.sleep(2)

        # Reserve the Mongo IP.
        self.network.reserve_ip(ip)

        # Start the Ferry HTTP servers
        logging.warning("starting http servers on port 4000 and mongo %s" % ip)
        cmd = 'gunicorn -e FERRY_HOME=%s -t 3600 -w 3 -b 127.0.0.1:4000 ferry.http.httpapi:app &' % FERRY_HOME
        Popen(cmd, stdout=PIPE, shell=True, env=my_env)

    def stop_web(self):
        # Shutdown the mongo instance
        if os.path.exists('/tmp/mongodb.ip'):
            f = open('/tmp/mongodb.ip', 'r')
            ip = f.read().strip()
            f.close()

            keydir, tmp = self.cli._read_key_dir(root=True)
            key = keydir + "/id_rsa"
            cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode stop' % (key, ip)
            logging.warning(cmd)
            output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
            logging.warning(output)
            cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s /service/sbin/startnode halt' % (key, ip)
            logging.warning(cmd)
            output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
            logging.warning(output)
            os.remove('/tmp/mongodb.ip')

        # Kill all the gunicorn instances. 
        logging.warning("stopping http servers")
        cmd = 'ps -eaf | grep httpapi | awk \'{print $2}\' | xargs kill -15'
        Popen(cmd, stdout=PIPE, shell=True)
        cmd = 'ps -eaf | grep ferry.ip.dhcp | awk \'{print $2}\' | xargs kill -15'
        Popen(cmd, stdout=PIPE, shell=True)

    def _clean_web(self):
        docker = DOCKER_CMD + ' -H=' + DOCKER_SOCK
        cmd = docker + ' ps | grep ferry/mongodb | awk \'{print $1}\' | xargs ' + docker + ' stop '
        logging.warning("cleaning previous mongo resources")
        logging.warning(cmd)
        child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        child.stdout.read()
        child.stderr.read()

    def _copytree(self, src, dst):
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                shutil.copytree(s, d)
            else:
                shutil.copy2(s, d)

    def _change_permission(self, location):
        uid, gid = _get_ferry_user()
        os.chown(location, uid, gid)

        if os.path.isdir(location):        
            os.chmod(location, 0774)
            for entry in os.listdir(location):
                self._change_permission(os.path.join(location, entry))
        else:
            # Check if this file has a file extension. If not,
            # then assume it's a binary.
            s = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH
            if len(location.split(".")) == 1:
                s |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
            os.chmod(location, s)

    """
    Ask for the key directory.
    """
    def fetch_image_keys(self, key_dir=None):
        if key_dir and os.path.exists(key_dir):
            return key_dir
        else:
            return DEFAULT_KEY_DIR

    """
    Check if the dockerfiles are already built. 
    """
    def check_images(self, image_dir, repo):
        if self._docker_running():
            build_images = []
            for f in os.listdir(image_dir):
                dockerfile = image_dir + '/' + f + '/Dockerfile'
                image_names = self._check_dockerfile(dockerfile, repo)
                if len(image_names) > 0:
                    build_images += image_names
            return build_images
        else:
            logging.error("ferry daemon not started")

    """
    Build the docker images
    """
    def build_from_list(self, to_build, image_dir, repo, build=False, recurse=True):
        if self._docker_running():
            built_images = {}
            for f in os.listdir(image_dir):
                logging.warning("transforming dockerfile")
                self._transform_dockerfile(image_dir, f, repo)

            for f in os.listdir("/tmp/dockerfiles/"):
                dockerfile = '/tmp/dockerfiles/' + f + '/Dockerfile'
                images = self._get_image(dockerfile)
                intersection = [i for i in images if i in to_build]
                if len(intersection) > 0:
                    image = images.pop(0)
                    logging.warning("building image " + image)
                    self._build_image(image, dockerfile, repo, built_images, recurse=recurse, build=build)

                    if len(images) > 0:
                        logging.warning("tagging images " + image)
                        self._tag_images(image, repo, images)

            # After building everything, get rid of the temp dir.
            # shutil.rmtree("/tmp/dockerfiles")
        else:
            logging.error("ferry daemon not started")

    """
    Build the docker images
    """
    def build_from_dir(self, image_dir, repo, build=False):
        if self._docker_running():
            built_images = {}
            for f in os.listdir(image_dir):
                self._transform_dockerfile(image_dir, f, repo)
            for f in os.listdir("/tmp/dockerfiles/"):
                dockerfile = "/tmp/dockerfiles/" + f + "/Dockerfile"
                images = self._get_image(dockerfile)
                image = images.pop(0)
                self._build_image(image, dockerfile, repo, built_images, recurse=True, build=build)

                if len(images) > 0:
                    logging.warning("tagging images " + image)
                    self._tag_images(image, repo, images)

            # After building everything, get rid of the temp dir.
            # shutil.rmtree("/tmp/dockerfiles")
        else:
            logging.error("ferry daemon not started")

    def _docker_running(self):
        return os.path.exists('/var/run/ferry.sock')

    def _check_dockerfile(self, dockerfile, repo):
        not_installed = []
        images = self._get_image(dockerfile)
        for image in images:
            qualified = DEFAULT_DOCKER_REPO + '/' + image
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect ' + qualified + ' 2> /dev/null'
            output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
            if output.strip() == '[]':
                not_installed.append(image)
        return not_installed

    def _transform_dockerfile(self, image_dir, f, repo):
        if not os.path.exists("/tmp/dockerfiles/" + f):
            shutil.copytree(image_dir + '/' + f, '/tmp/dockerfiles/' + f)
    
        out_file = "/tmp/dockerfiles/" + f + "/Dockerfile"
        out = open(out_file, "w+")
        uid, gid = _get_ferry_user()
        download_url = _get_download_url()
        changes = { "USER" : repo,
                    "DOWNLOAD_URL" : download_url,
                    "DOCKER" : gid }
        for line in open(image_dir + '/' + f + '/Dockerfile', "r"):
            s = Template(line).substitute(changes)
            out.write(s)
        out.close()

    def _build_image(self, image, f, repo, built_images, recurse=False, build=False):
        base = self._get_base(f)
        if recurse and base != "ubuntu:14.04":
            image_dir = os.path.dirname(os.path.dirname(f))
            dockerfile = image_dir + '/' + base + '/Dockerfile'
            self._build_image(base, dockerfile, repo, built_images, recurse, build)

        if not image in built_images:
            if base == "ubuntu:14.04":
                self._pull_image(base)

            built_images[image] = True
            self._compile_image(image, repo, os.path.dirname(f), build)

    def _get_image(self, dockerfile):
        names = []
        for l in open(dockerfile, 'r'):
            if l.strip() != '':
                s = l.split()
                if len(s) > 0:
                    if s[0].upper() == 'NAME':
                        names.append(s[1].strip())
        return names
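
    # Ferry's Dockerfiles carry a non-standard NAME directive that this
    # parser collects, e.g. a file starting with:
    #
    #   NAME hadoop
    #   FROM ferry/hadoop-base
    #
    # yields ['hadoop'] here, while _get_base below extracts 'hadoop-base'.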

    def _get_base(self, dockerfile):
        base = None
        for l in open(dockerfile, 'r'):
            s = l.split()
            if len(s) > 0:
                if s[0].upper() == 'FROM':
                    base = s[1].strip().split("/")
                    return base[-1]
        return base

    def _continuous_print(self, process, on_client=True):
        while True:
            try:
                out = process.stdout.read(15)
                if out == '':
                    break
                else:
                    if on_client:
                        sys.stdout.write(out)
                        sys.stdout.flush()
                    else:
                        logging.warning("downloading image...")
            except IOError as e:
                logging.warning(e)

        try:
            errmsg = process.stderr.readline()
            if errmsg and errmsg != '':
                logging.warning(errmsg)
            else:
                logging.warning("downloaded image!")
        except IOError:
            pass

    def _pull_image(self, image, tag=None, on_client=True):
        if not tag:
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s' % image
        else:
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' pull %s:%s' % (image, tag)

        logging.warning(cmd)
        child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        self._continuous_print(child, on_client=on_client)

        # Now tag the image with the 'latest' tag. 
        if tag and tag != 'latest':
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s:%s %s:%s' % (image, tag, image, 'latest')
            logging.warning(cmd)
            Popen(cmd, stdout=PIPE, shell=True)
        
    def _compile_image(self, image, repo, image_dir, build=False):
        # Now build the image. 
        if build:
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' build --rm=true -t' + ' %s/%s %s' % (repo, image, image_dir)
            logging.warning(cmd)
            child = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            self._continuous_print(child)

            # Now tag the image. 
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s %s/%s:%s' % (repo, image, repo, image, ferry.__version__)
            logging.warning(cmd)
            child = Popen(cmd, stdout=PIPE, shell=True)
        else:
            # Just pull the image from the public repo. 
            image_name = "%s/%s" % (repo, image)
            self._pull_image(image_name, tag=ferry.__version__)

    def _tag_images(self, image, repo, alternatives):
        for a in alternatives:
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:%s %s/%s:%s' % (repo, image, ferry.__version__, repo, a, ferry.__version__)
            logging.warning(cmd)
            child = Popen(cmd, stdout=PIPE, shell=True)
            cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' tag' + ' %s/%s:latest %s/%s:latest' % (repo, image, repo, a)
            logging.warning(cmd)
            child = Popen(cmd, stdout=PIPE, shell=True)

    def _clean_images(self):
        cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' images | grep none | awk \'{print $3}\' | xargs ' + DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' rmi'
        Popen(cmd, stdout=PIPE, shell=True)

    def _is_parent_dir(self, pdir, cdir):
        pdirs = pdir.split("/")
        cdirs = cdir.split("/")

        # Parent directory can never be longer than
        # the child directory. 
        if len(pdirs) > len(cdirs):
            return False
            
        for i in range(0, len(pdirs)):
            # The parent directory should always match
            # the child directory. Ignore the empty strings
            # at the start and end produced by "split". 
            if pdirs[i] != "" and pdirs[i] != cdirs[i]:
                return False

        return True
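
    # For example, _is_parent_dir('/var/lib', '/var/lib/docker') returns
    # True: the leading empty string from split("/") is skipped, and 'var'
    # and 'lib' match their counterparts in the child path.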

    def _is_running_btrfs(self):
        logging.warning("checking for btrfs")
        cmd = 'cat /etc/mtab | grep btrfs | awk \'{print $2}\''
        output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
        if output.strip() != "":
            dirs = output.strip().split("\n")
            for d in dirs:
                if self._is_parent_dir(d, DOCKER_DIR):
                    return True
        return False
        
    def _start_docker_daemon(self, options=None):
        # Check if the docker daemon is already running
        try:
            if not self._docker_running():
                bflag = ''
                if self._is_running_btrfs():
                    logging.warning("using btrfs backend")
                    bflag = ' -s btrfs'

                # Explicitly supply the DNS.
                if options and '-n' in options:
                    logging.warning("using custom dns")
                    dflag = ''
                    for d in options['-n']:
                        dflag += ' --dns %s' % d
                else:
                    logging.warning("using public dns")
                    dflag = ' --dns 8.8.8.8 --dns 8.8.4.4'

                # We need to fix this so that ICC is set to false. 
                icc = ' --icc=true'
                cmd = 'nohup ' + DOCKER_CMD + ' -d' + ' -H=' + DOCKER_SOCK + ' -g=' + DOCKER_DIR + ' -p=' + DOCKER_PID + dflag + bflag + icc + ' 1>%s  2>&1 &' % DEFAULT_DOCKER_LOG
                logging.warning(cmd)
                Popen(cmd, stdout=PIPE, shell=True)

                # Wait a second to let the docker daemon do its thing.
                time.sleep(2)
                return True, "Ferry daemon running on /var/run/ferry.sock"
            else:
                return False, "Ferry appears to be already running. If this is an error, please type \'ferry clean\' and try again."
        except OSError as e:
            logging.error("could not start docker daemon.\n") 
            logging.error(e.strerror)
            sys.exit(1)

    def _stop_docker_daemon(self, force=False):
        if force or self._docker_running():
            logging.warning("stopping docker daemon")
            cmd = 'pkill -f docker-ferry'
            Popen(cmd, stdout=PIPE, shell=True)
            try:
                os.remove('/var/run/ferry.sock')
            except OSError:
                pass

    def _get_gateway(self):
        cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

        cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'"
        netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()
        mask = map(int, netmask.split("."))
        cidr = 1
        if mask[3] == 0:
            cidr = 8
        if mask[2] == 0:
            cidr *= 2

        return "%s/%d" % (gw, 32 - cidr)
Example #2
class DockerManager(object):
    SSH_PORT = 22

    def __init__(self):
        # Image names
        self.DOCKER_GLUSTER = DEFAULT_DOCKER_REPO + '/gluster'
        self.DOCKER_HADOOP = DEFAULT_DOCKER_REPO + '/hadoop'
        self.DOCKER_HADOOP_CLIENT = DEFAULT_DOCKER_REPO + '/hadoop-client'
        self.DOCKER_HIVE = DEFAULT_DOCKER_REPO + '/hive-metastore'
        self.DOCKER_CASSANDRA = DEFAULT_DOCKER_REPO + '/cassandra'
        self.DOCKER_CASSANDRA_CLIENT = DEFAULT_DOCKER_REPO + '/cassandra-client'
        self.DOCKER_TITAN = DEFAULT_DOCKER_REPO + '/titan'
        self.DOCKER_MPI = DEFAULT_DOCKER_REPO + '/openmpi'
        self.DOCKER_MPI_CLIENT = DEFAULT_DOCKER_REPO + '/openmpi-client'

        # Generate configuration.
        self.config = ConfigFactory()

        # Docker tools
        self.docker = DockerFabric()
        self.deploy = DeployEngine(self.docker)

        # Initialize the state. 
        self._init_state_db()

    """
    Contact the state database. 
    """
    def _init_state_db(self):
        self.mongo = MongoClient(os.environ['MONGODB'], 27017, connectTimeoutMS=6000)

        self.cluster_collection = self.mongo['state']['clusters']
        self.service_collection = self.mongo['state']['services']
        self.snapshot_collection = self.mongo['state']['snapshots']

    def _serialize_containers(self, containers):
        info = []
        for c in containers:
            info.append(c.json())
        return info

    """
    Update the service configuration. 
    """
    def _update_service_configuration(self, service_uuid, service_info):
        service = self.service_collection.find_one( {'uuid':service_uuid} )
        if not service:
            self.service_collection.insert( service_info )
        else:
            self.service_collection.update( {'uuid' : service_uuid},
                                            {'$set': service_info} )

    """
    Get the service configuration. 
    """
    def _get_service_configuration(self, service_uuid, detailed=False):
        info = self.service_collection.find_one( {'uuid':service_uuid}, {'_id':False} )
        if detailed:
            return info
        else:
            return info['entry']

    def _get_inspect_info(self, service_uuid):
        raw_info = self._get_service_configuration(service_uuid, detailed=True)
        json_reply = {'uuid' : service_uuid}

        # Get individual container information
        json_reply['containers'] = []
        for c in raw_info['containers']:
            json_reply['containers'].append(c)

        # Now get the entry information
        json_reply['entry'] = raw_info['entry']

        # Check if this service has a user-defined
        # unique name.
        if 'uniq' in raw_info:
            json_reply['uniq'] = raw_info['uniq']
        else:
            json_reply['uniq'] = None

        return json_reply
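
    # The reply mirrors the stored service document, e.g. (illustrative):
    #
    #   {'uuid': 'se-0', 'containers': [...], 'entry': {...}, 'uniq': None}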

    def _get_snapshot_info(self, stack_uuid):
        v = self.cluster_collection.find_one( {'uuid' : stack_uuid} )
        s = self.snapshot_collection.find_one( {'snapshot_uuid':v['snapshot_uuid']} )
        if s:
            time = s['snapshot_ts'].strftime("%m/%d/%Y (%I:%M %p)")
            return { 'snapshot_ts' : time,
                     'snapshot_uuid' : v['snapshot_uuid'] }

    def _get_service(self, service_type):
        service = None
        if service_type == 'mpi':
            service = self.config.mpi
        elif service_type == 'yarn':
            service = self.config.yarn
        elif service_type == 'gluster':
            service = self.config.gluster
        elif service_type == 'cassandra':
            service = self.config.cassandra
        elif service_type == 'titan':
            service = self.config.titan
        elif service_type == 'hadoop':
            service = self.config.hadoop
        elif service_type == 'hive':
            service = self.config.hadoop
        elif service_type == 'hadoop-client':
            service = self.config.hadoop_client
        elif service_type == 'cassandra-client':
            service = self.config.cass_client
        elif service_type == 'mpi-client':
            service = self.config.mpi_client
        else:
            logging.error("unknown service " + service_type)
        return service

    """
    Helper method to copy directories. shutil fails if the 
    destination already exists. 
    """
    def _copytree(self, src, dst):
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                shutil.copytree(s, d)
            else:
                shutil.copy2(s, d)

    def _copy_instance_logs(self, instance, to_dir):
        service = self._get_service(instance.service_type)
        log_dir = service.container_log_dir

        # We're performing a reverse lookup. 
        for d in instance.volumes.keys():
            if instance.volumes[d] == log_dir:
                self._copytree(d, to_dir)
                return

    def copy_logs(self, stack_uuid, to_dir):
        connectors, compute, storage = self._get_cluster_instances(stack_uuid)
        storage_dir = to_dir + '/' + stack_uuid + '/storage'
        compute_dir = to_dir + '/' + stack_uuid + '/compute'
        connector_dir = to_dir + '/' + stack_uuid + '/connectors'

        for c in connectors:
            for i in c['instances']:
                self._copy_instance_logs(i, connector_dir)
        for c in compute:
            for i in c['instances']:
                self._copy_instance_logs(i, compute_dir)
        for c in storage:
            for i in c['instances']:
                self._copy_instance_logs(i, storage_dir)

    """
    Inspect a deployed stack. 
    """
    def inspect_deployed(self, uuid, registry):
        json_reply = {}

        # Need to inspect the registry to make sure
        # that all the images are available. 

        return json.dumps(json_reply, 
                          sort_keys=True,
                          indent=2,
                          separators=(',',':'))

    """
    Inspect a running stack. 
    """
    def inspect_stack(self, stack_uuid):
        json_reply = {}

        # Get the collection of all backends and connector UUIDS.
        cluster = self.cluster_collection.find_one( {'uuid': stack_uuid} )
        connector_uuids = cluster['connectors']
        storage_uuids = []
        compute_uuids = []
        for b in cluster['backends']['uuids']:
            if b['storage'] != None:
                storage_uuids.append(b['storage'])

            if b['compute'] != None:
                for c in b['compute']:
                    compute_uuids.append(c)

        # For each UUID, collect the detailed service information. 
        json_reply['connectors'] = []            
        for uuid in connector_uuids:
            json_reply['connectors'].append(self._get_inspect_info(uuid))

        json_reply['storage'] = []
        for uuid in storage_uuids:
            json_reply['storage'].append(self._get_inspect_info(uuid))

        json_reply['compute'] = []
        for uuid in compute_uuids:
            json_reply['compute'].append(self._get_inspect_info(uuid))

        # Now append some snapshot info. 
        json_reply['snapshots'] = self._get_snapshot_info(stack_uuid)

        return json.dumps(json_reply, 
                          sort_keys=True,
                          indent=2,
                          separators=(',',':'))

    """
    Query the available snapshots. 
    """
    def query_snapshots(self, constraints=None):
        json_reply = {}

        values = self.snapshot_collection.find()
        for v in values:
            c = self.cluster_collection.find_one( {'uuid':v['cluster_uuid']} )
            time = v['snapshot_ts'].strftime("%m/%d/%Y (%I:%M %p)")
            json_reply[v['snapshot_uuid']] = { 'uuid' : v['snapshot_uuid'],
                                               'base' : c['base'], 
                                               'snapshot_ts' : time }
        return json.dumps(json_reply, 
                          sort_keys=True,
                          indent=2,
                          separators=(',',':'))
    
    """
    Query the available stacks. 
    """
    def query_stacks(self, constraints=None):
        json_reply = {}

        if constraints:
            values = self.cluster_collection.find(constraints)
        else:
            values = self.cluster_collection.find()

        for v in values:
            time = ''
            s = self.snapshot_collection.find_one( {'snapshot_uuid':v['snapshot_uuid']} )
            if s:
                time = v['ts'].strftime("%m/%d/%Y (%I:%M %p)")
            json_reply[v['uuid']] = { 'uuid' : v['uuid'],
                                      'base' : v['base'], 
                                      'ts' : time,
                                      'backends' : v['backends']['uuids'],
                                      'connectors': v['connectors'],
                                      'status' : v['status']}
        return json.dumps(json_reply, 
                          sort_keys=True,
                          indent=2,
                          separators=(',',':'))

    """
    Query the deployed applications. 
    """
    def query_deployed(self, conf=None):
        json_reply = {}

        cursors = self.deploy.find(conf=conf)
        for c in cursors:
            for v in c:
                time = v['ts'].strftime("%m/%d/%Y (%I:%M %p)")
                c = self.cluster_collection.find_one( {'uuid':v['cluster_uuid']} )
                
                json_reply[v['uuid']] = { 'uuid' : v['uuid'],
                                          'base' : c['base'], 
                                          'ts' : time,
                                          'backends' : c['backends']['uuids'],
                                          'connectors': c['connectors'],
                                          'status': 'deployed' }
        return json.dumps(json_reply, 
                          sort_keys=True,
                          indent=2,
                          separators=(',',':'))

    """
    Allocate new UUIDs. 
    """
    def _new_service_uuid(self):
        services = self.service_collection.find()
        return "se-" + str(services.count())

    def _new_stack_uuid(self):
        clusters = self.cluster_collection.find()
        return "sa-" + str(clusters.count())

    def _new_snapshot_uuid(self, cluster_uuid):
        return "sn-%s-%s" % (cluster_uuid, str(uuid.uuid4()))

    """
    Determine if the supplied UUID is a valid snapshot. 
    """
    def is_snapshot(self, snapshot_uuid):
        v = self.snapshot_collection.find_one( {'snapshot_uuid':snapshot_uuid} )
        if v:
            return True
        else:
            return False

    """
    Check if the UUID is of a stopped application. 
    """
    def is_stopped(self, uuid, conf=None):
        cluster = self.cluster_collection.find_one( {'uuid':uuid} )
        if cluster:
            return cluster['status'] == 'stopped'
        return False

    """
    Check if the UUID is of a removed application. 
    """
    def is_removed(self, uuid, conf=None):
        cluster = self.cluster_collection.find_one( {'uuid':uuid} )
        if cluster:
            return cluster['status'] == 'removed'
        return False

    """
    Check if the UUID is of a deployed application. 
    """
    def is_deployed(self, uuid, conf=None):
        v = self.deploy.find( one=True, 
                              spec = {'uuid':uuid},
                              conf = conf )
        if v:
            return True
        else:
            return False

    """
    Get the base image of this cluster. 
    """
    def get_base_image(self, uuid):
        cluster = self.cluster_collection.find_one( {'uuid':uuid} )
        if cluster:
            return cluster['base']
        return None

    """
    Create a new data directory
    """
    def _new_data_dir(self, service_uuid, storage_type, storage_id):
        # First check if this data directory already exists. If so,
        # go ahead and delete it (this will hopefully get rid of all xattr stuff)
        new_dir = 'tmp/%s/data_%s' % (service_uuid, storage_type + '_' + str(storage_id))
        return self._create_dir(new_dir, replace=True)

    """
    Create a new log directory
    """
    def _new_log_dir(self, service_uuid, storage_type, storage_id, replace = False):
        # First check if this data directory already exists. If so,
        # go ahead and delete it (this will hopefully get rid of all xattr stuff)
        new_dir = 'tmp/%s/log_%s' % (service_uuid, storage_type + '_' + str(storage_id))
        return self._create_dir(new_dir, replace=replace)

    def _create_dir(self, new_dir, replace=False):
        # See if we need to delete an existing data dir.
        if os.path.exists(new_dir) and replace:
            logging.warning("deleting dir " + new_dir)
            shutil.rmtree(new_dir)

        try:
            # Now create the new directory and assign
            # the right permissions. 
            sh.mkdir('-p', new_dir)
        except Exception:
            logging.warning(new_dir + " already exists")

        try:
            uid, gid = ferry.install._get_ferry_user()
            os.chown(new_dir, uid, gid)
            os.chmod(new_dir, 0774)
        except OSError as e:
            logging.warning("could not change permissions for " + new_dir)

        return os.path.abspath(new_dir)

    def _get_service_environment(self,
                                 service, 
                                 instance, 
                                 num_instances):
        container_dir = service.container_data_dir
        log_dir = service.container_log_dir
        host_name = service.new_host_name(instance)
        ports = service.get_necessary_ports(num_instances)
        exposed = service.get_exposed_ports(num_instances)
        
        # Add SSH port for management purposes. 
        exposed.append(DockerManager.SSH_PORT)

        return container_dir, log_dir, host_name, ports, exposed

    """
    Prepare the environment for storage containers.
    """
    def _prepare_storage_environment(self, 
                                     service_uuid, 
                                     num_instances, 
                                     storage_type, 
                                     layers,
                                     args = None,
                                     replace = False):
        # Generate the data volumes. This basically defines which
        # directories on the host get mounted in the container. 
        ports = []
        exposed = []
        plan = {'localhost':{'containers':[]}}

        # Get the actual number of containers needed. 
        if storage_type == 'gluster':
            instances = self.config.gluster.get_total_instances(num_instances, layers)
        elif storage_type == 'cassandra':
            instances = self.config.cassandra.get_total_instances(num_instances, layers)
        elif storage_type == 'hadoop':
            instances = self.config.hadoop.get_total_instances(num_instances, layers)

        # Now get the new container-specific information. 
        i = 0
        for t in instances:
            instance_type = self._get_instance_image(t)
            service = self._get_service(t)
            container_dir, log_dir, host_name, ports, exposed = self._get_service_environment(service, i, num_instances)
            new_log_dir = self._new_log_dir(service_uuid, t, i, replace=replace)
            dir_info = { new_log_dir : log_dir }

            # Only use a data directory mapping if we're not
            # using BTRFS (this is to get around the xattr problem). 
            if self.docker.get_fs_type() != "btrfs":
                new_data_dir = self._new_data_dir(service_uuid, t, i)
                dir_info[new_data_dir] = container_dir

            container_info = {'image':instance_type,
                              'type':t, 
                              'volumes':dir_info,
                              'volume_user':DEFAULT_FERRY_OWNER, 
                              'ports':ports,
                              'exposed':exposed, 
                              'hostname':host_name,
                              'args':args}
            plan['localhost']['containers'].append(container_info)
            i += 1

        return plan
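
    # The returned plan is keyed by host, e.g. (trimmed, values illustrative):
    #
    #   {'localhost': {'containers': [{'image': 'ferry/gluster',
    #                                  'type': 'gluster', ...}]}}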

    """
    Prepare the environment for compute containers.
    """
    def _prepare_compute_environment(self, 
                                     service_uuid, 
                                     num_instances, 
                                     compute_type,
                                     layers, 
                                     args = None):
        # Generate the data volumes. This basically defines which
        # directories on the host get mounted in the container. 
        ports = []
        exposed = []
        instance_type = ''
        plan = {'localhost':{'containers':[]}}

        # Get the actual number of containers needed. 
        if compute_type == 'yarn':
            instances = self.config.yarn.get_total_instances(num_instances, layers)
        elif compute_type == 'mpi':
            instances = self.config.mpi.get_total_instances(num_instances, layers)

        i = 0
        for t in instances:
            instance_type = self._get_instance_image(t)
            service = self._get_service(t)
            container_dir, log_dir, host_name, ports, exposed = self._get_service_environment(service, i, num_instances)
            new_log_dir = self._new_log_dir(service_uuid, t, i)
            dir_info = { new_log_dir : log_dir }
            container_info = {'image':instance_type,
                              'volumes':dir_info,
                              'volume_user':DEFAULT_FERRY_OWNER, 
                              'type':t, 
                              'ports':ports,
                              'exposed':exposed, 
                              'hostname':host_name,
                              'args':args}
            plan['localhost']['containers'].append(container_info)
            i += 1
        return plan

    """
    Fetch the instance type. If the UUID is not associated with a running
    service, then just use the raw image. Otherwise, look for a snapshot image. 
    """
    def _get_instance_image(self, instance_type, uuid=None):
        image = None
        if instance_type == 'hadoop-client':
            image = self.DOCKER_HADOOP_CLIENT
        elif instance_type == 'cassandra-client':
            image = self.DOCKER_CASSANDRA_CLIENT
        elif instance_type == 'mpi-client':
            image = self.DOCKER_MPI_CLIENT
        elif instance_type == 'gluster':
            image = self.DOCKER_GLUSTER
        elif instance_type == 'cassandra':
            image = self.DOCKER_CASSANDRA
        elif instance_type == 'titan':
            image = self.DOCKER_TITAN
        elif instance_type == 'hadoop':
            image = self.DOCKER_HADOOP
        elif instance_type == 'hive':
            image = self.DOCKER_HIVE
        elif instance_type == 'mpi':
            image = self.DOCKER_MPI
        elif instance_type == 'yarn':
            image = self.DOCKER_HADOOP

        return image

    """
    Prepare the environment for connector containers.
    """
    def _prepare_connector_environment(self, 
                                       service_uuid, 
                                       connector_type, 
                                       instance_type=None,
                                       name=None, 
                                       args=None):
        ports = []
        exposed = []
        plan = {'localhost':{'containers':[]}}

        # Determine the instance type from the connector type. 
        if not instance_type:
            instance_type = self._get_instance_image(connector_type)

        service = self._get_service(connector_type)
        container_dir, log_dir, host_name, ports, exposed = self._get_service_environment(service, 0, 1)
        new_log_dir = self._new_log_dir(service_uuid, connector_type, 0)
        dir_info = { new_log_dir : log_dir }
        container_info = { 'image':instance_type,
                           'volumes':dir_info,
                           'volume_user':DEFAULT_FERRY_OWNER, 
                           'type':connector_type, 
                           'ports':ports,
                           'exposed':exposed, 
                           'hostname':host_name,
                           'name':name, 
                           'args':args}
        plan['localhost']['containers'].append(container_info)
        return plan

    """
    Transfer the configuration to the containers. 
    """
    def _transfer_config(self, config_dirs):
        for c in config_dirs:
            container = c[0]
            from_dir = c[1]
            to_dir = c[2]
            self.docker.copy([container], from_dir, to_dir)

    """
    Transfer these environment variables to the containers.
    Since the user normally interacts with these containers by 
    logging in (via ssh), we must place these variables in the profile. 
    """
    def _transfer_env_vars(self, containers, env_vars):
        for k in env_vars.keys():
            self.docker.cmd(containers, 
                            "echo export %s=%s >> /etc/profile" % (k, env_vars[k]))
    """
    Start the containers on the specified environment
    """
    def _start_containers(self, plan):
        return self.docker.alloc(plan['localhost']['containers'])

    """
    Restart the stopped containers. 
    """
    def _restart_containers(self, container_info):
        return self.docker.restart(container_info)

    """
    Register the set of services under a single cluster identifier. 
    """
    def register_stack(self, backends, connectors, base, uuid=None):
        if not uuid:
            cluster_uuid = self._new_stack_uuid()
        else:
            cluster_uuid = uuid

        ts = datetime.datetime.now()
        cluster = { 'uuid' : cluster_uuid,
                    'backends':backends,
                    'connectors':connectors,
                    'num_snapshots':0,
                    'snapshot_ts':'', 
                    'snapshot_uuid':base, 
                    'base':base,
                    'status': 'running',
                    'ts':ts }

        if not uuid:
            self.cluster_collection.insert( cluster )
        else:
            self._update_stack(uuid, cluster)

        return cluster_uuid

    """
    Helper method to update a cluster's status. 
    """
    def _update_stack(self, cluster_uuid, state):
        self.cluster_collection.update( {'uuid' : cluster_uuid},
                                        {'$set' : state} )

    def _get_cluster_instances(self, cluster_uuid):
        all_connectors = []
        all_storage = []
        all_compute = []
        cluster = self.cluster_collection.find_one( {'uuid':cluster_uuid} )
        if cluster:
            backends = cluster['backends']
            connector_uuids = cluster['connectors']
            for c in connector_uuids:
                connectors = {'uuid' : c,
                              'instances' : []}
                connector_info = self._get_service_configuration(c, detailed=True)
                for connector in connector_info['containers']:
                    connector_instance = DockerInstance(connector)
                    connectors['instances'].append(connector_instance)
                    connectors['type'] = connector_instance.service_type
                all_connectors.append(connectors)

            # Collect all the UUIDs of the backend containers.
            # The backend is considered ephemeral!
            for b in backends['uuids']:
                if b['storage'] != None:
                    storage = {'uuid' : b['storage'],
                               'instances' : []}
                    storage_info = self._get_service_configuration(b['storage'], detailed=True)
                    for s in storage_info['containers']:
                        storage_instance = DockerInstance(s)
                        storage['instances'].append(storage_instance)
                        storage['type'] = storage_instance.service_type
                    all_storage.append(storage)

                if b['compute'] != None:
                    for c in b['compute']:
                        compute = {'uuid' : c,
                                   'instances' : []}
                        compute_info = self._get_service_configuration(c, detailed=True)
                        for container in compute_info['containers']:
                            compute_instance = DockerInstance(container)
                            compute['instances'].append(compute_instance)
                            compute['type'] = compute_instance.service_type
                        all_compute.append(compute)
            return all_connectors, all_compute, all_storage
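
    # Returns a (connectors, compute, storage) triple; each element is a list
    # of {'uuid': ..., 'instances': [DockerInstance, ...], 'type': ...} dicts,
    # the shape that _stop_stack and _purge_stack below iterate over.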

    """
    Stop a running cluster.
    """
    def _stop_stack(self, cluster_uuid):
        connectors, compute, storage = self._get_cluster_instances(cluster_uuid)
        for c in connectors:
            self._stop_service(c['uuid'], c['instances'], c['type'])
        for c in compute:
            self._stop_service(c['uuid'], c['instances'], c['type'])
        for s in storage:
            self._stop_service(s['uuid'], s['instances'], s['type'])

    def _purge_stack(self, cluster_uuid):
        volumes = []
        connectors, compute, storage = self._get_cluster_instances(cluster_uuid)
        for c in connectors:
            self.docker.remove(c['instances'])
        for c in compute:
            self.docker.remove(c['instances'])
        for s in storage:
            for i in s['instances']:
                for v in i.volumes.keys():
                    volumes.append(v)
            self.docker.remove(s['instances'])

        # Now remove the data directories. 
        for v in volumes:
            shutil.rmtree(v)
    
    """
    Take a snapshot of an existing stack. 
    """
    def _snapshot_stack(self, cluster_uuid):
        cluster = self.cluster_collection.find_one( {'uuid':cluster_uuid} )
        if cluster:
            # We need to deserialize the docker containers from the cluster/service
            # description so that the snapshot code has access to certain pieces
            # of information (service type, etc.). 
            connectors = []
            connector_uuids = cluster['connectors']
            for c in connector_uuids:
                connector_info = self._get_service_configuration(c, detailed=True)
                connectors.append(DockerInstance(connector_info['containers'][0]))
            cs_snapshots = self.docker.snapshot(connectors, 
                                                cluster_uuid, 
                                                cluster['num_snapshots'])

            # Register the snapshot in the snapshot state. 
            snapshot_uuid = self._new_snapshot_uuid(cluster_uuid)
            snapshot_ts = datetime.datetime.now()
            snapshot_state = { 'snapshot_ts' : snapshot_ts, 
                               'snapshot_uuid' : snapshot_uuid,
                               'snapshot_cs' : cs_snapshots,
                               'cluster_uuid' : cluster_uuid}
            self.snapshot_collection.insert( snapshot_state )

            # Now update the cluster state. 
            cluster_state = { 'num_snapshots' : cluster['num_snapshots'] + 1,
                              'snapshot_uuid' : snapshot_uuid }
            self.cluster_collection.update( {'uuid':cluster_uuid}, 
                                             {"$set": cluster_state } )

    """
    Allocate a new compute cluster.
    """
    def allocate_compute(self,
                         compute_type, 
                         storage_uuid,
                         args, 
                         num_instances=1,
                         layers=[]):
        # Allocate a UUID.
        service_uuid = self._new_service_uuid()
        service = self._get_service(compute_type)

        # Generate the data volumes. This basically defines which
        # directories on the host get mounted in the container. 
        plan = self._prepare_compute_environment(service_uuid, num_instances, compute_type, layers, args)

        # Get the entry point for the storage layer. 
        storage_entry = self._get_service_configuration(storage_uuid)

        # Allocate all the containers. 
        containers = self._start_containers(plan)

        # Generate a configuration dir.
        config_dirs, entry_point = self.config.generate_compute_configuration(service_uuid, 
                                                                              containers, 
                                                                              service, 
                                                                              args, 
                                                                              [storage_entry])

        # Now copy over the configuration.
        self._transfer_config(config_dirs)

        container_info = self._serialize_containers(containers)
        service = {'uuid':service_uuid, 
                   'containers':container_info, 
                   'class':'compute',
                   'type':compute_type,
                   'entry':entry_point,
                   'storage':storage_uuid, 
                   'status':'running'}
        self._update_service_configuration(service_uuid, service)

        # After the docker instances start, we need to start the
        # actual compute service (yarn, etc.). 
        self._start_service(service_uuid, containers, compute_type)
        return service_uuid
        
    def _start_service(self,
                       uuid,
                       containers,
                       service_type):
        entry_point = self._get_service_configuration(uuid)
        service = self._get_service(service_type)
        service.start_service(containers, entry_point, self.docker)

    def _stop_service(self,
                      uuid,
                      containers,
                      service_type):
        entry_point = self._get_service_configuration(uuid)
        service = self._get_service(service_type)
        service.stop_service(containers, entry_point, self.docker)

    """
    Create a storage cluster and start a particular
    personality on that cluster. 
    """
    def allocate_storage(self, 
                         storage_type, 
                         num_instances=1,
                         layers=[], 
                         args=None,
                         replace=False):
        # Allocate a UUID.
        service_uuid = self._new_service_uuid()
        service = self._get_service(storage_type)

        # Generate the data volumes. This basically defines which
        # directories on the host get mounted in the container. 
        plan = self._prepare_storage_environment(service_uuid, num_instances, storage_type, layers, args, replace)

        # Allocate all the containers. 
        containers = self._start_containers(plan)

        # Generate a configuration dir.
        config_dirs, entry_point = self.config.generate_storage_configuration(service_uuid, 
                                                                              containers, 
                                                                              service, 
                                                                              args)

        # Now copy over the configuration.
        self._transfer_config(config_dirs)

        container_info = self._serialize_containers(containers)
        service = {'uuid':service_uuid, 
                   'containers':container_info, 
                   'class':'storage',
                   'type':storage_type,
                   'entry':entry_point,
                   'status':'running'}
        self._update_service_configuration(service_uuid, service)

        # After the docker instance start, we need to start the
        # actual storage service (gluster, etc.). 
        self._start_service(service_uuid, containers, storage_type)
        return service_uuid
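
    # Usage sketch (illustrative): compute clusters attach to an existing
    # storage service, so storage is allocated first and its UUID is passed
    # to allocate_compute. The service type names are hypothetical.
    #
    #   storage_uuid = m.allocate_storage('gluster', num_instances=2)
    #   compute_uuid = m.allocate_compute('yarn', storage_uuid,
    #                                     args=None, num_instances=2)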

    """
    Get the default deployment conf file. 
    """
    def _get_default_conf(self):        
        return FERRY_HOME + '/data/conf/deploy_default.json'

    """
    Get the deployment configuration parameters. 
    """
    def _get_deploy_params(self, mode, conf):
        # First just find and read the configuration file. 
        if conf == 'default':
            conf = self._get_default_conf()

        # Read and parse the configuration file. Use a context manager
        # so that the file handle is closed promptly.
        if os.path.exists(conf):
            with open(conf, 'r') as f:
                j = json.load(f)

            # Now find the right configuration.
            if mode in j:
                j[mode]['_mode'] = mode
                return j[mode]

        return None
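
    # Expected shape of the deployment conf (a sketch inferred from the
    # parsing logic above): the file maps each deployment mode to its
    # parameter dict, and '_mode' is injected before the dict is returned.
    # The keys below are purely illustrative.
    #
    #   {
    #       "local": { "network": "host" },
    #       "aws":   { "region": "us-east-1" }
    #   }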

    """
    Deploy an existing stack. 
    """
    def deploy_stack(self, cluster_uuid, params=None):
        containers = []
        cluster = self.cluster_collection.find_one( {'uuid':cluster_uuid} )
        if cluster:
            connector_uuids = cluster['connectors']
            for c in connector_uuids:
                connector_info = self._get_service_configuration(c, detailed=True)
                containers.append(DockerInstance(connector_info['containers'][0]))

            self.deploy.deploy(cluster_uuid, containers, params)

            # Check whether the caller asked for the stack to be
            # started right away after deployment.
            if params and 'start-on-create' in params:
                return True
        return False

    def manage_stack(self,
                     stack_uuid,
                     action):
        """
        Manage the stack (snapshot, stop, or remove). 
        """
        status = 'running'
        if action == 'snapshot':
            # The user wants to take a snapshot of the current stack. This
            # doesn't actually stop anything.
            self._snapshot_stack(stack_uuid)
        elif action == 'stop':
            self._stop_stack(stack_uuid)

            status = 'stopped'
            service_status = { 'uuid':stack_uuid, 'status':status }
            self._update_stack(stack_uuid, service_status)
        elif action == 'rm':
            # The stack must be stopped before it can be removed.
            if self.is_stopped(stack_uuid):
                self._purge_stack(stack_uuid)

                status = 'removed'
                service_status = { 'uuid':stack_uuid, 'status':status }
                self._update_stack(stack_uuid, service_status)
            else:
                return { 'uuid' : stack_uuid,
                         'status' : False,
                         'msg': 'Stack is running. Please stop it first.' }

        return { 'uuid' : stack_uuid,
                 'status' : True,
                 'msg': status }
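
    # Usage sketch (illustrative): 'rm' only succeeds on a stopped stack,
    # so a full teardown issues 'stop' before 'rm'; `m` is a hypothetical
    # manager instance.
    #
    #   m.manage_stack(stack_uuid, 'stop')
    #   res = m.manage_stack(stack_uuid, 'rm')
    #   if res['status']:
    #       logging.info("stack %s %s" % (stack_uuid, res['msg']))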

    """
    Lookup the stopped backend info. 
    """
    def fetch_stopped_backend(self, uuid):
        service = self.cluster_collection.find_one( {'uuid':uuid} )
        if service:
            return service['backends']['backend']

    """
    Lookup the snapshot backend info. 
    """
    def fetch_snapshot_backend(self, snapshot_uuid):
        snapshot = self.cluster_collection.find_one( {'snapshot_uuid':snapshot_uuid} )
        if snapshot:
            return snapshot['backends']['backend']

    """
    Lookup the deployed backend info. 
    """
    def fetch_deployed_backend(self, app_uuid, conf=None):
        app = self.deploy.find( one = True,
                                spec = { 'uuid' : app_uuid },
                                conf = conf )
        stack = self.cluster_collection.find_one( {'uuid':app['cluster_uuid'] } )
        if stack:
            return stack['backends']['backend']

    """
    Lookup the deployed application connector info and instantiate. 
    """
    def allocate_stopped_connectors(self, 
                                     app_uuid, 
                                     backend_info,
                                     conf = None):
        connector_info = []
        cluster = self.cluster_collection.find_one( {'uuid':app_uuid} )
        if cluster:
            for cuid in cluster['connectors']:
                # Retrieve the actual service information. This will
                # contain the container ID. 
                s = self._get_service_configuration(cuid, detailed=True)
                for c in s['containers']:
                    connector_info.append(self.restart_connector(service_uuid = app_uuid,
                                                                 connector_type = c['type'],
                                                                 backend = backend_info,
                                                                 name = c['name'], 
                                                                 args = c['args'],
                                                                 container = c['container']))
        return connector_info
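
    # Usage sketch (illustrative): restarting a stopped application pairs
    # fetch_stopped_backend with this method so that the revived connectors
    # reattach to their original backend services.
    #
    #   backend_info = m.fetch_stopped_backend(app_uuid)
    #   connectors = m.allocate_stopped_connectors(app_uuid, backend_info)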

    """
    Lookup the deployed application connector info and instantiate. 
    """
    def allocate_deployed_connectors(self, 
                                     app_uuid, 
                                     backend_info,
                                     conf = None):
        connector_info = []
        app = self.deploy.find( one = True,
                                spec = { 'uuid' : app_uuid },
                                conf = conf)
        if app:
            for c in app['connectors']:
                connector_info.append(self.allocate_connector(connector_type = c['type'],
                                                              backend = backend_info,
                                                              name = c['name'], 
                                                              args = c['args'],
                                                              image = c['image']))
        return connector_info

    def allocate_snapshot_connectors(self, 
                                     snapshot_uuid, 
                                     backend_info):
        """
        Look up the snapshot connector info and instantiate. 
        """
        connector_info = []
        snapshot = self.snapshot_collection.find_one( {'snapshot_uuid':snapshot_uuid} )
        if snapshot:
            for s in snapshot['snapshot_cs']:
                connector_info.append(self.allocate_connector(connector_type = s['type'],
                                                              backend = backend_info,
                                                              name = s['name'], 
                                                              args = s['args'],
                                                              image = s['image']))
        return connector_info
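
    # Usage sketch (illustrative): restoring from a snapshot follows the
    # same pattern, using the backend info recorded with the snapshot.
    #
    #   backend_info = m.fetch_snapshot_backend(snapshot_uuid)
    #   connectors = m.allocate_snapshot_connectors(snapshot_uuid,
    #                                               backend_info)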

    def restart_connector(self,
                          service_uuid,
                          connector_type,
                          backend=None,
                          name=None,
                          args=None,
                          container=None):
        """
        Restart a stopped connector with an existing storage service. 
        """
        service = self._get_service(connector_type)

        # Restart the stopped container. 
        container = self._restart_containers({ 'container': container,
                                               'type' : connector_type,
                                               'args' : args})

        # Initialize the connector and connect to the storage. 
        storage_entry = []
        compute_entry = []
        if backend:
            for b in backend:
                if b['storage']:
                    storage_entry.append(self._get_service_configuration(b['storage']))
                if b['compute']:
                    for c in b['compute']:
                        compute_entry.append(self._get_service_configuration(c))

        # Generate the environment variables that will be 
        # injected into the containers. 
        env_vars = self.config.generate_env_vars(storage_entry,
                                                 compute_entry)

        # Now generate the configuration files that will be
        # transferred to the containers. 
        config_dirs, entry_point = self.config.generate_connector_configuration(service_uuid, 
                                                                                [container], 
                                                                                service,
                                                                                storage_entry,
                                                                                compute_entry,
                                                                                args)
        # Now copy over the configuration.
        self._transfer_config(config_dirs)
        self._transfer_env_vars([container], env_vars)

        # Update the connector state. 
        container_info = self._serialize_containers([container])
        service = {'uuid':service_uuid, 
                   'containers':container_info, 
                   'class':'connector',
                   'type':connector_type,
                   'entry':entry_point,
                   'uniq': name, 
                   'status':'running'}
        self._update_service_configuration(service_uuid, service)

        # Start the connector personality. 
        self._start_service(service_uuid, [container], connector_type)
        return service_uuid

    def allocate_connector(self,
                           connector_type,
                           backend=None,
                           name=None,
                           args=None,
                           image=None):
        """
        Allocate a new connector and associate with an existing storage service. 
        """
        # Allocate a UUID.
        service_uuid = self._new_service_uuid()
        service = self._get_service(connector_type)

        # Generate the data volumes. This basically defines which
        # directories on the host get mounted in the container. 
        plan = self._prepare_connector_environment(service_uuid = service_uuid, 
                                                   connector_type = connector_type, 
                                                   instance_type = image,
                                                   name = name,
                                                   args = args)
        # Allocate all the containers. 
        containers = self._start_containers(plan)

        # Initialize the connector and connect to the storage. 
        storage_entry = []
        compute_entry = []
        if backend:
            for b in backend:
                if b['storage']:
                    storage_entry.append(self._get_service_configuration(b['storage']))
                if b['compute']:
                    for c in b['compute']:
                        compute_entry.append(self._get_service_configuration(c))

        # Generate the environment variables that will be 
        # injected into the containers. 
        env_vars = self.config.generate_env_vars(storage_entry,
                                                 compute_entry)

        # Now generate the configuration files that will be
        # transferred to the containers. 
        config_dirs, entry_point = self.config.generate_connector_configuration(service_uuid, 
                                                                                containers, 
                                                                                service,
                                                                                storage_entry,
                                                                                compute_entry,
                                                                                args)
        # Now copy over the configuration.
        self._transfer_config(config_dirs)
        self._transfer_env_vars(containers, env_vars)

        # Update the connector state. 
        container_info = self._serialize_containers(containers)
        service = {'uuid':service_uuid, 
                   'containers':container_info, 
                   'class':'connector',
                   'type':connector_type,
                   'entry':entry_point,
                   'uniq': name, 
                   'status':'running'}
        self._update_service_configuration(service_uuid, service)

        # Start the connector personality. 
        self._start_service(service_uuid, containers, connector_type)
        return service_uuid
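
    # Usage sketch (illustrative): the backend argument is a list of dicts,
    # each holding a storage service UUID and a list of compute service
    # UUIDs, matching the iteration above. The type and name values are
    # hypothetical.
    #
    #   backend = [{'storage': storage_uuid, 'compute': [compute_uuid]}]
    #   connector_uuid = m.allocate_connector('spark-client',
    #                                         backend=backend,
    #                                         name='client-0')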

    """
    Fetch the current docker version.
    """
    def version(self):
        return self.docker.version()