Example #1
    def __init__(self, bootstrap=False):
        self.name = "cloud"
        self.repo = 'public'

        self._init_cloudfabric()
        self.bootstrap = bootstrap
        self.cli = DockerCLI()
        self.cli.key = self.launcher._get_host_key()
        self.docker_user = self.cli.docker_user
        self.inspector = CloudInspector(self)

        # The system returns information regarding
        # the instance types.
        self.system = self.launcher.system
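
The excerpt above only wires up the fabric's collaborators (DockerCLI, CloudInspector, the launcher's host key and system object). A minimal usage sketch, assuming the ferry package is installed and configured; the import path is illustrative, not taken from the excerpt:

    # Minimal usage sketch (hypothetical import path; assumes a valid cloud config).
    from ferry.fabric.cloud import CloudFabric

    fabric = CloudFabric(bootstrap=False)
    print(fabric.name)           # "cloud"
    print(fabric.repo)           # "public"
    print(fabric.version())      # the cloud fabric reports a fixed "0.1" (see Example #10)
    print(fabric.get_fs_type())  # "xfs" (see Example #10)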
Example #2
    def __init__(self, bootstrap=False):
        self.name = "local"
        self.repo = 'public'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.docker_user = self.cli.docker_user
        self.inspector = DockerInspector(self.cli)
        self.bootstrap = bootstrap

        # The system returns information regarding
        # the instance types.
        self.system = System()

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network.
        if not bootstrap:
            self.network = DHCPClient(ferry.install._get_gateway())
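
The bootstrap flag above decides whether the DHCP helper exists at all: with bootstrap=True the constructor never sets self.network, so network-dependent code paths must be avoided. A small sketch of that difference, with a hypothetical import path:

    # Sketch of the bootstrap flag's effect (hypothetical import path).
    from ferry.fabric.local import LocalFabric

    bootstrapped = LocalFabric(bootstrap=True)
    assert not hasattr(bootstrapped, "network")   # DHCP client never created

    normal = LocalFabric(bootstrap=False)
    assert hasattr(normal, "network")             # DHCPClient(ferry.install._get_gateway())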
Example #3
    def __init__(self, bootstrap=False):
        self.repo = 'public'
        self.docker_user = '******'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.bootstrap = bootstrap

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network. 
        if not bootstrap:
            self.network = DHCPClient(self._get_gateway())
Example #4
File: cloud.py  Project: TENorbert/ferry
    def __init__(self, bootstrap=False):
        self.name = "cloud"
        self.repo = 'public'

        self._init_cloudfabric()
        self.bootstrap = bootstrap
        self.cli = DockerCLI()
        self.cli.key = self.launcher._get_host_key()
        self.docker_user = self.cli.docker_user
        self.inspector = CloudInspector(self)

        # The system returns information regarding 
        # the instance types. 
        self.system = self.launcher.system
Example #5
File: local.py  Project: TENorbert/ferry
    def __init__(self, bootstrap=False):
        self.name = "local"
        self.repo = 'public'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.docker_user = self.cli.docker_user
        self.inspector = DockerInspector(self.cli)
        self.bootstrap = bootstrap

        # The system returns information regarding 
        # the instance types. 
        self.system = System()

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network. 
        if not bootstrap:
            self.network = DHCPClient(ferry.install._get_gateway())
Example #6
 def __init__(self):
     self.repo = 'public'
     self.docker_user = '******'
     self.cli = DockerCLI()
Example #7
class DockerFabric(object):
    def __init__(self):
        self.repo = 'public'
        self.docker_user = '******'
        self.cli = DockerCLI()

    """
    Read the location of the directory containing the keys
    used to communicate with the containers. 
    """
    def _read_key_dir(self):
        f = open(ferry.install.DEFAULT_DOCKER_KEY, 'r')
        return f.read().strip()
 
    """
    Fetch the current docker version.
    """
    def version(self):
        return self.cli.version()

    """
    Get the filesystem type associated with docker. 
    """
    def get_fs_type(self):
        return self.cli.get_fs_type()

    """
    Restart the stopped containers.
    """
    def restart(self, container_info):
        container = self.cli.start(container_info['container'],
                                   container_info['type'],
                                   container_info['args'])
        container.default_user = self.docker_user
        return container

    """
    Allocate several instances.
    """
    def alloc(self, container_info):
        containers = []
        mounts = {}
        for c in container_info:
            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            container = self.cli.run(service_type = c['type'], 
                                     image = c['image'], 
                                     volumes = c['volumes'],
                                     keys = c['keys'], 
                                     phys_net = None, 
                                     security_group = c['ports'],
                                     expose_group = c['exposed'], 
                                     hostname = c['hostname'],
                                     args= c['args'])
            container.default_user = self.docker_user
            containers.append(container)

            # Not all containers have a unique name. 
            if 'name' in c:
                container.name = c['name']

            if 'volume_user' in c:
                mounts[container] = {'user':c['volume_user'],
                                     'vols':c['volumes'].items()}

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)

        # Check if we need to set the file permissions
        # for the mounted volumes. 
        for c, i in mounts.items():
            for _, v in i['vols']:
                self.cmd([c], 'chown -R %s %s' % (i['user'], v))

        return containers

    """
    Stop the running instances
    """
    def stop(self, containers):
        for c in containers:
            self.cli.stop(c.container)

    """
    Remove the running instances
    """
    def remove(self, containers):
        for c in containers:
            self.cli.remove(c.container)

    """
    Save/commit the running instances
    """
    def snapshot(self, containers, cluster_uuid, num_snapshots):
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, 
                                                      cluster_uuid,
                                                      c.host_name,
                                                      num_snapshots)
            snapshots.append( {'image' : snapshot_name,
                               'base' : c.image,
                               'type' : c.service_type, 
                               'name' : c.name, 
                               'args' : c.args} )
            self.cli.commit(c, snapshot_name)
        return snapshots

    """
    Upload these containers to the specified registry.
    """
    def deploy(self, containers, registry=None):
        deployed = []
        for c in containers:
            image_name = '%s-%s:DEPLOYED' % (c.image, 
                                             c.host_name)
            deployed.append( {'image' : image_name,
                              'base' : c.image,
                              'type' : c.service_type, 
                              'name' : c.name, 
                              'args' : c.args} )
            if not registry:
                self.cli.commit(c, image_name)
            else:
                self.cli.push(c, registry)
        return deployed

    """
    Copy over the contents to each container
    """
    def copy(self, containers, from_dir, to_dir):
        for c in containers:
            opts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
            key = '-i ' + self._read_key_dir() + '/id_rsa'
            scp_cmd = 'scp ' + opts + ' ' + key + ' -r ' + from_dir + ' ' + self.docker_user + '@' + c.internal_ip + ':' + to_dir
            output = Popen(scp_cmd, stdout=PIPE, shell=True).stdout.read()

    """
    Run a command on all the containers and collect the output. 
    """
    def cmd(self, containers, cmd):
        all_output = {}
        key = self._read_key_dir() + '/id_rsa'
        for c in containers:
            ip = self.docker_user + '@' + c.internal_ip
            ssh = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
            logging.warning(ssh)
            output = Popen(ssh, stdout=PIPE, shell=True).stdout.read()
            all_output[c] = output.strip()

        return all_output
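
alloc() above treats each container_info entry as a plain dict and reads a fixed set of keys ('type', 'image', 'volumes', 'keys', 'ports', 'exposed', 'hostname', 'args', plus the optional 'name' and 'volume_user'). The sketch below shows that shape; the keys come from the method body, while every value is a placeholder:

    # Shape of one container_info entry as read by DockerFabric.alloc() above.
    container_info = [{
        'type'       : 'hadoop',                        # service_type for DockerCLI.run
        'image'      : 'ferry/hadoop',                  # Docker image to start
        'volumes'    : {'/host/data': '/service/data'}, # host dir -> container dir
        'keys'       : {'/host/keys': '/service/keys'},
        'ports'      : [],                              # passed as security_group
        'exposed'    : [],                              # passed as expose_group
        'hostname'   : 'node-0',
        'args'       : None,
        'name'       : 'hadoop-master',                 # optional
        'volume_user': 'ferry',                         # optional; triggers the chown -R pass
    }]
    containers = fabric.alloc(container_info)           # fabric: a DockerFabric instance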
Example #8
class LocalFabric(object):
    def __init__(self, bootstrap=False):
        self.name = "local"
        self.repo = 'public'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.docker_user = self.cli.docker_user
        self.inspector = DockerInspector(self.cli)
        self.bootstrap = bootstrap

        # The system returns information regarding
        # the instance types.
        self.system = System()

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network.
        if not bootstrap:
            self.network = DHCPClient(ferry.install._get_gateway())

    def _get_host(self):
        cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

    def get_data_dir(self):
        if 'FERRY_SCRATCH' in os.environ:
            scratch_dir = os.environ['FERRY_SCRATCH']
        else:
            scratch_dir = os.path.join(
                ferry.install._get_ferry_dir(server=True), 'scratch')

        if not os.path.isdir(scratch_dir):
            os.makedirs(scratch_dir)

        return scratch_dir

    def version(self):
        """
        Fetch the current docker version.
        """
        return self.cli.version()

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker. 
        """
        return self.cli.get_fs_type()

    def quit(self):
        """
        Quit the local fabric. 
        """
        logging.info("quitting local fabric")

    def restart(self, cluster_uuid, service_uuid, containers):
        """
        Restart the stopped containers.
        """
        new_containers = []
        for c in containers:
            container = self.cli.start(image=c.image,
                                       container=c.container,
                                       service_type=c.service_type,
                                       keydir=c.keydir,
                                       keyname=c.keyname,
                                       privatekey=c.privatekey,
                                       volumes=c.volumes,
                                       args=c.args,
                                       inspector=self.inspector)
            container.default_user = self.docker_user
            new_containers.append(container)

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)
        return new_containers

    def alloc(self, cluster_uuid, service_uuid, container_info, ctype):
        """
        Allocate several instances.
        """
        containers = []
        mounts = {}
        for c in container_info:
            # Get a new IP address for this container and construct
            # a default command.
            gw = ferry.install._get_gateway().split("/")[0]

            # Check if we should use the manual LXC option.
            if not 'netenable' in c:
                ip = self.network.assign_ip(c)
                lxc_opts = [
                    "lxc.network.type = veth",
                    "lxc.network.ipv4 = %s/24" % ip,
                    "lxc.network.ipv4.gateway = %s" % gw,
                    "lxc.network.link = ferry0", "lxc.network.name = eth0",
                    "lxc.network.flags = up"
                ]

                # Check if we need to forward any ports.
                host_map = {}
                for p in c['ports']:
                    p = str(p)
                    s = p.split(":")
                    if len(s) > 1:
                        host = s[0]
                        dest = s[1]
                    else:
                        host = self.network.random_port()
                        dest = s[0]
                    host_map[dest] = [{'HostIp': '0.0.0.0', 'HostPort': host}]
                    self.network.forward_rule('0.0.0.0/0', host, ip, dest)
                host_map_keys = host_map.keys()
            else:
                lxc_opts = None
                host_map = None
                host_map_keys = []

            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            if not 'default_cmd' in c:
                c['default_cmd'] = "/service/sbin/startnode init"
            container = self.cli.run(service_type=c['type'],
                                     image=c['image'],
                                     volumes=c['volumes'],
                                     keydir=c['keydir'],
                                     keyname=c['keyname'],
                                     privatekey=c['privatekey'],
                                     open_ports=host_map_keys,
                                     host_map=host_map,
                                     expose_group=c['exposed'],
                                     hostname=c['hostname'],
                                     default_cmd=c['default_cmd'],
                                     args=c['args'],
                                     lxc_opts=lxc_opts,
                                     inspector=self.inspector,
                                     background=False)
            if container:
                container.default_user = self.docker_user
                containers.append(container)
                if not 'netenable' in c:
                    container.internal_ip = ip
                    container.external_ip = ip
                    self.network.set_owner(ip, container.container)

                if 'name' in c:
                    container.name = c['name']

                if 'volume_user' in c:
                    mounts[container] = {
                        'user': c['volume_user'],
                        'vols': c['volumes'].items()
                    }

                # We should wait for a second to let the ssh server start
                # on the containers (otherwise sometimes we get a connection refused)
                time.sleep(3)

        # Check if we need to set the file permissions
        # for the mounted volumes.
        for c, i in mounts.items():
            for _, v in i['vols']:
                self.cmd([c], 'chown -R %s %s' % (i['user'], v))

        return containers

    def stop(self, cluster_uuid, service_uuid, containers):
        """
        Forcibly stop the running containers
        """
        for c in containers:
            if type(c) is dict:
                self.cli.stop(c['container'])
            else:
                self.cli.stop(c.container)

    def remove(self, cluster_uuid, service_uuid, containers):
        """
        Remove the running instances
        """
        for c in containers:
            for p in c.ports.keys():
                self.network.delete_rule(c.internal_ip, p)
            self.network.free_ip(c.internal_ip)
            self.cli.remove(c.container)

    def snapshot(self, containers, cluster_uuid, num_snapshots):
        """
        Save/commit the running instances
        """
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (
                c.image, cluster_uuid, c.host_name, num_snapshots)
            snapshots.append({
                'image': snapshot_name,
                'base': c.image,
                'type': c.service_type,
                'name': c.name,
                'args': c.args,
                'ports': c.ports
            })
            self.cli.commit(c, snapshot_name)
        return snapshots

    def push(self, image, registry=None):
        """
        Push an image to a remote registry.
        """
        return self.cli.push(image, registry)

    def pull(self, image):
        """
        Pull a remote image to the local registry. 
        """
        return self.cli.pull(image)

    def halt(self, cluster_uuid, service_uuid, containers):
        """
        Safely stop the containers.
        """
        cmd = '/service/sbin/startnode halt'
        for c in containers:
            self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.privatekey, c.internal_ip, from_dir, to_dir,
                          c.default_user)

    def copy_raw(self, key, ip, from_dir, to_dir, user):
        if key:
            opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
            scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir
            logging.warning(scp)
            robust_com(scp)

    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output. 
        """
        all_output = {}
        for c in containers:
            output = self.cmd_raw(c.privatekey, c.internal_ip, cmd,
                                  c.default_user)
            if output.strip() != "":
                all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, key, ip, cmd, user):
        if key:
            ip = user + '@' + ip
            ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
            logging.warning(ssh)
            out, _, _ = robust_com(ssh)
            return out
        else:
            return ''

    def login(self):
        """
        Log in to a remote registry. Use the login credentials
        found in the user's home directory. 
        """
        config = ferry.install.read_ferry_config()
        args = config['docker']
        if all(k in args for k in ("user", "password", "email")):
            if 'server' in args:
                server = args['server']
            else:
                server = ''
            return self.cli.login(user=args['user'],
                                  password=args['password'],
                                  email=args['email'],
                                  registry=server)
        logging.error("Could not open login credentials " +
                      ferry.install.DEFAULT_LOGIN_KEY)
        return False
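
login() above pulls its credentials from read_ferry_config() and requires a 'docker' section with 'user', 'password', and 'email'; 'server' is optional. A sketch of the expected structure (keys taken from the lookups in the method, values are placeholders):

    # Configuration shape expected by LocalFabric.login() above.
    config = {
        'docker': {
            'user'    : 'example-user',
            'password': 'example-password',
            'email'   : 'user@example.com',
            'server'  : ''   # optional; empty string falls back to the default registry
        }
    }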
Example #9
class DockerFabric(object):
    def __init__(self, bootstrap=False):
        self.repo = 'public'
        self.docker_user = '******'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.bootstrap = bootstrap

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network. 
        if not bootstrap:
            self.network = DHCPClient(self._get_gateway())

    def _get_gateway(self):
        """
        Get the gateway address in CIDR notation. This defines the
        range of IP addresses available to the containers. 
        """
        cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

        cmd = "LC_MESSAGES=C ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'"
        netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()
        mask = map(int, netmask.split("."))
        cidr = 1
        if mask[3] == 0:
            cidr = 8
        if mask[2] == 0:
            cidr *= 2

        return "%s/%d" % (gw, 32 - cidr)

    def _get_host(self):
        cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

    def _read_key_dir(self):
        """
        Read the location of the directory containing the keys
        used to communicate with the containers. 
        """
        keydir = ferry.install._get_key_dir(root=self.bootstrap, server=True)
        with open(keydir, 'r') as f: 
            k = f.read().strip().split("://")
            return k[1], k[0]
 
    def version(self):
        """
        Fetch the current docker version.
        """
        return self.cli.version()

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker. 
        """
        return self.cli.get_fs_type()

    def restart(self, containers):
        """
        Restart the stopped containers.
        """
        new_containers = []
        for c in containers:
            container = self.cli.start(c.container,
                                       c.service_type,
                                       c.keys,
                                       c.volumes,
                                       c.args)
            container.default_user = self.docker_user
            new_containers.append(container)

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)
        return new_containers

    def alloc(self, container_info):
        """
        Allocate several instances.
        """
        containers = []
        mounts = {}
        for c in container_info:
            # Get a new IP address for this container and construct
            # a default command. 
            gw = self._get_gateway().split("/")[0]

            # Check if we should use the manual LXC option. 
            if not 'netenable' in c:
                ip = self.network.assign_ip(c)
                lxc_opts = ["lxc.network.type = veth",
                            "lxc.network.ipv4 = %s/24" % ip, 
                            "lxc.network.ipv4.gateway = %s" % gw,
                            "lxc.network.link = drydock0",
                            "lxc.network.name = eth0",
                            "lxc.network.flags = up"]

                # Check if we need to forward any ports. 
                host_map = {}
                for p in c['ports']:
                    p = str(p)
                    s = p.split(":")
                    if len(s) > 1:
                        host = s[0]
                        dest = s[1]
                    else:
                        host = self.network.random_port()
                        dest = s[0]
                    host_map[dest] = [{'HostIp' : '0.0.0.0',
                                       'HostPort' : host}]
                    self.network.forward_rule('0.0.0.0/0', host, ip, dest)
                host_map_keys = host_map.keys()
            else:
                lxc_opts = None
                host_map = None
                host_map_keys = []

            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            c['default_cmd'] = "/service/sbin/startnode init"
            container = self.cli.run(service_type = c['type'], 
                                     image = c['image'], 
                                     volumes = c['volumes'],
                                     keys = c['keys'], 
                                     open_ports = host_map_keys,
                                     host_map = host_map, 
                                     expose_group = c['exposed'], 
                                     hostname = c['hostname'],
                                     default_cmd = c['default_cmd'],
                                     args= c['args'],
                                     lxc_opts = lxc_opts)
            if container:
                container.default_user = self.docker_user
                containers.append(container)
                if not 'netenable' in c:
                    container.internal_ip = ip
                    self.network.set_owner(ip, container.container)

                if 'name' in c:
                    container.name = c['name']

                if 'volume_user' in c:
                    mounts[container] = {'user':c['volume_user'],
                                         'vols':c['volumes'].items()}

                # We should wait for a second to let the ssh server start
                # on the containers (otherwise sometimes we get a connection refused)
                time.sleep(2)

        # Check if we need to set the file permissions
        # for the mounted volumes. 
        for c, i in mounts.items():
            for _, v in i['vols']:
                self.cmd([c], 'chown -R %s %s' % (i['user'], v))

        return containers

    def stop(self, containers):
        """
        Forcibly stop the running containers
        """
        for c in containers:
            self.cli.stop(c['container'])

    def remove(self, containers):
        """
        Remove the running instances
        """
        for c in containers:
            for p in c.ports.keys():
                self.network.delete_rule(c.internal_ip, p)
            self.network.free_ip(c.internal_ip)
            self.cli.remove(c.container)

    def snapshot(self, containers, cluster_uuid, num_snapshots):
        """
        Save/commit the running instances
        """
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, 
                                                      cluster_uuid,
                                                      c.host_name,
                                                      num_snapshots)
            snapshots.append( {'image' : snapshot_name,
                               'base' : c.image,
                               'type' : c.service_type, 
                               'name' : c.name, 
                               'args' : c.args,
                               'ports': c.ports} )
            self.cli.commit(c, snapshot_name)
        return snapshots

    def deploy(self, containers, registry=None):
        """
        Upload these containers to the specified registry.
        """
        deployed = []
        for c in containers:
            image_name = '%s-%s:DEPLOYED' % (c.image, 
                                             c.host_name)
            deployed.append( {'image' : image_name,
                              'base' : c.image,
                              'type' : c.service_type, 
                              'name' : c.name, 
                              'args' : c.args,
                              'ports': c.ports} )
            if not registry:
                self.cli.commit(c, image_name)
            else:
                self.cli.push(c.image, registry)
        return deployed

    def push(self, image, registry=None):
        """
        Push an image to a remote registry.
        """        
        return self.cli.push(image, registry)

    def pull(self, image):
        """
        Pull a remote image to the local registry. 
        """        
        return self.cli.pull(image)

    def halt(self, containers):
        """
        Safely stop the containers.
        """
        cmd = '/service/sbin/startnode halt'
        for c in containers:
            self.cmd_raw(c.internal_ip, cmd)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.internal_ip, from_dir, to_dir)

    def copy_raw(self, ip, from_dir, to_dir):
        keydir, _ = self._read_key_dir()
        opts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
        key = '-i ' + keydir + '/id_rsa'
        scp = 'scp ' + opts + ' ' + key + ' -r ' + from_dir + ' ' + self.docker_user + '@' + ip + ':' + to_dir
        logging.warning(scp)
        output = Popen(scp, stdout=PIPE, shell=True).stdout.read()

    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output. 
        """
        all_output = {}
        for c in containers:
            output = self.cmd_raw(c.internal_ip, cmd)
            all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, ip, cmd):
        keydir, _ = self._read_key_dir()
        key = keydir + '/id_rsa'
        ip = self.docker_user + '@' + ip
        ssh = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
        logging.warning(ssh)
        output = Popen(ssh, stdout=PIPE, shell=True).stdout.read()
        return output

    def login(self):
        """
        Log in to a remote registry. Use the login credentials
        found in the user's home directory. 
        """
        with open(ferry.install.DEFAULT_DOCKER_LOGIN, 'r') as f:
            args = yaml.load(f)
            args = args['docker']
            if all(k in args for k in ("user","password","email")):
                if 'server' in args:
                    server = args['server']
                else:
                    server = ''
                return self.cli.login(user = args['user'], 
                                      password = args['password'],
                                      email = args['email'],
                                      registry = server)
        logging.error("Could not open login credentials " + ferry.install.DEFAULT_LOGIN_KEY)
        return False
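
_get_gateway() above turns the drydock0 netmask into a prefix length, covering the common 255.255.255.0 (/24) and 255.255.0.0 (/16) cases. A standalone sketch of just that arithmetic, without the ifconfig parsing, so the two cases can be checked directly:

    # Standalone sketch of the netmask-to-prefix arithmetic in _get_gateway() above.
    def prefix_from_netmask(netmask):
        mask = [int(octet) for octet in netmask.split(".")]
        cidr = 1
        if mask[3] == 0:
            cidr = 8
        if mask[2] == 0:
            cidr *= 2
        return 32 - cidr

    print(prefix_from_netmask("255.255.255.0"))   # 24
    print(prefix_from_netmask("255.255.0.0"))     # 16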
Example #10
class CloudFabric(object):
    def __init__(self, bootstrap=False):
        self.name = "cloud"
        self.repo = 'public'

        self._init_cloudfabric()
        self.bootstrap = bootstrap
        self.cli = DockerCLI()
        self.cli.key = self.launcher._get_host_key()
        self.docker_user = self.cli.docker_user
        self.inspector = CloudInspector(self)

        # The system returns information regarding
        # the instance types.
        self.system = self.launcher.system

    def _load_class(self, class_name):
        """
        Dynamically load a class
        """
        s = class_name.split("/")
        module_path = s[0]
        clazz_name = s[1]
        module = importlib.import_module(module_path)
        for n, o in inspect.getmembers(module):
            if inspect.isclass(o):
                if o.__module__ == module_path and o.__name__ == clazz_name:
                    return o(self)
        return None

    def _init_cloudfabric(self):
        conf = ferry.install.read_ferry_config()

        # The actual cloud launcher. This lets us customize
        # launching into different cloud environments that each
        # may be slightly different (HP Cloud, Rackspace, etc).
        launcher = conf["system"]["mode"]
        self.launcher = self._load_class(launcher)

        # Determine if we are using this fabric in proxy
        # mode. Proxy mode means that the client is external
        # to the network, but the controller has direct access.
        self.proxy = bool(conf["system"]["proxy"])

        # Check if the launcher supports proxy mode.
        if self.proxy and not self.launcher.support_proxy():
            logging.error("%s does not support proxy mode" %
                          self.launcher.name)

    def get_data_dir(self):
        return "/ferry/data"

    def version(self):
        """
        Fetch the current docker version.
        """
        return "0.1"

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker. 
        """
        return "xfs"

    def quit(self):
        """
        Quit the cloud fabric. 
        """
        logging.info("quitting cloud fabric")
        self.launcher.quit()

    def restart(self, cluster_uuid, service_uuid, containers):
        """
        Restart the stopped containers.
        """
        # First need to restart all the virtual machines.
        logging.warning("restarting virtual machines...")
        addrs = self.launcher._restart_stack(cluster_uuid, service_uuid)

        # Then need to restart Ferry on all the hosts.
        logging.warning("restarting ferry...")
        cmd = "source /etc/profile && ferry server -n"
        for ip in addrs:
            output, err, _ = self.cmd_raw(self.cli.key, ip, cmd,
                                          self.docker_user)

        # Finally, restart the stopped containers.
        logging.warning("restarting containers...")
        cmd = "cat /ferry/containers/container.pid && rm /ferry/containers/container.pid"
        for c in containers:
            # Before restarting the containers, we need to learn their
            # container IDs, which should be stored in a cidfile.
            output, err, _ = self.cmd_raw(self.cli.key, c.external_ip, cmd,
                                          self.launcher.ssh_user)
            c.container = output.strip()
            self.cli.start(image=c.image,
                           container=c.container,
                           service_type=c.service_type,
                           keydir=c.keydir,
                           keyname=c.keyname,
                           privatekey=c.privatekey,
                           volumes=c.volumes,
                           args=c.args,
                           server=c.external_ip,
                           user=self.launcher.ssh_user,
                           inspector=self.inspector,
                           background=True)
        return containers

    def _copy_public_keys(self, container, server):
        """
        Copy over the public ssh key to the server so that we can start the
        container correctly. 
        """

        keydir = container['keydir'].values()[0]
        self.copy_raw(key=self.cli.key,
                      ip=server,
                      from_dir=keydir + "/" + container["keyname"],
                      to_dir="/ferry/keys/",
                      user=self.launcher.ssh_user)

    def _verify_public_keys(self, server):
        """
        Verify that the public key has been copied over correctly. 
        """
        out, _, _ = self.cmd_raw(key=self.cli.key,
                                 ip=server,
                                 cmd="ls /ferry/keys",
                                 user=self.launcher.ssh_user)
        if out and out.strip() == "":
            return False
        elif out:
            logging.warning("found ssh key: " + out.strip())
            return True
        else:
            return False

    def _verify_ferry_server(self, server):
        """
        Verify that the docker daemon is actually running on the server. 
        """

        # Try a couple times before giving up.
        for i in range(0, 2):
            out, err, success = self.cmd_raw(
                key=self.cli.key,
                ip=server,
                cmd="if [ -f /var/run/ferry.pid ]; then echo \"launched\"; fi",
                user=self.launcher.ssh_user)
            if success and out and out.strip() != "":
                logging.warning("docker daemon " + out.strip())
                return True
            elif not success:
                return False
            else:
                time.sleep(6)
        return False

    def _execute_server_init(self, server):
        """
        Restart the Ferry docker daemon. 
        """
        out, err, _ = self.cmd_raw(key=self.cli.key,
                                   ip=server,
                                   cmd="ferry server -n && sleep 3",
                                   user=self.launcher.ssh_user)
        logging.warning("restart ferry out: " + out)
        logging.warning("restart ferry err: " + err)

    def execute_docker_containers(self,
                                  cinfo,
                                  lxc_opts,
                                  private_ip,
                                  server_ip,
                                  background=True,
                                  simulate=False):
        """
        Run the Docker container and use the cloud inspector to get information
        about the container/VM.
        """

        host_map = None
        host_map_keys = []
        mounts = {}

        if not 'default_cmd' in cinfo:
            cinfo['default_cmd'] = "/service/sbin/startnode init"
        container = self.cli.run(service_type=cinfo['type'],
                                 image=cinfo['image'],
                                 volumes=cinfo['volumes'],
                                 keydir={'/service/keys': '/ferry/keys'},
                                 keyname=cinfo['keyname'],
                                 privatekey=cinfo['privatekey'],
                                 open_ports=host_map_keys,
                                 host_map=host_map,
                                 expose_group=cinfo['exposed'],
                                 hostname=cinfo['hostname'],
                                 default_cmd=cinfo['default_cmd'],
                                 args=cinfo['args'],
                                 lxc_opts=lxc_opts,
                                 server=server_ip,
                                 user=self.launcher.ssh_user,
                                 inspector=self.inspector,
                                 background=background,
                                 simulate=simulate)
        if container:
            container.manage_ip = server_ip
            container.internal_ip = private_ip
            if self.proxy:
                # Otherwise, the controller can only interact with the
                # VMs via their public IP address.
                container.external_ip = server_ip
            else:
                # When the fabric controller is acting in proxy mode,
                # it can contact the VMs via their private addresses.
                container.external_ip = private_ip

            container.vm = self.launcher.default_personality
            container.default_user = self.cli.docker_user

            if 'name' in cinfo:
                container.name = cinfo['name']

            if 'volume_user' in cinfo:
                mounts[container] = {
                    'user': cinfo['volume_user'],
                    'vols': cinfo['volumes'].items()
                }

            # We should wait for a second to let the ssh server start
            # on the containers (otherwise sometimes we get a connection refused)
            time.sleep(3)

            return container, mounts
        else:
            return None, None

    def alloc(self, cluster_uuid, service_uuid, container_info, ctype):
        """
        Allocate a new service cluster. 
        """
        containers = self.launcher.alloc(cluster_uuid, service_uuid,
                                         container_info, ctype, self.proxy)

        if not containers:
            # The underlying cloud infrastructure could not allocate
            # the service cluster. Sometimes it's just a temporary glitch, so
            # tear down the failed attempt and try again.
            logging.error("Failed to allocate service cluster. Trying again.")
            self.launcher._delete_stack(cluster_uuid, service_uuid)
            return self.launcher.alloc(cluster_uuid, service_uuid,
                                       container_info, ctype, self.proxy)
        else:
            return containers

    def stop(self, cluster_uuid, service_uuid, containers):
        """
        Stop the running containers
        """
        self.remove(cluster_uuid, service_uuid, containers)

    def halt(self, cluster_uuid, service_uuid, containers):
        """
        Safely stop the containers.
        """

        # Stop the containers in the VMs. Stopping the container
        # should jump us back out to the host. Afterwards, quit
        # ferry so that we can restart later.
        halt = '/service/sbin/startnode halt'
        ferry = 'ferry quit'
        for c in containers:
            self.cmd_raw(c.privatekey, c.external_ip, halt, c.default_user)
            self.cmd_raw(self.cli.key, c.manage_ip, ferry,
                         self.launcher.ssh_user)

        # Now go ahead and stop the VMs.
        self.launcher._stop_stack(cluster_uuid, service_uuid)

    def remove(self, cluster_uuid, service_uuid, containers):
        """
        Remove the running instances
        """
        self.launcher._delete_stack(cluster_uuid, service_uuid)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.privatekey, c.external_ip, from_dir, to_dir,
                          c.default_user)

    def copy_raw(self, key, ip, from_dir, to_dir, user):
        opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
        scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir
        logging.warning(scp)
        robust_com(scp)

    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output. 
        """
        all_output = {}
        for c in containers:
            output, _, _ = self.cmd_raw(c.privatekey, c.external_ip, cmd,
                                        c.default_user)
            if output.strip() != "":
                all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, key, ip, cmd, user):
        ip = user + '@' + ip
        ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
        logging.warning(ssh)
        return robust_com(ssh)
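
_load_class() above resolves the config's system mode string with a "module.path/ClassName" convention: import the module, scan its members for the named class, and instantiate it (the fabric passes itself as the only constructor argument). The sketch below demonstrates the same lookup against a standard-library class, so nothing ferry-specific is assumed:

    # Sketch of the "module.path/ClassName" lookup used by _load_class() above,
    # demonstrated with a standard-library class.
    import importlib
    import inspect

    def load_class(class_name, *ctor_args):
        module_path, clazz_name = class_name.split("/")
        module = importlib.import_module(module_path)
        for _, obj in inspect.getmembers(module, inspect.isclass):
            if obj.__module__ == module_path and obj.__name__ == clazz_name:
                return obj(*ctor_args)
        return None

    d = load_class("collections/OrderedDict")
    print(type(d))   # <class 'collections.OrderedDict'>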
Example #11
File: cloud.py  Project: TENorbert/ferry
class CloudFabric(object):

    def __init__(self, bootstrap=False):
        self.name = "cloud"
        self.repo = 'public'

        self._init_cloudfabric()
        self.bootstrap = bootstrap
        self.cli = DockerCLI()
        self.cli.key = self.launcher._get_host_key()
        self.docker_user = self.cli.docker_user
        self.inspector = CloudInspector(self)

        # The system returns information regarding 
        # the instance types. 
        self.system = self.launcher.system

    def _load_class(self, class_name):
        """
        Dynamically load a class
        """
        s = class_name.split("/")
        module_path = s[0]
        clazz_name = s[1]
        module = importlib.import_module(module_path)
        for n, o in inspect.getmembers(module):
            if inspect.isclass(o):
                if o.__module__ == module_path and o.__name__ == clazz_name:
                    return o(self)
        return None

    def _init_cloudfabric(self):
        conf = ferry.install.read_ferry_config()

        # The actual cloud launcher. This lets us customize 
        # launching into different cloud environments that each
        # may be slightly different (HP Cloud, Rackspace, etc). 
        launcher = conf["system"]["mode"]
        self.launcher = self._load_class(launcher)

        # Determine if we are using this fabric in proxy
        # mode. Proxy mode means that the client is external
        # to the network, but the controller has direct access. 
        self.proxy = bool(conf["system"]["proxy"])

        # Check if the launcher supports proxy mode. 
        if self.proxy and not self.launcher.support_proxy():
            logging.error("%s does not support proxy mode" % self.launcher.name)

    def get_data_dir(self):
        return "/ferry/data"

    def installed_images(self):
        """
        List all the installed Docker images. We should really
        contact the index server responsible for serving out
        images and ask it. 
        """
        images = []
        image_string = self.cli.images()
        for image in image_string.split():
            image_name = image.strip()
            if image_name != "REPOSITORY" and image_name != "<none>":
                images.append(image_name)
        return images

    def version(self):
        """
        Fetch the current docker version.
        """
        return "0.1"

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker. 
        """
        return "xfs"

    def quit(self):
        """
        Quit the cloud fabric. 
        """
        logging.info("quitting cloud fabric")
        self.launcher.quit()

    def restart(self, cluster_uuid, service_uuid, containers):
        """
        Restart the stopped containers.
        """
        # First need to restart all the virtual machines.
        logging.warning("restarting virtual machines...")
        addrs = self.launcher._restart_stack(cluster_uuid, service_uuid)
        
        # Then need to restart Ferry on all the hosts. 
        logging.warning("restarting ferry...")
        cmd = "source /etc/profile && ferry server -n"
        for ip in addrs:
            output, err, _ = self.cmd_raw(self.cli.key, ip, cmd, self.docker_user)

        # Finally, restart the stopped containers. 
        logging.warning("restarting containers...")
        cmd = "cat /ferry/containers/container.pid && rm /ferry/containers/container.pid"
        for c in containers:
            # Before restarting the containers, we need to learn their
            # container IDs, which should be stored in a cidfile. 
            output, err, _ = self.cmd_raw(self.cli.key, c.external_ip, cmd, self.launcher.ssh_user)
            c.container = output.strip()
            self.cli.start(image = c.image,
                           container = c.container, 
                           service_type = c.service_type,
                           keydir = c.keydir,
                           keyname = c.keyname,
                           privatekey = c.privatekey,
                           volumes = c.volumes,
                           args = c.args,
                           server = c.external_ip, 
                           user = self.launcher.ssh_user,
                           inspector = self.inspector,
                           background = True)
        return containers

    def _copy_public_keys(self, container, server):
        """
        Copy over the public ssh key to the server so that we can start the
        container correctly. 
        """

        keydir = container['keydir'].values()[0]
        self.copy_raw(key = self.cli.key,
                      ip = server, 
                      from_dir = keydir + "/" + container["keyname"], 
                      to_dir = "/ferry/keys/",
                      user = self.launcher.ssh_user)

    def _verify_public_keys(self, server):
        """
        Verify that the public key has been copied over correctly. 
        """
        out, _, _ = self.cmd_raw(key = self.cli.key, 
                                 ip = server, 
                                 cmd = "ls /ferry/keys",
                                 user = self.launcher.ssh_user)
        if out and out.strip() == "":
            return False
        elif out:
            logging.warning("found ssh key: " + out.strip())
            return True
        else:
            return False

    def _verify_ferry_server(self, server):
        """
        Verify that the docker daemon is actually running on the server. 
        """

        # Try a couple times before giving up. 
        for i in range(0, 2):
            out, err, success = self.cmd_raw(key = self.cli.key, 
                                             ip = server, 
                                             cmd = "if [ -f /var/run/ferry.pid ]; then echo \"launched\"; fi",
                                             user = self.launcher.ssh_user)
            if success and out and out.strip() != "":
                logging.warning("docker daemon " + out.strip())
                return True
            elif not success:
                return False
            else:
                time.sleep(6)
        return False

    def _execute_server_init(self, server):
        """
        Restart the Ferry docker daemon. 
        """
        out, err, _ = self.cmd_raw(key = self.cli.key, 
                                   ip = server, 
                                   cmd = "ferry server -n && sleep 3",
                                   user = self.launcher.ssh_user)
        logging.warning("restart ferry out: " + out)
        logging.warning("restart ferry err: " + err)
        
    def execute_docker_containers(self, cinfo, lxc_opts, private_ip, server_ip, background=True, simulate=False):
        """
        Run the Docker container and use the cloud inspector to get information
        about the container/VM.
        """

        host_map = None
        host_map_keys = []
        mounts = {}        

        if not 'default_cmd' in cinfo:
            cinfo['default_cmd'] = "/service/sbin/startnode init"
        container = self.cli.run(service_type = cinfo['type'], 
                                 image = cinfo['image'], 
                                 volumes = cinfo['volumes'],
                                 keydir = { '/service/keys' : '/ferry/keys' }, 
                                 keyname = cinfo['keyname'], 
                                 privatekey = cinfo['privatekey'], 
                                 open_ports = host_map_keys,
                                 host_map = host_map, 
                                 expose_group = cinfo['exposed'], 
                                 hostname = cinfo['hostname'],
                                 default_cmd = cinfo['default_cmd'],
                                 args= cinfo['args'],
                                 lxc_opts = lxc_opts,
                                 server = server_ip,
                                 user = self.launcher.ssh_user, 
                                 inspector = self.inspector,
                                 background = background,
                                 simulate= simulate)
        if container:
            container.manage_ip = server_ip
            container.internal_ip = private_ip
            if self.proxy:
                # Otherwise, the controller can only interact with the
                # VMs via their public IP address. 
                container.external_ip = server_ip
            else:
                # When the fabric controller is acting in proxy mode, 
                # it can contact the VMs via their private addresses. 
                container.external_ip = private_ip

            container.vm = self.launcher.default_personality
            container.default_user = self.cli.docker_user

            if 'name' in cinfo:
                container.name = cinfo['name']

            if 'volume_user' in cinfo:
                mounts[container] = {'user':cinfo['volume_user'],
                                     'vols':cinfo['volumes'].items()}

            # We should wait for a second to let the ssh server start
            # on the containers (otherwise sometimes we get a connection refused)
            time.sleep(3)

            return container, mounts
        else:
            return None, None

    def alloc(self, cluster_uuid, service_uuid, container_info, ctype):
        """
        Allocate a new service cluster. 
        """
        containers = self.launcher.alloc(cluster_uuid, service_uuid, container_info, ctype, self.proxy)
        
        if not containers:
            # The underlying cloud infrastructure could not allocate
            # the service cluster. Sometimes it's just a temporary glitch, so
            # tear down the failed attempt and try again. 
            logging.error("Failed to allocate service cluster. Trying again.")
            self.launcher._delete_stack(cluster_uuid, service_uuid)
            return self.launcher.alloc(cluster_uuid, service_uuid, container_info, ctype, self.proxy)
        else:
            return containers

    def stop(self, cluster_uuid, service_uuid, containers):
        """
        Stop the running containers
        """
        self.remove(cluster_uuid, service_uuid, containers)

    def halt(self, cluster_uuid, service_uuid, containers):
        """
        Safely stop the containers.
        """

        # Stop the containers in the VMs. Stopping the container
        # should jump us back out to the host. Afterwards, quit
        # ferry so that we can restart later. 
        halt = '/service/sbin/startnode halt'
        ferry = 'ferry quit'
        for c in containers:
            self.cmd_raw(c.privatekey, c.external_ip, halt, c.default_user)
            self.cmd_raw(self.cli.key, c.manage_ip, ferry, self.launcher.ssh_user)

        # Now go ahead and stop the VMs. 
        self.launcher._stop_stack(cluster_uuid, service_uuid)

    def remove(self, cluster_uuid, service_uuid, containers):
        """
        Remove the running instances
        """
        self.launcher._delete_stack(cluster_uuid, service_uuid)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.privatekey, c.external_ip, from_dir, to_dir, c.default_user)

    def copy_raw(self, key, ip, from_dir, to_dir, user):
        opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
        scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir
        logging.warning(scp)
        robust_com(scp)
        
    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output. 
        """
        all_output = {}
        for c in containers:
            output, _, _ = self.cmd_raw(c.privatekey, c.external_ip, cmd, c.default_user)
            if output.strip() != "":
                all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, key, ip, cmd, user):
        ip = user + '@' + ip
        ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
        logging.warning(ssh)
        return robust_com(ssh)
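
installed_images() above whitespace-splits whatever cli.images() returns and drops the "REPOSITORY" header token and "<none>" placeholders. A small sketch of that filtering, assuming cli.images() yields a whitespace-separated list of repository names (the sample string is illustrative):

    # Sketch of the filtering done by installed_images() above.
    image_string = "REPOSITORY ferry/hadoop ferry/spark <none> ferry/openmpi"
    images = [name for name in image_string.split()
              if name not in ("REPOSITORY", "<none>")]
    print(images)   # ['ferry/hadoop', 'ferry/spark', 'ferry/openmpi']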
Example #12
File: local.py  Project: TENorbert/ferry
class LocalFabric(object):
    def __init__(self, bootstrap=False):
        self.name = "local"
        self.repo = 'public'
        self.cli = DockerCLI(ferry.install.DOCKER_REGISTRY)
        self.docker_user = self.cli.docker_user
        self.inspector = DockerInspector(self.cli)
        self.bootstrap = bootstrap

        # The system returns information regarding 
        # the instance types. 
        self.system = System()

        # Bootstrap mode means that the DHCP network
        # isn't available yet, so we can't use the network. 
        if not bootstrap:
            self.network = DHCPClient(ferry.install._get_gateway())

    def _get_host(self):
        cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

    def get_data_dir(self):
        if 'FERRY_SCRATCH' in os.environ:
            scratch_dir = os.environ['FERRY_SCRATCH']
        else:
            scratch_dir = os.path.join(ferry.install._get_ferry_dir(server=True), 'scratch')

        if not os.path.isdir(scratch_dir):
            os.makedirs(scratch_dir)

        return scratch_dir

    def installed_images(self):
        """
        List all the installed Docker images. 
        """
        images = []
        image_string = self.cli.images()
        for image in image_string.split():
            image_name = image.strip()
            if image_name != "REPOSITORY" and image_name != "<none>":
                images.append(image_name)
        return images

    def version(self):
        """
        Fetch the current docker version.
        """
        return self.cli.version()

    def get_fs_type(self):
        """
        Get the filesystem type associated with docker. 
        """
        return self.cli.get_fs_type()

    def quit(self):
        """
        Quit the local fabric. 
        """
        logging.info("quitting local fabric")

    def restart(self, cluster_uuid, service_uuid, containers):
        """
        Restart the stopped containers.
        """
        new_containers = []
        for c in containers:
            container = self.cli.start(image = c.image,
                                       container = c.container,
                                       service_type = c.service_type,
                                       keydir = c.keydir,
                                       keyname = c.keyname,
                                       privatekey = c.privatekey,
                                       volumes = c.volumes,
                                       args = c.args,
                                       inspector = self.inspector)
            container.default_user = self.docker_user
            new_containers.append(container)

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)
        return new_containers

    def alloc(self, cluster_uuid, service_uuid, container_info, ctype):
        """
        Allocate several instances.
        """
        containers = []
        mounts = {}
        for c in container_info:
            # Get a new IP address for this container and construct
            # a default command. 
            gw = ferry.install._get_gateway().split("/")[0]

            # Check if we should use the manual LXC option. 
            if not 'netenable' in c:
                ip = self.network.assign_ip(c)
                lxc_opts = ["lxc.network.type = veth",
                            "lxc.network.ipv4 = %s/24" % ip, 
                            "lxc.network.ipv4.gateway = %s" % gw,
                            "lxc.network.link = ferry0",
                            "lxc.network.name = eth0",
                            "lxc.network.flags = up"]

                # Check if we need to forward any ports. 
                host_map = {}
                for p in c['ports']:
                    p = str(p)
                    s = p.split(":")
                    if len(s) > 1:
                        host = s[0]
                        dest = s[1]
                    else:
                        host = self.network.random_port()
                        dest = s[0]
                    host_map[dest] = [{'HostIp' : '0.0.0.0',
                                       'HostPort' : host}]
                    self.network.forward_rule('0.0.0.0/0', host, ip, dest)
                host_map_keys = host_map.keys()
            else:
                lxc_opts = None
                host_map = None
                host_map_keys = []

            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            if not 'default_cmd' in c:
                c['default_cmd'] = "/service/sbin/startnode init"
            container = self.cli.run(service_type = c['type'], 
                                     image = c['image'], 
                                     volumes = c['volumes'],
                                     keydir = c['keydir'], 
                                     keyname = c['keyname'], 
                                     privatekey = c['privatekey'], 
                                     open_ports = host_map_keys,
                                     host_map = host_map, 
                                     expose_group = c['exposed'], 
                                     hostname = c['hostname'],
                                     default_cmd = c['default_cmd'],
                                     args= c['args'],
                                     lxc_opts = lxc_opts,
                                     inspector = self.inspector,
                                     background = False)
            if container:
                container.default_user = self.docker_user
                containers.append(container)
                if not 'netenable' in c:
                    container.internal_ip = ip
                    container.external_ip = ip
                    self.network.set_owner(ip, container.container)

                if 'name' in c:
                    container.name = c['name']

                if 'volume_user' in c:
                    mounts[container] = {'user':c['volume_user'],
                                         'vols':c['volumes'].items()}

                # We should wait for a second to let the ssh server start
                # on the containers (otherwise sometimes we get a connection refused)
                time.sleep(3)

        # Check if we need to set the file permissions
        # for the mounted volumes. 
        for c, i in mounts.items():
            for _, v in i['vols']:
                self.cmd([c], 'chown -R %s %s' % (i['user'], v))

        return containers

    def stop(self, cluster_uuid, service_uuid, containers):
        """
        Forcibly stop the running containers
        """
        for c in containers:
            if type(c) is dict:
                self.cli.stop(c['container'])
            else:
                self.cli.stop(c.container)

    def remove(self, cluster_uuid, service_uuid, containers):
        """
        Remove the running instances
        """
        for c in containers:
            for p in c.ports.keys():
                self.network.delete_rule(c.internal_ip, p)
            self.network.free_ip(c.internal_ip)
            self.cli.remove(c.container)

    def snapshot(self, containers, cluster_uuid, num_snapshots):
        """
        Save/commit the running instances
        """
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, 
                                                      cluster_uuid,
                                                      c.host_name,
                                                      num_snapshots)
            snapshots.append( {'image' : snapshot_name,
                               'base' : c.image,
                               'type' : c.service_type, 
                               'name' : c.name, 
                               'args' : c.args,
                               'ports': c.ports} )
            self.cli.commit(c, snapshot_name)
        return snapshots

    def push(self, image, registry=None):
        """
        Push an image to a remote registry.
        """        
        return self.cli.push(image, registry)

    def pull(self, image):
        """
        Pull a remote image to the local registry. 
        """        
        return self.cli.pull(image)

    def halt(self, cluster_uuid, service_uuid, containers):
        """
        Safely stop the containers.
        """
        cmd = '/service/sbin/startnode halt'
        for c in containers:
            self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user)

    def copy(self, containers, from_dir, to_dir):
        """
        Copy over the contents to each container
        """
        for c in containers:
            self.copy_raw(c.privatekey, c.internal_ip, from_dir, to_dir, c.default_user)

    def copy_raw(self, key, ip, from_dir, to_dir, user):
        if key:
            opts = '-o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
            scp = 'scp ' + opts + ' -i ' + key + ' -r ' + from_dir + ' ' + user + '@' + ip + ':' + to_dir
            logging.warning(scp)
            robust_com(scp)

    def cmd(self, containers, cmd):
        """
        Run a command on all the containers and collect the output. 
        """
        all_output = {}
        for c in containers:
            output = self.cmd_raw(c.privatekey, c.internal_ip, cmd, c.default_user)
            if output.strip() != "":
                all_output[c.host_name] = output.strip()
        return all_output

    def cmd_raw(self, key, ip, cmd, user):
        if key:
            ip = user + '@' + ip
            ssh = 'LC_ALL=C && ssh -o ConnectTimeout=20 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
            logging.warning(ssh)
            out, _, _ = robust_com(ssh)
            return out
        else:
            return ''

    def login(self):
        """
        Login to a remote registry. Use the login credentials
        found in the user's home directory. 
        """
        config = ferry.install.read_ferry_config()
        args = config['docker']
        if all(k in args for k in ("user","password","email")):
            if 'server' in args:
                server = args['server']
            else:
                server = ''
            return self.cli.login(user = args['user'], 
                                  password = args['password'],
                                  email = args['email'],
                                  registry = server)
        logging.error("Could not open login credentials " + ferry.install.DEFAULT_LOGIN_KEY)
        return False
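
Note: LocalFabric.alloc expects each entry of container_info to be a dict whose
keys are read directly in the loop above ('type', 'image', 'volumes', 'keydir',
'keyname', 'privatekey', 'ports', 'exposed', 'hostname', 'args', plus optional
'default_cmd', 'name', 'volume_user', and 'netenable'). Below is a hypothetical
usage sketch; the UUIDs, image name, and paths are placeholders, not values
from the project.

# Hypothetical usage sketch; image name, paths, and UUIDs are placeholders.
fabric = LocalFabric()
container_info = [{
    'type'       : 'hadoop',
    'image'      : 'ferry/hadoop',
    'volumes'    : {'/ferry/data/0' : '/service/data'},  # host dir -> container dir (assumed)
    'keydir'     : '/ferry/keys',
    'keyname'    : 'ferry-key',
    'privatekey' : '/ferry/keys/id_rsa',
    'ports'      : ['8080', '2222:22'],                  # 'dest' or 'host:dest'
    'exposed'    : [],
    'hostname'   : 'node-0',
    'args'       : None
}]
containers = fabric.alloc('cluster-uuid', 'service-uuid', container_info, 'hadoop')
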
Example #13
0
File: fabric.py Project: zbyufei/ferry
 def __init__(self):
     self.repo = 'public'
     self.docker_user = '******'
     self.cli = DockerCLI()
     self.network = DHCPClient(self._get_gateway())
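
Note: the DHCPClient class used throughout these snippets is not shown. Judging
only from the calls made in the code above, its interface would look roughly
like the stub below; this is inferred from usage, not the project's actual
implementation.

# Interface stub inferred from how DHCPClient is called in these snippets.
class DHCPClient(object):
    def __init__(self, gateway_cidr):
        self.gateway = gateway_cidr            # e.g. '10.1.0.1/24'

    def assign_ip(self, container_info):       # pick a free address for a container
        raise NotImplementedError

    def free_ip(self, ip):                     # return an address to the pool
        raise NotImplementedError

    def random_port(self):                     # unused host port for forwarding
        raise NotImplementedError

    def forward_rule(self, source, host_port, dest_ip, dest_port):
        raise NotImplementedError              # e.g. install an iptables DNAT rule

    def delete_rule(self, dest_ip, dest_port):
        raise NotImplementedError

    def set_owner(self, ip, container_id):     # record which container owns the IP
        raise NotImplementedError
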
Example #14
0
File: fabric.py Project: zbyufei/ferry
class DockerFabric(object):
    def __init__(self):
        self.repo = 'public'
        self.docker_user = '******'
        self.cli = DockerCLI()
        self.network = DHCPClient(self._get_gateway())

    def _get_gateway(self):
        cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        gw = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

        cmd = "ifconfig drydock0 | grep 'inet addr:' | cut -d: -f4 | awk '{ print $1}'"
        netmask = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()
        mask = map(int, netmask.split("."))
        cidr = 1
        if mask[3] == 0:
            cidr = 8
        if mask[2] == 0:
            cidr *= 2

        return "%s/%d" % (gw, 32 - cidr)

    def _get_host(self):
        cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
        return Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()

    """
    Read the location of the directory containing the keys
    used to communicate with the containers. 
    """
    def _read_key_dir(self):
        f = open(ferry.install.DEFAULT_DOCKER_KEY, 'r')
        k = f.read().strip().split("://")
        return k[1], k[0]
 
    """
    Fetch the current docker version.
    """
    def version(self):
        return self.cli.version()

    """
    Get the filesystem type associated with docker. 
    """
    def get_fs_type(self):
        return self.cli.get_fs_type()

    """
    Restart the stopped containers.
    """
    def restart(self, container_info):
        containers = []
        for c in container_info:
            container = self.cli.start(c['container'],
                                       c['type'],
                                       c['keys'],
                                       c['volumes'],
                                       c['args'])
            container.default_user = self.docker_user
            containers.append(container)

        # We should wait for a second to let the ssh server start
        # on the containers (otherwise sometimes we get a connection refused)
        time.sleep(2)
        return containers

    """
    Allocate several instances.
    """
    def alloc(self, container_info):
        containers = []
        mounts = {}
        for c in container_info:
            # Get a new IP address for this container and construct
            # a default command. 
            ip = self.network.assign_ip(c)
            gw = self._get_gateway().split("/")[0]

            lxc_opts = ["lxc.network.type = veth",
                        "lxc.network.ipv4 = %s" % ip, 
                        "lxc.network.ipv4.gateway = %s" % gw,
                        "lxc.network.link = drydock0",
                        "lxc.network.name = eth0",
                        "lxc.network.flags = up"]

            c['default_cmd'] = "/service/sbin/startnode init"

            # Check if we need to forward any ports. 
            host_map = {}
            for p in c['ports']:
                p = str(p)
                s = p.split(":")
                if len(s) > 1:
                    host = s[0]
                    dest = s[1]
                else:
                    host = self.network.random_port()
                    dest = s[0]
                host_map[dest] = [{'HostIp' : '0.0.0.0',
                                   'HostPort' : host}]
                self.network.forward_rule('0.0.0.0/0', host, ip, dest)

            # Start a container with a specific image, in daemon mode,
            # without TTY, and on a specific port
            container = self.cli.run(service_type = c['type'], 
                                     image = c['image'], 
                                     volumes = c['volumes'],
                                     keys = c['keys'], 
                                     open_ports = host_map.keys(),
                                     host_map = host_map, 
                                     expose_group = c['exposed'], 
                                     hostname = c['hostname'],
                                     default_cmd = c['default_cmd'],
                                     args= c['args'],
                                     lxc_opts = lxc_opts)
            if container:
                container.default_user = self.docker_user
                container.internal_ip = ip
                containers.append(container)
                self.network.set_owner(ip, container.container)

                if 'name' in c:
                    container.name = c['name']

                if 'volume_user' in c:
                    mounts[container] = {'user':c['volume_user'],
                                         'vols':c['volumes'].items()}

                # We should wait for a second to let the ssh server start
                # on the containers (otherwise sometimes we get a connection refused)
                time.sleep(2)

                # Check if we need to set the file permissions
                # for the mounted volumes. Use a distinct loop variable
                # so the outer container variable 'c' is not shadowed.
                for mc, i in mounts.items():
                    for _, v in i['vols']:
                        self.cmd([mc], 'chown -R %s %s' % (i['user'], v))

        return containers

    """
    Stop the running instances
    """
    def stop(self, containers):
        for c in containers:
            self.cli.stop(c.container)

    """
    Remove the running instances
    """
    def remove(self, containers):
        for c in containers:
            for p in c.ports.keys():
                self.network.delete_rule(c.internal_ip, p)
            self.network.free_ip(c.internal_ip)
            self.cli.remove(c.container)

    """
    Save/commit the running instances
    """
    def snapshot(self, containers, cluster_uuid, num_snapshots):
        snapshots = []
        for c in containers:
            snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, 
                                                      cluster_uuid,
                                                      c.host_name,
                                                      num_snapshots)
            snapshots.append( {'image' : snapshot_name,
                               'base' : c.image,
                               'type' : c.service_type, 
                               'name' : c.name, 
                               'args' : c.args,
                               'ports': c.ports} )
            self.cli.commit(c, snapshot_name)
        return snapshots

    """
    Upload these containers to the specified registry.
    """
    def deploy(self, containers, registry=None):
        deployed = []
        for c in containers:
            image_name = '%s-%s:DEPLOYED' % (c.image, 
                                             c.host_name)
            deployed.append( {'image' : image_name,
                              'base' : c.image,
                              'type' : c.service_type, 
                              'name' : c.name, 
                              'args' : c.args,
                              'ports': c.ports} )
            if not registry:
                self.cli.commit(c, image_name)
            else:
                self.cli.push(c, registry)
        return deployed

    """
    Copy over the contents to each container
    """
    def copy(self, containers, from_dir, to_dir):
        for c in containers:
            keydir, _ = self._read_key_dir()
            opts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
            key = '-i ' + keydir + '/id_rsa'
            scp_cmd = 'scp ' + opts + ' ' + key + ' -r ' + from_dir + ' ' + self.docker_user + '@' + c.internal_ip + ':' + to_dir
            output = Popen(scp_cmd, stdout=PIPE, shell=True).stdout.read()

    """
    Run a command on all the containers and collect the output. 
    """
    def cmd(self, containers, cmd):
        all_output = {}
        keydir, _ = self._read_key_dir()
        key = keydir + '/id_rsa'
        for c in containers:
            ip = self.docker_user + '@' + c.internal_ip
            ssh = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ' + key + ' -t -t ' + ip + ' \'%s\'' % cmd
            logging.warning(ssh)
            output = Popen(ssh, stdout=PIPE, shell=True).stdout.read()
            all_output[c] = output.strip()

        return all_output
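
Note: the netmask heuristic in _get_gateway above only distinguishes /24 and
/16 masks. A more general conversion, shown here purely as an illustrative
sketch rather than code from the project, can count the set bits of the
dotted-quad netmask:

# Illustrative sketch (not from the ferry project): derive the CIDR prefix
# length by counting the set bits in a dotted-quad netmask.
def netmask_to_prefix(netmask):
    bits = 0
    for octet in netmask.split('.'):
        bits += bin(int(octet)).count('1')
    return bits

# netmask_to_prefix('255.255.255.0') == 24
# netmask_to_prefix('255.255.0.0')   == 16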