Exemplo n.º 1
0
    def test_update_headers(self):
        """The update_headers decorator merges the client's configured
        HttpHeaders into the headers passed by the caller."""
        def passthrough(self, headers=None):
            return headers

        decorated = update_headers(passthrough)
        client = APIClient()
        client._auth_configs = {}

        # With no configured HttpHeaders, headers pass through untouched.
        assert decorated(client, headers=None) is None
        assert decorated(client, headers={}) == {}
        assert decorated(client, headers={'Content-type': 'application/json'}) == {
            'Content-type': 'application/json',
        }

        locale_headers = {
            'X-Docker-Locale': 'en-US',
        }
        client._auth_configs = {
            'HttpHeaders': locale_headers
        }

        # Configured headers are merged into whatever the caller supplies.
        assert decorated(client, headers=None) == locale_headers
        assert decorated(client, headers={}) == locale_headers
        assert decorated(client, headers={'Content-type': 'application/json'}) == {
            'Content-type': 'application/json',
            'X-Docker-Locale': 'en-US',
        }
Exemplo n.º 2
0
 def test_kwargs_from_env_tls(self):
     """kwargs_from_env must honour DOCKER_HOST/CERT_PATH/TLS_VERIFY."""
     os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                       DOCKER_CERT_PATH=TEST_CERT_DIR,
                       DOCKER_TLS_VERIFY='1')
     kwargs = kwargs_from_env(assert_hostname=False)
     tls = kwargs['tls']
     self.assertEqual(kwargs['base_url'], 'https://192.168.59.103:2376')
     self.assertIn('ca.pem', tls.ca_cert)
     self.assertIn('cert.pem', tls.cert[0])
     self.assertIn('key.pem', tls.cert[1])
     self.assertEqual(tls.assert_hostname, False)
     self.assertTrue(tls.verify)
     try:
         # The kwargs must be directly usable to build a client.
         client = APIClient(**kwargs)
         self.assertEqual(client.base_url, kwargs['base_url'])
         self.assertEqual(client.verify, tls.ca_cert)
         self.assertEqual(client.cert, tls.cert)
     except TypeError as e:
         self.fail(e)
Exemplo n.º 3
0
 def test_kwargs_from_env_tls(self):
     """kwargs_from_env must honour DOCKER_HOST/CERT_PATH/TLS_VERIFY."""
     os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                       DOCKER_CERT_PATH=TEST_CERT_DIR,
                       DOCKER_TLS_VERIFY='1')
     kwargs = kwargs_from_env(assert_hostname=False)
     tls = kwargs['tls']
     assert kwargs['base_url'] == 'https://192.168.59.103:2376'
     assert 'ca.pem' in tls.ca_cert
     assert 'cert.pem' in tls.cert[0]
     assert 'key.pem' in tls.cert[1]
     assert tls.assert_hostname is False
     assert tls.verify
     try:
         # The kwargs must be directly usable to build a client.
         client = APIClient(**kwargs)
         assert client.base_url == kwargs['base_url']
         assert client.verify == tls.ca_cert
         assert client.cert == tls.cert
     except TypeError as e:
         self.fail(e)
Exemplo n.º 4
0
 def test_kwargs_from_env_tls_verify_false(self):
     """An empty DOCKER_TLS_VERIFY disables certificate verification."""
     os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                       DOCKER_CERT_PATH=TEST_CERT_DIR,
                       DOCKER_TLS_VERIFY='')
     kwargs = kwargs_from_env(assert_hostname=True)
     tls = kwargs['tls']
     assert kwargs['base_url'] == 'tcp://192.168.59.103:2376'
     assert 'ca.pem' in tls.ca_cert
     assert 'cert.pem' in tls.cert[0]
     assert 'key.pem' in tls.cert[1]
     assert tls.assert_hostname is True
     assert tls.verify is False
     parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
     try:
         # The kwargs must be directly usable to build a client.
         client = APIClient(**kwargs)
         assert client.base_url == parsed_host
         assert client.cert == tls.cert
         assert not tls.verify
     except TypeError as e:
         self.fail(e)
Exemplo n.º 5
0
    def cleanup(self):
        """Best-effort removal of dangling (unused) volumes and images.

        Failures of individual removals are swallowed so that one stuck
        resource does not abort the rest of the cleanup.
        """
        client = APIClient()

        # Remove unused volumes. Pass the filter via keyword for
        # consistency with the images() call below; .get() avoids a
        # KeyError if the response lacks a 'Volumes' key.
        volumes = client.volumes(filters={'dangling': True})
        for volume in (volumes or {}).get('Volumes') or []:
            try:
                client.remove_volume(volume['Name'])
            except Exception:
                pass  # best effort: volume may be in use or already gone

        # Remove unused (dangling) images.
        for image in client.images(filters={'dangling': True}) or []:
            try:
                client.remove_image(image['Id'], force=True)
            except Exception:
                pass  # best effort: image may be referenced or already gone
Exemplo n.º 6
0
 def __init__(self, name, process_id=None):
     """Create a network wrapper.

     Parameters:
         name: docker network name.
         process_id: optional id of the owning test process.
     """
     self.name = name
     self.process_id = process_id
     self._docker_client = docker.from_env()
     # Bug fix: kwargs_from_env() returns a dict of keyword arguments and
     # must be unpacked; passing it positionally handed the whole dict to
     # APIClient as its base_url.
     self._docker_api_client = APIClient(**kwargs_from_env())
     self._hosts = None
Exemplo n.º 7
0
class Network(object):
    """Atomix test network.

    Wraps the docker SDK to create/destroy a bridge network and to inject
    network faults (partitions, latency, loss, reordering, ...) into the
    containers attached to it.
    """
    def __init__(self, name, process_id=None):
        self.name = name
        self.process_id = process_id
        self._docker_client = docker.from_env()
        # Bug fix: kwargs_from_env() returns a dict of keyword arguments
        # and must be unpacked; passing it positionally handed the whole
        # dict to APIClient as its base_url.
        self._docker_api_client = APIClient(**kwargs_from_env())
        self._hosts = None

    @property
    def docker_network(self):
        """The docker network object; raises UnknownNetworkError if absent."""
        try:
            return self._docker_client.networks.get(self.name)
        except docker.errors.NotFound:
            raise UnknownNetworkError(self.name)

    @property
    def subnet(self):
        """The network's subnet in CIDR notation (e.g. '172.18.0.0/24')."""
        return str(
            self._docker_api_client.inspect_network(
                self.name)['IPAM']['Config'][0]['Subnet'])

    @property
    def gateway(self):
        """The network's gateway IP address as a string."""
        return str(
            self._docker_api_client.inspect_network(
                self.name)['IPAM']['Config'][0]['Gateway'])

    @property
    def hosts(self):
        """Lazily-created iterator over not-yet-assigned host IPs."""
        if self._hosts is None:
            self._hosts = self._create_hosts_iterator()
        return self._hosts

    def _create_hosts_iterator(self):
        """Creates a host iterator from available hosts by inspecting existing containers attached to the network."""
        # IPs already taken: the gateway plus every attached container.
        ips = set([self.gateway] + [
            str(IPv4Interface(container['IPv4Address']).ip)
            for container in self._docker_api_client.inspect_network(self.name)
            ['Containers'].values()
        ])
        # NOTE: unicode() implies Python 2; IPv4Network requires a unicode
        # address string there.
        for host in IPv4Network(unicode(self.subnet)).hosts():
            host = str(host)
            if host not in ips:
                yield host

    def setup(self, supernet='172.18.0.0/16', subnet=None, gateway=None):
        """Sets up the network."""
        def find_subnet():
            # Collect the subnets of all existing docker networks and pick
            # the first /24 of the supernet that is not already in use.
            docker_subnets = []
            for network in self._docker_client.networks.list():
                network_info = self._docker_api_client.inspect_network(
                    network.name)
                if len(network_info['IPAM']['Config']) > 0:
                    docker_subnets.append(
                        str(network_info['IPAM']['Config'][0]['Subnet']))
            for subnet in IPv4Network(
                    unicode(supernet)).subnets(new_prefix=24):
                if str(subnet) not in docker_subnets:
                    return str(subnet)
            raise UnknownNetworkError(
                "Cannot find available subnet from supernet {}".format(
                    supernet))

        if subnet is None:
            subnet = find_subnet()

        self._hosts = iter(
            [str(host) for host in IPv4Network(unicode(subnet)).hosts()])
        if gateway is None:
            # Reserve the first host address for the gateway.
            gateway = str(next(self._hosts))

        ipam_pool = docker.types.IPAMPool(subnet=subnet, gateway=gateway)
        ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
        logger.info("Creating network")
        labels = {
            'atomix-test': 'true',
            'atomix-process': self.process_id or '',
            'atomix-cluster': self.name
        }
        self._docker_client.networks.create(self.name,
                                            driver='bridge',
                                            ipam=ipam_config,
                                            labels=labels)

    def teardown(self):
        """Tears down the network."""
        logger.info("Removing network")
        try:
            self.docker_network.remove()
        except Exception:
            # Best effort: the network may already be gone. The previous
            # bare except also swallowed SystemExit/KeyboardInterrupt.
            pass

    def partition(self, local, remote=None):
        """Partitions the given local from the given remote using a bi-directional partition."""
        local, remote = self._get_node(local), self._get_node(remote)
        return self.bipartition(local, remote)

    def unipartition(self, local, remote=None):
        """Partitions the given local from the given remote."""
        local, remote = self._get_node(local), self._get_node(remote)
        if remote is not None:
            return self._partition(local, remote)
        else:
            # No remote given: cut the local node off from every peer.
            disruptions = []
            for name, ip in self._interfaces():
                if name != local:
                    disruptions.append(self._partition(local, name))
            return with_context(*disruptions)

    def bipartition(self, node1, node2=None):
        """Creates a bi-directional partition between the two nodes."""
        node1, node2 = self._get_node(node1), self._get_node(node2)
        if node2 is not None:
            return with_context(self._partition(node1, node2),
                                self._partition(node2, node1))
        else:
            # No second node given: bipartition node1 from every peer.
            disruptions = []
            for name, ip in self._interfaces():
                if name != node1:
                    disruptions.append(self.bipartition(node1, name))
            return with_context(*disruptions)

    def heal(self, local=None, remote=None):
        """Heals partitions."""
        local, remote = self._get_node(local), self._get_node(remote)
        if local is not None and remote is not None:
            # Heal both directions of the given pair.
            self._heal(local, remote)
            self._heal(remote, local)
        elif local is not None:
            # Heal everything involving the given node.
            for name, ip in self._interfaces():
                if name != local:
                    self._heal(local, name)
                    self._heal(name, local)
        else:
            # Heal every pair in the network.
            for name1, ip1 in self._interfaces():
                for name2, ip2 in self._interfaces():
                    if name1 != name2:
                        self._heal(name1, name2)
                        self._heal(name2, name1)

    def partition_halves(self):
        """Partitions the network into two halves."""
        disruptions = []
        ips = self._interfaces()
        # Even-indexed nodes form one half, odd-indexed the other.
        for i in range(len(ips)):
            if i % 2 == 0:
                for j in range(len(ips)):
                    if i != j and j % 2 == 1:
                        disruptions.append(
                            self.bipartition(ips[i][0], ips[j][0]))
        return with_context(*disruptions)

    def partition_random(self):
        """Partitions a random node."""
        node1 = self._random_interface()[0]
        node2 = self._random_interface()[0]
        while node1 == node2:
            # Bug fix: the retry previously assigned the whole (name, ip)
            # tuple instead of the name, so the loop condition could never
            # become true again and a tuple leaked into bipartition().
            node2 = self._random_interface()[0]
        return with_context(self.bipartition(node1, node2))

    def partition_bridge(self, node=None):
        """Partitions a node as a bridge to two sides of a cluster."""
        if node is None:
            node = self._random_interface()[0]
        else:
            node = self._get_node(node)

        # Split all other nodes into two halves that cannot talk to each
        # other; the chosen node stays connected to both.
        interfaces = self._interfaces()
        disruptions = []
        for i in range(len(interfaces)):
            if i % 2 == 0 and interfaces[i][0] != node:
                for j in range(len(interfaces)):
                    if i != j and j % 2 == 1 and interfaces[j][0] != node:
                        disruptions.append(
                            self.bipartition(interfaces[i][0],
                                             interfaces[j][0]))
        return with_context(*disruptions)

    def partition_isolate(self, node=None):
        """Isolates the given node from all its peers."""
        if node is None:
            node = self._random_interface()[0]
        else:
            node = self._get_node(node)

        disruptions = []
        for name, ip in self._interfaces():
            if name != node:
                disruptions.append(self.bipartition(node, name))
        return with_context(*disruptions)

    def delay(self,
              node=None,
              latency=50,
              jitter=10,
              correlation=.75,
              distribution='normal'):
        """Delays packets to the given node."""
        if node is None:
            return with_context(*[
                self._delay(name, latency, jitter, correlation, distribution)
                for name, ip in self._interfaces()
            ])
        return self._delay(self._get_node(node), latency, jitter, correlation,
                           distribution)

    def drop(self, node=None, probability=.02, correlation=.25):
        """Drops packets to the given node."""
        if node is None:
            return with_context(*[
                self._drop(name, probability, correlation)
                for name, ip in self._interfaces()
            ])
        return self._drop(self._get_node(node), probability, correlation)

    def reorder(self, node=None, probability=.02, correlation=.5):
        """Reorders packets to the given node."""
        if node is None:
            return with_context(*[
                self._reorder(name, probability, correlation)
                for name, ip in self._interfaces()
            ])
        return self._reorder(self._get_node(node), probability, correlation)

    def duplicate(self, node=None, probability=.005, correlation=.05):
        """Duplicates packets to the given node."""
        if node is None:
            return with_context(*[
                self._duplicate(name, probability, correlation)
                for name, ip in self._interfaces()
            ])
        return self._duplicate(self._get_node(node), probability, correlation)

    def corrupt(self, node=None, probability=.02):
        """Corrupts packets to the given node."""
        if node is None:
            return with_context(*[
                self._corrupt(name, probability)
                for name, ip in self._interfaces()
            ])
        return self._corrupt(self._get_node(node), probability)

    def restore(self, node=None):
        """Restores packets to the given node to normal order."""
        if node is None:
            for name, ip in self._interfaces():
                self._restore(name)
        else:
            self._restore(self._get_node(node))

    def _partition(self, local, remote):
        """Partitions the given local from the given remote."""
        logger.info("Cutting off link %s->%s", local, remote)
        self._run_in_container(local, 'iptables', '-A', 'INPUT', '-s',
                               self._get_ip(remote), '-j', 'DROP', '-w')
        return with_context(lambda: self.heal(local, remote))

    def _heal(self, local, remote):
        """Heals a partition from the given local to the given remote."""
        logger.info("Restoring link %s->%s", local, remote)
        self._run_in_container(local, 'iptables', '-D', 'INPUT', '-s',
                               self._get_ip(remote), '-j', 'DROP', '-w')

    def _delay(self,
               node,
               latency=50,
               jitter=10,
               correlation=.75,
               distribution='normal'):
        """Delays packets to the given node."""
        latency, jitter, correlation = self._millize(latency), self._millize(
            jitter), self._percentize(correlation)
        logger.info(
            "Delaying packets to %s (latency=%s, jitter=%s, correlation=%s, distribution=%s)",
            node, latency, jitter, correlation, distribution)
        self._run_in_container(node, 'tc', 'qdisc', 'add', 'dev', 'eth0',
                               'root', 'netem', 'delay', latency, jitter,
                               correlation, 'distribution', distribution)
        return with_context(lambda: self.restore(node))

    def _drop(self, node, probability=.02, correlation=.25):
        """Drops packets to the given node."""
        probability, correlation = self._percentize(
            probability), self._percentize(correlation)
        logger.info("Dropping packets to %s (probability=%s, correlation=%s)",
                    node, probability, correlation)
        self._run_in_container(node, 'tc', 'qdisc', 'add', 'dev', 'eth0',
                               'root', 'netem', 'loss', probability,
                               correlation)
        return with_context(lambda: self.restore(node))

    def _reorder(self, node, probability=.02, correlation=.5):
        """Reorders packets to the given node."""
        probability, correlation = self._percentize(
            probability), self._percentize(correlation)
        logger.info(
            "Reordering packets to %s (probability=%s, correlation=%s)", node,
            probability, correlation)
        self._run_in_container(node, 'tc', 'qdisc', 'add', 'dev', 'eth0',
                               'root', 'netem', 'reorder', probability,
                               correlation)
        return with_context(lambda: self.restore(node))

    def _duplicate(self, node, probability=.005, correlation=.05):
        """Duplicates packets to the given node."""
        probability, correlation = self._percentize(
            probability), self._percentize(correlation)
        logger.info(
            "Duplicating packets to %s (probability=%s, correlation=%s)", node,
            probability, correlation)
        self._run_in_container(node, 'tc', 'qdisc', 'add', 'dev', 'eth0',
                               'root', 'netem', 'duplicate', probability,
                               correlation)
        return with_context(lambda: self.restore(node))

    def _corrupt(self, node, probability=.02):
        """Corrupts packets to the given node."""
        probability = self._percentize(probability)
        logger.info("Corrupting packets to %s (probability=%s)", node,
                    probability)
        self._run_in_container(node, 'tc', 'qdisc', 'add', 'dev', 'eth0',
                               'root', 'netem', 'corrupt', probability)
        return with_context(lambda: self.restore(node))

    def _restore(self, node):
        """Restores packets to the given node to normal order."""
        logger.info("Restoring packets to %s", node)
        self._run_in_container(node, 'tc', 'qdisc', 'del', 'dev', 'eth0',
                               'root')

    def _run_in_container(self, node, *command):
        # Quote each argument so values are passed to the shell verbatim.
        command = ' '.join([shlex_quote(str(arg)) for arg in command])
        self._get_container(node).exec_run(command)

    def _percentize(self, d, digits=2):
        """Format a fraction (0..1) as a tc percentage string, e.g. '2.0%'."""
        return '{}%'.format(round(d * 100, digits))

    def _millize(self, ms):
        """Format a number as a tc milliseconds string, e.g. '50ms'."""
        return '{}ms'.format(ms)

    def _interfaces(self):
        """Lists the (container name, IP) pairs used in the network."""
        containers = self._docker_api_client.inspect_network(
            self.name)['Containers']
        return sorted([(container['Name'],
                        str(IPv4Interface(container['IPv4Address']).ip))
                       for container in containers.values()],
                      key=lambda x: x[0])

    def _random_interface(self):
        """Returns a random interface name, ip pair."""
        return random.choice(self._interfaces())

    def _get_container(self, name):
        return self._docker_client.containers.get(name)

    def _get_ip(self, name):
        return dict(self._interfaces())[name]

    def _get_node(self, name):
        """Resolve a node reference to a container name.

        Integers (or numeric strings) are expanded to '<network>-<n>';
        anything else is assumed to already be a container name.
        """
        if name is None:
            return None
        try:
            return '{}-{}'.format(self.name, int(name))
        except ValueError:
            return name
class dockerfile_creater():
    def __init__(self, dockerdir=getcwd(), dockername=config.get("variable", "dockername"), registry=config.get("variable", "registry")):
        """Dockerfile generator.

        Parameters:
            dockerdir: working directory (default: current directory).
            dockername: file name of the generated Dockerfile.
            registry: docker registry.

        After initialisation, attributes can be set manually:
            healthhttp: command to run an HTTP test on this base OS.

        NOTE(review): the default arguments above are evaluated once, when
        the class is defined (getcwd()/config.get), not per call — confirm
        this is intended.
        """
        # Shell command templates read from the config file.
        self.git_clean = config.get("befehle", "git_clean").split()
        self.git_get = config.get("befehle", "git_get").split()
        self.copy = config.get("befehle", "copy").split()
        self.copydir = config.get("befehle", "copydir").split()
        self.delete = config.get("befehle", "delete").split()
        self.deldir = config.get("befehle", "deldir").split()
        self.wget = config.get("befehle", "wget").split()
        self.unzip = config.get("befehle", "unzip").split()
        self.pipsearch = config.get("befehle", "pip_privet_search").split()

        self.dockertag = config.get("befehle", "dockertag").split()
        self.dockerpush = config.get("befehle", "dockerpush").split()
        self.dockerrmi = config.get("befehle", "dockerrmi").split()
        environ["DOCKER_HOST"] = config.get("variable", "deamon")
        self.docker_deamon = APIClient(base_url=config.get("variable", "deamon"), version=config.get("variable", "deamon_version"))
        self.pippurl = config.get("variable", "privet_pip")

        self.workdir = getcwd()
        self.dockerfile = []  # accumulated Dockerfile lines
        self.copyfiles = []   # files copied into the work directory
        self.tag = None
        self.gitdir = ""
        self.httpfile = ""

        self.healthhttp = ""

        self.registry = registry
        self.dockerdir = dockerdir
        self.dockername = dockername
        # Tag names used in the build/test/promote cycle.
        self.ptag = config.get("variable", "product_tag")
        self.otag = config.get("variable", "old_tag")
        self.ttag = config.get("variable", "test_tag")
        self.ltag = config.get("variable", "latest_tag")
        self.ftag = config.get("variable", "fail_tag")
        # Bug fix: self.production used to stay unset when an argument
        # other than "-p" was supplied, causing an AttributeError later.
        self.production = len(argv) > 1 and argv[1] == "-p"

    # Schreibe Config

    @staticmethod
    def write_config():
        """Write an example config file."""
        with open('dockerfile_generator_example.cfg', 'wb') as example_file:
            config.write(example_file)

    # git Methoden

    def git_clean_m(self, path):
        """Refresh (clean) a git working directory.

        Parameter:
            path: directory path of the git repository.
        """
        chdir(path)
        try:
            call(self.git_clean)
        finally:
            # Always return to the original working directory, even if the
            # subprocess call raises.
            chdir(self.workdir)

    def git_get_m(self, repository):
        """Clone a git repository into the work directory.

        Parameter:
            repository: repository URL (e.g. from github). The repository's
            root directory name is stored in the ``gitdir`` attribute.
        """
        chdir(self.dockerdir)
        try:
            call(self.git_get + [repository])
        finally:
            # Restore the original working directory even on failure.
            chdir(self.workdir)
        # 'https://host/foo/bar.git' -> 'bar' (strip path and '.git');
        # computed once instead of twice.
        repo_name = repository.split("/")[-1][:-4]
        self.gitdir = repo_name
        self.copyfiles.append(repo_name)

    # File Methoden in Dockerverzeichnis

    def copy_in(self, pathlist, autoadd=False):
        """Copy files from the original location into the work directory.

        Parameters:
            pathlist: list of path elements leading to the file to copy.
                Format: [path, path, ..., full path in the desired git
                subdirectory].
            autoadd: internal flag used by the add method; do not set
                manually.
        """
        include_dir = self.path2file(pathlist)
        if autoadd:
            entries = listdir(include_dir)
            for entry in entries:
                source = pjoin(include_dir, entry)
                # Directories and plain files use different copy commands.
                command = self.copydir if isdir(source) else self.copy
                call(command + [source, self.dockerdir])
            self.copyfiles = self.copyfiles + entries
        else:
            command = self.copydir if isdir(pjoin(include_dir)) else self.copy
            call(command + [pjoin(include_dir), self.dockerdir])
            self.copyfiles = self.copyfiles + pathlist[-1:]

    def copy_HTTP(self, url):
        """Download a file from the internet into the work directory.

        Parameter:
            url: URL of the file to copy.
        """
        chdir(self.dockerdir)
        try:
            call(self.wget + [url])
        finally:
            # Restore the original working directory even on failure.
            chdir(self.workdir)
        # Final URL path component; computed once instead of twice.
        filename = url.split("/")[-1]
        self.httpfile = filename
        self.copyfiles.append(filename)

    def rm_copy_files(self, debug=False):
        """Delete all copied original files from the work directory.

        Parameter:
            debug: keep the generated Dockerfile when True.
        """
        chdir(self.dockerdir)
        for name in self.copyfiles:
            # Directories and plain files use different delete commands.
            command = self.deldir if isdir(name) else self.delete
            call(command + [name])
        if not debug:
            call(self.delete + [self.dockername])
        chdir(self.workdir)

    # Bearbeite Dockerfile Methoden

    def line_add(self, insert, file=False, nocash=False):
        """Append a line, or the lines of a file, to the Dockerfile.

        Parameters:
            insert: text of the line, or a file path when ``file`` is True.
            file: treat ``insert`` as a file path and append its lines.
            nocash: prepend a random RUN echo line to bust the build cache.
        """
        if nocash:
            self.dockerfile.append("RUN echo %i\n" % (randint(1, 100000)))
        if file:
            # Context manager closes the handle even if reading fails
            # (the original left it open on exceptions).
            with open(insert, "r") as fh:
                for line in fh:
                    # NOTE(review): lines read from a file already end in
                    # '\n', so this appends a second newline — presumably
                    # intended (original behavior kept); confirm.
                    self.dockerfile.append("%s\n" % line)
        else:
            self.dockerfile.append("%s\n" % insert)

    def line_del(self, text):
        """Delete every Dockerfile line containing a search term.

        Parameter:
            text: search term.
        """
        # A comprehension replaces the copy-and-rebuild loop; keeps exactly
        # the lines that do not contain the search term.
        self.dockerfile = [
            line for line in self.dockerfile if line.find(text) == -1
        ]

    def line_rewrite(self, search, text, nocash=False):
        """Replace every Dockerfile line containing a search term.

        Parameters:
            search: search term.
            text: text of the replacement line.
            nocash: append a random RUN echo line to bust the build cache.
        """
        if nocash:
            self.dockerfile.append("RUN echo %i\n" % (randint(1, 100000)))
        # Matching lines are swapped for the replacement; others are kept.
        self.dockerfile = [
            "%s\n" % text if line.find(search) > -1 else line
            for line in self.dockerfile
        ]

    def add_dockerverzeichnis(self, pathlist=None, dirflack=True, delexpose=True, start=False):
        """Insert a Docker product (an imported Dockerfile).

        Parameters:
            pathlist: list of path elements leading to the directory of the
                imported Dockerfile; format [git dir, subdir, subdir, ...].
                A file named 'Dockerfile' is expected there. When empty or
                None the path is read interactively from stdin.
            dirflack: also copy the original files (default: True).
                WARNING: if the directory contains a .dockerignore,
                automatic copying via this flag is not possible; copy the
                files manually with the copy_in method.
            delexpose: drop EXPOSE lines (default: True).
            start: the added product is the starting base image, so its
                FROM line is kept (default: False).
        """
        pathlist = pathlist or []
        if not pathlist:
            stdout.write("\n")
            stdout.write("\nSubverzeichnis zum Dockerfile: \n\n")
            dockerfile_path = stdin.readline()[:-1]
            stdout.write("\n")
        else:
            dockerfile_path = self.path2file(pathlist + ["Dockerfile"])
        # Context manager guarantees the handle is closed; the local was
        # also renamed since 'file' shadowed the builtin.
        with open(dockerfile_path, "r") as fh:
            for line in fh:
                if line.strip().startswith("FROM") and not start:
                    continue
                if line.strip().startswith("EXPOSE") and delexpose:
                    continue
                self.dockerfile.append(line)
        if dirflack:
            self.copy_in(pathlist, autoadd=True)

    def add_uwsgi_product(self, conf_dir, initfile="production.ini", cmd="pserve /production.ini", nocash=True, version=True):
        """Add a uwsgi/pyramid product: copy its ini file and emit the
        Dockerfile lines that install the required pip modules.

        Parameters:
            conf_dir: path elements of the directory holding the ini file.
            initfile: ini file name (default "production.ini").
            cmd: container CMD line.
            nocash: insert a random RUN echo line to bust the build cache.
            version: derive the image tag from the main module's version.
        """
        self.copy_in(conf_dir + [initfile])
        iniconf = ConfigParser()
        iniconf.read(initfile)
        # Bug fix: the main module used to be appended twice; the duplicate
        # was only hidden by the set() dedup below. Append it once.
        main_modul = self.rawmodul_to_modul(iniconf['app:main']['use'])
        modul_list = [main_modul]
        try:
            modul_includes = iniconf['app:main']['pyramid.includes']
            for modul in modul_includes.split("\n"):
                modul_list.append(self.rawmodul_to_modul(modul))
        except KeyError:
            pass  # no pyramid.includes entry — nothing extra to install

        # Deduplicate while keeping first-seen order.
        modul_list = sorted(set(modul_list), key=modul_list.index)
        if "" in modul_list:
            modul_list.remove("")

        self.dockerfile.append("ADD %s /\n" % initfile)
        self.dockerfile.append("RUN python -VV; pip -V; uname -a\n")
        if nocash:
            self.dockerfile.append("RUN echo %i\n" % (randint(1, 100000)))
        self.dockerfile.append("RUN pip --disable-pip-version-check --no-cache-dir --no-color install %s\n" % (" ".join(modul_list)))
        self.dockerfile.append("CMD %s\n" % cmd)

        if version:
            self.tag = "%s:%s" % (main_modul, self.get_modul_version(main_modul))

    def add_betriebsystem_clean(self, base):
        # Build a single RUN instruction chaining all configured cleanup
        # commands for the given base OS (config options "<base>_1" .. "<base>_9").
        text = "RUN"
        for i in range(1, 10):
            if config.has_option("betriebsystem", "%s_%i" % (base, i)):
                # Join each command with a literal backslash-newline
                # continuation followed by '&&'.
                text = "%s %s \ \n &&" % (text, config.get("betriebsystem", "%s_%i" % (base, i)))
        # Strip the trailing " \ \n &&" (7 characters) left by the last
        # loop iteration before emitting the line.
        self.line_add(text[:-7])

    def add_healthcheck(self, command, interval=config.getint("variable", "healthckeck_interval"),
                        timeout=config.getint("variable", "healthcheck_timeout"), httpbase="NONE"):
        """Insert a HEALTHCHECK line into the Dockerfile.

        Parameters:
            command: test command, or URL of the test page.
            interval: in seconds. NOTE(review): this default (and timeout's)
                is read from config once, at class definition time.
            timeout: in seconds.
            httpbase: flag whether the command is a shell command or a URL.
        """
        command = self.http_command(httpbase, command)
        self.line_add("HEALTHCHECK --interval=%is --timeout=%is CMD %s || exit 1" % (interval, timeout, command))

    # Dockerfile abarbeiten Methoden

    def start_dockerfile(self, image="scratch", fromreg=True):
        """Generate the Dockerfile header (FROM line) and pre-pull the image.

        Parameters:
            image: tag of the Docker image for the FROM line, or which base
                operating system the image should be built on.
            fromreg: prefix the image with the configured registry.
        """
        image_list = image.split(":")
        if fromreg and image != "scratch":
            image = "%s/%s" % (self.registry, image)
            image_list[0] = "%s/%s" % (self.registry, image_list[0])
        if image != "scratch":
            # Bug fix: an image without an explicit ':tag' previously raised
            # IndexError here; default to "latest" as docker itself does.
            tag = image_list[1] if len(image_list) > 1 else "latest"
            self.docker_deamon.pull(image_list[0], tag=tag)
        self.dockerfile.append("FROM %s\n" % image)
        chdir(self.workdir)

    def write_dockerfile(self, nogpg=False):
        """Write the generated Dockerfile into the work directory.

        Parameter:
            nogpg: skip lines containing gpg checks taken over from the
                original Dockerfile.
        """
        # Bug fix: the original handle was never closed (resource leak);
        # the context manager guarantees it is.
        with open(pjoin(self.dockerdir, self.dockername), "w") as fh:
            for line in self.dockerfile:
                # With nogpg, drop lines that fetch signatures or call gpg.
                if nogpg and (line.find(".asc") > -1 or line.find(" gpg ") > -1):
                    continue
                fh.writelines(line)

    def build_image(self, tag=None, nocache=False):
        """Build the Docker image (retagging the old version beforehand).

        Parameters:
            tag: image tag; asked for interactively when not set.
            nocache: build without using the Docker layer cache.
        """
        import json  # local import: only needed to parse the build stream

        self.tag = tag or self.tag
        if self.tag is None:
            stdout.write("\n")
            stdout.write("Docker-Image Bezeichnung: \n\n")
            self.tag = stdin.readline()[:-1]
            stdout.write("\n")
        if self.existst_tag(self.tag):
            # Preserve the previous image under the test tag.
            self.retag_and_push(self.tag, "%s:%s" % (self.tag_to_rep(), self.ttag), False)

        # Security fix: the build stream yields JSON documents; json.loads
        # replaces the previous eval(), which executed data coming from an
        # external process. The throwaway list was dropped as well — the
        # stream is iterated directly.
        for raw in self.docker_deamon.build(self.dockerdir, self.tag, dockerfile=self.dockername, rm=True, nocache=nocache):
            line = json.loads(raw)
            if "stream" in line:
                print(line["stream"][:-1])
            if "error" in line:
                print(line["error"])
                exit(1)
        if not self.existst_tag("%s:%s" % (self.tag_to_rep(), self.ttag), self.tag, True):
            self.retag_and_push("%s:%s" % (self.tag_to_rep(), self.ttag), "%s:%s" % (self.tag_to_rep(), self.otag), False)
        if self.existst_tag("%s:%s" % (self.tag_to_rep(), self.ttag)):
            call(self.dockerrmi + ["%s:%s" % (self.tag_to_rep(), self.ttag)])

    def test_image(self, cmd, waittime=config.getint("variable", "test_waittime"), noservice=False, toreg=True, httpbase=None):
        """Run a test command inside the freshly built image.

        Parameters:
            cmd: test command, or a URL when httpbase selects an HTTP check.
            waittime: seconds to wait before testing, giving the service
                time to start (default comes from the config file).
            noservice: the image is not a service image; start an idle shell
                instead of the image's default entrypoint.
            toreg: push the tested image to the registry on success.
            httpbase: key in the [healthttp] config section used to wrap cmd
                into an HTTP check command; None/"NONE" leaves cmd unchanged.
        """
        httpbase = httpbase or "NONE"
        cmd = self.http_command(httpbase, cmd)
        if noservice:
            # Non-service image: keep the container alive with an interactive shell.
            container = self.docker_deamon.create_container(image=self.tag, name="autotest_%s" % (self.tag.split(":")[0]), command="/bin/sh", tty=True)
        else:
            container = self.docker_deamon.create_container(image=self.tag, name="autotest_%s" % (self.tag.split(":")[0]), detach=True)
        self.docker_deamon.start(container["Id"])
        # Progress bar while waiting for the containerized service to come up.
        for _ in tqdm(range(waittime), ascii=True, desc="Image Test in"):
            sleep(1)
        try:
            exec_instance = self.docker_deamon.exec_create(container["Id"], cmd, tty=True)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed when the container has died.
            stdout.write("ERROR:\nImage ist abgestuerzt\n")
            self.image_fail()
            return False
        out_text = self.docker_deamon.exec_start(exec_instance["Id"])
        success = self.docker_deamon.exec_inspect(exec_instance["Id"])["ExitCode"] <= 0
        if not success:
            stdout.write("ERROR:\n%s\n" % out_text)
        self.docker_deamon.stop(container["Id"])
        self.docker_deamon.remove_container(container["Id"])
        if success:
            self.to_registry(push=toreg)
        else:
            self.image_fail()
            exit(1)

    # Registry methods

    def to_registry(self, reg=None, push=True):
        """Re-tag a Docker image for the registry, moving the previous
        version to the "old" tag, and optionally push the results.

        Parameters:
            reg: registry URL (defaults to self.registry).
            push: whether the re-tagged images are pushed to the registry.
        """
        registry = reg or self.registry
        repository = self.tag_to_rep()

        # Preserve the previous version under the "old" tag first.
        old_tag = "%s:%s" % (repository, self.otag)
        if self.existst_tag(old_tag):
            self.retag_and_push(old_tag, "%s/%s" % (registry, old_tag), push)

        targets = ["%s/%s" % (registry, self.tag),
                   "%s/%s:%s" % (registry, repository, self.ltag)]
        if self.production:
            targets.append("%s/%s:%s" % (registry, repository, self.ptag))
        for target in targets:
            self.retag_and_push(self.tag, target, push)

    def retag_and_push(self, oldtag, newtag, push=True):
        """Apply a new tag to an existing image and optionally push it.

        Parameters:
            oldtag: existing tag.
            newtag: desired tag.
            push: whether the newly tagged image is pushed to the registry.
        """
        call(self.dockertag + [oldtag, newtag])
        if not push:
            return
        call(self.dockerpush + [newtag])

    # Docker image helper methods

    def existst_tag(self, tag, stag=None, test_id=False):
        """Check whether a tag exists, or whether two tags share an image ID.

        Parameters:
            tag: tag to look for.
            stag: tag compared against `tag` when test_id is set.
            test_id: False -> plain existence check of `tag`;
                     True  -> do `tag` and `stag` point at the same image ID?
        """
        stag = stag or ""
        # Distinct sentinels that can never equal a real ID nor each other.
        found_id, compare_id = "0", "1"
        for image in self.docker_deamon.images(name="%s" % (self.tag_to_rep(tag))):
            repo_tags = image["RepoTags"]
            if repo_tags is None:
                continue  # dangling image, nothing to match against
            for repo_tag in repo_tags:
                if repo_tag == tag:
                    if not test_id:
                        return True
                    found_id = image["Id"]
                if repo_tag == stag:
                    compare_id = image["Id"]
                if found_id == compare_id:
                    return True
        return False

    def image_fail(self):
        """Mark the current build as failed and roll back to the old image."""
        # Keep the broken image reachable under the fail tag for inspection.
        call(self.dockertag + [self.tag, "%s:%s" % (self.tag_to_rep(), self.ftag)])
        old = "%s:%s" % (self.tag_to_rep(), self.otag)
        if self.existst_tag(old):
            # Restore the previous version under the main tag, drop "old".
            self.retag_and_push(old, self.tag, False)
            call(self.dockerrmi + [old])

    # String reformatting methods

    @staticmethod
    def http_command(httpbase, cmd):
        """Wrap cmd into an HTTP check command when httpbase names an entry
        in the [healthttp] config section; otherwise return cmd unchanged."""
        if not config.has_option("healthttp", httpbase):
            return cmd
        return "%s %s" % (config.get("healthttp", httpbase), cmd)

    @staticmethod
    def path2file(pathlist):
        """Methode um aus einer Liste von Path Elementen ein Path-string zu erstellen\n\
        Parameter: Liste aus Path Elementen"""
        file = ""
        for item in pathlist:
            file = pjoin(file, item)
        return file

    def tag_to_rep(self, tag=None):
        """Return the repository part of a tag (everything before the last colon).

        Parameters:
            tag: a tag string (defaults to self.tag).
        """
        tag = tag or self.tag
        # Unlike the previous ":".join(split[:-1]) variant, a tag without a
        # colon is returned unchanged instead of collapsing to "".
        if ":" not in tag:
            return tag
        return tag.rsplit(":", 1)[0]

    @staticmethod
    def rawmodul_to_modul(rawmodul):
        rawmodul = rawmodul.split(":", 1)
        if len(rawmodul) > 1:
            rawmodul = rawmodul[1]
        else:
            rawmodul = rawmodul[0]
        rawmodul = rawmodul.split("#", 1)[0]
        rawmodul = rawmodul.split(".", 1)[0]
        return rawmodul

    def get_modul_version(self, modul):
        """Query the pip index for a module and return its version string
        (the text between the first pair of parentheses in the output)."""
        raw = check_output(self.pipsearch + [self.pippurl] + [modul]).decode('utf-8')
        return raw.split("(", 1)[1].split(")", 1)[0]
Exemplo n.º 9
0
    def snapshot_load(self, snapshots=(), volumes=()):
        """Restore project volumes from the newest requested snapshot.

        Returns the name of the restored snapshot, or None when none of the
        requested snapshots is known.
        """
        candidates = list(set(self.snapshot_ls()) & set(snapshots))
        client = APIClient()
        # Make sure the rsync helper image is available locally.
        try:
            client.inspect_image('iamdork/rsync')
        except APIError:
            client.pull('iamdork/rsync')
        if not candidates:
            return None
        name = candidates[-1]

        snapshot = '%s/%s' % (self.snapshot, name)
        if not os.path.isdir(snapshot):
            log.error("Snapshot %s of project %s doesn't exist." % (name, self.project))
            return

        for volume in volumes:
            log.info("Restoring volume %s from %s/%s." % (volume, snapshot, volume))
            sync = client.create_container(
                image='iamdork/rsync',
                volumes=['/destination', '/source'],
                host_config=client.create_host_config(binds=[
                    '%s/%s:/source' % (snapshot, volume),
                    '%s:/destination' % volumes[volume].full_name
                ]),
            )
            try:
                client.start(sync)
                # Poll until the rsync container has finished copying.
                while client.inspect_container(sync)['State']['Running']:
                    time.sleep(0.5)
            finally:
                client.remove_container(sync)
        return name
Exemplo n.º 10
0
    def snapshot_save(self, snapshots=(), volumes=()):
        """Save the given volumes into each of the named snapshots.

        Parameters:
            snapshots: snapshot names to write.
            volumes: volume-name mapping; values expose .full_name
                (presumably compose volume objects -- confirm at caller).
        """
        client = APIClient()
        # Ensure the rsync helper image is present once, instead of checking
        # again for every volume (consistent with snapshot_load/snapshot_rm).
        try:
            client.inspect_image('iamdork/rsync')
        except APIError:
            client.pull('iamdork/rsync')

        for name in snapshots:
            snapshot = '%s/%s' % (self.snapshot, name)

            for v in volumes:
                log.info("Saving volume %s to %s/%s." % (v, snapshot, v))
                sync = client.create_container(
                    image='iamdork/rsync',
                    volumes=['/destination', '/source'],
                    cpu_shares=256,
                    host_config=client.create_host_config(binds=[
                        '%s/%s:/destination' % (snapshot, v),
                        '%s:/source' % volumes[v].full_name
                    ]),
                )

                try:
                    client.start(sync)
                    # Wait for the rsync container to finish copying.
                    while client.inspect_container(sync)['State']['Running']:
                        time.sleep(0.5)
                finally:
                    client.remove_container(sync)
Exemplo n.º 11
0
    def snapshot_rm(self, snapshots=()):
        """Delete the named snapshots via a helper container, yielding the
        name of every snapshot that was removed."""
        client = APIClient()
        # Ensure the helper image used for deletion is available locally.
        try:
            client.inspect_image('alpine:3.4')
        except APIError:
            client.pull('alpine:3.4')

        for name in snapshots:
            path = '%s/%s' % (self.snapshot, name)
            if not os.path.isdir(path):
                log.error("Snapshot %s of project %s doesn't exist." % (name, self.project))
                continue

            # Run the delete inside a container with the snapshot root
            # bind-mounted at /snapshots.
            remover = client.create_container(
                command='rm -rf /snapshots/%s' % name,
                image='alpine:3.4',
                volumes=['/snapshots'],
                host_config=client.create_host_config(binds=[
                    '%s:/snapshots' % self.snapshot,
                ]),
            )

            try:
                client.start(remover)
                while client.inspect_container(remover)['State']['Running']:
                    time.sleep(0.5)
            finally:
                client.remove_container(remover)
            yield name