    def test_creates_volume(self):
        results = []
        # Check volumes are created for the JSON definition first
        containers = fileparser.get_containers(self.docker_def_json)
        for container in containers:
            # Remove existing volume mappings from the head container
            container.volumes = [] if container.name == 'head' else container.volumes

        conts, var = add_networking(containers, self.write_dir)
        for container in conts:
            if container.name.startswith('head'):
                # Check volumes were created
                created = self.run_check(container.volumes, isjson=True)
                results.append(created and len(container.volumes) == 1)

        # Check volume is created for the list definition
        containers = fileparser.get_containers(self.docker_def_list)
        for container in containers:
            # Remove existing volume mappings from the head container
            container.volumes = [] if container.name == 'head' else container.volumes

        conts, var = add_networking(containers, self.write_dir)
        for container in conts:
            if container.name.startswith('head'):
                # Check volumes were created
                created = self.run_check(container.volumes, islist=True)
                results.append(created and len(container.volumes) == 1)

        assert all(item is True for item in results) and len(results) == 2
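    # fileparser.get_containers returns Container objects that are not defined
    # in this listing. The attributes these tests touch suggest roughly this
    # shape (a sketch; the constructor signature is an assumption):
    #
    # class Container:
    #     def __init__(self, name, volumes=None, environment_vars=None):
    #         self.name = name
    #         self.volumes = volumes or []
    #         self.environment_vars = environment_vars or []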
    def test_check_json_volumes(self):
        containers = fileparser.get_containers(self.docker_def_json)
        conts, var = add_networking(containers, self.write_dir)
        # Clean up the generated hostfile.json
        os.remove(self.write_dir + "hostfile.json")
        for container in conts:
            if 'head' in container.name:
                result = self.run_check(container.volumes, isjson=True)
                assert result is True and len(container.volumes) == 2 and var is True
    def test_check_json(self):
        containers = fileparser.get_containers(self.docker_def_json)
        add_networking(containers, self.write_dir)

        with open(self.write_dir + 'hostfile.json', 'r') as f:
            data = json.load(f)

        passes = []
        for val in data['green']:
            passes.append(val.startswith("green"))

        for val in data['red']:
            passes.append(val.startswith("red"))

        # Check that all values are true, and that the lengths are correct
        assert all(item is True for item in passes) \
            and len(data['red']) == 1 and len(data['green']) == 2
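        # For reference, the assertions above imply hostfile.json content of
        # roughly this shape (values are illustrative, since add_networking
        # randomises container names):
        #
        #     {"green": ["green-ab12", "green-cd34"], "red": ["red-ef56"]}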
    def test_check_links(self):
        containers = fileparser.get_containers(self.docker_def_envs)
        conts, var = add_networking(containers, self.write_dir)

        for container in conts:
            if 'tail' in container.name:
                tail = container.name

        for container in conts:
            if 'head' in container.name:
                result = self.check_envs(container.environment_vars, tail)
                assert result is True and len(container.environment_vars) == 2 and var is True
    def test_creates_env(self):
        containers = fileparser.get_containers(self.docker_def_envs)

        for container in containers:
            # Remove all environment variables from the head container
            if container.name == 'head':
                container.environment_vars = []

        conts, var = add_networking(containers, self.write_dir)
        for container in containers:
            # Get the slave container's full name
            if container.name.startswith('tail'):
                slave = container.name

        for container in conts:
            if container.name.startswith('head'):
                # Check environment variables
                result = self.check_envs(container.environment_vars, slave)
                assert len(container.environment_vars) == 1 and result
    def test_check_list(self):
        containers = fileparser.get_containers(self.docker_def_list)
        conts, var = add_networking(containers, self.write_dir)

        expected = []
        for container in conts:
            if container.name.startswith('tail'):
                expected.append(container.name)

        with open(self.write_dir + 'hostfile', 'r') as f:
            lines = f.readlines()

        results = []
        for line in lines:
            results.append(line.rstrip("\n") in expected)

        # Check all results are true, and that expected and results have the
        # same number of entries
        assert all(item is True for item in results) and len(expected) == len(results)
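    # run_check and check_envs are referenced throughout these tests but are
    # not shown in this listing. A minimal sketch of what such helpers might
    # look like, inferred purely from the call sites above (the substring
    # checks and mapping formats are assumptions):
    def run_check(self, volumes, isjson=False, islist=False):
        # Assumed: add_networking maps the generated hostfile into each
        # container, so at least one volume entry should mention it
        target = 'hostfile.json' if isjson else 'hostfile'
        return any(target in volume for volume in volumes)

    def check_envs(self, environment_vars, slave):
        # Assumed: the head container receives an env var naming each slave
        return any(slave in var for var in environment_vars)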
Example #7
    def __init__(self, containers, hosts):
        """
        :param containers: List of container objects
        :param hosts: List of assigned hosts
        """
        self.hosts = hosts

        if self.hosts is None:
            logger.debug(
                "Retrieving assigned hosts with environment variable PBS_NODEFILE"
            )
            self.hosts = get_hosts(os.environ.get("PBS_NODEFILE"))

        self.containers = containers
        # Get this host's name so we can tell local containers from remote
        # ones; there is no need to SSH into the current machine to execute
        self.hostname = socket.gethostname()
        self.int_container = None

        # Numeric UID of the current user (output of `id -u`)
        self.user = os.popen("id -u $USER").read().replace("\n", "")
        self.job_id = os.environ.get('PBS_JOBID')
        self.work_dir = os.environ.get("PBS_O_WORKDIR")

        cgroups_dir = settings.cgroup_dir

        # Commands used to read the job's cgroup resource limits on each host
        cpu_dir = cgroups_dir + '/cpu/torque/' + self.job_id
        cpuset_dir = cgroups_dir + '/cpuset/torque/' + self.job_id
        memory_dir = cgroups_dir + '/memory/torque/' + self.job_id

        self.cpu_shares = ['cat', cpu_dir + '/cpu.shares']
        self.cpus = ['cat', cpuset_dir + '/cpuset.cpus']
        self.memory = ['cat', memory_dir + '/memory.limit_in_bytes']
        self.memory_swappiness = ['cat', memory_dir + '/memory.swappiness']
        self.memory_swap_limit = ['cat', memory_dir + '/memory.memsw.limit_in_bytes']
        self.kernel_memory_limit = ['cat', memory_dir + '/memory.kmem.limit_in_bytes']

        context = os.path.realpath(__file__)
        path = re.sub(r'dgrid/scheduling/schedulers/Torque6/SSHExecutor\.py.*',
                      "", context)
        self.script_dir = path + "/dgrid-scripts/"
        logger.debug("script directory is: " + self.script_dir)

        # Strip out current host from list
        try:
            logger.debug(self.hosts)
            self.hosts.remove(self.hostname)
            logger.debug(self.hosts)
        except ValueError:
            raise HostValueError(
                'Hostname of execution host not in assigned hosts list')

        # add_networking randomises container names. If networking has been
        # defined by the user, the required environment variables and volume
        # mappings will be added to each container.
        self.containers, self.create_net = add_networking(
            self.containers, self.work_dir)
        self.network_name = ""

        # Get the interactive container, and remove it from the list.
        # Iterate over a copy, since removing from a list while iterating
        # over it can skip elements.
        for container in list(self.containers):
            if container.interactive == 'True':
                self.int_container = container
                self.containers.remove(self.int_container)

        if self.int_container is None:
            raise InteractiveContainerNotSpecified(
                'An interactive container must be specified for logging')

        self.local_run = False
        self.local_pid = None
        # Make Fabric raise a RemoteExecutionError when remote commands fail,
        # instead of aborting
        env.abort_exception = RemoteExecutionError
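# get_hosts is imported from the project's utilities and is not shown in this
# listing. A minimal sketch of the behaviour the constructor relies on,
# assuming a standard PBS nodefile (one hostname per line, repeated once per
# allocated core):
def get_hosts(nodefile):
    with open(nodefile) as f:
        hosts = [line.strip() for line in f if line.strip()]
    # Deduplicate while preserving allocation order
    return list(dict.fromkeys(hosts))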