Example #1
 def _run_docker_container(self, container_name, working_dir, command):
     """
     Triggers the simulation run in a separate Docker container.
     :param container_name: the container's name
     :param working_dir: working directory on your host's file system
     :param command: container command (eg. name of script, cli arguments ...) Must match to your docker entry point.
     """
     try:
         system_platform = platform.system()
         if system_platform == "Windows":
             self._docker_client.containers.run(
                 image=self._docker_image,
                 command=command,
                 mounts=[
                     Mount(target='/mnt/data',
                           source=str(working_dir.resolve()),
                           type='bind')
                 ],
                 # working_dir is intentionally not set on Windows
                 name=container_name,
                 environment={
                     # Add environment variables here if needed
                 },
                 log_config=LogConfig(type=LogConfig.types.JSON,
                                      config={
                                          'max-size': '500m',
                                          'max-file': '3'
                                      }))
         else:
             user_id = os.getuid()
             self._docker_client.containers.run(
                 image=self._docker_image,
                 command=command,
                 mounts=[
                     Mount(target='/simulation',
                           source=str(working_dir.resolve()),
                           type='bind')
                 ],
                 working_dir='/simulation',
                 name=container_name,
                 environment={
                     # Add environment variables here if needed
                 },
                 log_config=LogConfig(type=LogConfig.types.JSON,
                                      config={
                                          'max-size': '500m',
                                          'max-file': '3'
                                      }),
                 user=user_id)
     except DockerException as e:
         logger.warning(f'Could not run {container_name}: {e}.')
     finally:
         try:
             self.write_container_logs_and_remove_it(
                 container_name=container_name, working_dir=working_dir)
         except NotFound:
             logger.warning(
                 f'Cannot save logs for {container_name}: the container does not exist'
             )
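The two branches above duplicate the containers.run call; only the mount target, working_dir, and user differ. A minimal deduplicated sketch, assuming the same attributes as the method above:

 def _run_docker_container(self, container_name, working_dir, command):
     is_windows = platform.system() == "Windows"
     run_kwargs = dict(
         image=self._docker_image,
         command=command,
         name=container_name,
         mounts=[Mount(target='/mnt/data' if is_windows else '/simulation',
                       source=str(working_dir.resolve()),
                       type='bind')],
         log_config=LogConfig(type=LogConfig.types.JSON,
                              config={'max-size': '500m', 'max-file': '3'}),
     )
     if not is_windows:
         # Linux/macOS: run inside the mount as the host user
         run_kwargs.update(working_dir='/simulation', user=os.getuid())
     self._docker_client.containers.run(**run_kwargs)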
Example #2
def run_container(name,
                  image,
                  command=None,
                  environment=None,
                  ro=None,
                  rw=None,
                  links=None,
                  detach=True,
                  volumes_from=None,
                  port_bindings=None,
                  log_syslog=False):
    """
    Wrapper for docker create_container, start calls

    :param log_syslog: bool flag to redirect container's logs to host's syslog

    :returns: container info dict or None if container couldn't be created

    Raises PortAllocatedError if container couldn't start on the
    requested port.
    """
    binds = ro_rw_to_binds(ro, rw)
    log_config = LogConfig(type=LogConfig.types.JSON)
    if log_syslog:
        log_config = LogConfig(type=LogConfig.types.SYSLOG,
                               config={'syslog-tag': name})

    host_config = HostConfig(version=MINIMUM_API_VERSION,
                             binds=binds,
                             log_config=log_config,
                             links=links,
                             volumes_from=volumes_from,
                             port_bindings=port_bindings)

    c = _get_docker().create_container(
        name=name,
        image=image,
        command=command,
        environment=environment,
        volumes=binds_to_volumes(binds),
        detach=detach,
        stdin_open=False,
        tty=False,
        ports=list(port_bindings) if port_bindings else None,
        host_config=host_config)
    try:
        _get_docker().start(container=c['Id'])
    except APIError as e:
        if 'address already in use' in e.explanation:
            try:
                _get_docker().remove_container(name, force=True)
            except APIError:
                pass
            raise PortAllocatedError()
        raise
    return c
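A hypothetical call (image name, port, and environment values are placeholders); log_syslog=True tags the container's syslog output with its name, as shown above:

info = run_container(
    name="web-1",
    image="example/web:latest",
    environment={"ENV": "staging"},
    port_bindings={8000: 8000},
    log_syslog=True,
)
print(info["Id"])  # create_container returns a dict containing the container Id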
Example #3
def get_log_config(logging_dict):
    log_driver = logging_dict.get('driver', "") if logging_dict else ""
    log_options = logging_dict.get('options', None) if logging_dict else None
    return LogConfig(
        type=log_driver,
        config=log_options
    )
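For example, with a compose-style logging mapping (values here are illustrative):

logging_dict = {
    "driver": "json-file",
    "options": {"max-size": "10m", "max-file": "3"},
}
log_config = get_log_config(logging_dict)  # LogConfig(type='json-file', ...)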
Example #4
def gen_director_container_params(client: DockerClient, site_id: int,
                                  site_data: Dict[str, Any]) -> Dict[str, Any]:
    extra_env: Dict[str, str] = {}

    params = gen_director_shared_params(client, site_id, site_data)

    env = params.pop("env", [])
    env.extend("{}={}".format(name, val) for name, val in extra_env.items())

    params.update({
        "working_dir": "/site",
        "nano_cpus": convert_cpu_limit(site_data["resource_limits"]["cpus"]),
        "mem_limit": convert_memory_limit(
            site_data["resource_limits"]["mem_limit"]),
        "privileged": False,
        "read_only": False,
        "environment": env,
        "log_config": LogConfig(
            type=LogConfig.types.JSON,
            config={"max-size": "1k", "max-file": "1"},
        ),
        "network": "director-sites",
    })

    return params
Example #5
 def _run_kwargs(self):
     return {
         "init": True,
         "network_disabled": True,
         "mem_limit": f"{self._memory_limit}g",
         # Set to the same as mem_limit to avoid using swap
         "memswap_limit": f"{self._memory_limit}g",
         "cpu_period": settings.COMPONENTS_CPU_PERIOD,
         "cpu_quota": settings.COMPONENTS_CPU_QUOTA,
         "cpu_shares": settings.COMPONENTS_CPU_SHARES,
         "cpuset_cpus": self._cpuset_cpus,
         "runtime": settings.COMPONENTS_DOCKER_RUNTIME,
         "cap_drop": ["all"],
         "security_opt": ["no-new-privileges"],
         "pids_limit": settings.COMPONENTS_PIDS_LIMIT,
         "log_config": LogConfig(type=LogConfig.types.JSON,
                                 config={"max-size": "1g"}),
     }
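These kwargs are presumably unpacked into a containers.run call; a minimal sketch (the client, image name, and `service` instance are placeholders):

import docker

client = docker.from_env()
container = client.containers.run(
    "example/algorithm:latest",   # placeholder image
    detach=True,
    **service._run_kwargs(),      # `service` stands in for an instance of the class above
)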
Example #6
 def test_create_host_config_obj_logconfig(self):
     obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
     config = create_host_config(version=DEFAULT_DOCKER_API_VERSION,
                                 log_config=obj)
     self.assertIn('LogConfig', config)
     self.assertTrue(isinstance(config['LogConfig'], LogConfig))
     self.assertEqual(obj, config['LogConfig'])
Example #7
 def test_create_host_config_obj_logconfig(self):
     obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
     config = create_host_config(version=DEFAULT_DOCKER_API_VERSION,
                                 log_config=obj)
     assert 'LogConfig' in config
     assert isinstance(config['LogConfig'], LogConfig)
     assert obj == config['LogConfig']
Example #8
    async def process_image(self, image, ports):
        '''
        Decide whether the image should be used,
        and run a container if so.
        '''
        self.logger.info('')
        if self.docker_mode == 'no':
            print("not starting container")
            return

        print("starting container")
        log_params = {'max-size': '10m', 'max-file': '3'}
        log_config = LogConfig(type=LogConfig.types.JSON, config=log_params)
        container = self.docker_client.containers.run(image,
                                                      detach=True,
                                                      log_config=log_config,
                                                      ports=ports)

        self.logger.info('container {} {} {}'.format(container.short_id,
                                                     container.name,
                                                     container.status))

        status_execution = DeploymentStatusExecution.closed
        status_result = DeploymentStatusResult.success

        await self.ddi.deploymentBase[self.action_id].feedback(
            status_execution, status_result, ['Install completed'])
Example #9
    def __init__(
        self,
        *,
        job_id: str,
        job_class: Model,
        exec_image: File,
        exec_image_sha256: str,
    ):
        super().__init__()
        self._job_id = job_id
        self._job_label = f"{job_class._meta.app_label}-{job_class._meta.model_name}-{job_id}"
        self._job_class = job_class
        self._exec_image = exec_image
        self._exec_image_sha256 = exec_image_sha256

        client_kwargs = {"base_url": settings.COMPONENTS_DOCKER_BASE_URL}

        if settings.COMPONENTS_DOCKER_TLSVERIFY:
            tlsconfig = TLSConfig(
                verify=True,
                client_cert=(
                    settings.COMPONENTS_DOCKER_TLSCERT,
                    settings.COMPONENTS_DOCKER_TLSKEY,
                ),
                ca_cert=settings.COMPONENTS_DOCKER_TLSCACERT,
            )
            client_kwargs.update({"tls": tlsconfig})

        self._client = docker.DockerClient(**client_kwargs)

        self._labels = {"job": f"{self._job_label}", "traefik.enable": "false"}

        self._run_kwargs = {
            "init": True,
            "network_disabled": True,
            "mem_limit": settings.COMPONENTS_MEMORY_LIMIT,
            # Set to the same as mem_limit to avoid using swap
            "memswap_limit": settings.COMPONENTS_MEMORY_LIMIT,
            "cpu_period": settings.COMPONENTS_CPU_PERIOD,
            "cpu_quota": settings.COMPONENTS_CPU_QUOTA,
            "cpu_shares": settings.COMPONENTS_CPU_SHARES,
            "cpuset_cpus": self.cpuset_cpus,
            "runtime": settings.COMPONENTS_DOCKER_RUNTIME,
            "cap_drop": ["all"],
            "security_opt": ["no-new-privileges"],
            "pids_limit": settings.COMPONENTS_PIDS_LIMIT,
            "log_config": LogConfig(type=LogConfig.types.JSON,
                                    config={"max-size": "1g"}),
        }
Example #10
def create_nodes(clients, boot_ip, bootID, poet_ip, network_name):
    print("creating clients")
    tools.update_dict(
        params.client_params, **{
            "bootnodes": tools.node_string(bootID, boot_ip, BOOTSTRAP_PORT,
                                           BOOTSTRAP_PORT),
            "poet-server": '{0}:{1}'.format(poet_ip, POET_SERVER_PORT),
            "genesis-time": GENESIS_TIME.isoformat('T', 'seconds')
        })

    print("running clients with args: " +
          tools.dict_to_args(params.client_params))
    jsonbaseport = 9191
    grpcbaseport = 9081
    for name, pubkey in clients.items():
        tools.update_dict(params.client_params, **{"coinbase": pubkey})
        node = client.containers.run(
            node_image,
            log_config=LogConfig(type=LogConfig.types.FLUENTD,
                                 config={
                                     'tag': 'docker.{{.ID}}',
                                     'fluentd-sub-second-precision': 'true'
                                 }),
            volumes={
                os.path.abspath(GENESIS_ACCOUNTS): {
                    'bind': params.client_params["genesis-conf"],
                    'mode': 'rw'
                }
            },
            detach=True,
            ports={
                "9090": jsonbaseport,
                "9091": grpcbaseport
            },
            network=network_name,
            name=name,
            command=tools.dict_to_args(params.client_params))
        # client.networks.get(network_name).connect(node)
        containers.append({"cont": node})
        # idxes[node.name] = i
        print(tools.bcolors.OKYELLOW + "Client created " + node.name +
              " connect wallet to json: 127.0.0.1:" + str(jsonbaseport) +
              "/gRPC 127.0.0.1:" + str(grpcbaseport) + " to access this node" +
              tools.bcolors.ENDC)
        jsonbaseport += 1
        grpcbaseport += 1
    print("Finished creating clients")
Example #11
def run_container_gradle_syslog():
    command = "./gradlew build"
    container = client.containers.run(
        image="android_ci",
        command=command,
        log_config=LogConfig(type=LogConfig.types.SYSLOG, config={}),
        working_dir="/home/gradle/project/",
        volumes={
            '/home/vagrant/workspace/we_are_nutrition-android': {
                'bind': '/home/gradle/project/',
                'mode': 'rw'
            }
        },
        extra_hosts={"nexus.nespresso.com": "192.168.216.107"},
        detach=True)
    return container
Example #12
def create_poet(network_name, poet_ip, bootstrap_ip):
    print("Creating poet container")
    networking_config = advclient.create_networking_config(
        {network_name: advclient.create_endpoint_config(ipv4_address=poet_ip)})
    host_cfg = advclient.create_host_config(log_config=LogConfig(
        type=LogConfig.types.FLUENTD, config={'tag': 'docker.poet_{{.ID}}'}))
    tools.update_dict(params.poet_params,
                      **{"nodeaddr": '{0}:{1}'.format(bootstrap_ip, '9091')})
    p = advclient.create_container(poet_image,
                                   host_config=host_cfg,
                                   detach=True,
                                   networking_config=networking_config,
                                   command=tools.dict_to_args(
                                       params.poet_params))
    advclient.start(p['Id'])
    return client.containers.get(p['Id'])
Example #13
    def __init__(
        self,
        image,
        tag="latest",
        docker_client=None,
        name="",
        cleanup_policy=DockerCleanup.KEEP_FAILED,
        reuse_policy=DockerReuse.NEW_ONLY,
        detach=False,
        journal_tag="",
    ):
        if docker_client is None:
            self.client: docker.DockerClient = DockerClient.get()
        else:
            self.client: docker.DockerClient = docker_client
        self.image = f"{image}:{tag}"
        self.rep = image
        self.tag = tag
        self.user_home = str(Path.home())
        self.user_id = os.getuid()
        self.working_dir = str(Path.cwd())
        self.name = name
        self.journal_tag = journal_tag
        self.hostname = name
        self.detach = detach
        self.cleanupPolicy = cleanup_policy
        self.reuse_policy = reuse_policy
        self.run_args = {}
        self._container = None
        self._log_clb: Union[
            ContainerLogWriter, None
        ] = None  # callback to handle the container's log output

        # last call in init.
        self._apply_default_volumes()
        self._apply_default_environment()
        self.set_run_args()
        if self.journal_tag != "":
            self.set_log_driver(
                LogConfig(
                    type=LogConfig.types.JOURNALD, config={"tag": self.journal_tag}
                )
            )
Example #14
    def run(cls,
            image,
            name,
            ip,
            view,
            port='13800',
            repl_factor=2,
            command=None):
        container_obj = cls.docker_client.containers.run(
            image,
            name=name,
            detach=True,
            command=command,
            log_config=LogConfig(type=LogConfig.types.JSON),
            environment={
                'REPL_FACTOR': repl_factor,
                'VIEW': view,
                'ADDRESS': '{}:{}'.format(ip, port)
            })

        return cls(ip, view, repl_factor, container_obj)
Example #15
def create_bootstrap(coinbase, network_name, bootstrap_ip, poet_ip):
    print("Creating bootstrap container")
    networking_config = advclient.create_networking_config({
        network_name:
        advclient.create_endpoint_config(ipv4_address=bootstrap_ip)
    })
    tools.update_dict(
        params.bootstrap_params, **{
            "genesis-time": GENESIS_TIME.isoformat('T', 'seconds'),
            "coinbase": coinbase,
            "poet-server": '{0}:{1}'.format(poet_ip, POET_SERVER_PORT),
            "events-url": 'tcp://0.0.0.0:{0}'.format(EVENTS_PORT)
        })

    host_cfg = advclient.create_host_config(
        log_config=LogConfig(type=LogConfig.types.FLUENTD,
                             config={'tag': 'docker.{{.ID}}'}),
        port_bindings={
            9090: 9090,
            9091: 9091,
            EVENTS_PORT: EVENTS_PORT
        },
        binds={
            os.path.abspath(GENESIS_ACCOUNTS): {
                'bind': params.client_params["genesis-conf"],
                'mode': 'rw'
            }
        })
    print("bootstrap params:", tools.dict_to_args(params.bootstrap_params))
    bts = advclient.create_container(node_image,
                                     name="bootstrap",
                                     host_config=host_cfg,
                                     detach=True,
                                     networking_config=networking_config,
                                     ports=[9090, 9091, EVENTS_PORT],
                                     command=tools.dict_to_args(
                                         params.bootstrap_params))
    advclient.start(bts['Id'])
    return client.containers.get(bts['Id'])
Example #16
def spawner_hook(spawner):
    """Add some custom logic just before launching the user container"""

    spawner.environment = {
        'AUTHENTICATOR': spawner.userdata['authenticator'],
        'IOTLAB_LOGIN': spawner.userdata['username'],
        'IOTLAB_PASSWORD': spawner.userdata['password'],
        'IOTLAB_SITES': IOTLAB_SITES,
    }
    if IOTLAB_USE_CUSTOM_API_URL is True:
        spawner.environment.update({'IOTLAB_API_URL': IOTLAB_API_URL})

    # Directly jump in the training directory
    spawner.notebook_dir = '{}/training'.format(WORK_DIR)

    log_config = LogConfig(
        type=LogConfig.types.SYSLOG,
        config={
            "tag": volume_name_template.format(
                username=spawner.userdata['username'])
        })
    spawner.extra_host_config.update({"log_config": log_config})
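A minimal sketch of wiring this hook up in jupyterhub_config.py, assuming DockerSpawner is the spawner in use and that spawner.userdata is populated by a custom authenticator (both assumptions, taken from the snippet's attribute usage):

c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.Spawner.pre_spawn_hook = spawner_hook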
Example #17
 def create_container(self,
                      network,
                      ip,
                      name,
                      labels=None,
                      environment=None):
     lc = LogConfig(type=LogConfig.types.JSON, config={'max-size': '1g'})
     hc = self._client.create_host_config(
         binds=['/var/run:/var/run:rw', '/etc/simulator:/etc/simulator:rw'],
         log_config=lc)
     nc = self._client.create_networking_config(
         {network: self._client.create_endpoint_config(ipv4_address=ip)})
     container = self._client.create_container(self.my_image,
                                               host_config=hc,
                                               name=name,
                                               tty=True,
                                               hostname=name,
                                               detach=True,
                                               networking_config=nc,
                                               labels=labels,
                                               environment=environment)
     self._client.start(container=container.get('Id'))
     return container.get('Id')
Example #18
from docker.types import LogConfig
import docker
from services.docker_service import docker_low_level_client

log_configuration = LogConfig(type=LogConfig.types.GELF,
                              config={"gelf-address": "udp://127.0.0.1:12201"})

host_configuration = docker_low_level_client.create_host_config(
    log_config=log_configuration, port_bindings={5000: None})
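A hypothetical follow-up: the host config above would typically be passed to a low-level create_container call ("example/api:latest" is a placeholder image):

container = docker_low_level_client.create_container(
    image="example/api:latest",
    ports=[5000],
    host_config=host_configuration,
)
docker_low_level_client.start(container=container["Id"])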
Example #19
try:
    container = client.containers.get(FORWARDER_CONTAINER_NAME)
    print(f"Found existing container {container.id}")
    container.stop()
    container.remove()
except docker.errors.NotFound:
    pass

container = client.containers.run(
    CONTAINER_IMAGE,
    FLUENT_BIT_COMMAND,
    name=FORWARDER_CONTAINER_NAME,
    detach=True,
    #remove=True,
    log_config=LogConfig(type=LogConfig.types.JOURNALD),
    mounts=[
        Mount(source="/var/log/journal", target="/var/log/journal", type="bind", read_only=True),
        Mount(source="/etc/machine-id", target="/etc/machine-id", type="bind", read_only=True),
        Mount(source=data_mount["Source"], target="/data", type="bind", read_only=False),
    ],
    restart_policy={"Name": "always"},
    #user="******",
)
print(f"Created new container {container.id}")

while True:
    container = client.containers.get(container.id)
    assert container.status in ["running", "creating"]
    time.sleep(10)
Example #20
def start_docker_sandbox(host_to_deploy, uid, sid, wid, wname,
                         sandbox_image_name):
    """ Launch the docker run command remotely
    Parameters:
    host_to_deploy set(hostname, ip): IP is used to connect docker, the pair is given as extra host (/etc/host) to the launched container
    uid - user id, typically cleansed email address, e.g. jdoe_at_example_com
    sid - sandbox id
    wid - workflow id
    """
    ulimit_nofile = docker.types.Ulimit(name='nofile',
                                        soft=262144,
                                        hard=262144)
    ulimit_list = [ulimit_nofile]

    # set up the env variables
    env_vars = {}
    env_vars["MFN_HOSTNAME"] = host_to_deploy[0]
    env_vars["MFN_ELASTICSEARCH"] = os.getenv("MFN_ELASTICSEARCH")
    env_vars["MFN_QUEUE"] = "127.0.0.1:" + os.getenv("MFN_QUEUE").split(':')[1]
    env_vars["MFN_DATALAYER"] = host_to_deploy[0] + ":" + os.getenv(
        "MFN_DATALAYER").split(':')[1]
    env_vars["USERID"] = uid
    env_vars["SANDBOXID"] = sid
    env_vars["WORKFLOWID"] = wid
    env_vars["WORKFLOWNAME"] = wname
    endpoint_key = hashlib.sha256(str(time.time()).encode()).hexdigest()
    env_vars["MFN_ENDPOINT_KEY"] = endpoint_key
    env_vars["HTTP_PROXY"] = os.getenv("HTTP_PROXY")
    env_vars["HTTPS_PROXY"] = os.getenv("HTTPS_PROXY")
    env_vars["http_proxy"] = os.getenv("http_proxy")
    env_vars["https_proxy"] = os.getenv("https_proxy")
    env_vars["no_proxy"] = os.getenv("no_proxy")

    lc = LogConfig(type=LogConfig.types.JSON,
                   config={
                       "max-size": "50m",
                       "max-file": "5"
                   })

    success = False
    try:
        client = docker.DockerClient(base_url="tcp://" + host_to_deploy[1] +
                                     ":2375")  # use IP address
        success = True
    except Exception as exc:
        print("Error launching sandbox; can't connect to: " +
              host_to_deploy[1] + ":2375")
        print(traceback.format_exc())
        success = False

    if success:
        try:
            sandbox = client.containers.get(sid)
            sandbox.stop()
            sandbox.remove(force=True)
        except Exception as exc:
            pass

    if success:
        try:
            print("Starting sandbox docker container for: " + uid + " " + sid +
                  " " + wid + " " + sandbox_image_name)
            print("Docker daemon: " + "tcp://" + host_to_deploy[1] + ":2375" +
                  ", environment variables: " + str(env_vars))
            client.containers.run(
                sandbox_image_name,
                init=True,
                detach=True,
                ports={"8080/tcp": None},
                ulimits=ulimit_list,
                auto_remove=True,
                name=sid,
                environment=env_vars,
                extra_hosts={host_to_deploy[0]: host_to_deploy[1]},
                log_config=lc)
            # TEST/DEVELOPMENT: no auto_remove to access sandbox logs
            #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc)
        except Exception as exc:
            print("Error launching sandbox: " + str(host_to_deploy) + " " +
                  uid + " " + sid + " " + wid)
            print(traceback.format_exc())
            success = False
        finally:
            client.close()

    return success, endpoint_key
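A hypothetical call (hostname, IP, IDs, and image are placeholders; note the MFN_* environment variables must be set, since the function reads them with os.getenv):

success, endpoint_key = start_docker_sandbox(
    ("worker-1", "10.0.0.5"),    # (hostname, ip) pair
    "jdoe_at_example_com",
    "sandbox-1",
    "workflow-1",
    "my-workflow",
    "example/sandbox:latest",
)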
Example #21
    def run_job(self, job, gpu_ids=None, remove_working_dir=True):
        self._job = job

        job = self.job
        job_id = job['job_id']

        running_jobs[job_id] = job

        routing_map['job_id'][job_id] = my_url

        _extract_job_bundle_to_working_dir_if_not_exist(job_id)

        job['spec']['detach'] = True

        lc = LogConfig(type=LogConfig.types.JSON,
                       config={
                           'max-size': '1g',
                           'labels': 'atlas_logging'
                       })
        job['spec']['log_config'] = lc

        if len(job['spec']['image'].split(':')) < 2:
            job['spec']['image'] = job['spec']['image'] + ':latest'

        if gpu_ids:
            job['spec']['environment']["NVIDIA_VISIBLE_DEVICES"] = ",".join(
                gpu_ids)

        container = None
        try:
            job['start_time'] = time()
            self._container = self._client.containers.run(**job['spec'])
            container = self._container
            logging.info(f"[Worker {self._worker_id}] - Job {job_id} started")

        except Exception as e:
            logging.info(
                f"[Worker {self._worker_id}] - Job {job_id} failed to start " +
                str(e))
            job['logs'] = str(e)
            self.stop_job(timeout=0)
            return

        tracker_clients.running(job)

        try:
            return_code = container.wait()
            job['logs'] = container.logs()
            try:
                job['logs'] = job['logs'].decode()
            except (UnicodeDecodeError, AttributeError):
                pass
        except Exception as e:
            job['end_time'] = time()
            logging.info(
                f"[Worker {self._worker_id}] - Worker {self._worker_id} failed to reconnect to job {job_id}, killing job now"
            )

            self.stop_job(timeout=0)
        else:
            job['end_time'] = time()
            job['return_code'] = return_code
            logging.info(
                f"[Worker {self._worker_id}] - Job {job_id} finished with return code {return_code}"
            )

            if not return_code['StatusCode']:
                completed_jobs[job_id] = job
                tracker_clients.completed(job)
            else:
                failed_jobs[job_id] = job
                tracker_clients.failed(job)

            self._cleanup_job(container, job_id, remove_working_dir)

        finally:
            self._delete_running_job(job_id)
            if gpu_ids:
                self._unlock_gpus(gpu_ids)
Example #22
 def __init__(self):
     self.__docker_client = docker.from_env()
     self.__log_config = LogConfig(type=LogConfig.types.JSON,
                                   config={
                                       'max-size': '1g',
                                       'labels': 'status,apps'
                                   })
     self.__create_config = self.__docker_client.api.create_host_config(
         log_config=self.__log_config)
     self.__list_apps = list()
     self.__config_loader = ConfigLoader()
     self.__datas_default = self.__config_loader.get_datas_default()
     self.__apps_list = self.__config_loader.get_datas_apps()
     try:
         self.__py_docker_dir = getcwd()
         self.__domains = self.__datas_default['domains']
         self.__py_docker_configs = self.__py_docker_dir + self.__datas_default[
             'py_docker_configs']
         self.__py_docker_functions = self.__py_docker_dir + self.__datas_default[
             'py_docker_functions']
         self.__py_docker_templates_dir = self.__py_docker_dir + self.__datas_default[
             'py_docker_templates']
         for app in self.__apps_list:
             self.__backend = app['backend']
             if (self.__backend):
                 self.__list_apps.append(app)
         for app in self.__apps_list:
             self.__volumes_conteiner = []
             self.__volumes_binds = []
             self.__name = app['name']
             self.__base_image = app['base_image']
             self.__network = app['network']
             self.__ip = app['ip']
             self.__aliases = app['aliases']
             self.__image = app['image']
             self.__ports = app['ports']
             self.__port_bindings = loads(app['port_bindings'])
             self.__dockerfile_path = self.__py_docker_dir + app[
                 'dockerfile_path']
             self.__dockerfile = app['dockerfile']
             self.__tag = app['tag']
             self.__template = app['template']
             self.__template_file = app['template_file']
             self.__backend = app['backend']
             self.__volumes = app['volumes']
             self.__active = app['active']
             if (not self.__active):
                 stopped_conteiner = self.__stop_conteiner(self.__name)
                 if (stopped_conteiner):
                     continue
             for volume in self.__volumes:
                 volumes = volume.split(";")
                 volume_host = self.__dockerfile_path + volumes[0]
                 if (not path.exists(volume_host)):
                     mkdir(volume_host)
                 volume_host = volume_host + ":" + volumes[1]
                 self.__volumes_binds.append(volume_host)
                 if ":" in volumes[1]:
                     self.__volumes_conteiner.append(
                         volumes[1].split(":")[0])
                 else:
                     self.__volumes_conteiner.append(volumes[1])
             """
                 Create template.
             """
             try:
                 if (self.__template):
                     if (self.__create_dockerfile(
                             self.__name, self.__template_file,
                             self.__base_image,
                             self.__py_docker_templates_dir,
                             self.__dockerfile_path)):
                         print("\t[ OK ] Dockerfile created.")
                     if (not self.__backend):
                         self.__create_haproxy_cfg(
                             self.__name, self.__ip, self.__network,
                             self.__py_docker_templates_dir,
                             self.__dockerfile_path, self.__domains)
                         print("\t[ OK ] HAProxy file created.")
             except TypeError as err:
                 print("\t[ Apps __init__ ] ERROR:", err)
             """
                 Function to create imagens Docker.
             """
             self.__create_images_docker(
                 app_name=self.__name,
                 dockerfile_path=self.__dockerfile_path,
                 dockerfile=self.__dockerfile,
                 tag=self.__tag,
                 base_image=self.__base_image)
             """
                 Docker RUN.
             """
             self.__docker_run(app_name=self.__name,
                               network=self.__network,
                               ip=self.__ip,
                               aliases=self.__aliases,
                               image=self.__image,
                               ports=self.__ports,
                               port_bindings=self.__port_bindings,
                               volumes_conteiner=self.__volumes_conteiner,
                               volumes_bind=self.__volumes_binds)
             # sys.exit(1)
     except TypeError as err:
         print("[ Apps __init__ ] ERROR :", err)
         sys.exit(1)
     except ValueError as err:
         print("[ Apps __init__ ] ERROR :", err)
         sys.exit(1)
Example #23
import os
import schedule
import threading
import time
import functools
import docker
from docker.types import LogConfig
from flask import Flask, abort

app = Flask(__name__)

lc = LogConfig(type='splunk',
               config={
                   'splunk-token': os.environ['SPLUNK_TOKEN'],
                   'splunk-url': 'https://cloud.humio.com',
                   'splunk-format': 'json',
                   'tag': '{{.Name}}/{{.ID}}'
               })


def with_logging(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print('Job "%s" started' % func.__name__)
        result = func(*args, **kwargs)
        print('Job "%s" completed' % func.__name__)
        return result

    return wrapper
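A hypothetical use of the decorator with the schedule import above (the job body is a placeholder; in a real service the loop would run in a background thread, and threading is already imported):

@with_logging
def prune_stopped_containers():
    # remove stopped containers so their log files don't accumulate
    docker.from_env().containers.prune()

schedule.every(30).minutes.do(prune_stopped_containers)

while True:
    schedule.run_pending()
    time.sleep(1)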
Example #24
def get_logs_config(config):
    return LogConfig(type=LogConfig.types.JSON, config=config)
Example #25
 def test_logconfig_invalid_config_type(self):
     with pytest.raises(ValueError):
         LogConfig(type=LogConfig.types.JSON, config='helloworld')
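For contrast, a valid call passes the driver options as a dict (values illustrative):

lc = LogConfig(type=LogConfig.types.JSON, config={'max-size': '10m'})
assert lc.type == 'json-file'  # LogConfig.types.JSON maps to the json-file driver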
Example #26
    def __init__(
        self,
        *,
        job_id: uuid.UUID,
        input_files: Tuple[File, ...],
        exec_image: File,
        exec_image_sha256: str,
        results_file: Path,
    ):
        super().__init__()
        self._job_id = str(job_id)
        self._input_files = input_files
        self._exec_image = exec_image
        self._exec_image_sha256 = exec_image_sha256
        self._io_image = settings.CONTAINER_EXEC_IO_IMAGE
        self._results_file = results_file

        client_kwargs = {"base_url": settings.CONTAINER_EXEC_DOCKER_BASE_URL}

        if settings.CONTAINER_EXEC_DOCKER_TLSVERIFY:
            tlsconfig = TLSConfig(
                verify=True,
                client_cert=(
                    settings.CONTAINER_EXEC_DOCKER_TLSCERT,
                    settings.CONTAINER_EXEC_DOCKER_TLSKEY,
                ),
                ca_cert=settings.CONTAINER_EXEC_DOCKER_TLSCACERT,
            )
            client_kwargs.update({"tls": tlsconfig})

        self._client = docker.DockerClient(**client_kwargs)

        self._input_volume = f"{self._job_id}-input"
        self._output_volume = f"{self._job_id}-output"

        self._run_kwargs = {
            "labels": {"job_id": self._job_id},
            "network_disabled": True,
            "mem_limit": settings.CONTAINER_EXEC_MEMORY_LIMIT,
            # Set to the same as mem_limit to avoid using swap
            "memswap_limit": settings.CONTAINER_EXEC_MEMORY_LIMIT,
            "cpu_period": settings.CONTAINER_EXEC_CPU_PERIOD,
            "cpu_quota": settings.CONTAINER_EXEC_CPU_QUOTA,
            # Use the default weight
            "cpu_shares": 1024,
            "runtime": settings.CONTAINER_EXEC_DOCKER_RUNTIME,
            "cap_drop": ["all"],
            "security_opt": ["no-new-privileges"],
            "pids_limit": 64,
            "log_config": LogConfig(type=LogConfig.types.JSON,
                                    config={"max-size": "1g"}),
        }