def login_to_registry(client: docker.DockerClient, registry: DockerRegistry):
    """
    Log in to a Docker registry when it is a remote one.

    Credentials are taken from per-registry environment variables derived
    from the registry host: for host "168.32.25.1:5000" the variables are
    "168_32_25_1_5000_USERNAME" and "168_32_25_1_5000_PASSWORD".

    :param client: Docker client instance
    :param registry: Docker registry descriptor
    :return: nothing
    """
    if not isinstance(registry, RemoteDockerRegistry):
        return
    # Dots and colons are not valid in env-var names, so normalize the host.
    host_for_env = registry.host.replace('.', '_').replace(':', '_')
    username_var = f'{host_for_env}_username'.upper()
    password_var = f'{host_for_env}_password'.upper()
    username = os.getenv(username_var)
    password = os.getenv(password_var)
    if not (username and password):
        logger.warning(
            'Skipped logging in to remote registry at host %s because no credentials given. ' +
            'You could specify credentials as %s and %s environment variables.',
            registry.host, username_var, password_var)
        return
    client.login(registry=registry.host, username=username, password=password)
    logger.info('Logged in to remote registry at host %s', registry.host)
def __init__(self, root, datmo_directory_name, docker_execpath="docker", docker_socket=None):
    """Initialize the Docker environment driver.

    :param root: project root path; must already exist
    :param datmo_directory_name: name of the datmo directory under root
    :param docker_execpath: docker CLI executable used for shelled-out commands
    :param docker_socket: docker daemon endpoint; defaults to the unix socket
        on non-Windows platforms (left as None on Windows so the SDK picks
        its own default)
    :raises PathDoesNotExist: if root does not exist
    """
    super(DockerEnvironmentDriver, self).__init__()
    if not docker_socket:
        if platform.system() != "Windows":
            # No unix socket on Windows; DockerClient() below then uses
            # its environment-based default instead.
            docker_socket = "unix:///var/run/docker.sock"
    self.root = root
    # Check if filepath exists
    if not os.path.exists(self.root):
        raise PathDoesNotExist(
            __("error", "controller.environment.driver.docker.__init__.dne", root))
    self._datmo_directory_name = datmo_directory_name
    self._datmo_directory_path = os.path.join(self.root, self._datmo_directory_name)
    self.environment_directory_name = "environment"
    self.environment_directory_path = os.path.join(
        self._datmo_directory_path, self.environment_directory_name)
    self.docker_execpath = docker_execpath
    self.docker_socket = docker_socket
    if self.docker_socket:
        # Explicit daemon endpoint: configure both the SDK client and the
        # CLI prefix used for shelled-out commands.
        self.client = DockerClient(base_url=self.docker_socket)
        self.prefix = [self.docker_execpath, "-H", self.docker_socket]
    else:
        self.client = DockerClient()
        self.prefix = [self.docker_execpath]
    self._is_connected = False
    self._is_initialized = self.is_initialized
    self.type = "docker"
    with open(docker_config_filepath) as f:
        self.docker_config = json.load(f)
def execute(self, callFunc):
    """Open an SSH tunnel to a remote Docker daemon and run *callFunc*
    against a client connected through that tunnel.

    NOTE(review): the unconditional ``return res`` inside the loop means
    only the FIRST host in self.adress is ever used — confirm whether
    iterating the remaining hosts was intended.

    :param callFunc: callable taking a DockerClient and returning a result
    :return: whatever callFunc returns for the first host
    """
    import paramiko
    from docker import DockerClient
    from sshtunnel import SSHTunnelForwarder
    import time
    for host in self.adress:
        # Entries look like "user@address".
        user = host.split('@', 1)[0]
        addr = host.split('@', 1)[1]
        port = self.localConnectionPort
        # Forward a local port to the remote docker daemon (tcp/2375) over SSH.
        forward = SSHTunnelForwarder(
            (addr, 22),
            ssh_username=user,
            ssh_pkey="/var/ssh/rsa_key",
            # NOTE(review): looks like a placeholder passphrase — confirm key handling.
            ssh_private_key_password="******",
            remote_bind_address=(addr, 2375),
            local_bind_address=('127.0.0.1', port))
        forward.start()
        time.sleep(3)  # give the tunnel a moment to come up
        dockerConection = DockerClient(
            'tcp://127.0.0.1:{port}'.format(port=port))
        res = callFunc(dockerConection)
        dockerConection.close()
        del dockerConection
        forward.stop()
        return res
def _check_docker_client(client: DockerClient) -> None: try: client.ping() except (DockerException, RequestException) as e: raise BLDRSetupFailed( 'Cannot connect to Docker daemon. Is Docker daemon running?\nAdditional info: {}'.format(e) )
def docker():
    """Fixture: yield a DockerClient attached to a (possibly temporary) swarm.

    Ensures the local daemon is a swarm manager, creates an attachable
    overlay network joined by the 'dockerspawner-test' container, and on
    teardown scales down and removes jupyterhub-client services, detaches
    and removes the network, and leaves the swarm only if this fixture
    created it.
    """
    client = DockerClient("unix:///var/run/docker.sock")
    already_swarm = client.info()["Swarm"]["LocalNodeState"] == "active"
    if not already_swarm:
        client.swarm.init()
    network = client.networks.create("dockerspawner-test-network", driver="overlay", attachable=True)
    network.connect("dockerspawner-test")
    try:
        yield client
    finally:
        for service in client.services.list():
            if service.name.startswith("jupyterhub-client"):
                # Scale to zero and wait (up to ~10s) for tasks to drain
                # so the subsequent removal is clean.
                service.scale(0)
                for _ in range(10):
                    if not service.tasks():
                        break
                    sleep(1)
                service.remove()
        network.disconnect("dockerspawner-test")
        network.remove()
        if not already_swarm:
            # Only tear the swarm down if we initialized it above.
            client.swarm.leave(True)
def docker_client():
    """Return a Docker client from the environment, falling back to the
    default local TCP endpoint when that fails."""
    # todo improve when yellowbox is upgraded
    try:
        client = DockerClient.from_env()
        client.ping()
        return client
    except Exception:
        return DockerClient(base_url="tcp://localhost:2375")
def container(request):
    """Container management view (requires a logged-in session).

    GET with ``containerid``: return that container's raw attrs as JSON.
    GET without ``containerid``: return a paginated container listing in the
    code/msg/count/data table format.
    DELETE: force-remove the given container together with its volumes.

    The Docker engine is reached over plain TCP at <host ip>:2375.
    """
    if request.session.get('is_login', None):
        if request.method == 'GET':
            page = request.GET.get('page')
            rows = request.GET.get('limit')
            hostid = request.GET.get('hostid')
            containerid = request.GET.get('containerid')
            ip = EwsHost.objects.get(pk=hostid).ip
            if containerid:
                try:
                    client = DockerClient(base_url='tcp://' + ip + ':2375')
                    containerinfo = client.containers.get(containerid).attrs
                    return HttpResponse(json.dumps(containerinfo))
                except Exception as ex:
                    # Bug fix: the original dumped a *set* literal
                    # ({"API调用异常"}), which json.dumps cannot serialize and
                    # which therefore raised TypeError instead of replying.
                    # Reply in the same shape as the DELETE error branch.
                    return HttpResponse(
                        json.dumps({
                            "status": 1,
                            "msg": "API调用异常"
                        }))
            elif not containerid:
                # Slice bounds for the requested page.
                i = (int(page) - 1) * int(rows)
                j = (int(page) - 1) * int(rows) + int(rows)
                # 根据ip,调用docker engine api获取容器
                client = DockerClient(base_url='tcp://' + ip + ':2375')
                containers = client.containers.list(all=True)
                total = len(containers)
                containers = containers[i:j]
                resultdict = {}
                container_rows = []  # renamed from `dict` to avoid shadowing the builtin
                for cont in containers:
                    dic = {}
                    dic['short_id'] = cont.short_id
                    dic['name'] = cont.name
                    dic['status'] = cont.status
                    container_rows.append(dic)
                resultdict['code'] = 0
                resultdict['msg'] = ""
                resultdict['count'] = total
                resultdict['data'] = container_rows
                return JsonResponse(resultdict, safe=False)
        if request.method == 'DELETE':
            containerid = QueryDict(request.body).get('containerid')
            hostid = QueryDict(request.body).get('hostid')
            ip = EwsHost.objects.get(pk=hostid).ip
            if (not containerid) or (not ip):
                return HttpResponse(
                    json.dumps({
                        "status": 2,
                        "msg": "缺少变量containerid和hostid"
                    }))
            try:
                client = DockerClient(base_url='tcp://' + ip + ':2375')
                containerins = client.containers.get(containerid)
                containerins.remove(v=True, force=True)
                return HttpResponse(json.dumps({"status": 0}))
            except Exception as ex:
                return HttpResponse(json.dumps({
                    "status": 1,
                    "msg": "API调用异常"
                }))
def _new_client(self) -> DockerClient:
    """Construct a Docker client, authenticating against ECR when pulls
    of the converter image may be required."""
    cli = DockerClient(base_url=current_app.config['DOCKER_HOST'])
    if current_app.config['CONVERTER_IMAGE_PULL']:
        # The registry host is the prefix of the image reference.
        username, password = self._get_ecr_login()
        ecr_registry, _ = self.image[0].split('/', 1)
        cli.login(username, password, registry=ecr_registry)
    return cli
def image(request):
    """Image management view (requires a logged-in session).

    GET with ``imageid``: return that image's raw attrs as JSON.
    GET without ``imageid``: return a paginated image listing in the
    code/msg/count/data table format.
    DELETE: force-remove the given image.

    The Docker engine is reached over plain TCP at <host ip>:2375.
    """
    if request.session.get('is_login', None):
        if request.method == 'GET':
            page = request.GET.get('page')
            rows = request.GET.get('limit')
            hostid = request.GET.get('hostid')  # renamed from `id` to avoid shadowing the builtin
            imageid = request.GET.get('imageid')
            ip = EwsHost.objects.get(pk=hostid).ip
            if imageid:
                try:
                    client = DockerClient(base_url='tcp://' + ip + ':2375')
                    imageinfo = client.images.get(imageid).attrs
                    return HttpResponse(json.dumps(imageinfo))
                except Exception as ex:
                    # Bug fix: the original dumped a *set* literal
                    # ({"API调用异常"}), which json.dumps cannot serialize and
                    # which therefore raised TypeError instead of replying.
                    # Reply in the same shape as the DELETE error branch.
                    return HttpResponse(
                        json.dumps({
                            "status": 1,
                            "msg": "API调用异常"
                        }))
            elif not imageid:
                # Slice bounds for the requested page.
                i = (int(page) - 1) * int(rows)
                j = (int(page) - 1) * int(rows) + int(rows)
                # 根据ip,调用docker engine api获取镜像
                client = DockerClient(base_url='tcp://' + ip + ':2375')
                images = client.images.list()
                total = len(images)
                images = images[i:j]
                resultdict = {}
                image_rows = []  # renamed from `dict` to avoid shadowing the builtin
                for img in images:
                    dic = {}
                    dic['short_id'] = img.short_id
                    dic['repotag'] = img.attrs.get('RepoTags')
                    image_rows.append(dic)
                resultdict['code'] = 0
                resultdict['msg'] = ""
                resultdict['count'] = total
                resultdict['data'] = image_rows
                return JsonResponse(resultdict, safe=False)
        if request.method == 'DELETE':
            imageid = QueryDict(request.body).get('imageid')
            hostid = QueryDict(request.body).get('hostid')
            ip = EwsHost.objects.get(pk=hostid).ip
            if (not imageid) or (not ip):
                return HttpResponse(
                    json.dumps({
                        "status": 2,
                        "msg": "缺少变量imageid和hostid"
                    }))
            try:
                client = DockerClient(base_url='tcp://' + ip + ':2375')
                client.images.remove(imageid, force=True)
                return HttpResponse(json.dumps({"status": 0}))
            except Exception as ex:
                return HttpResponse(json.dumps({
                    "status": 1,
                    "msg": "API调用异常"
                }))
def __init__(self, docker_url=None, verbose=False):
    """Connect to the Docker engine at *docker_url* and verify it responds.

    :param docker_url: daemon base URL; None lets the SDK pick its default
    :param verbose: verbosity flag stored for later use
    :raises DockerOperationError: when the daemon cannot be reached
    """
    self._docker_url = docker_url
    self._verbose = verbose
    self._client = DockerClient(base_url=docker_url)
    try:
        self._client.ping()
    except APIError as exc:
        logger.exception(exc)
        raise DockerOperationError(
            'Failed to connect to the Docker Engine.')
def deploy_worker(name: str = "", prebuilt: str = '') -> None:
    """
    Deploy a new Docker container to a new Docker Machine instance.
    :param name: Name of the new machine. If not provided, a name in the format redbot-n will be used, where n is the current number of machines known to Redbot.
    :param prebuilt: Whether the container should first be built locally, then shipped to the other machines, instead of building it on each machine.
    """
    config = {
        'vcenter': get_core_setting('vcenter_host'),
        'username': get_core_setting('vcenter_user'),
        'password': get_core_setting('vcenter_password'),
        # NOTE(review): duplicate 'network' key — the first entry
        # (vcenter_mgmt_network) is silently discarded; only the attack
        # network survives. Confirm whether two distinct keys were intended.
        'network': get_core_setting('vcenter_mgmt_network'),
        'network': get_core_setting('vcenter_attack_network'),
        'hostsystem': get_core_setting('vcenter_deploy_host'),
        'pool': get_core_setting('vcenter_pool'),
        'folder': get_core_setting('vcenter_folder'),
        'datastore': random.choice(get_core_setting('vcenter_datacenter').split(','))
    }
    # NOTE(review): this prints the vCenter password to stdout.
    print("Deploy with config", config)
    build_mode = get_core_setting('build_mode')
    file, image = None, None
    # Prepare Container Image
    if prebuilt:
        c = DockerClient()
        image = c.images.get(prebuilt)
    elif build_mode == 'local':
        c = DockerClient()
        image = deploy_container(c)
    elif build_mode == 'virtualbox':
        m, c = deploy_docker_machine(
            'redbot-' + str(storage.scard('machines')), 'virtualbox')
        image = deploy_container(c)
    if image:
        # Serialize the built image so it can be shipped to the target.
        file = image.save()
    # Deploy built image to target
    m, c = deploy_docker_machine(name or 'redbot-' + str(storage.scard('machines')), config=config)
    image = deploy_container(c, file)
    # NOTE(review): appends to a name not defined in this function —
    # presumably a module-level list; verify.
    images.append(image)
class DockerController:
    """Convenience wrapper around a remote Docker engine reached over
    plain TCP, used to build images and manage per-user containers."""

    def __init__(self):
        self.client = DockerClient(base_url='tcp://10.200.10.1:2375')

    def _info(self):
        """Print the engine's info blob (debug helper)."""
        print(self.client.info())

    def show_running_containers(self):
        """Return the list of currently running containers."""
        return self.client.containers.list()

    def build_image(self, path, tag):
        """Build an image from the Dockerfile under *path* and tag it.

        Bug fix: ``DockerClient`` has no ``.build()`` method — image builds
        live on the images collection (``client.images.build``).
        """
        # path = os.path.join(os.getcwd(), 'files', dir_name)
        self.client.images.build(path=path, tag=tag)

    def show_images(self):
        """Return all images known to the engine.

        Bug fix: ``client.images`` is a collection manager, not a callable;
        the original ``self.client.images()`` raised TypeError.
        """
        return self.client.images.list()

    def run_container(self, image: str, internal_web_port: int, build_name=None, command=None):
        """Start a detached container mapping ssh (22/tcp) and the given
        internal web port onto free host ports.

        :return: dict with keys 'id', 'ssh_port' and 'web_port'
        """
        ssh_port = get_no_port_being_used()
        web_port = get_no_port_being_used()
        container = self.client.containers.run(
            image=image,
            ports={
                '22/tcp': ssh_port,
                f'{internal_web_port}/tcp': web_port
            },
            name=build_name,
            command=command,
            detach=True,
            pids_limit=MAX_PID)
        logging.log(logging.INFO, f"ssh port is {ssh_port}, web port is {web_port}")
        container_info = {}
        container_info['id'] = container.id
        container_info['ssh_port'] = ssh_port
        container_info['web_port'] = web_port
        return container_info

    def rm_container(self, container_id: str):
        """Force-remove a container (no separate stop needed)."""
        container = self.client.containers.get(container_id)
        # container.stop()
        # force=True removes the container even while it is running.
        container.remove(force=True)
        logging.log(logging.INFO, f'{container.id} has been removed')

    def exec_container(self, container_id: str, command):
        """Run *command* inside a running container (result is discarded)."""
        self.client.containers.get(container_id).exec_run(command)
def _build_images_from_dockerfiles(self):
    """
    Build Docker images for each local Dockerfile found in the package:
    self.local_docker_files
    """
    if GK_STANDALONE_MODE:
        return  # do not build anything in standalone mode
    dc = DockerClient()
    LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
    # iteritems(): this module targets Python 2.
    for k, v in self.local_docker_files.iteritems():
        # k = image tag, v = Dockerfile path; build from its directory.
        # NOTE(review): this build(path=..., tag=...) generator signature
        # matches the low-level APIClient, not DockerClient — confirm which
        # client type is actually intended here.
        for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
            LOG.debug("DOCKER BUILD: %s" % line)
        LOG.info("Docker image created: %s" % k)
def _is_docker_running(client: docker.DockerClient) -> bool: """ Check if docker binary and docker daemon are available :param client: DockerClient instance :return: true or false """ try: client.info() return True except (ImportError, IOError, DockerException): return False
def _build_images_from_dockerfiles(self):
    """
    Build Docker images for each local Dockerfile found in the package:
    self.local_docker_files
    """
    if GK_STANDALONE_MODE:
        return  # do not build anything in standalone mode
    dc = DockerClient()
    LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
    # iteritems(): this module targets Python 2.
    for k, v in self.local_docker_files.iteritems():
        # k = image tag, v = Dockerfile path; build from its directory.
        # NOTE(review): this build(path=..., tag=...) generator signature
        # matches the low-level APIClient, not DockerClient — confirm which
        # client type is actually intended here.
        for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
            LOG.debug("DOCKER BUILD: %s" % line)
        LOG.info("Docker image created: %s" % k)
def docker_client():
    """Yield a Docker client built from the environment, falling back to
    the default TCP port (useful when running inside a virtual machine).
    The client is closed on teardown."""
    try:
        client = DockerClient.from_env()
        client.ping()
    except Exception:  # pragma: no cover
        client = DockerClient(base_url='tcp://localhost:2375')
        client.ping()
    with closing(client):
        yield client
def perform_logins(self, client: DockerClient):
    """Authenticate *client* against every registered credential provider.

    For each provider whose credentials are stale (per should_authenticate),
    fresh credentials are obtained and used to log the docker client in, and
    the provider entry's last-login timestamp is refreshed.

    :param client: docker client to authenticate
    """
    self._logger.info('Running logins for docker client')
    if not self.has_providers:
        self._logger.info('No providers present, skipping...')
        return
    for entry in self._providers.values():
        provider = entry[self.PROVIDER_KEY]
        if provider.should_authenticate(entry[self.LAST_LOGIN_KEY]):
            (user, password, registry) = provider.obtain_auth()
            client.login(username=user, password=password, registry=registry)
            # Bug fix: write through the same key constant read above; the
            # original hard-coded 'last_login', which silently diverges if
            # LAST_LOGIN_KEY is ever anything else.
            entry[self.LAST_LOGIN_KEY] = datetime.now()
def __init__(self):
    """Create a Docker client from configuration, falling back to the
    environment when no base URL is configured."""
    base_url = Config.get("docker_base_url", None)
    version = Config.get("docker_version", None)
    tls = Config.get("docker_tls", None)
    if base_url is None:
        self.docker_cli = DockerClient.from_env()
    else:
        self.docker_cli = DockerClient(base_url=base_url, version=version, tls=tls)
    self.thread_count = Config.get("thread_count")
    # NOTE(review): this annotated assignment evaluates self.__Image
    # (name-mangled) at runtime; confirm that class attribute exists,
    # otherwise this line raises AttributeError.
    self.image_info: self.__Image = None
    self.image = None
    self.container = None
class DockerPreliminaryInformation:
    """Thin wrapper exposing container-station info and version queries."""

    def __init__(self, unixsock):
        # Connect to the daemon over the given unix socket URL.
        self.clinobjc = DockerClient(base_url=unixsock)

    def get_docker_info(self):
        """Return container station information."""
        return self.clinobjc.info()

    def get_docker_version(self):
        """Return container station versioning."""
        return self.clinobjc.version()
def run(self):
    """Launch a 'lofar-stage2' helper container on the lofar-net network
    and POST this task's staging command to it.

    :return: the helper's JSON reply, parsed into Python objects
    """
    client = DockerClient(base_url='unix://var/run/docker.sock')
    # NOTE(review): `source` is assigned but not used below — confirm.
    source = self.command['src']
    credentials = self.command['credentials']
    # Start 'lofar-stage2' container
    container = client.containers.run(image='lofar-stage2',
                                      auto_remove=True,
                                      detach=True,
                                      tty=True,
                                      network='lofar-net',
                                      environment={
                                          'LOFAR_USER': credentials['lofarUsername'],
                                          'LOFAR_PASS': credentials['lofarPassword']
                                      })
    # Reload container information
    sleep(1)  # brief pause before re-reading the container's attrs
    container.reload()
    # bootstrap
    hostname = container.attrs['Config']['Hostname']
    response = post(url=f'http://{hostname}:5000/stage', json={
        'id': self.identifier,
        'cmd': self.command,
        'webhook': self.webhook,
        'options': {}
    })
    return loads(response.text)
def __init__(self, client):
    """Drive the docker-plugin module: resolve parameters, look up the
    existing plugin, apply the requested state, and collect diff results.

    :param client: Ansible-style module client carrying connect params,
        check mode and module flags
    """
    self.client = client
    self.dclient = DockerClient(**self.client._connect_params)
    # NOTE(review): the SDK client's low-level .api is replaced with the
    # module client, routing API calls through it — confirm intent.
    self.dclient.api = client
    self.parameters = TaskParameters(client)
    self.check_mode = self.client.check_mode
    self.results = {
        u'changed': False,
        u'actions': []
    }
    self.diff = self.client.module._diff
    self.diff_tracker = DifferenceTracker()
    self.diff_result = dict()
    self.existing_plugin = self.get_existing_plugin()
    state = self.parameters.state
    # Dispatch on the requested plugin state.
    if state == 'present':
        self.present()
    elif state == 'absent':
        self.absent()
    elif state == 'enable':
        self.enable()
    elif state == 'disable':
        self.disable()
    # Surface before/after diffs when diff/check/debug modes ask for them.
    if self.diff or self.check_mode or self.parameters.debug:
        if self.diff:
            self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
        self.results['diff'] = self.diff_result
def StartContainer():
    # Start the existing 'suri-buildbot' container, mounting the Suricata
    # source tree and buildbot config read-only, then exit the process.
    # (Python 2 module: note the print statement below.)
    suri_src_dir = os.path.split(os.path.dirname(
        os.path.realpath(__file__)))[0]
    print "Using base src dir: " + suri_src_dir
    if GOT_DOCKERPY_API < 2:
        # Legacy docker-py (< 2.x): low-level Client with explicit bindings.
        cli = Client()
        cli.start(
            'suri-buildbot',
            port_bindings={
                8010: 8010,
                22: None
            },
            binds={
                suri_src_dir: {
                    'bind': '/data/oisf',
                    'ro': True
                },
                os.path.join(suri_src_dir, 'qa', 'docker', 'buildbot.cfg'): {
                    'bind': '/data/buildbot/master/master.cfg',
                    'ro': True
                }
            })
    else:
        # Modern SDK: fetch the named container and start it.
        cli = DockerClient()
        cli.containers.get('suri-buildbot').start()
    sys.exit(0)
def __init__(self, name=None, image=None, auth=None, n_cores=None, n_replicas=None,
             bolt_port=None, http_port=None, https_port=None, debug_port=None,
             debug_suspend=None, dir_spec=None, config=None, env=None):
    """Set up a Neo4j service descriptor backed by the local Docker daemon.

    :param name: service name; a random one is generated when omitted
    :param image: image reference; falls back to self.default_image
    :param auth: (user, password) pair; a generated auth is used when omitted
    :raises ValueError: if the auth user is anything but 'neo4j'

    NOTE(review): n_cores, n_replicas, *_port, debug_suspend, dir_spec,
    config and env are accepted but not stored here — presumably consumed
    by a subclass; confirm.
    """
    from docker import DockerClient
    self.name = name or self._random_name()
    self.docker = DockerClient.from_env(version="auto")
    self.image = resolve_image(image or self.default_image)
    self.auth = Auth(*auth) if auth else make_auth()
    if self.auth.user != "neo4j":
        raise ValueError("Auth user must be 'neo4j' or empty")
    self.machines = {}
    self.network = None
    self.routing_tables = {"system": Neo4jRoutingTable()}
    self.console = None
def web_driver_docker_client(self) -> Optional[DockerClient]:
    """Return a DockerClient that reaches the remote node over SSH, or None
    when no SSH login info is available.

    On BadHostKeyException (stale known_hosts entry) the offending host key
    is removed from ~/.ssh/known_hosts and the connection retried once.
    """
    if not self.ssh_login_info:
        return None
    SSHAgent.add_keys((self.ssh_login_info["key_file"], ))
    # since a bug in docker package https://github.com/docker-library/python/issues/517 that need to explicitly
    # pass down the port for supporting ipv6
    user = self.ssh_login_info['user']
    hostname = normalize_ipv6_url(self.ssh_login_info['hostname'])
    try:
        return DockerClient(base_url=f"ssh://{user}@{hostname}:22", timeout=DOCKER_API_CALL_TIMEOUT)
    except paramiko.ssh_exception.BadHostKeyException as exc:
        system_host_keys_path = os.path.expanduser("~/.ssh/known_hosts")
        system_host_keys = paramiko.hostkeys.HostKeys(system_host_keys_path)
        # Drop the stale key (if present) and persist before retrying.
        if system_host_keys.pop(exc.hostname, None):
            system_host_keys.save(system_host_keys_path)
        return DockerClient(base_url=f"ssh://{user}@{hostname}:22", timeout=DOCKER_API_CALL_TIMEOUT)
def __init__(self):
    """Create a Docker client configured from application settings
    (endpoint, request timeout and connection-pool size)."""
    from docker import DockerClient
    options = {
        "base_url": config.DOCKER_BASE_URL,
        "timeout": config.DOCKER_TIMEOUT,
        "num_pools": config.DOCKER_NUM_POOLS,
    }
    self.client = DockerClient(**options)
def __init__(self, base_master_name, base_slave_name, base_tag, tag_decoration):
    """Provider resolving bare base images from fixed master/slave names
    and a common tag, decorated per the parent class.

    :param base_master_name: image name for the master base
    :param base_slave_name: image name for the slave base
    :param base_tag: tag shared by both base images
    :param tag_decoration: decoration forwarded to the parent provider
    """
    super(TagBareImageProvider, self).__init__(tag_decoration)
    self.client = DockerClient()
    self.base_tag = base_tag
    self.base_master_name = base_master_name
    self.base_slave_name = base_slave_name
def __init__(self, context, spec, build_status=None, docker_version='auto'):
    """Prepare a test runner for one commit.

    :param context: build context carrying repository and source commit info
    :param spec: parsed spec for the repository (including vault settings)
    :param build_status: optional BuildStatus; a default one targeting
        'badwolf/test' with a link to the build log is created otherwise
    :param docker_version: docker API version string; 'auto' negotiates
    """
    self.context = context
    self.spec = spec
    self.repo_name = context.repository.split('/')[-1]
    self.commit_hash = context.source['commit']['hash']
    self.build_status = build_status or BuildStatus(
        bitbucket,
        context.source['repository']['full_name'],
        self.commit_hash,
        'badwolf/test',
        url_for('log.build_log', sha=self.commit_hash, _external=True))
    self.docker = DockerClient(
        base_url=current_app.config['DOCKER_HOST'],
        timeout=current_app.config['DOCKER_API_TIMEOUT'],
        version=docker_version,
    )
    # Vault is optional: spec values take precedence over app config, and
    # without both a URL and a token, secrets support is disabled.
    vault_url = spec.vault.url or current_app.config['VAULT_URL']
    vault_token = spec.vault.token or current_app.config['VAULT_TOKEN']
    if vault_url and vault_token:
        self.vault = hvac.Client(url=vault_url, token=vault_token)
    else:
        self.vault = None
def _check_docker_image_exists(self, image_name):
    """
    Query the docker service and check if the given image exists
    :param image_name: name of the docker image
    :return: True when at least one matching image is found
    """
    matching = DockerClient().images.list(name=image_name)
    return len(matching) > 0
def load_image_from_file(name):
    """Load a Docker image from the tar archive at *name* and return the
    first tag of the first image loaded."""
    from docker import DockerClient
    client = DockerClient.from_env(version="auto")
    with open(name, "rb") as archive:
        loaded = client.images.load(archive.read())
    return loaded[0].tags[0]
def __init__(
        self,
        tag,
        base_url='unix://var/run/docker.sock',
        container_recipe='/tmp',
        container_conda_bld='/home/{username}/conda-bld',
        host_conda_bld=None,
        image='condaforge/linux-anvil',
        verbose=False,
):
    """
    Builds a container based on `image`, adding the local user and group
    to the container.

    :param tag: tag for the image that will be built
    :param base_url: docker daemon endpoint
    :param container_recipe: recipe path inside the container
    :param container_conda_bld: conda-bld path inside the container; may
        contain a ``{username}`` placeholder filled from the invoking user
    :param host_conda_bld: conda-bld path on the host
    :param image: base image to build from
    :param verbose: enable verbose output
    """
    self.tag = tag
    self.image = image
    self.host_conda_bld = host_conda_bld
    self.verbose = verbose
    # Capture the invoking user's uid/gid and names so the container can
    # recreate a matching user and group.
    uid = os.getuid()
    usr = pwd.getpwuid(uid)
    self.user_info = dict(
        uid=uid,
        gid=usr.pw_gid,
        groupname=grp.getgrgid(usr.pw_gid).gr_name,
        username=usr.pw_name)
    self.container_recipe = container_recipe
    self.container_conda_bld = container_conda_bld.format(**self.user_info)
    self.docker = DockerClient(base_url=base_url)
    # Built-image handle; populated later by the build step.
    self._build = None
def __init__(self, name, network, image, bolt_address, http_address, auth, **config):
    """Create (but do not start) one Neo4j container on *network*.

    Extra **config entries become NEO4J_* environment variables, with '_'
    doubled and '.' mapped to '_' (the Neo4j docker naming scheme).

    :param name: machine name; combined with the network name for hostname
        and container name
    :param network: docker network object the container joins
    :param image: image tag, combined with self.repository
    :param bolt_address: host binding for 7687/tcp
    :param http_address: host binding for 7474/tcp
    :param auth: (user, password) pair joined into NEO4J_AUTH
    """
    self.name = name
    self.network = network
    self.image = "{}:{}".format(self.repository, image)
    self.bolt_address = bolt_address
    self.http_address = http_address
    self.auth = auth
    self.docker = DockerClient.from_env()
    environment = {
        "NEO4J_AUTH": "/".join(self.auth),
        "NEO4J_ACCEPT_LICENSE_AGREEMENT": "yes",
    }
    for key, value in config.items():
        environment["NEO4J_" + key.replace("_", "__").replace(".", "_")] = value
    ports = {
        "7474/tcp": self.http_address,
        "7687/tcp": self.bolt_address,
    }
    self.container = self.docker.containers.create(
        self.image,
        detach=True,
        environment=environment,
        hostname="{}.{}".format(self.name, self.network.name),
        name="{}.{}".format(self.name, self.network.name),
        network=self.network.name,
        ports=ports)
    # Filled in once the container is started and inspected.
    self.ip_address = None
def find_and_stop(cls, service_name):
    """Stop and force-remove every container whose name belongs to
    *service_name*, then remove the service's network."""
    client = DockerClient.from_env(version="auto")
    suffix = ".{}".format(service_name)
    for container in client.containers.list(all=True):
        if container.name.endswith(suffix):
            container.stop()
            container.remove(force=True)
    client.networks.get(service_name).remove()
def __init__(self, name, **parameters):
    """Initialize a named Neo4j cluster controller: docker connectivity,
    a dedicated network, credentials and port pools for new machines."""
    self.machines = []
    self.routers = []
    self.bolt_port_range = range(17600, 17700)
    self.http_port_range = range(17400, 17500)
    self.user = parameters.get("user", "neo4j")
    self.password = parameters.get("password", "password")
    self.docker = DockerClient.from_env()
    self.image = self.fix_image(parameters.get("image"))
    self.network = self.docker.networks.create(name)
def get_context_data(self, **kwargs):
    """Add the local Docker containers to the template context.

    Bug fix: ``client.containers`` is a collection manager, not a callable —
    the original ``client.containers()`` raised TypeError; listing is done
    via ``client.containers.list()``. Also forwards **kwargs to super so
    view keyword arguments reach the context as usual.
    """
    context = super().get_context_data(**kwargs)
    client = DockerClient('unix://var/run/docker.sock')
    context['containers'] = client.containers.list()
    return context