def dump_docker_image():
    """Write the image tagged by CONF.tag out to /tmp/temp.tar."""
    LOG.debug("Dumping Docker Image %s" % CONF.tag)
    client = DockerClient(base_url=CONF.clients_chef.url)
    # stream the raw image bytes into a temporary tarball
    with open("/tmp/temp.tar", 'wb') as tar_file:
        tar_file.write(client.get_image("%s:latest" % CONF.tag).data)
    # drop the (potentially large) client before forcing a collection
    del client
    collector.collect()
def test_2():
    """Smoke test: export the 'ubuntu' image to a local tarball."""
    cli = Client(base_url="unix://var/run/docker.sock")
    img = cli.get_image('ubuntu')
    # Image data is binary: open in 'wb' (the original text-mode 'w'
    # corrupts the archive on Python 3) and let the context manager
    # close the file even if the write fails.
    with open("ubuntu.tar", 'wb') as tar:
        tar.write(img.data)
class DockerClient(object):
    """ Wrapper for Docker client used to build and export cookbook images. """

    def __init__(self, host="unix:///var/run/docker.sock"):
        """Connect to docker server.

        :param host: docker daemon URL (unix socket or tcp address)
        :raises DockerException: when the underlying client cannot connect
        """
        self.host = host
        self.container = None
        self.image_name = None
        self.cookbook_name = None
        try:
            self.dc = DC(base_url=self.host)
        except DockerException as e:
            LOG.error("Docker client error: %s" % e)
            # bare `raise` keeps the original traceback (was `raise e`)
            raise

    @staticmethod
    def _render_template(src, dst, cookbook_name):
        """Copy *src* to *dst*, substituting the <<COOKBOOKNAME>> placeholder."""
        with open(src, "r") as f:
            cont = f.read()
        with open(dst, "w") as f:
            f.write(cont.replace("<<COOKBOOKNAME>>", cookbook_name))

    def generate_image(self, dockerfile, chef_name):
        """generate docker image

        :param dockerfile: path to the Dockerfile template
        :param chef_name: cookbook name injected into solo.json/Dockerfile
        :returns: True when the build stream reported no error events
        """
        status = True
        self.cookbook_name = chef_name
        # configure chef-solo
        self._render_template("solo.json.sample", "solo.json",
                              self.cookbook_name)
        # inject cookbook to dockerfile
        LOG.debug("injecting cookbook to dockerfile...")
        self._render_template(dockerfile, "Dockerfile", self.cookbook_name)
        # generate image
        self.image_name = "docker-%s" % self.cookbook_name
        LOG.debug("generating image %s" % self.image_name)
        resp = self.dc.build(
            path=os.path.split(os.path.abspath(__file__))[0],
            rm=True,
            tag=self.image_name,
            decode=True,
        )
        for event in resp:
            if "error" in event:
                status = False
                LOG.error(event['errorDetail']['message'])
            if "stream" in event:
                LOG.debug(event['stream'].replace("\n", ""))
        return status

    def save_image(self):
        """ Faster for machines with lots of ram: the whole image is
        buffered in memory before being written to <image_name>.tar. """
        with open('%s.tar' % self.image_name, 'wb') as image_tar:
            image_tar.write(
                self.dc.get_image("%s:latest" % self.image_name).data)

    def save_image_cmd(self):
        """Slower but safer for low ram machines: shells out to the
        docker CLI so the image streams to disk instead of into RAM."""
        # Build an argv list (shell=False) so unusual host/image names
        # cannot be interpreted by the shell; the original formatted a
        # single string and ran it with shell=True (injection-prone).
        cmd = ["docker"]
        if self.host:
            cmd += ["-H", self.host]
        cmd += ["save", "-o", '%s.tar' % self.image_name,
                "%s:latest" % self.image_name]
        subprocess.call(cmd)
def dsplice(merge_images, tag=None, interactive=False, skip_import=False):
    """Merge several docker images into a single new image.

    :param merge_images: image names/ids to merge (at least two required)
    :param tag: optional repository tag for the imported merged image
    :param interactive: forwarded to merge_dirs for conflict resolution
    :param skip_import: when True, move the merged tar into the cwd
        instead of importing it into the docker daemon
    """
    if len(merge_images) < 2:
        print('at least two images must be provided for merge')
        return
    client = Client(base_url='unix://var/run/docker.sock')
    work_dir = tempfile.mkdtemp()
    layers_dir = work_dir + '/layers'
    build_dir = work_dir + '/build'
    os.mkdir(layers_dir)
    os.mkdir(build_dir)
    images = []
    print('exporting images...')
    for img in merge_images:
        print('%s: exporting' % img, end='')
        res = client.get_image(img)
        rprint('%s: extracting' % img)
        tmpdir = tempfile.mkdtemp()
        # close the archive handle deterministically (original leaked it)
        with tarfile.open(fileobj=io.BytesIO(res.data), mode='r|') as image_tar:
            image_tar.extractall(tmpdir)
        with open(tmpdir + '/manifest.json') as of:
            layers = [l.split('/')[0]
                      for l in json.loads(of.read())[0]['Layers']]
        # move all layers to common folder
        for layer in layers:
            src = '%s/%s/layer.tar' % (tmpdir, layer)
            dst = '%s/%s.tar' % (layers_dir, layer)
            rprint('%s: gathering layer %s' % (img, layer))
            if not os.path.exists(dst):
                shutil.move(src, dst)
                log.debug('mv %s -> %s' % (src, dst))
        shutil.rmtree(tmpdir)
        extract_dir = '%s/%s' % (work_dir, img.replace('/', '-'))
        os.mkdir(extract_dir)
        rprint('%s: done\n' % (img))
        images.append({'name': img, 'layers': layers, 'dir': extract_dir})
    all_layers = [i['layers'] for i in images]
    shared_layers = set(all_layers[0]).intersection(*all_layers[1:])
    # create image base using shared layers
    print('\nextracting image layers...')
    for layer in images[0]['layers']:
        if layer in shared_layers:
            rprint('extracting shared layers: %s' % layer)
            with tarfile.open('%s/%s.tar' % (layers_dir, layer)) as tar:
                tar.extractall(build_dir)
    rprint('extracting shared layers: done\n')
    # extract all layers for each image to own dir
    for i in images:
        uniq_layers = [l for l in i['layers'] if l not in shared_layers]
        for layer in uniq_layers:
            rprint('extracting unique layers: %s' % layer)
            with tarfile.open('%s/%s.tar' % (layers_dir, layer)) as tar:
                tar.extractall(i['dir'])
        rprint('extracting unique layers: done\n')
    merge_dirs([i['dir'] for i in images],
               build_dir, interactive=interactive)
    rprint('building new image...\n')
    arcpath = '%s/image.tar' % work_dir
    # mode='w' creates a fresh archive (the path never pre-exists inside
    # the new work_dir); closing it before the move/import guarantees the
    # tar is fully flushed — the original opened mode='a' and never closed.
    with tarfile.open(arcpath, mode='w') as tar:
        tar.add(build_dir, arcname='/')
    if skip_import:
        shutil.move(arcpath, os.getcwd())
    else:
        print('importing...')
        if tag:
            client.import_image(arcpath, repository=tag)
        else:
            client.import_image(arcpath)
    shutil.rmtree(work_dir)
    print('done!')
import re
import socket
import sys

from docker import Client

# CLI arguments: app name, user, data path, container name, yunohost id
app = sys.argv[1]
username = sys.argv[2]
datapath = sys.argv[3]
containername = sys.argv[4]
yunohostid = sys.argv[5]

# Get the hostname
hostname = socket.gethostname()
imagename = hostname + '/' + app

# Connect to docker socket
cli = Client(base_url='unix://docker.sock')

# Save the image.  The tar stream is binary data: open in 'wb' (the
# original text-mode 'w' corrupts the archive on Python 3) and use a
# context manager so the file is closed even on error.
# NOTE(review): imagename contains '/', so this path has an extra
# directory level (<backup>/images/<hostname>/<app>.tar) — confirm the
# directory exists on the backup host.
save = cli.get_image(image=imagename)
with open("/home/yunohost.backup/docker/images/" + imagename + ".tar",
          "wb") as save_tar:
    save_tar.write(save.data)

# Save the container (export stream is binary too)
export = cli.export(container=containername)
with open("/home/yunohost.backup/docker/containers/" + containername + ".tar",
          "wb") as export_tar:
    export_tar.write(export.data)

sys.exit()
class DockerOrchestration(object):
    """
    Class to perform operations on docker for various data source
    manipulations, updating configurations and handle users.
    """

    def __init__(self, headers, docker_host, docker_port, payload=None):
        """
        Initialize the docker connection objects used by the other methods.

        :param headers: connection headers (dict, required)
        :param docker_host: docker daemon hostname (non-empty string)
        :param docker_port: docker daemon port (required)
        :param payload: optional payload stored for later calls
        :raises ValueError: when a required argument is missing or mistyped
        """
        if not docker_host:
            raise ValueError(
                "Docker hostname value is %s missing. Instance cannot be initialized"
                % docker_host)
        if not docker_port:
            raise ValueError(
                "Docker hostport value is %s missing. Instance cannot be initialized"
                % docker_port)
        if not headers:
            raise ValueError(
                "Docker connection header value is missing. Instance cannot be initialized"
            )
        if not isinstance(docker_host, str):
            raise ValueError("Docker hostname must be of string type")
        if not isinstance(headers, dict):
            # fixed typo in the error message (was "deader")
            raise ValueError("Docker header must be of dict type")
        self.headers = headers
        self.docker_host = docker_host
        self.docker_port = docker_port
        self.payload = payload
        self.docker_client = Client(base_url='unix:///var/run/docker.sock')
        self.client = docker.from_env(assert_hostname=False)

    def create_docker_container(self, command=None):
        """
        To create the new container on the VM.

        :param command: unused placeholder (a fixed demo command is run)
        :return:
        """
        container = self.docker_client.create_container(
            image='busybox:latest', command='/bin/sleep 30')
        print("Container created : %s" % container)

    def update_docker_container(self):
        """
        To update the existing docker container.

        NOTE(review): broken stub — docker-py update_container() requires a
        container id; calling it with no arguments raises TypeError.
        :return:
        """
        update_container = self.docker_client.update_container()
        print(update_container)

    def create_docker_volume(self, command=None):
        """
        Create and register a named volume.

        :param command: unused placeholder
        :return:
        """
        volume = self.docker_client.create_volume(
            name='foobar', driver='local',
            driver_opts={'foo': 'bar', 'baz': 'false'})
        print(volume)

    def create_docker_network(self, name_of_network, driver, options=None):
        """
        To create a network.

        :param name_of_network: network name to create
        :param driver: network driver (e.g. 'bridge')
        :param options: optional driver options dict
        :return:
        """
        # Use the actual parameters and docker-py's `options` kwarg; the
        # original passed an invalid `driver_opts` kwarg and ignored its
        # own arguments.
        network = self.docker_client.create_network(
            name=name_of_network, driver=driver, options=options)
        print(network)

    def connect_container_to_network(self, container_id_or_name=None,
                                     network_id=None):
        """
        To connect a container to a network.

        :param container_id_or_name: container to connect
        :param network_id: target network id
        :return:
        """
        # Pass the arguments directly; the original %-formatted the API's
        # return value ('%s', '%s') % (...), which raised TypeError.
        network = self.docker_client.connect_container_to_network(
            container_id_or_name, network_id)
        print(network)

    def disconnect_container_to_network(self, container_id_or_name=None,
                                        network_id=None):
        """
        To disconnect a container from a network.

        :param container_id_or_name: container to disconnect
        :param network_id: network id to disconnect from
        :return:
        """
        # Same fix as connect: arguments go to the call, not to the result.
        network = self.docker_client.disconnect_container_from_network(
            container_id_or_name, network_id)
        print(network)

    def docker_version(self):
        """
        To return the version of current running docker on the server.

        :return: version info dict from the docker daemon
        """
        version = self.docker_client.version()
        return version

    def get_docker_volumes(self):
        """
        To get the volumes currently registered by the docker daemon.

        :return:
        """
        volumes = self.docker_client.volumes()
        print(volumes)

    def get_docker_version(self, client):
        """
        To get the docker version (via the high-level client).

        :param client: unused placeholder kept for interface compatibility
        :return:
        """
        version_number = self.client.version()
        print(version_number)

    def create_container_with_mount_docker_tempfs(self, name=None, cmd=None,
                                                  payload=None):
        """
        To create a container with the specified paths mounted as tmpfs.

        :param name: container name
        :param cmd: command to run
        :param payload: tmpfs mount specification
        :raises NotImplementedError: always — unimplemented stub (the
            original called create_container() with no image, which
            raised TypeError instead of being explicit)
        """
        raise NotImplementedError(
            "tmpfs container creation is not implemented yet")

    def build_docker(
        self,
        dockerfile=None,
    ):
        """
        To build the docker image from the in-memory dockerfile text.

        :param dockerfile: Dockerfile contents as a string
        :return:
        """
        f = BytesIO(dockerfile.encode('utf-8'))
        response = [
            line for line in self.docker_client.build(
                fileobj=f, rm=True, tag='yourname/volume')
        ]
        print(response)

    def get_docker_images(self):
        """
        To list the docker images.

        :return:
        """
        image_list = self.docker_client.images()
        print(image_list)

    def get_docker_image(self, docker_image_name=None):
        """
        To get an image from the docker daemon and dump it to /tmp.

        :param docker_image_name: image to fetch; defaults to fedora:latest
            (the original ignored this parameter entirely)
        :return:
        """
        image = self.docker_client.get_image(
            docker_image_name or "fedora:latest")
        # binary payload: 'wb' + context manager (original used text-mode
        # 'w' and never closed the handle)
        with open('/tmp/fedora - latest.tar', 'wb') as image_tar:
            image_tar.write(image.data)

    def import_docker_image(self):
        """
        To import docker image.

        :return:
        """
        image_list = self.docker_client.import_image()
        print(image_list)

    def import_docker_image_from_data(self):
        """
        To import image from raw data.

        :return:
        """
        image_list = self.docker_client.import_image_from_data()
        print(image_list)

    def import_docker_image_from_image(self):
        """
        To import image from another image.

        :return:
        """
        image_list = self.docker_client.import_image_from_image()
        print(image_list)

    def import_docker_image_from_stream(self):
        """
        To import image from a stream.

        :return:
        """
        image_list = self.docker_client.import_image_from_stream()
        print(image_list)

    def import_docker_image_from_url(self):
        """
        To import image from a URL.

        :return:
        """
        image_list = self.docker_client.import_image_from_url()
        print(image_list)

    def get_docker_networks(self):
        """
        Get the list of networks for the dockers.

        :return:
        """
        network_list = self.docker_client.networks()
        print(network_list)

    def push_docker_image_to_repository(
        self,
        dockerfile=None,
    ):
        """
        To build the docker image from the docker file.

        :param dockerfile: Dockerfile contents as a string
        :return:
        """
        f = BytesIO(dockerfile.encode('utf-8'))
        response = [
            line for line in self.docker_client.build(
                fileobj=f, rm=True, tag='yourname/volume')
        ]
        print(response)

    def pull_docker_image_to_repository(
        self,
        dockerfile=None,
    ):
        """
        To pull the docker image from the repository.

        :param dockerfile: unused placeholder
        :return:
        """
        for line in self.docker_client.pull('busybox', stream=True):
            print(json.dumps(json.loads(line), indent=4))

    def search_docker_image(self, docker_image_name=None):
        """
        To search and print matching docker images.

        :param docker_image_name: search term
        :return:
        """
        # Search for the given name; the original applied % to the API's
        # return value (search('%s') % name), which raised TypeError.
        response = self.docker_client.search(docker_image_name)
        print(response[:2])

    def start_docker_container(self, docker_image_name=None,
                               docker_command=None):
        """
        To start the docker container after it has been created.

        :param docker_image_name: unused placeholder (busybox demo is run)
        :param docker_command: unused placeholder
        :return:
        """
        container = self.docker_client.create_container(
            image='busybox:latest', command='/bin/sleep 30')
        response = self.docker_client.start(container=container.get('Id'))
        print(response)

    def restart_docker_container(self, container_dict=None, time_out=None):
        """
        To re-start the docker container after it has been created.

        :param container_dict: container id or dict with an 'Id' key
        :param time_out: seconds to wait before killing the container
        :return:
        """
        # Only forward the timeout when given; the original passed None
        # positionally into docker-py's timeout parameter.
        if time_out is None:
            response = self.docker_client.restart(container_dict)
        else:
            response = self.docker_client.restart(container_dict,
                                                  timeout=time_out)
        print(response)

    def check_status_for_docker_container(self, container_name=None):
        """
        To generate the statistics for the specific container.

        :param container_name: unused placeholder ('elasticsearch' demo)
        :return:
        """
        stats_obj = self.docker_client.stats('elasticsearch')
        #self.docker_client.stop('elasticsearch')
        for stat in stats_obj:
            print(stat)

    def remove_docker_image(
        self,
        dockerfile=None,
    ):
        """
        To remove docker image.

        :param dockerfile: unused placeholder
        :return:
        """
        # Actually remove the image; the original pushed it instead
        # (copy-paste error from a push method).
        response = self.docker_client.remove_image('yourname/app')
        print(response)