def setup_container_host(host_type, daemon_url, timeout=5):
    """ Setup a container host for deploying cluster on it

    :param host_type: Docker host type
    :param daemon_url: Docker daemon url
    :param timeout: timeout to wait
    :return: True or False
    """
    # Guard clauses: only tcp:// daemons and known host types are accepted.
    if not daemon_url or not daemon_url.startswith("tcp://"):
        logger.error("Invalid daemon_url={}".format(daemon_url))
        return False
    if host_type not in HOST_TYPES:
        logger.error("Invalid host_type={}".format(host_type))
        return False

    try:
        client = Client(base_url=daemon_url, version="auto", timeout=timeout)
        existing = [net["Name"] for net in client.networks()]
        # One cluster network per consensus plugin; reuse any that exist.
        for plugin in CONSENSUS_PLUGINS:
            net_name = CLUSTER_NETWORK + "_{}".format(plugin)
            if net_name in existing:
                logger.warning(
                    "Network {} already exists, use it!".format(net_name))
                continue
            if host_type == HOST_TYPES[0]:  # single
                client.create_network(net_name, driver='bridge')
            elif host_type == HOST_TYPES[1]:  # swarm
                client.create_network(net_name, driver='overlay')
            else:
                logger.error("No-supported host_type={}".format(host_type))
                return False
    except Exception as e:
        # Best-effort setup: any docker-py/transport failure means failure.
        logger.error("Exception happens!")
        logger.error(e)
        return False

    return True
# NOTE(review): this is a duplicate of the setup_container_host defined
# earlier in this file; at import time this later definition shadows the
# earlier one. Consider removing one of the two.
def setup_container_host(host_type, daemon_url, timeout=5):
    """ Setup a container host for deploying cluster on it

    :param host_type: Docker host type
    :param daemon_url: Docker daemon url
    :param timeout: timeout to wait
    :return: True or False
    """
    # Only tcp:// daemon endpoints are supported.
    if not daemon_url or not daemon_url.startswith("tcp://"):
        logger.error("Invalid daemon_url={}".format(daemon_url))
        return False
    if host_type not in HOST_TYPES:
        logger.error("Invalid host_type={}".format(host_type))
        return False
    try:
        client = Client(base_url=daemon_url, version="auto", timeout=timeout)
        net_names = [x["Name"] for x in client.networks()]
        # One cluster network per consensus plugin; reuse when present.
        for cs_type in CONSENSUS_PLUGINS:
            net_name = CLUSTER_NETWORK + "_{}".format(cs_type)
            if net_name in net_names:
                logger.warning("Network {} already exists, use it!".format(
                    net_name))
            else:
                if host_type == HOST_TYPES[0]:  # single
                    client.create_network(net_name, driver='bridge')
                elif host_type == HOST_TYPES[1]:  # swarm
                    client.create_network(net_name, driver='overlay')
                else:
                    # Defensive: reachable only if HOST_TYPES grows beyond
                    # the two handled entries.
                    logger.error("No-supported host_type={}".format(host_type))
                    return False
    except Exception as e:
        # Broad catch on purpose: any failure during network setup makes
        # the host unusable, so report and return False.
        logger.error("Exception happens!")
        logger.error(e)
        return False
    return True
class Docker_interface:
    """Thin wrapper around the docker-py low-level Client used to create,
    run, inspect and delete containers, volumes and networks for a cluster
    (network name defaults to 'tosker_net')."""

    def __init__(self, net_name='tosker_net', tmp_dir='/tmp',
                 socket='unix://var/run/docker.sock'):
        # DOCKER_HOST env var, when set, overrides the unix socket.
        self._log = Logger.get(__name__)
        self._net_name = net_name
        self._cli = Client(base_url=os.environ.get('DOCKER_HOST') or socket)
        self._tmp_dir = tmp_dir

    # TODO: add a parameter to delete the containers if they already exist!
    def create(self, con, cmd=None, entrypoint=None, saved_image=False):
        """Create the container described by `con` (a Container object).

        :param con: Container description (image, ports, volumes, links...)
        :param cmd: optional command overriding con.cmd
        :param entrypoint: optional entrypoint overriding con.entrypoint
        :param saved_image: when True, prefer a previously committed
            '<net_name>/<con.name>' image if one exists
        """
        def create_container():
            # Per-container scratch dir on the host, bind-mounted at /tmp/dt.
            tmp_dir = path.join(self._tmp_dir, con.name)
            try:
                os.makedirs(tmp_dir)
            except:
                # Best-effort: the directory may already exist.
                pass
            saved_img_name = '{}/{}'.format(self._net_name, con.name)
            img_name = con.image
            if saved_image and self.inspect(saved_img_name):
                # Reuse the committed image instead of the original one.
                img_name = saved_img_name
            self._log.debug('container: {}'.format(con.get_str_obj()))
            con.id = self._cli.create_container(
                name=con.name,
                image=img_name,
                entrypoint=entrypoint if entrypoint else con.entrypoint,
                command=cmd if cmd else con.cmd,
                environment=con.env,
                detach=True,
                # stdin_open=True,
                ports=[key for key in con.ports.keys()] if con.ports else None,
                volumes=['/tmp/dt'] + ([k for k, v in con.volume.items()]
                                       if con.volume else []),
                networking_config=self._cli.create_networking_config({
                    self._net_name: self._cli.create_endpoint_config(
                        links=con.link
                        # ,aliases=['db']
                    )
                }),
                host_config=self._cli.create_host_config(
                    port_bindings=con.ports,
                    # links=con.link,
                    binds=[tmp_dir + ':/tmp/dt'] +
                          ([v + ':' + k for k, v in con.volume.items()]
                           if con.volume else []),
                )).get('Id')

        assert isinstance(con, Container)
        if con.to_build:
            self._log.debug('start building..')
            # utility.print_json(
            self._cli.build(path='/'.join(con.dockerfile.split('/')[0:-1]),
                            dockerfile='./' + con.dockerfile.split('/')[-1],
                            tag=con.image,
                            pull=True,
                            quiet=True)
            # )
            self._log.debug('stop building..')
        elif not saved_image:
            # TODO: avoid this when a custom image must be used
            self._log.debug('start pulling.. {}'.format(con.image))
            utility.print_json(self._cli.pull(con.image, stream=True),
                               self._log.debug)
            self._log.debug('end pulling..')
        try:
            create_container()
        except errors.APIError as e:
            # Retry once after removing the conflicting container
            # (e.g. a stale container with the same name).
            self._log.debug(e)
            # self.stop(con)
            self.delete(con)
            create_container()
            # raise e

    def stop(self, container):
        """Stop a container (by name, Container or Volume object)."""
        name = self._get_name(container)
        try:
            return self._cli.stop(name)
        except errors.NotFound as e:
            self._log.error(e)

    def start(self, container, wait=False):
        """Start a container; with wait=True block until it exits and
        stream its logs to the debug logger."""
        name = self._get_name(container)
        self._cli.start(name)
        if wait:
            self._log.debug('wait container..')
            self._cli.wait(name)
            utility.print_byte(self._cli.logs(name, stream=True),
                               self._log.debug)

    def delete(self, container):
        """Remove a container together with its anonymous volumes (v=True).
        Re-raises on failure, unlike stop()."""
        name = self._get_name(container)
        try:
            self._cli.remove_container(name, v=True)
        except (errors.NotFound, errors.APIError) as e:
            self._log.error(e)
            raise e

    def exec_cmd(self, container, cmd):
        """Run `cmd` inside a running container.

        :return: False when the container is not running or the exec fails;
            otherwise True unless the output starts with 'rpc error:'.
        """
        name = self._get_name(container)
        if not self.is_running(name):
            return False
        try:
            exec_id = self._cli.exec_create(name, cmd)
            status = self._cli.exec_start(exec_id)
            # TODO: verify the reliability of this check!
            check = 'rpc error:' != status[:10].decode("utf-8")
            self._log.debug('check: {}'.format(check))
            return check
        except errors.APIError as e:
            self._log.error(e)
            return False
        except requests.exceptions.ConnectionError as e:
            # TODO: this error shows up after a 10-second timeout
            self._log.error(e)
            return False

    def create_volume(self, volume):
        """Create a named volume from a Volume description object."""
        assert isinstance(volume, Volume)
        self._log.debug('volume opt: {}'.format(volume.get_all_opt()))
        return self._cli.create_volume(volume.name, volume.driver,
                                       volume.get_all_opt())

    def delete_volume(self, volume):
        """Remove a volume (by name or Volume object)."""
        name = self._get_name(volume)
        return self._cli.remove_volume(name)

    def get_containers(self, all=False):
        """List containers; all=True includes stopped ones."""
        return self._cli.containers(all=all)

    def get_volumes(self):
        """List volumes, normalizing docker's None payload to []."""
        volumes = self._cli.volumes()
        return volumes['Volumes'] or []

    def inspect(self, item):
        """Inspect `item` trying container, then image, then volume;
        return the first match or None."""
        name = self._get_name(item)
        try:
            return self._cli.inspect_container(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_image(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_volume(name)
        except errors.NotFound:
            return None

    def remove_all_containers(self):
        """Stop and delete every container known to the daemon."""
        for c in self.get_containers(all=True):
            self.stop(c['Id'])
            self.delete(c['Id'])

    def remove_all_volumes(self):
        """Delete every volume known to the daemon."""
        for v in self.get_volumes():
            self.delete_volume(v['Name'])

    def create_network(self, name, subnet='172.25.0.0/16'):
        """Create a bridge network with the given subnet; tolerate an
        already-existing network of the same name."""
        # docker network create -d bridge --subnet 172.25.0.0/16 isolated_nw
        # self.delete_network(name)
        try:
            self._cli.create_network(name=name,
                                     driver='bridge',
                                     ipam={'subnet': subnet},
                                     check_duplicate=True)
        except errors.APIError:
            self._log.debug('network already exists!')

    def delete_network(self, name):
        """Remove a network by name; tolerate a missing network."""
        assert isinstance(name, str)
        try:
            self._cli.remove_network(name)
        except errors.APIError:
            self._log.debug('network not exists!')

    def delete_image(self, name):
        """Remove an image by name; ignore when it does not exist."""
        assert isinstance(name, str)
        try:
            self._cli.remove_image(name)
        except errors.NotFound:
            pass

    # TODO: split this method in two, the semantics are unclear!
    def update_container(self, node, cmd, saved_image=True):
        """Run `cmd` once inside a fresh container for `node`, commit the
        result as '<net_name>/<node.name>', then recreate the container
        from that committed state (restoring the original cmd/entrypoint)
        and commit again.
        """
        assert isinstance(node, Container)
        # self._log.debug('container_conf: {}'.format(node.host_container))
        # Remember the image's own Cmd/Entrypoint so they can be restored.
        stat = self.inspect(node.image)
        old_cmd = stat['Config']['Cmd'] or None
        old_entry = stat['Config']['Entrypoint'] or None
        if self.inspect(node):
            self.stop(node)
            self.delete(node)
        self.create(node, cmd=cmd, entrypoint='', saved_image=saved_image)
        # Run cmd to completion, then snapshot the container state.
        self.start(node.id, wait=True)
        self.stop(node.id)
        name = '{}/{}'.format(self._net_name, node.name)
        self._cli.commit(node.id, name)
        self.stop(node)
        self.delete(node)
        self.create(node,
                    cmd=node.cmd or old_cmd,
                    entrypoint=node.entrypoint or old_entry,
                    saved_image=True)
        self._cli.commit(node.id, name)

    def is_running(self, container):
        """Return True iff the container exists and State.Running is true."""
        name = self._get_name(container)
        stat = self.inspect(name)
        stat = stat is not None and stat['State']['Running'] is True
        self._log.debug('State: {}'.format(stat))
        return stat

    def _get_name(self, name):
        # Accept either a plain string or a Container/Volume object.
        if isinstance(name, six.string_types):
            return name
        else:
            assert isinstance(name, (Container, Volume))
            return name.name
class Swarmpose():
    """Deploy a multi-container application, described by a YAML config
    file, onto a Docker Swarm manager, starting/stopping containers in
    link-dependency order."""

    # initialise the swarmpose class
    def __init__(self, yamal, manager, network="dockernet"):
        # `manager` is expected in "host:port" form.
        self.HOST, self.PORT = manager.split(':')
        # Connect to remote daemon
        self.cli = Client(base_url='tcp://' + self.HOST + ':' + self.PORT)
        # parse the yaml file into a dictionary of dictionaries
        self.nodes = self.parseConfig(yamal)
        # check if an overlay network exists if not create one (default='Dockernet')
        self.network = network
        if (self.networkExists(network) != True):
            self.createOverlayNetwork(network)

    def createOverlayNetwork(self, name):
        """Create an overlay network spanning the swarm nodes."""
        self.cli.create_network(name=name, driver="overlay")

    def networkExists(self, network):
        """Return True when at least one network matches `network`."""
        networks = self.cli.networks(names=[network])
        return (len(networks) != 0)

    # parse the yamal file and return a dictionary
    def parseConfig(self, file):
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary constructors; prefer yaml.safe_load for untrusted files.
        with open(file, 'r') as fh:
            nodes = yaml.load(fh)
        return nodes

    # removes all containers (in no particular order) from the swarm
    def removeAllContainers(self):
        # don't look back!
        print("**** Clearing Containers ****")
        for name, val in self.nodes.items():
            try:
                print("Purging %s ..."
                      % name)
                self.cli.remove_container(name, force=True)
            except errors.NotFound as e:
                print(e.explanation.decode('UTF-8'))

    # create containers on the swarm
    def createContainers(self):
        for name, config in self.nodes.items():
            try:
                expose = None
                if ('ports' in config):
                    # "host:container" strings -> {host: container} mapping
                    expose = dict(item.split(':')
                                  for item in self.nodes[name]['ports'])
                # NOTE(review): ports=config['expose'] reads an 'expose'
                # key although the check above looks at 'ports'; a node
                # with 'ports' but no 'expose' raises KeyError here —
                # confirm the intended config schema.
                container = self.cli.create_container(
                    image=config['image'],
                    ports=config['expose'],
                    name=name,
                    host_config=self.cli.create_host_config(
                        port_bindings=expose,
                        network_mode=self.network))
                # NOTE(review): result['Node'] is populated by Swarm;
                # a standalone daemon would not return it.
                result = self.cli.inspect_container(container=name)
                print("Created %s container on node %s"
                      % (name, result['Node']['Addr']))
                if (expose is not None):
                    print("Will expose ports " + str(expose) +
                          " on " + result['Node']['Addr'])
            except errors.APIError as e:
                print(e.explanation.decode('UTF-8'))

    # run the image with the given name
    def runImage(self, name):
        try:
            self.cli.start(container=name)
            result = self.cli.inspect_container(container=name)
            print('Container %s started on node %s'
                  % (name, result['Node']['Addr']))
        except errors.APIError as e:
            print(e.explanation.decode('UTF-8'))

    # start the application described by config file in dependancy order
    def start(self):
        print('**** Starting Application on Swarm ****')
        dependancy_list = self.genDependancyList()
        for name in dependancy_list:
            self.runImage(name)

    # stop images (from the config file) currently running on swarm in
    # reverse dependancy order
    def stopImage(self, container):
        try:
            print("Stopping image " + container)
            self.cli.stop(container)
        except errors.APIError as e:
            print(e.explanation.decode('UTF-8'))

    # stop the application described by config file
    def stop(self):
        print("**** Stopping Application *****")
        depend_list = self.genDependancyList()
        for name in reversed(depend_list):
            self.stopImage(name)

    def genDependancyList(self):
        """Topologically order node names so each node appears after every
        node it links to."""
        # find nodes with no dependancies
        starting_nodes = {name: config for name, config in self.nodes.items()
                          if 'links' not in config}
        # find nodes with dependancies
        remaining_nodes = {name: self.nodes[name]
                           for name in self.nodes.keys()
                           if name not in starting_nodes.keys()}
        dependancy_list = list(starting_nodes.keys())
        while len(remaining_nodes) > 0:
            # NOTE(review): if the 'links' graph has a cycle,
            # nextNodeRunning returns None and self.nodes[None] raises —
            # confirm configs are guaranteed acyclic.
            nextNode = self.nextNodeRunning(remaining_nodes, starting_nodes)
            dependancy_list.append(nextNode)
            remaining_nodes.pop(nextNode, None)
            starting_nodes[nextNode] = self.nodes[nextNode]
        return dependancy_list

    # returns the name of the node which can be started next
    # this is based on the remaining nodes and the nodes that have been started
    def nextNodeRunning(self, remaining_nodes, nodes_ran):
        for name, config in remaining_nodes.items():
            # if a nodes links are a subset of the nodes ran (dependancies
            # have been fullfiled)
            if set(config['links']).issubset(set(list(nodes_ran.keys()))):
                return name
class DockerOrchestration(object):
    """
    Class to perform operations on docker for various data source
    manipulations, updating configurations and handle users.

    Two clients are kept: the low-level API client (``self.docker_client``,
    unix socket) and the high-level client from ``docker.from_env``
    (``self.client``).
    """

    def __init__(self, headers, docker_host, docker_port, payload=None):
        """
        Initialize the docker connection object necessary to perform any
        configuration or object manipulation.

        :param headers: connection headers (non-empty dict, required)
        :param docker_host: docker hostname (non-empty str, required)
        :param docker_port: docker host port (required)
        :param payload: optional payload kept for later calls
        :raises ValueError: when a required argument is missing or mistyped
        """
        if not docker_host:
            raise ValueError(
                "Docker hostname value is %s missing. Instance cannot be initialized"
                % docker_host)
        if not docker_port:
            raise ValueError(
                "Docker hostport value is %s missing. Instance cannot be initialized"
                % docker_port)
        if not headers:
            raise ValueError(
                "Docker connection header value is missing. Instance cannot be initialized"
            )
        if not isinstance(docker_host, str):
            raise ValueError("Docker hostname must be of string type")
        if not isinstance(headers, dict):
            # FIX: message typo "deader" -> "header"
            raise ValueError("Docker header must be of dict type")
        self.headers = headers
        self.docker_host = docker_host
        self.docker_port = docker_port
        self.payload = payload
        self.docker_client = Client(base_url='unix:///var/run/docker.sock')
        self.client = docker.from_env(assert_hostname=False)

    def create_docker_container(self, command=None):
        """
        Create a new container on the VM.

        :param command: command to run (defaults to '/bin/sleep 30';
            previously the parameter was ignored)
        :return: container description dict
        """
        container = self.docker_client.create_container(
            image='busybox:latest', command=command or '/bin/sleep 30')
        print("Container created : %s" % container)
        return container

    def update_docker_container(self, container=None):
        """
        Update the resource configuration of an existing container.

        :param container: container id or name (new optional parameter;
            the old code called update_container() without the mandatory
            container argument, which always raised TypeError)
        :return: result of the update call
        """
        update_container = self.docker_client.update_container(container)
        print(update_container)
        return update_container

    def create_docker_volume(self, command=None):
        """
        Create and register a named volume.

        :param command: unused, kept for interface compatibility
        :return: volume description dict
        """
        volume = self.docker_client.create_volume(name='foobar',
                                                  driver='local',
                                                  driver_opts={
                                                      'foo': 'bar',
                                                      'baz': 'false'
                                                  })
        print(volume)
        return volume

    def create_docker_network(self, name_of_network, driver, options=None):
        """
        Create a network.

        :param name_of_network: name of the network to create
        :param driver: network driver (e.g. 'bridge', 'overlay')
        :param options: driver options dict
        :return: network description dict
        """
        # BUG FIX: the old code ignored all three parameters, hard-coded
        # name='foobar'/driver='local', and passed driver_opts= — a
        # volume-API keyword the network API does not accept.
        network = self.docker_client.create_network(name=name_of_network,
                                                    driver=driver,
                                                    options=options)
        print(network)
        return network

    def connect_container_to_network(self,
                                     container_id_or_name=None,
                                     network_id=None):
        """
        Connect a container to a network.

        :return: result of the API call
        """
        # BUG FIX: old code passed the literal strings '%s' and then
        # applied % to the call's return value (None) -> TypeError.
        network = self.docker_client.connect_container_to_network(
            container_id_or_name, network_id)
        print(network)
        return network

    def disconnect_container_to_network(self,
                                        container_id_or_name=None,
                                        network_id=None):
        """
        Disconnect a container from a network.

        :return: result of the API call
        """
        # BUG FIX: same literal-'%s' bug as connect_container_to_network.
        network = self.docker_client.disconnect_container_from_network(
            container_id_or_name, network_id)
        print(network)
        return network

    def docker_version(self):
        """Return the version info of the docker daemon."""
        version = self.docker_client.version()
        return version

    def get_docker_volumes(self):
        """List the volumes currently registered by the docker daemon."""
        volumes = self.docker_client.volumes()
        print(volumes)
        return volumes

    def get_docker_version(self, client):
        """Return version info through the high-level client.

        :param client: unused, kept for interface compatibility
        """
        version_number = self.client.version()
        print(version_number)
        return version_number

    def create_container_with_mount_docker_tempfs(self, name=None, cmd=None,
                                                  payload=None):
        """
        Create a container with the specified paths mounted as tmpfs.

        :param name: container name
        :param cmd: command to run (defaults to '/bin/sleep 30')
        :param payload: list of tmpfs paths (defaults to ['/cache'])
        :return: container description dict
        """
        # BUG FIX: old code called create_container() without the mandatory
        # image argument and never mounted any tmpfs.
        container = self.docker_client.create_container(
            image='busybox:latest',
            command=cmd or '/bin/sleep 30',
            name=name,
            host_config=self.docker_client.create_host_config(
                tmpfs=payload or ['/cache']))
        return container

    def build_docker(self, dockerfile=None):
        """
        Build an image from an in-memory Dockerfile string.

        :param dockerfile: Dockerfile content as a str
        :return: list of build output lines
        """
        f = BytesIO(dockerfile.encode('utf-8'))
        response = [
            line for line in self.docker_client.build(
                fileobj=f, rm=True, tag='yourname/volume')
        ]
        print(response)
        return response

    def get_docker_images(self):
        """List the docker images."""
        image_list = self.docker_client.images()
        print(image_list)
        return image_list

    def get_docker_image(self, docker_image_name=None):
        """
        Save an image from the docker daemon to a tar file.

        :param docker_image_name: image reference (defaults to
            'fedora:latest'; previously the parameter was ignored)
        """
        image = self.docker_client.get_image(docker_image_name
                                             or "fedora:latest")
        # BUG FIX: the tar stream is bytes — open in binary mode, and use a
        # context manager so the file is closed even on error.
        with open('/tmp/fedora - latest.tar', 'wb') as image_tar:
            image_tar.write(image.data)

    def import_docker_image(self):
        """Import a docker image (all import_image args at defaults)."""
        image_list = self.docker_client.import_image()
        print(image_list)
        return image_list

    def import_docker_image_from_data(self, data=None):
        """Import an image from in-memory data.

        :param data: image data (new optional parameter; the old code
            omitted the mandatory argument and always raised TypeError)
        """
        image_list = self.docker_client.import_image_from_data(data)
        print(image_list)
        return image_list

    def import_docker_image_from_image(self, image=None):
        """Import an image from an existing image reference.

        :param image: source image (new optional parameter, see
            import_docker_image_from_data)
        """
        image_list = self.docker_client.import_image_from_image(image)
        print(image_list)
        return image_list

    def import_docker_image_from_stream(self, stream=None):
        """Import an image from a stream.

        :param stream: readable stream (new optional parameter, see
            import_docker_image_from_data)
        """
        image_list = self.docker_client.import_image_from_stream(stream)
        print(image_list)
        return image_list

    def import_docker_image_from_url(self, url=None):
        """Import an image from a URL.

        :param url: source URL (new optional parameter, see
            import_docker_image_from_data)
        """
        image_list = self.docker_client.import_image_from_url(url)
        print(image_list)
        return image_list

    def get_docker_networks(self):
        """Get the list of networks known to the daemon."""
        network_list = self.docker_client.networks()
        print(network_list)
        return network_list

    def push_docker_image_to_repository(self, dockerfile=None):
        """
        Push the built image to the registry.

        :param dockerfile: unused, kept for interface compatibility
        :return: list of push output lines
        """
        # BUG FIX: old code duplicated build_docker() (it built instead of
        # pushing); push the tag that build_docker produces.
        response = [
            line
            for line in self.docker_client.push('yourname/volume',
                                                stream=True)
        ]
        print(response)
        return response

    def pull_docker_image_to_repository(self, dockerfile=None):
        """
        Pull the 'busybox' image, printing progress as formatted JSON.

        :param dockerfile: unused, kept for interface compatibility
        """
        for line in self.docker_client.pull('busybox', stream=True):
            print(json.dumps(json.loads(line), indent=4))

    def search_docker_image(self, docker_image_name=None):
        """
        Search the registry for an image.

        :param docker_image_name: term to search for
        :return: the full search response (first two entries are printed)
        """
        # BUG FIX: old code searched for the literal string '%s' and then
        # applied % to the resulting list -> TypeError.
        response = self.docker_client.search(docker_image_name)
        print(response[:2])
        return response

    def start_docker_container(self, docker_image_name=None,
                               docker_command=None):
        """
        Create a busybox container and start it.

        :param docker_image_name: unused, kept for interface compatibility
        :param docker_command: unused, kept for interface compatibility
        :return: result of the start call
        """
        container = self.docker_client.create_container(
            image='busybox:latest', command='/bin/sleep 30')
        response = self.docker_client.start(container=container.get('Id'))
        print(response)
        return response

    def restart_docker_container(self, container_dict=None, time_out=None):
        """
        Restart a container, waiting up to time_out before killing it.

        :param container_dict: container id/name or dict with 'Id'
        :param time_out: seconds to wait for stop before killing
        :return: result of the restart call
        """
        response = self.docker_client.restart(container_dict, time_out)
        print(response)
        return response

    def check_status_for_docker_container(self, container_name=None):
        """
        Stream statistics for a container.

        :param container_name: container to watch (defaults to
            'elasticsearch'; previously the parameter was ignored)

        NOTE: stats() streams indefinitely while the container runs; this
        loop does not terminate on its own.
        """
        stats_obj = self.docker_client.stats(container_name
                                             or 'elasticsearch')
        # self.docker_client.stop('elasticsearch')
        for stat in stats_obj:
            print(stat)

    def remove_docker_image(self, dockerfile=None):
        """
        Remove an image from the daemon.

        :param dockerfile: image reference to remove (defaults to
            'yourname/app')
        :return: result of the remove call
        """
        # BUG FIX: old code pushed 'yourname/app' instead of removing it.
        response = self.docker_client.remove_image(dockerfile
                                                   or 'yourname/app')
        print(response)
        return response