def instalar(self, name, template):
    """Run a template's command list inside the named container.

    Loads ``Templates/<template>.json`` (expected shape: {"comandos": [...]})
    and executes each command in the container over SSH, printing the output.

    :param name: container name
    :param template: template file stem under Templates/
    """
    with open("Templates/%s.json" % template, "r") as f:
        comandos = json.load(f)  # idiom: json.load on the file object
    d = Docker()
    s = SSH()
    for c in comandos.get("comandos"):
        # py3 fix: print as a function (original used the py2 statement form)
        print(s.exec_command(d.execCommand(name, c)))
def docker(command):
    """Execute a docker command.

    Example: sc docker run <container id>
    """
    # Build the wrapper, validate the command, then run it.
    process = Docker(command)
    process.sanity_check()
    process.do_command()
def test_restart_instance(self, mocker, executor, printing): self.mock_executor.return_value.execute_async.return_value.expect.return_value = 1 # didn't find test_net self.mock_executor.return_value.execute_sync_with_output.side_effect = [ ('', 'removed container'), ('', 'created network') ] subject = Docker('mario', 27) subject.restart_instance() assert self.mock_print.mock_calls == [ mocker.call('\tRemoving docker container mario'), mocker.call('removed container'), mocker.call('\tCreating the docker network test_net'), mocker.call('created network') ] assert self.mock_executor.return_value.execute_sync_with_output.mock_calls == [ mocker.call(['docker', 'container', 'rm', '--force', 'mario']), mocker.call([ 'docker', 'network', 'create', '--subnet', '172.20.0.0/16', 'test_net' ]), ] self.mock_executor.return_value.execute_async.assert_called_with( ['docker', 'network', 'list', '--filter', 'name=test_net']) self.mock_executor.return_value.execute_async.return_value.expect.assert_called_with( ['test_net', pexpect.EOF], timeout=None)
def main(socket, filter, notifier, **kwargs):
    """Watch docker events and forward container create/die notifications.

    :param socket: docker daemon socket handed to Docker()
    :param filter: optional fnmatch-style pattern; only matching container
        names are notified (falsy means "notify everything")
    :param notifier: notifier factory, instantiated with **kwargs
    """
    import re  # local import: needed to compile the translated pattern

    regex = None
    if filter:
        # BUG FIX: fnmatch.translate() returns a regex *string*, which has
        # no .match() method — it must be compiled first.
        regex = re.compile(fnmatch.translate(filter))
    docker = Docker(socket)
    push = notifier(**kwargs)
    names = {}
    for event in docker.events():
        if event["status"] == "create":
            container_id = event["id"]
            res = docker.inspect(container_id)
            name = res["Name"][1:]  # docker prefixes names with '/'
            names[container_id] = name
            if not regex or regex.match(name):
                push.create(container_id, name)
        if event["status"] == "die":
            container_id = event["id"]
            # container may have been created before we started watching
            name = names.get(container_id)
            # guard: regex.match(None) would raise TypeError
            if not regex or (name is not None and regex.match(name)):
                push.die(container_id, name)
            names.pop(container_id, None)
def test_node_property(self, node_docker_commands):
    """The node property builds the patched node-docker-commands object
    with the machine name and the get_external_ip callable, and delegates
    shell() to it."""
    subject = Docker('mario', 27)
    subject.node.shell()
    # note: get_external_ip is passed as a bound callable, not called here
    self.mock_node_docker_commands.assert_called_with(
        subject.machine_name(), subject.get_external_ip)
    self.mock_node_docker_commands.return_value.shell.assert_called_with()
def generate_cluster_info(self, cluster_name, ambari_server_fqdn_ip_pairs, service_server_fqdn_ip_pairs, ambari_agent_fqdn_ip_pairs, docker_num):
    """
    generate VM and docker info for this cluster
    set up parameter of the class instance as this info
    :param cluster_name: the name of the cluster
    :param ambari_server_fqdn_ip_pairs: the domain name and IP pairs for Ambari-server
    :param service_server_fqdn_ip_pairs: the domain name and IP pairs for VMs with Ambari-agent installed
    :param ambari_agent_fqdn_ip_pairs: the domain name and IP pairs for VM with Docker containers
    :param docker_num: the number of Dockers inside each VMs
    :return: None
    """
    weave_ip_base = Config.ATTRIBUTES["weave_ip_base"]
    weave_ip_mask = Config.ATTRIBUTES["weave_ip_mask"]
    current_ip = weave_ip_base

    def _build_plain_vm(ip, vm_domain_name, vm_ip):
        # A plain VM consumes two consecutive weave IPs: the first becomes
        # its weave DNS address, the second its weave-internal address.
        # (Extracted: the server/service loops below were verbatim copies.)
        ip = self._increase_ip(ip, 1)
        vm = VM(vm_ip, vm_domain_name, ip, weave_ip_mask)
        ip = self._increase_ip(ip, 1)
        vm.weave_internal_ip = ip
        return vm, ip

    for vm_domain_name, vm_ip in ambari_server_fqdn_ip_pairs:
        vm, current_ip = _build_plain_vm(current_ip, vm_domain_name, vm_ip)
        self.ambari_server_vm.append(vm)

    for vm_domain_name, vm_ip in service_server_fqdn_ip_pairs:
        vm, current_ip = _build_plain_vm(current_ip, vm_domain_name, vm_ip)
        self.service_server_vm_list.append(vm)

    # Agent VMs take one weave DNS IP for the VM itself, then one weave IP
    # per docker container hosted on it.
    vm_index = 0
    for vm_domain_name, vm_ip in ambari_agent_fqdn_ip_pairs:
        current_ip = self._increase_ip(current_ip, 1)
        vm = VM(vm_ip, vm_domain_name, current_ip, weave_ip_mask)
        for docker_index in range(0, docker_num):
            current_ip = self._increase_ip(current_ip, 1)
            # index is globally unique across all agent VMs
            total_docker_index = vm_index * docker_num + docker_index
            docker_domain_name = Docker.get_weave_domain_name(cluster_name, total_docker_index)
            vm.add_docker(Docker(current_ip, str(weave_ip_mask), docker_domain_name))
        vm_index += 1
        self.ambari_agent_vm_list.append(vm)
# NOTE(review): this span is the tail of a method whose `def` line is not
# visible here — `cluster_name` and `current_ip` are bound earlier in it.
self.cluster_name = cluster_name
self.create_time = str(datetime.datetime.now())
self.state = Cluster.STATE_FREE
# update config file.
# This step makes the user avoid reconfiguring the IP for next cluster creation
Config.update("weave", "weave_ip_base", current_ip)
def runChapter(d):
    """Run the chapter-2 example containers through the Docker wrapper *d*.

    Starts a mailer, creates (then starts) an nginx web container and an
    agent container linked to both, logging every returned command id.

    :param d: Docker wrapper exposing run/create/start/print_command_id
    """
    mailer_cid = d.run(['-d', 'dockerinaction/ch2_mailer'])
    # consistency fix: call print_command_id on the instance like every
    # other call in this function (the original used the Docker class here)
    d.print_command_id('running mailer', mailer_cid)
    web_cid = d.create(['nginx'])
    d.print_command_id('created web', web_cid)
    agent_cid = d.create(
        ['--link', '%s:insideweb' % web_cid,
         '--link', '%s:insidemailer' % mailer_cid,
         'dockerinaction/ch2_agent'])
    d.print_command_id('created agent', agent_cid)
    web_rid = d.start(web_cid)
    d.print_command_id('running web', web_rid)
    agent_rid = d.start(agent_cid)
    d.print_command_id('running agent', agent_rid)
def contratar(self, name, template):
    """Provision a new service: create the container over SSH, install the
    template's commands, then print the container's bridge IP address.

    :param name: container name
    :param template: template file stem passed to instalar()
    """
    d = Docker()
    s = SSH()
    s.exec_command(d.createContainer(name))
    # method of this same class (translated from 'metodo da propria classe')
    self.instalar(name, template)
    endereco = s.exec_command(d.getContainerAddress(name))
    endereco = json.loads(endereco)
    # py3 fix: print as a function (single parenthesized argument, so this
    # also behaves identically under py2)
    print(endereco[0].get("NetworkSettings")
          .get("Networks")
          .get("bridge")
          .get("IPAddress"))
def test_start_instance_when_network_exists(self, executor, printing): self.mock_executor.return_value.execute_async.return_value.expect.return_value = 0 # found test_net subject = Docker('mario', 27) subject.start_instance() self.mock_executor.return_value.execute_async.assert_called_with( ['docker', 'network', 'list', '--filter', 'name=test_net']) self.mock_executor.return_value.execute_async.return_value.expect.assert_called_with( ['test_net', pexpect.EOF], timeout=None) assert self.mock_print.call_count == 0 assert self.mock_executor.return_value.execute_sync_with_output.call_count == 0
def test_stop_instance(self, mocker, executor, printing):
    """stop_instance removes the container, prints progress, and clears
    the node descriptor."""
    # the executor yields a pair whose second element is what gets printed
    self.mock_executor.return_value.execute_sync_with_output.return_value = (
        '', 'removed container')
    subject = Docker('mario', 27)
    subject.stop_instance()
    assert self.mock_print.mock_calls == [
        mocker.call('\tRemoving docker container mario'),
        mocker.call('removed container')
    ]
    self.mock_executor.return_value.execute_sync_with_output.assert_called_with(
        ['docker', 'container', 'rm', '--force', 'mario'])
    # stopping resets the node descriptor to the empty string
    assert subject.node.descriptor == ''
def generate_cluster_info(self, VM_IP_list, cluster_name, docker_num):
    """Build the VM/docker topology for a new cluster.

    For every VM IP, creates a VM holding ``docker_num`` Docker entries;
    each docker gets a sequential IP derived from Docker_IP_base and a
    hostname of the form ``<cluster>-<vm index>-<docker index>``.

    :param VM_IP_list: list of VM IP strings
    :param cluster_name: cluster name used as the hostname prefix
    :param docker_num: dockers per VM
    """
    config = Config()
    config.load()
    Docker_IP_base = config.ATTRIBUTES["Docker_IP_base"].split(".")
    Docker_IP_mask = config.ATTRIBUTES["Docker_IP_mask"]
    # enumerate replaces the hand-maintained VM_index counter
    for VM_index, VM_IP in enumerate(VM_IP_list):
        vm = VM(VM_IP)
        for Docker_index in range(0, docker_num):
            # index is globally unique across all VMs of the cluster
            total_Docker_index = VM_index * docker_num + Docker_index
            docker_IP = self.__increase_IP__(Docker_IP_base, total_Docker_index)
            # join all octets (assumes 4, as the original's explicit
            # [0]..[3] concatenation did)
            docker_IP_str = ".".join(str(octet) for octet in docker_IP)
            docker_hostname = "%s-%s-%s" % (cluster_name, VM_index, Docker_index)
            vm.add_docker(Docker(docker_IP_str, str(Docker_IP_mask), docker_hostname))
        self.VM_list.append(vm)
    self.VMs_num = len(VM_IP_list)
    self.cluster_name = cluster_name
def test_construction_executor(self, executor):
    """Constructing a Docker builds its executor via the patched factory."""
    self.mock_executor.return_value = 'executor instance'
    instance = Docker('mario', 27)
    # factory called with no arguments; its product is stored as-is
    self.mock_executor.assert_called_with()
    assert instance.executor == 'executor instance'
def configure_dockers(self):
    """Attach a docker container to every bee VM, pre-fetch the image once,
    then boot sshd inside each container and sync uid/gid."""
    for bee_vm in self.__bee_vm_list:
        bee_vm.add_docker_container(Docker(self.__docker_conf))
    # the first VM drives the image fetch for the whole list in parallel
    self.__bee_vm_list[0].get_docker_img(self.__bee_vm_list)
    for bee_vm in self.__bee_vm_list:
        bee_vm.start_docker("/usr/sbin/sshd -D")
        bee_vm.docker_update_uid()
        bee_vm.docker_update_gid()
def __init__(self):
    # run flag: the console loops while this stays True
    self.running = True
    # cache of completion candidates used by completer()
    self.matches = []
    self.docker = Docker()
    # built-in console commands; anything else is forwarded to self.docker
    self.commands = {
        'select': self.select_command,
        'history': self.history_command,
        'clear': self.clear_command,
        'exit': self.exit_command,
    }
    self.history = os.path.join(os.environ['HOME'], '.docker_ctl_history')
    #tab completion
    readline.parse_and_bind('tab: complete')
    # history
    self.init_history()
    # completer
    readline.set_completer(self.completer)
    # save history when exiting
    atexit.register(readline.write_history_file, self.history)
def configure_dockers(self):
    """Give each AWS node a docker container, let the master node fetch the
    image for everyone, then start sshd and align uid/gid to 1000."""
    for node in self.__bee_aws_list:
        node.add_docker_container(Docker(self.__docker_conf))
    master = self.__bee_aws_list[0]
    master.get_docker_img(self.__bee_aws_list)
    for node in self.__bee_aws_list:
        node.start_docker("/usr/sbin/sshd -D")
        node.docker_update_uid(1000)
        node.docker_update_gid(1000)
def get_docker_instances():
    """Build one Docker instance per pokemon name from pokemon_name.json.

    Indices are assigned sequentially starting at 2, matching the original
    hand-maintained counter; only the first 253 instances are returned
    (cut-off kept from the original — reason unknown, TODO confirm).
    """
    with open('pokemon_name.json', 'r') as pokemon_names:
        names = json.load(pokemon_names)
    # comprehension replaces the index counter + extend([single item])
    return [Docker(name, index) for index, name in enumerate(names, start=2)][:253]
def handle_message(self, body, message):
    """Evaluate one submission from the queue and record its result.

    Fetches the test/solution files for the submission, runs them in the
    docker evaluator, parses the JSON verdict, and acks the message once
    the submission row is updated.
    """
    print('Received message: {0!r}'.format(body))
    submission_id = body['args'][0]
    test_f, sol_f, lang = get_test_and_solution(submission_id)
    docker = Docker()
    # close both handles even if docker.run raises (the original leaked them)
    with open('/app/evaluator_tests/' + test_f) as test, \
            open('/app/evaluator_submissions/' + sol_f) as solution:
        out = docker.run(lang, test, solution)
    metadata = json.loads(out)
    # same semantics as the original try/except KeyError: a missing key
    # yields score 0; a present-but-falsy score still fails
    score = metadata.get('score', 0)
    status = 'P' if score else 'F'
    if update_submission(submission_id, score, out, status):
        message.ack()
def test_start_instance_when_network_does_not_exist( self, mocker, executor, printing): self.mock_executor.return_value.execute_async.return_value.expect.return_value = 1 # didn't find test_net self.mock_executor.return_value.execute_sync_with_output.return_value = ( '', 'created test_net') subject = Docker('mario', 27) subject.start_instance() self.mock_executor.return_value.execute_async.assert_called_with( ['docker', 'network', 'list', '--filter', 'name=test_net']) self.mock_executor.return_value.execute_async.return_value.expect.assert_called_with( ['test_net', pexpect.EOF], timeout=None) assert self.mock_print.mock_calls == [ mocker.call('\tCreating the docker network test_net'), mocker.call('created test_net') ] self.mock_executor.return_value.execute_sync_with_output.assert_called_with( [ 'docker', 'network', 'create', '--subnet', '172.20.0.0/16', 'test_net' ])
def test_stop_last_instance(self, mocker, executor, printing, last_instance): self.mock_executor.return_value.execute_async.return_value.expect.return_value = 0 # found test_net self.mock_executor.return_value.execute_sync_with_output.return_value = ( '', 'removed container') subject = Docker('mario', 27) subject.stop_instance() assert self.mock_print.mock_calls == [ mocker.call('\tRemoving docker container mario'), mocker.call('removed container'), mocker.call('\tRemoving docker network test_net') ] assert self.mock_executor.return_value.execute_sync_with_output.mock_calls == [ mocker.call(['docker', 'container', 'rm', '--force', 'mario']), mocker.call(['docker', 'network', 'remove', 'test_net']) ] self.mock_executor.return_value.execute_async.assert_called_with( ['docker', 'network', 'list', '--filter', 'name=test_net']) self.mock_executor.return_value.execute_async.return_value.expect.assert_called_with( ['test_net', pexpect.EOF], timeout=None)
def print_description(self): print "cluster name: ", self.cluster_name print "create time: ", self.create_time print "state: ", self.state print print "Ambari Server: " ambari_server_vm = self.get_ambari_server_vm() if ambari_server_vm is None: print "None" else: print ambari_server_vm.domain_name, " ", ambari_server_vm.external_ip, " ",\ ambari_server_vm.weave_internal_ip print print "Service Server with Ambari Agent directly installed: " if len(self.service_server_vm_list) == 0: print "None" for vm in self.service_server_vm_list: print vm.weave_domain_name, " ", vm.external_ip, " ", vm.weave_internal_ip print print "Ambari Agent in Docker Container: " int_list = [] for vm in self.ambari_agent_vm_list: for docker in vm.docker_list: int_list.append(int(docker.get_index())) interval_list = self._get_int_interval(int_list) for interval in interval_list: interval_str = "" if interval[0] == interval[1]: interval_str = str(interval(0)) else: interval_str = "[{0}-{1}]".format(interval[0], interval[1]) print Docker.get_pattern_presentation(self.cluster_name, interval_str) print
def load_cluster_info(self, filename):
    """Load a previously saved cluster layout from *filename*.

    Expected format: two header lines ("<label> <cluster name>" and
    "<label> <vm count>"), then per VM one IP line, one docker-count line,
    and one "<IP>/<mask> <hostname>" line per docker.
    """
    # with-block replaces the manual close(); `f` no longer shadows the
    # (py2) builtin `file`; next(f) replaces py2-only f.next()
    with open(filename) as f:
        self.cluster_name = next(f).split()[1]
        self.VMs_num = int(next(f).split()[1])
        for VM_index in range(0, self.VMs_num):
            vm = VM(next(f).split()[1])
            docker_num = int(next(f).split()[1])
            for Docker_index in range(0, docker_num):
                addr, hostname = next(f).split()[0], next(f.__iter__() if False else iter([])) if False else (None, None)
                # (unreachable guard removed below — see real parsing)
                pass
            self.VM_list.append(vm)
def load_from_json(json_data):
    """
    load the VM information from a JSON object
    :param json_data: a map, which is a JSON object
    :return: a VM object
    """
    vm = VM(json_data["external_ip"],
            json_data["domain_name"],
            json_data["weave_dns_ip"],
            json_data["weave_ip_mask"])
    # rebuild every docker entry attached to this VM
    vm.docker_list = [Docker.load_from_json(item)
                      for item in json_data["docker_list"]]
    vm.weave_internal_ip = json_data["weave_internal_ip"]
    vm.weave_domain_name = json_data["weave_domain_name"]
    return vm
def listar_servicos(self):
    """List the service containers over SSH and print the raw output."""
    d = Docker()
    s = SSH()
    # py3 fix: print() function instead of the py2 print statement
    print(s.exec_command(d.listContainer()))
import os
from docker import Docker
from docker import Repl

# Demonstrates a named local volume shared between containers: a writer
# container fills /data, a throwaway reader proves the data is visible.
docker = Docker('logging-example')
repl = Repl(docker)
try:
    volume_name = 'logging-example'
    docker.volumeCreate(['--driver', 'local', volume_name])
    # detached writer appends to the volume mounted at /data
    docker.run([
        '--name', 'plath', '-d',
        '--mount', f'type=volume,src={volume_name},dst=/data',
        'dockerinaction/ch4_writer_a'
    ])
    # one-shot reader: head the log written by the container above
    some_data = docker.run([
        '--rm',
        '--mount', f'type=volume,src={volume_name},dst=/data',
        'alpine:latest', 'head', '/data/logA'
    ])
    print(f'Did we get data?\n{some_data}')
    log_a_path = docker.volumeInspect(
        ['--format', '"{{json .Mountpoint}}"', volume_name])
    print(f'Logging file source path:\n{log_a_path}')
    repl.cmdloop('Logging...')
except Exception as err:
    print(f'Error running logging example\n{err}')
    # NOTE(review): reconstructed from a collapsed line — cleanup appears to
    # run only on failure here; confirm it shouldn't be in a finally block
    docker.cleanup()
def test_get_external_ip(self):
    """Instance index 27 maps to host 27 on the 172.20.0.0/16 subnet."""
    docker = Docker('mario', 27)
    assert docker.get_external_ip() == '172.20.0.27'
def cancelar(self, name):
    """Remove the named container over SSH and print the command output."""
    d = Docker()
    s = SSH()
    # py3 fix: print() function instead of the py2 print statement
    print(s.exec_command(d.removeContainer(name)))
def cancelar(self, name):
    """Cancel the service: remove the container with the given name."""
    Docker().removeContainer(name)
def test_construction_properties(self):
    """The constructor stores the machine name and instance index."""
    docker = Docker('mario', 27)
    assert docker.machine_name() == 'mario'
    assert docker.instance_index == 27
class Console:
    """Interactive readline-backed shell around a Docker wrapper.

    Built-in commands (select/history/clear/exit) are handled locally;
    anything else is forwarded to the Docker object. Python 2 module:
    raw_input and dict.keys() list concatenation are kept as-is.
    """

    def __init__(self):
        self.running = True          # start_daemon loops while True
        self.matches = []            # completion candidates cache
        self.docker = Docker()
        # built-in commands; unknown input is delegated to self.docker
        self.commands = {
            'select': self.select_command,
            'history': self.history_command,
            'clear': self.clear_command,
            'exit': self.exit_command,
        }
        self.history = os.path.join(os.environ['HOME'], '.docker_ctl_history')
        # tab completion
        readline.parse_and_bind('tab: complete')
        # history
        self.init_history()
        # completer
        readline.set_completer(self.completer)
        # save history when exiting
        atexit.register(readline.write_history_file, self.history)

    # Initialize history
    def init_history(self):
        """Load persisted readline history; ignore a missing/unreadable file."""
        try:
            readline.read_history_file(self.history)
            # max history length
            readline.set_history_length(1000)
        except:  # the history file may not exist yet; keep best-effort
            pass

    # daemon
    def start_daemon(self):
        """Main read-eval loop; exits on Ctrl-C, empty input, or exit."""
        while self.running:
            try:
                directive = self.listen()
            except KeyboardInterrupt:
                print('')  # print a '\n'
                break
            directive = directive.strip()
            if directive == '':
                break
            # tab char replace to space
            directive = directive.replace("\t", ' ')
            cmd = ''
            args = ''
            # split "cmd rest-of-args" on the first space
            index = directive.find(' ', 0)
            if index == -1:
                cmd = directive
            else:
                cmd = directive[0:index]
                args = directive[index:].strip()
            # fix: `in` replaces py2-only dict.has_key (removed in py3)
            if cmd in self.commands:
                handler = self.commands[cmd]
                handler(args)
            else:
                self.docker.call(cmd, args)

    # Stop the daemon
    def stop_daemon(self):
        self.running = False

    # Listen the input
    def listen(self):
        """Prompt (showing the current container when inside one) and read
        one line. raw_input: this module runs under Python 2."""
        if self.docker.in_container():
            prompt = "docker(%s\033[0;33m@@%s\033[0m)# " % (
                self.docker.container.name, self.docker.container.id)
        else:
            prompt = 'docker> '
        return raw_input(prompt)

    # Auto complete
    def completer(self, text, state):
        """readline completion hook: state 0 recomputes the candidate list,
        later states walk it."""
        if state == 0:
            commands = []
            if self.docker.in_container():
                # container's commands
                commands = self.docker.container.commands.keys()
            else:
                # docker-ctl and docker's commands
                commands = self.commands.keys() + self.docker.commands.keys()
            if text == '':
                self.matches = commands
            else:
                self.matches = self.matches_generator(text, commands)
        try:
            return self.matches[state]
        except IndexError:
            pass
        return None

    def matches_generator(self, prefix, commands):
        """Return the commands that start with *prefix*."""
        matches = []
        length = len(prefix)
        for cmd in commands:
            if cmd.startswith(prefix, 0, length):
                matches.append(cmd)
        return matches

    # Invalid operation
    def invalid_operation(self, cmd, args):
        print('Invalid operation: %s %s' % (cmd, args))

    # Command: select
    def select_command(self, args):
        if args != '':
            self.docker.enter_container(args)
        else:
            print('Invalid container')

    # Command: history
    def history_command(self, args):
        if args == '':
            # renamed from `len`, which shadowed the builtin
            count = readline.get_current_history_length()
            for index in range(1, count + 1):
                print('%s' % readline.get_history_item(index))
        elif args == '--clear':
            readline.clear_history()
        else:
            self.invalid_operation('history', args)

    # Command: clear
    def clear_command(self, args):
        if args == '':
            Process('clear').execute(True)
        else:
            self.invalid_operation('clear', args)

    # Command: exit
    def exit_command(self, args):
        if args == '':
            if self.docker.in_container():
                self.docker.exit_container()
            else:
                self.stop_daemon()
        elif args == '--all':
            self.stop_daemon()
        else:
            self.invalid_operation('exit', args)
class Modulator:
    """Socket.IO backend driving per-slot DVB-T modulator playback, with an
    optional docker-based playback path per slot.

    NOTE(review): reconstructed from a collapsed one-line source; statement
    grouping inside _get_mediainfo/_play/_stop follows the parallel _stop
    structure — confirm against the original layout.
    """

    def __init__(self):
        self.config = Config()
        self.util = Utility()
        self.docker = Docker()

    def _load_configuration_dict(self, node):
        # per-node JSON blob loaded through the shared utility
        return self.util._load_json(node)

    def _get_players(self):
        socketio.emit('players', {'players': self.config.players})

    def _set_parameters(self, nc):
        # one configuration dict per node index
        d = {}
        for n in range(int(nc['node_counts'])):
            d[n] = self._load_configuration_dict(n)
        socketio.emit('parms', d)

    def _filter(self, k):
        def walk(k):
            # case-insensitive substring match over the ts file list
            with Config().tsfiles.open("r") as f:
                ts = [
                    line.rstrip('\n') for line in f
                    if k['name'].lower() in line.lower()
                ]
            ts.sort()
            return ts
        socketio.emit('report_filter', {'node': k['node'], 'ts': walk(k)})

    def _watch(self):
        """Report play/docker state for every slot, plus current player names."""
        cnts = self.config.nodecounts
        docker_state = ['0' for i in range(cnts)]
        # a slot is "playing" when its marker file exists
        state = [path(f"modulator.playing.{b}").exists() for b in range(cnts)]
        for b in range(cnts):
            d = self._load_configuration_dict(str(b))
            docker_state[b] = d['docker']
        socketio.emit('report_watch', {
            'state': state,
            'docker_state': docker_state
        })
        for i, b in enumerate(state):
            if b:
                player = f"player{i+1}"
                socketio.emit(
                    f"{player}_name",
                    {player: path(f"modulator.playing.{i}").read_text()})

    def _get_mediainfo(self, ts):
        """Emit the overall bitrate of a ts file, capped at 23052768."""
        cmd = ("mediainfo --Output=\"General;%OverallBitRate%\""
               f" \"{ts['filename']}\"")
        rate = str(self.util._execute(cmd)).strip()
        if len(rate) > 0:
            res = int(rate)
            if res > 0:
                # cap anything above 40 Mbit/s at this fixed playrate
                if res > (4 * 10**7):
                    res = 23052768
                socketio.emit('report_mediainfo', {'playrate': res})

    def _docker_state(self, d):
        # persist the docker on/off flag into the node's JSON config
        n = self.util._load_json(d['node'])
        n.update({'docker': d['docker']})
        self.util._dump_json(d['node'], n)

    def _set_overtime(self, ot):
        n = self.util._load_json(ot['node'])
        n.update({'ot': ot['ot']})
        self.util._dump_json(ot['node'], n)

    def _play(self, n):
        """Start playback on a slot — via docker when enabled, otherwise via
        the ModulatorConsole command line."""
        n = json.loads(n)
        slot, docker, rf = n["slot"], n["docker"], n['rf']
        cmd = (f"{self.config.modhome}/ModulatorConsole "
               f"dvb-t "
               f"-r {self.config.ipaddr} "
               f"--slot {slot} "
               f"--playback play "
               f"--mode loop "
               f"--file \"{n['file']}\" "
               f"--bw {n['bw']} "
               f"--const {n['const']} "
               f"--coderate {n['coderate']} "
               f"--guard {n['guard']} "
               f"--tx {n['tx']} "
               f"--cell-id {n['cellid']} "
               f"--playrate {n['playrate']} "
               f"--rf {int(rf) * 10**6} ")
        if int(docker):
            print("docker is enabled")
            result = self.docker.run(int(n['slot']))
            print('docker result:', result)
            socketio.emit('report_docker_start', {
                'status': result,
                'slot': slot
            })
        else:
            res = self.util._execute(cmd)
        # record who is playing on this slot and persist its parameters
        path(f"modulator.playing.{slot}").write_text(n["player"])
        self.util._dump_json(slot, n)
        socketio.emit(
            'response',
            {'status': ['PLAYING', int(n['slot']), n['player'], n['docker']]})

    def _stop(self, n):
        """Stop playback on a slot, tearing down its docker first if used."""
        n = json.loads(n)
        slot = n["slot"]
        cmd = (f"{self.config.modhome}/ModulatorConsole "
               f"dvb-t "
               f"-r {self.config.ipaddr} "
               f"--slot {n['slot']} "
               f"--playback stop ")
        d = self.util._load_json(slot)
        docker = d['docker']
        if int(docker):
            result = self.docker.stop(n)
            print('docker result:', result)
            d.update({'docker': '0'})
            self.util._dump_json(slot, d)
            socketio.emit('report_docker_stop', {
                'status': result,
                'slot': slot
            })
        # the console stop command runs regardless of the docker path
        res = self.util._execute(cmd)
        path(f"modulator.playing.{n['slot']}").unlink()
        socketio.emit('response', {'status': ['STOPPED', int(n['slot']), '', 0]})
import os
from docker import Docker
from docker import Repl
from docker import Image

# Build an image whose tag and VERSION build-arg come from one variable.
docker = Docker('version')
image = Image(docker)
try:
    version = 0.6
    image_id = image.build([
        f'dockerinaction/mailer-base:{version}', '-f', 'mailer-base.df',
        '--build-arg', f'VERSION={version}', './resources/ch8/mailer-base'
    ])
    print(f'Finished creating docker image: {image_id}')
except Exception as err:
    print(f'Error running arg_version example\n{err}')
    # NOTE(review): reconstructed from a collapsed line — cleanup appears to
    # run only on failure (keeps the built image on success); confirm
    image.cleanup()
import subprocess
from time import sleep

from docker import Docker


def runChapter(d):
    """Run the chapter-2 example containers through the Docker wrapper *d*.

    Starts a mailer, creates (then starts) an nginx web container and an
    agent container linked to both, logging every returned command id.
    """
    mailer_cid = d.run(['-d', 'dockerinaction/ch2_mailer'])
    # consistency fix: call print_command_id on the instance like every
    # other call in this function (the original used the Docker class here)
    d.print_command_id('running mailer', mailer_cid)
    web_cid = d.create(['nginx'])
    d.print_command_id('created web', web_cid)
    agent_cid = d.create(
        ['--link', '%s:insideweb' % web_cid,
         '--link', '%s:insidemailer' % mailer_cid,
         'dockerinaction/ch2_agent'])
    d.print_command_id('created agent', agent_cid)
    web_rid = d.start(web_cid)
    d.print_command_id('running web', web_rid)
    agent_rid = d.start(agent_cid)
    d.print_command_id('running agent', agent_rid)


docker = Docker('dockerinaction')
try:
    timeout = 1
    runChapter(docker)
    print('Running for {0}s'.format(timeout))
    sleep(timeout)
    docker.cleanup()
except subprocess.CalledProcessError as err:
    print('Some error was thrown while executing docker command:\n{0}'.format(err))
    docker.cleanup()
def contratar(self, name, template):
    """Hire a service: create the container, install the template's
    commands, then query the container address."""
    client = Docker()
    client.createcontainer(name)
    self.instalar(name, template)
    client.getcontaineraddress(name)
def add_docker(self):
    """Create one docker container per OS node, each sharing the host
    directory /exports/host_share/."""
    cprint('[' + self.__task_name + '] Initialize docker', self.__output_color)
    for bee_os in self.__bee_os_list:
        container = Docker(self.__docker_conf)
        container.set_shared_dir('/exports/host_share/')
        bee_os.add_docker_container(container)
def listar_servicos(self):
    """List the service containers via the Docker wrapper."""
    Docker().listContainer()
def instalar(self, name, template):
    """Execute every command from templates/<template>.json inside the
    named container.

    :param name: container name
    :param template: template file stem under templates/
    """
    with open('templates/%s.json' % template, 'r') as f:
        comando = json.load(f)  # idiom: json.load over loads(f.read())
    d = Docker()
    for c in comando.get('comandos'):
        d.execcommand(name, c)
import os
from docker import Docker
from docker import Repl

# Host resources (Windows-style relative paths) bind-mounted into nginx.
config_src = os.path.abspath('resources\\ch3_nginx.conf')
config_dest = '/etc/nginx/conf.d/default.conf'
log_src = os.path.abspath('resources\\ch3_nginx.log')
log_dest = '/var/log/nginx/custom.host.access.log'
docker = Docker('dia-nginx')
repl = Repl(docker)
try:
    docker.run([
        '-d', '--name', 'diaweb',
        '--mount', f'type=bind,src={config_src},dst={config_dest},readonly=true',
        '--mount', f'type=bind,src={log_src},dst={log_dest}',
        '-p', '80:80', 'nginx:latest'
    ])
    repl.cmdloop('Running nginx...')
except Exception as err:
    print('Error running nginx:\n{0}'.format(err))
    # NOTE(review): reconstructed from a collapsed line — cleanup appears to
    # run only on failure; confirm it shouldn't be in a finally block
    docker.cleanup()
from docker import Docker
from docker import Repl

# Wire up the wordpress stack: mysql + mailer + wordpress + agent, linked.
docker = Docker('wordpress')
try:
    db_cid = docker.create(['-e', 'MYSQL_ROOT_PASSWORD=ch2demo', 'mysql:5.7'])
    docker.start(db_cid)
    mailer_cid = docker.create(['dockerinaction/ch2_mailer'])
    docker.start(mailer_cid)
    wp_cid = docker.create(
        ['--link', '{0}:mysql'.format(db_cid), '-p', '8080:80', '--read-only',
         '-v', '/run/apache2/', '--tmpfs', '/tmp',
         'wordpress:5.0.0-php7.2-apache'])
    docker.start(wp_cid)
    agent_cid = docker.create(
        ['--link', '{0}:insideweb'.format(wp_cid),
         '--link', '{0}:insidemailer'.format(mailer_cid),
         'dockerinaction/ch2_agent'])
    docker.start(agent_cid)
    cmd = Repl(docker)
    cmd.cmdloop()
except Exception as err:
    # BUG FIX: the original passed the format string and the exception as
    # two separate print() arguments, printing a literal '{0}'.
    print('Error creating wordpress:\n{0}'.format(err))
    docker.cleanup()
def test_traffic_property(self, traffic_docker_commands):
    """Constructing a Docker wires the patched traffic-docker-commands
    factory up with the machine name."""
    docker = Docker('mario', 27)
    self.mock_traffic_docker_commands.assert_called_with(
        docker.machine_name())
def __init__(self):
    """Initialize collaborators: configuration, shared utilities, and the
    docker wrapper."""
    self.config = Config()
    self.util = Utility()
    self.docker = Docker()
def contratar(self, name, template):
    """Create the container, install the template commands (method of this
    same class), then query the container address."""
    client = Docker()
    client.createContainer(name)
    # method of this same class (translated from 'metodo da propria classe')
    self.instalar(name, template)
    client.getContainerAddress(name)