def contra_container(cli):
    """Create (but do not start) a 'contra' container and return its id.

    Parameters:
        cli: a docker ``Client``; may be None, in which case a client for
             the local unix socket is created.

    Returns:
        The new container's id string, or None if no client is available.
    """
    # Bug fix: the original unconditionally rebound `cli`, silently
    # discarding the caller's client. Only build the default client when
    # none was supplied.
    if cli is None:
        cli = Client(base_url='unix://var/run/docker.sock')
    cid = None
    if cli:
        container = cli.create_container(
            image="contra:latest",
            # name="contra",
            user="******",
            working_dir="/home/contra/files",
            # command="/usr/bin/run.sh",
            stdin_open=True,
            tty=True,
            volumes=['/home/contra/artifacts'],
            host_config=cli.create_host_config(
                binds={
                    # `appdir` is a module-level global -- TODO confirm
                    appdir + '/static/artifacts': {
                        'bind': '/home/contra/artifacts',
                        'mode': 'rw',
                    },
                },
                privileged=True,
            ),
        )
        cid = container.get('Id')
    return cid
class ContainerServer(Server):
    """A Server backed by a Docker container that is pulled, created and
    port-mapped (node port published 1:1) at construction time."""

    def __init__(self, repository=constants.DOCKER_IMAGE_NAME,
                 tag=LATEST_DOCKER_TAG_NAME):
        """Pull ``repository:tag`` and create (but not start) the container.

        The container runs '/bin/sleep 1' and publishes the default node
        bind port on the same host port.
        """
        Server.__init__(self)
        # Fix: removed the unused local `docker_image` from the original.
        self.docker = Client(base_url='unix:///var/run/docker.sock')
        self.docker.pull(repository=repository, tag=tag, stream=False)
        image = '%s:%s' % (repository, tag)
        self.container = self.docker.create_container(
            image=image,
            ports=[constants.DEFAULT_NODE_BIND_PORT],
            host_config=self.docker.create_host_config(port_bindings={
                constants.DEFAULT_NODE_BIND_PORT:
                    constants.DEFAULT_NODE_BIND_PORT
            }),
            command='/bin/sleep 1')
        print(self.container)

    def __del__(self):
        # Best-effort cleanup: remove the container, then chain to the base.
        self.docker.remove_container(container=self.container.get('Id'))
        Server.__del__(self)

    def start(self, n=1):
        """Start the container. `n` is accepted for interface parity only."""
        res = self.docker.start(container=self.container.get('Id'))
        print(res)
        return True

    def stop(self):
        """Stop the container."""
        res = self.docker.stop(container=self.container.get('Id'))
        print(res)
        return True
class ContainerServer(Server):
    """Docker-container-backed Server: the image is pulled and the
    container created (node port published 1:1) on construction."""

    def __init__(self, repository=constants.DOCKER_IMAGE_NAME,
                 tag=LATEST_DOCKER_TAG_NAME):
        """Pull ``repository:tag``, then create (not start) the container
        running '/bin/sleep 1' with the node port bound to the host."""
        Server.__init__(self)
        # Fix: dropped the unused local `docker_image` present in the
        # original.
        self.docker = Client(base_url='unix:///var/run/docker.sock')
        self.docker.pull(
            repository=repository, tag=tag, stream=False)
        image = '%s:%s' % (repository, tag)
        self.container = self.docker.create_container(
            image=image,
            ports=[constants.DEFAULT_NODE_BIND_PORT],
            host_config=self.docker.create_host_config(
                port_bindings={constants.DEFAULT_NODE_BIND_PORT:
                               constants.DEFAULT_NODE_BIND_PORT}),
            command='/bin/sleep 1')
        print(self.container)

    def __del__(self):
        # Remove the container before the base class tears down.
        self.docker.remove_container(container=self.container.get('Id'))
        Server.__del__(self)

    def start(self, n=1):
        """Start the container (`n` kept for interface compatibility)."""
        res = self.docker.start(container=self.container.get('Id'))
        print(res)
        return True

    def stop(self):
        """Stop the container."""
        res = self.docker.stop(container=self.container.get('Id'))
        print(res)
        return True
def node_exit_handler(addr):
    # Failure handler for a dead node: every checkpointed container that
    # was hosted on `addr` is re-created (from its committed image in the
    # private registry) on the least-loaded host of the same privilege
    # level, bookkeeping collections are updated, and containers that had
    # been running are started again.
    collection = db.containers
    containers = collection.find()
    # Resource caps per privilege level. NOTE: the 'privelege' spelling
    # matches the stored document keys -- do not "fix" it here.
    resource_shares = {'high': {'cpu_shares': 1000, 'mem_limit': '600m'},
                       'medium': {'cpu_shares': 100, 'mem_limit': '400m'},
                       'low': {'cpu_shares': 10, 'mem_limit': '200m'}}
    for container in containers:
        if container['host_ip'] == addr and container['checkpointed'] == "true":
            # Choose a replacement host and pull the committed image there.
            host_ip = choose_least_loaded(container['privelege_level'])
            cli = Client(base_url=host_ip + ":2375")
            cli.pull(repository=registry + "/" + container['source_image'],
                     tag=container['username'] + "_" + container['container_name'],
                     stream=False)
            # Create
            image_name = registry + "/" + container['source_image'] + ":" + container['username'] + "_" + container['container_name']
            privelege_level = container['privelege_level']
            portlist = []
            portmap = {}
            # tomcat containers get SSH (22) re-published on the recorded
            # host port.
            if container['source_image'] == "tomcat":
                portlist.append(22)
                portmap[22] = container['ssh_port']
            host_config = cli.create_host_config(
                mem_limit=resource_shares[privelege_level]['mem_limit'],
                port_bindings=portmap)
            container_new = cli.create_container(
                image=image_name,
                cpu_shares=resource_shares[privelege_level]['cpu_shares'],
                host_config=host_config,
                ports=portlist)
            # Bump the load counter for the chosen host, then repoint the
            # container record at its new host and new container id.
            original_load = host_collection.find({"ip": host_ip})[0][privelege_level]
            host_collection.update_one({"ip": host_ip},
                                       {"$set": {privelege_level: original_load + 1}})
            collection.update_one({"container_id": container['container_id']},
                                  {"$set": {"host_ip": host_ip}})
            collection.update_one({"container_id": container['container_id']},
                                  {"$set": {"container_id": container_new['Id']}})
            # Start
            if container['status'] == "Started":
                container_id = container_new['Id']
                response = cli.start(container=container_id)
                # NOTE(review): "bash service ssh start" looks like it was
                # meant to be "service ssh start" -- confirm before changing.
                executor = cli.exec_create(container=container_id,
                                           cmd="bash service ssh start")
                response = cli.exec_start(executor.get('Id'))
    print "Failure handler called"
def run(Name, Mail, CPU, Mem):
    """Provision a user plus a personal docker-in-docker container.

    Creates a system account with a generated password, adds it to the
    docker group, then creates and starts a '<Name>-dind' container with
    the requested CPU shares and memory limit. Redirects to the user view
    on success, or to the index when no session user is present.
    """
    if 'username' not in session:
        return redirect(url_for('main.index'))
    import hashlib
    import random
    # NOTE(review): random.uniform + sha1 is not a cryptographically
    # secure password source; consider secrets.token_hex() instead.
    ans = random.uniform(1, 10)
    hashpass = hashlib.sha1(str(ans).replace('\n', '').encode()).hexdigest()
    print(hashpass)
    User.addIns(Name, hashpass)
    # Security fix: the original interpolated `Name` into shell strings
    # with shell=True (command injection). Use argv lists with the default
    # shell=False instead.
    subprocess.call(['useradd', Name, '-m', '-p', str(hashpass),
                     '-s', '/bin/hshell'])
    subprocess.call(['gpasswd', '-a', Name, 'docker'])
    clir = Client(base_url='unix://var/run/docker.sock')
    clirt = clir.create_container(hostname=str(Name + '-dind'),
                                  tty=True,
                                  detach=True,
                                  image='hakurei-dind',
                                  name=str(Name),
                                  cpu_shares=int(CPU),
                                  host_config=clir.create_host_config(
                                      mem_limit=str(Mem),
                                      privileged=True))
    clir.start(clirt.get('Id'))
    print(clirt.get('Id'))
    print(Mail)
    return redirect(url_for('userview'))
class DockerClient(object):
    """Thin facade over a local Docker daemon: image builds, short
    time-limited container runs, and exec sessions."""

    def __init__(self):
        self.client = Client(base_url='unix://var/run/docker.sock')

    def build_images(self, dockerfile: str, tag: str):
        """Build image `tag` from the Dockerfile at `dockerfile` and
        return the streamed build-log entries as a list."""
        with open(dockerfile) as file:
            dkfile = BytesIO(file.read().encode('utf-8'))
            build_stream = self.client.build(fileobj=dkfile, rm=True,
                                             tag=tag)
            response = list(build_stream)
        return response

    def run_container(self, image, mem_limit=None,
                      volume_binds: list = None, command=None):
        """Create and start `image`, wait up to 3 seconds for it to
        finish, stop it on timeout, then always remove it."""
        host_cfg = self.client.create_host_config(binds=volume_binds,
                                                  mem_limit=mem_limit)
        container = self.client.create_container(image=image,
                                                 host_config=host_cfg,
                                                 command=command)
        self.client.start(container)
        try:
            self.client.wait(container, timeout=3)
        except ReadTimeout:
            print('time out')
            self.client.stop(container)
        self.client.remove_container(container)

    def exec_container(self, container, cmd):
        """Run `cmd` inside a running container and return its output."""
        exec_ref = self.client.exec_create(container, cmd)['Id']
        return self.client.exec_start(exec_ref)
def reset_container(client: docker.Client, session, n=0,
                    image='noodles-remote'):
    """Create and start a fresh container for `session`.

    SSH (container port 22) is published on host port 10022 + 100 * n;
    the container is labelled with the slot number and session id.
    """
    bindings = {22: (10022 + n * 100)}
    hc = client.create_host_config(port_bindings=bindings)
    meta = {'noodles': str(n), 'session': session}
    container = client.create_container(image=image, host_config=hc,
                                        labels=meta)
    client.start(container)
    # Give sshd a moment to come up before callers connect.
    time.sleep(0.5)
    return container
class DockerController():
    """Runs a command inside a container, bind-mounting `root` at /host
    and using the corresponding project subdirectory as the working
    directory."""

    def __init__(self, root):
        self.workingDirectory = Path('/host')
        # Map the current directory, relative to `root`, into the mount.
        relative = Path().resolve().relative_to(root)
        self.projectDirectory = Path(self.workingDirectory, relative)
        self.client = Client(base_url='tcp://127.0.0.1:2375')
        mount = '{0}:{1}'.format(root, self.workingDirectory.as_posix())
        self.hostConfig = self.client.create_host_config(binds=[mount])

    def prepare(self, image, command):
        """Create (but do not start) the container for `command`."""
        self.container = self.client.create_container(
            image=image,
            command=command,
            volumes=self.workingDirectory.as_posix(),
            working_dir=self.projectDirectory.as_posix(),
            host_config=self.hostConfig)

    def run(self):
        """Start the prepared container, stream its logs to stdout, and
        block until it exits."""
        self.client.start(self.container)
        for chunk in self.client.logs(self.container, stream=True):
            sys.stdout.write(str(chunk, encoding='UTF-8'))
        self.client.wait(self.container)
class DockerRunner(object):
    """
    How to naturally run tasks within docker containers
    """

    def __init__(self, image, name=None, remove=True, url=':4000', **kwargs):
        self.docker = DockerClient(base_url=url, version='1.24')
        self.image = image
        self.name = name
        self.remove = remove
        # Fill in host-config defaults unless the caller overrode them,
        # then build the host config from whatever remains in kwargs.
        kwargs.setdefault('binds', [])
        kwargs.setdefault('network_mode', 'bridge')
        self.host_config = self.docker.create_host_config(**kwargs)
        self.id = None

    def start_container(self, env):
        """create a running container (just sleeps)"""
        return container(self.image, self.host_config, self.name,
                         self.docker, env=env)
def setup(self):
    """Create and start the appimager helper container, bind-mounting the
    current working directory at /mnt/appimager (rw).

    The container name is derived from the working-path hash so repeated
    setups in the same directory reuse the same name.
    """
    data_obj = data.Data()
    docker = Client()
    print('Setting up environment, please wait...')
    # Fix: dropped the unused locals `path` (get_work_path result) and
    # `volume`, and call os.getcwd() exactly once.
    cwd = os.getcwd()
    container_name = data_obj.get_path_hash()
    docker.create_container(
        'nimbusoft/appimager',
        tty=True,
        command="/bin/bash",
        name=container_name,
        volumes=['/mnt/appimager'],
        host_config=docker.create_host_config(binds={
            cwd: {
                'bind': '/mnt/appimager',
                'mode': 'rw',
            }
        }))
    docker.start(container_name)
    print('Setup Complete')
def start_opendcre(): """ Start the OpenDCRE container, with the appropriate ports exposed and files propped in. At a minimum, port 5000 must be opened, and the bmc_info.json file should be propped in as well. Returns: True if successful, False otherwise. """ print "Starting OpenDCRE... " try: cli = Client(base_url='unix://var/run/docker.sock') cli.create_container( image='vaporio/opendcre:v1.2.2', detach=True, ports=[5000], volumes=['/opendcre/bmc_config.json', '/logs'], name='opendcre', command='./start_opendcre_plc_emulator.sh', host_config=cli.create_host_config(binds=[ '/tmp/bmc_config.json:/opendcre/bmc_config.json', '/tmp:/logs' ], port_bindings={5000: [5000]})) cli.start('opendcre') # pause briefly to allow bmc scan to complete time.sleep(3) print "OpenDCRE started successfully." return True except Exception, e: print "Error starting OpenDCRE: {}".format(e) return False
def container_peripherelAccess(self, **kwargs):
    """Create a container with host-device (peripheral) access.

    Equivalent to create_container plus a host_config granting the
    container rwm access to /dev/sda.

    Returns:
        The dict describing the created container (as returned by
        docker-py's create_container).
    """
    invoke_clientAPI = Client(base_url='unix://var/run/docker.sock',
                              version='1.18')
    # Bug fix: the original passed BOTH a positional command ('true') and
    # a `command=` keyword (a TypeError: multiple values for 'command'),
    # and wrote list['/bin/bash'] -- subscripting the builtin `list` --
    # instead of the list literal ['/bin/bash'].
    containerID = invoke_clientAPI.create_container(
        'ubuntu',
        command=['/bin/bash'],
        stdin_open=True,
        host_config=invoke_clientAPI.create_host_config(
            devices=['/dev/sda:rwm']))
    return containerID
def create_host_config(volumes, ports): try: port_bindings = ports binds = ['{0}:{1}'.format(k, v) for k, v in volumes.iteritems()] docker_cli = Client(base_url=docker_socket) host_config = docker_cli.create_host_config( binds=binds, port_bindings=port_bindings) except Exception, e: log.exception('failed-host-config-creation', volumes, ports, e=e) raise
def startContainer1():
    """(Re)start the 'lighting1' container: stop and remove any existing
    instance, then recreate it from aadebuger/lightingserver with
    container port 9090 published on host port 9093."""
    cli = Client(base_url=DOCKER_HOST)
    print('container=', cli.containers())
    cli.stop("lighting1")
    cli.remove_container("lighting1")
    bindings = cli.create_host_config(port_bindings={9090: 9093})
    container = cli.create_container(image='aadebuger/lightingserver',
                                     name="lighting1",
                                     ports=[9090],
                                     host_config=bindings)
    cli.start(container)
def startContainer1():
    """Recreate and start the lighting server container.

    Any running 'lighting1' container is stopped and removed first; the
    new one maps container port 9090 to host port 9093.
    """
    cli = Client(base_url=DOCKER_HOST)
    print('container=', cli.containers())
    for action in (cli.stop, cli.remove_container):
        action("lighting1")
    new_container = cli.create_container(
        image='aadebuger/lightingserver',
        name="lighting1",
        ports=[9090],
        host_config=cli.create_host_config(port_bindings={9090: 9093}))
    cli.start(new_container)
def create_container(cname, user, giturl):
    # Build the image `user/cname` from the git URL, create a container
    # from it with all ports published, start it, and resolve the external
    # address of its 8080/tcp mapping.
    #
    # Returns (error, errmsg, cid, host_port); on any exception returns
    # (True, 'Exception', '', '').
    try:
        error = False
        errmsg = ''
        cid = ''
        host_port = ''
        print('nodeJsBuild start')
        print('file creation')
        cli = Client(base_url='unix://var/run/docker.sock')
        # Remote TLS docker-machine client variant, kept for reference:
        #tls_config = tls.TLSConfig(
        #   client_cert=('/Users/kasi-mac/.docker/machine/machines/default/cert.pem', '/Users/kasi-mac/.docker/machine/machines/default/key.pem'),
        #   ca_cert='/Users/kasi-mac/.docker/machine/machines/default/ca.pem', verify=True)
        #cli = Client(base_url='tcp://192.168.99.100:2376', tls=tls_config)
        print('cli creation')
        # Drain the build stream so the build runs to completion.
        response = [
            line for line in cli.build(path=giturl, rm=True,
                                       tag=user + '/' + cname)
        ]
        print(response)
        print('cli creation end')
        container = cli.create_container(
            image=user + '/' + cname,
            name=cname,
            host_config=cli.create_host_config(publish_all_ports=True))
        print(container)
        ccrres = json.loads(json.dumps(container))
        if 'Id' not in ccrres:
            error = True
            errmsg = 'Container creation failed'
        else:
            print('1')
            cid = ccrres['Id']
            print(cid)
            response = cli.start(container=container.get('Id'))
            print(response)
            # NOTE(review): docker-py's start() returns None on success,
            # so a non-None response is treated as failure here -- confirm
            # against the client version in use.
            if response != None:
                error = True
                errmsg = 'Container failed to start'
            else:
                error = False
                info = cli.inspect_container(container)
                print info
                # External http address of the 8080/tcp port mapping.
                host_port = 'http://' + str(
                    info['NetworkSettings']['Ports']['8080/tcp'][0]
                    ['HostIp']) + ':' + str(info['NetworkSettings']['Ports']
                                            ['8080/tcp'][0]['HostPort'])
                print host_port
        return error, errmsg, cid, host_port
    except Exception as inst:
        print inst.args
        return True, 'Exception', '', ''
def do_docker_create(self, label, parameters, environment, name, image,
                     volumes, memory_limit, folders, command):
    """
    Create necessary directories in a working directory for the mounts in
    the containers. Write .ini file filled with given parameters in each
    folder. Create a new docker container from a given image and return
    the id of the container.
    """
    # Create needed folders for mounts
    for folder in folders:
        try:
            os.makedirs(folder, 0o2775)
        # Path already exists, ignore
        except OSError:
            if not os.path.isdir(folder):
                raise

    # Create ini file for containers.
    # Fix: SafeConfigParser is a deprecated alias (removed in Python
    # 3.12); ConfigParser is the identical modern spelling.
    config = configparser.ConfigParser()
    for section in parameters:
        if not config.has_section(section):
            config.add_section(section)
        for key, value in parameters[section].items():
            # TODO: find more elegant solution for this! ugh!
            if not key == 'units':
                if not config.has_option(section, key):
                    config.set(*map(str, [section, key, value]))
    for folder in folders:
        with open(os.path.join(folder, 'input.ini'), 'w') as f:
            config.write(f)  # Yes, the ConfigParser writes to f

    # Create docker container
    client = Client(base_url='http://localhost:4000')
    # We could also pass mem_reservation since docker-py 1.10
    # Fix: renamed so the host config no longer shadows the ini parser
    # variable `config` above.
    host_config = client.create_host_config(binds=volumes,
                                            mem_limit=memory_limit)
    container = client.create_container(
        image,                       # docker image
        name=name,
        host_config=host_config,     # mounts
        command=command,             # command to run
        environment=environment,     # {'uuid' = ""} for cloud fs sync
        labels=label                 # type of container
    )
    container_id = container.get('Id')
    return container_id, ""
class Container(object):
    """Container class: create, destroy, stop, pause and resume
    containers."""

    def __init__(self):
        # initial base config
        self.cli = Client(base_url=DOCKER_BASE_URL, version='auto')
        self.logger = MyLogger()
        self.db = Database()

    def generate_string(self, size=8):
        """Return a random alphanumeric string of the given length."""
        return ''.join(
            random.choice(string.ascii_lowercase + string.ascii_uppercase +
                          string.digits) for _ in range(size))

    def create_container(self, tid, mem_limit, hostname, image_name):
        """ create a container and update status """
        # create random mysql_password
        mysql_password = self.generate_string(8)
        self.logger.writelog('create mysql password %s' % mysql_password)
        self.logger.writelog(
            "create container with image name %s and hostname %s " %
            (image_name, hostname))
        try:
            # publish_all_ports maps 22/3306/80 to random host ports.
            c = self.cli.create_container(image=image_name,environment={'MYSQL_PASSWORD':mysql_password},\
                name=hostname,hostname=hostname, detach=False,tty=False,ports=[22,3306,80],\
                host_config = self.cli.create_host_config(publish_all_ports=True,mem_limit = mem_limit))
            docker_id = c.get('Id')
            self.logger.writelog('container id: %s' % docker_id)
            self.start_container(docker_id, tid)
            # get IP address
            ip = self.cli.inspect_container(
                docker_id)['NetworkSettings']['IPAddress']
            # get port mapping
            ssh_port = self.cli.port(docker_id, '22')[0]['HostPort']
            db_port = self.cli.port(docker_id, '3306')[0]['HostPort']
            web_port = self.cli.port(docker_id, '80')[0]['HostPort']
            # NOTE(review): SQL is built via string interpolation; the
            # values are generated internally here, but parameterized
            # queries would be safer -- confirm Database.execute supports
            # them.
            sql = """insert into instances(tid,did,ip,hostname,dbport,webport,sshport,dbpwd) values('%s','%s','%s','%s','%s','%s','%s','%s');""" % (
                tid, docker_id, ip, hostname, db_port, web_port, ssh_port,
                mysql_password)
            self.logger.writelog("insert record in instances table")
            self.db.execute(sql)
            self.logger.writelog("update task status:%d" % ST_RUNNING)
            self.db.update_status(tid, ST_RUNNING)
            return docker_id
        except Exception, err:
            # create failed: log the error and mark the task unknown
            self.logger.writelog('create failed: %s ' % err,
                                 self.logger.error)
            self.db.update_status(tid, ST_UNKNOWN)
            return False
def do_docker_create(self, label, parameters, environment, name, image,
                     volumes, memory_limit, folders, command):
    """
    Create necessary directories in a working directory for the mounts in
    the containers. Write .ini file filled with given parameters in each
    folder. Create a new docker container from a given image and return
    the id of the container.
    """
    # Create needed folders for mounts
    for folder in folders:
        try:
            os.makedirs(folder, 0o2775)
        # Path already exists, ignore
        except OSError:
            if not os.path.isdir(folder):
                raise

    # Create ini file for containers.
    # Fix: SafeConfigParser is a deprecated alias (removed in Python
    # 3.12); ConfigParser is the identical modern spelling.
    config = configparser.ConfigParser()
    for section in parameters:
        if not config.has_section(section):
            config.add_section(section)
        for key, value in parameters[section].items():
            # TODO: find more elegant solution for this! ugh!
            if not key == 'units':
                if not config.has_option(section, key):
                    config.set(*map(str, [section, key, value]))
    for folder in folders:
        with open(os.path.join(folder, 'input.ini'), 'w') as f:
            config.write(f)  # Yes, the ConfigParser writes to f

    # Create docker container
    client = Client(base_url=settings.DOCKER_URL)
    # We could also pass mem_reservation since docker-py 1.10
    # Fix: renamed so the host config no longer shadows the ini parser
    # variable `config` above.
    host_config = client.create_host_config(binds=volumes,
                                            mem_limit=memory_limit)
    container = client.create_container(
        image,                       # docker image
        name=name,
        host_config=host_config,     # mounts
        command=command,             # command to run
        environment=environment,     # {'uuid' = ""} for cloud fs sync
        labels=label                 # type of container
    )
    container_id = container.get('Id')
    return container_id, ""
def handle(): if request.method == 'POST': lang = request.form['lang'] prog = request.form['prog'] # Implementation goes here. # # 1) Launch a container and run prog inside it # 2) Capture the output and return them as the response. # # Both stdout and stderr should be captured. ### BEGIN STUDENT CODE ### # write prog into ./playground/progfile/ print '-----language:', lang print '-----program:', prog print '--------------------------------------------' fw = open('./playground/progfile/prog', 'w') fw.write(prog) fw.close() command = lang # run with shared volume cmd_container = lang + ' ' + '/home/prog' #print 'cmd:', cmd_container cli = Client(base_url='unix://var/run/docker.sock') container = cli.create_container( image='qichenz:test', command=cmd_container, volumes=['/home'], host_config=cli.create_host_config( binds={ '/home/ubuntu/task2/playground/progfile': { 'bind': '/home', 'mode': 'rw' } })) response = cli.start(container=container.get('Id')) sleep(1) response = cli.logs(container.get('Id'), stdout=True, stderr=False) #print 'stdout:', response response = response + cli.logs( container.get('Id'), stderr=True, stdout=False) #print 'stderr:', cli.logs(container.get('Id'), stderr=True) print response ### END STUDENT CODE ### return response else: return app.send_static_file("index.html")
def run(args: Namespace) -> None:
    """Run an analysis framework environment.

    Creates and starts the framework container with port 8888 published
    1:1 and /dev/random + /dev/urandom bound in; with --dynamic-mount the
    live source tree is mounted rw at /son-analyze. Blocks until the
    container exits (or Ctrl-C), cleans up, and exits with the
    container's status code.
    """
    cli = Client(base_url='unix://var/run/docker.sock')
    binds = {  # type: Dict[str, Dict[str, str]]
        '/dev/random': {
            'bind': '/dev/random'
        },
        '/dev/urandom': {
            'bind': '/dev/urandom'
        }
    }
    if args.dynamic_mount:
        # Mount the checked-out source tree read-write for development.
        field_name = os.path.realpath(
            resource_filename('son_analyze.cli', '../../..'))
        binds[field_name] = {'bind': '/son-analyze', 'mode': 'rw'}
    host_config = cli.create_host_config(port_bindings={8888: 8888},
                                         binds=binds)
    container = cli.create_container(image=_IMAGE_TAG + ':latest',
                                     labels=['com.sonata.analyze'],
                                     ports=[8888],
                                     host_config=host_config)
    container_id = container.get('Id')
    cli.start(container=container_id)

    def cleanup():
        """Remove the container"""
        cli.remove_container(container=container_id, force=True)

    def signal_term_handler(unused1, unused2):  # noqa pylint: disable=unused-argument
        """Catch signal to clean the containers"""
        print('Interruption detected, stopping environment')
        cleanup()
        sys.exit(1)

    signal.signal(signal.SIGTERM, signal_term_handler)
    signal.signal(signal.SIGINT, signal_term_handler)
    print('Browse http://localhost:8888 \n'
          'Type Ctrl-C to exit')
    # Fix: removed the dead `exit_code = 0` store -- the value was
    # immediately overwritten by cli.wait().
    exit_code = cli.wait(container=container_id)
    cleanup()
    sys.exit(exit_code)
def reset_container(client: docker.Client, session, n=0,
                    image='noodles-remote'):
    """Spin up a fresh noodles container for `session` and return it once
    started; SSH is published on host port 10022 + 100 * n."""
    ssh_host_port = 10022 + n * 100
    container = client.create_container(
        image=image,
        host_config=client.create_host_config(
            port_bindings={22: ssh_host_port}),
        labels={'noodles': str(n), 'session': session})
    client.start(container)
    # Short grace period so the service inside is reachable on return.
    time.sleep(0.5)
    return container
class dockerAgent(object):
    """Class for manipulating the docker client."""
    # Host/client handles; `ctr` holds the most recently created container.
    host = None
    client = None
    ctr = None
    current_ctr = None

    def __init__(self):
        """Loading docker environments"""
        # On macOS/Windows the daemon is reached through docker-machine's
        # DOCKER_HOST (with TLS); on Linux -- or when DOCKER_HOST is not
        # set -- fall back to the local unix socket.
        if platform.system() == 'Darwin' or platform.system() == 'Windows':
            try:
                # TLS problem, can be referenced from
                # https://github.com/docker/machine/issues/1335
                from docker.utils import kwargs_from_env
                self.host = '{0}'.format(urlparse.urlparse(
                    os.environ['DOCKER_HOST']).netloc.split(':')[0])
                self.client = Client(
                    base_url='{0}'.format(os.environ['DOCKER_HOST']))
                kwargs = kwargs_from_env()
                # Work around hostname mismatch in docker-machine certs.
                kwargs['tls'].assert_hostname = False
                self.client = Client(**kwargs)
            except KeyError:
                # DOCKER_HOST not set: use the local socket instead.
                self.host = '127.0.0.1'
                self.client = Client(base_url='unix://var/run/docker.sock')
        else:
            self.host = '127.0.0.1'
            self.client = Client(base_url='unix://var/run/docker.sock')

    def createHostConfig(self, port_bindings, binds, links):
        """Create host config for containers"""
        return self.client.create_host_config(port_bindings=port_bindings,
                                              binds=binds,
                                              links=links)

    def startContainer(self, image, name, ports=None, volumes=None,
                       environment=None, host_config=None):
        """Start containers"""
        # NOTE(review): despite the name, this only *creates* the
        # container; if the image is missing locally it is pulled (with
        # progress logged) and creation retried once.
        try:
            self.ctr = self.client.create_container(
                image=image, name=name, ports=ports, volumes=volumes,
                environment=environment, host_config=host_config)
        except (TypeError, APIError), e:
            Logger.logError("\n" + "[ERROR] " + str(e.explanation))
            for line in self.client.pull(image, stream=True):
                for iterElement in list(jsoniterparse(line)):
                    Logger.logInfo(
                        "[INFO] " + json.dumps(iterElement, indent=4))
            self.ctr = self.client.create_container(
                image=image, name=name, ports=ports, volumes=volumes,
                environment=environment, host_config=host_config)
        except NullResource:
            # Missing image/name argument: deliberately skip creation.
            pass
class Docker: def __init__(self): config = ConfigParser.ConfigParser() config.read( os.path.dirname(os.path.abspath(__file__)) + '/../config.cfg') self.client = Client(base_url='tcp://%s:2376' % config.get('docker', 'server')) def listar_containers(self): for c in self.client.containers(): print c def criar_container(self, nome='novo', imagem='ubuntu', comando='/bin/bash'): container = self.client.create_container( image=imagem, command=comando, name=nome, stdin_open=True, tty=True, detach=True, ports=[80], host_config=self.client.create_host_config(port_bindings={80: 80})) return container def iniciar_container(self, id): self.client.start(container=id) print 'Container iniciado' def parar_container(self, id): self.client.stop(container=id) print 'Container parado.' def rem_container(self, id): self.client.stop(container=id) self.client.remove_container(container=id) print 'Container removido.' def exec_comando(self, id, comando): exec_id = self.client.exec_create(container=id, cmd=comando) resultado = self.client.exec_start(exec_id) return resultado def inspec_container(self, id): container = self.client.inspect_container(container=id) return container
class Container:
    """Wraps a single docker container whose service port is published on
    an ephemeral host port."""

    def __init__(self):
        self.cli = Client(base_url='unix://var/run/docker.sock')

    def start(self, service_port, image, cmd):
        """Create and start `image` running `cmd`, publishing
        `service_port` on a daemon-chosen host port; return that host
        port as a string."""
        hc = self.cli.create_host_config(
            port_bindings={service_port: None})
        self.container = self.cli.create_container(image, cmd,
                                                   ports=[service_port],
                                                   host_config=hc)
        resp = self.cli.start(container=self.container.get('Id'))
        """{u'1111/tcp': [{u'HostPort': u'32769', u'HostIp': u'0.0.0.0'}]}"""
        info = self.cli.inspect_container(self.container.get('Id'))
        mappings = info['NetworkSettings']['Ports']
        return mappings['%d/tcp' % service_port][0]['HostPort']
def POST(self):
    # web.py handler: create a container from the submitted form fields,
    # streaming image-pull progress back to the browser as a chunked
    # response (hence the generator / yield structure).
    try:
        options = {}        # kwargs for create_container
        host_options = {}   # kwargs for create_host_config
        data = web.input(port_bindings_cont=[''], port_bindings_host=[''],
                         environment_key=[''], environment_value=[''])
        fields = ['image', 'name', 'command', 'environment', 'ports',
                  'port_bindings', 'publish_all_ports', 'network']
        # Sentinel values so the loop below always processes these two
        # composite fields (their real data lives in the list inputs).
        data['environment'] = 'process'
        data['port_bindings'] = 'process'
        for field in fields:
            if field in data and data[field] is not None and len(data[field])>0 :
                if field == 'ports':
                    # Comma-separated numbers -> JSON list of ports.
                    options[field] = json.loads('['+data[field]+']')
                elif field == 'environment':
                    # Zip the parallel key/value input lists into a dict.
                    environment_dic = {}
                    for i in range(len(data['environment_key'])):
                        environment_dic[str(data['environment_key'][i])] = str(data['environment_value'][i])
                    options[field] = environment_dic
                elif field == 'port_bindings':
                    # Container-port -> host-port map; goes to host_config.
                    port_bindings_dic = {}
                    for i in range(len(data['port_bindings_cont'])):
                        port_bindings_dic[str(data['port_bindings_cont'][i])] = str(data['port_bindings_host'][i])
                    host_options[field] = port_bindings_dic
                else:
                    options[field] = data[field]
        cli = Client(base_url='unix://var/run/docker.sock')
        options['host_config'] = cli.create_host_config(**host_options)
        print options
        # Stream the pull, relaying id/status progress lines to the client.
        result = cli.pull(repository=options['image'],stream=True)
        web.header('Content-type', 'text/html')
        web.header('Transfer-Encoding', 'chunked')
        yield render_plain.layout_top(title="Harbour - Creating container")
        for json_line in result:
            line = json.loads(json_line)
            if 'id' in line and 'status' in line:
                yield line['id']+" "+line['status']+"\n"
        new_container = cli.create_container(**options)
        yield render_plain.notification_plain(message="Container {id} successfully created".format(id=new_container['Id']), status="success")
        yield render_plain.layout_bottom()
    except Exception as e:
        # Any failure is reported inline in the (already started) response.
        traceback.print_exc()
        yield "Error creating container: " + str(e)
class TestConnection(unittest.TestCase):
    """Integration tests for TaskdConnection against a real taskd server
    running in a throwaway docker container."""

    def setUp(self):
        # Start a jrabbit/taskd container with all ports published, create
        # a taskd user inside it, and point a TaskdConnection (using the
        # fixture PKI material) at the mapped 53589/tcp host port.
        # logging.basicConfig(level=logging.DEBUG)
        self.docker = Client(base_url='unix://var/run/docker.sock')
        host_config = self.docker.create_host_config(publish_all_ports=True)
        self.container = self.docker.create_container("jrabbit/taskd",
                                                      name="taskc_test",
                                                      host_config=host_config)
        self.docker.start(self.container["Id"])
        our_exec = self.docker.exec_create(self.container["Id"],
                                           "taskd add user Public test_user")
        self.tc = TaskdConnection()
        o = self.docker.exec_start(our_exec['Id'])
        # print o
        # The last token of the first output line is the new user's key.
        self.tc.uuid = o.split('\n')[0].split()[-1]
        # print self.tc.uuid
        self.tc.server = "localhost"
        c = self.docker.inspect_container("taskc_test")
        self.tc.port = int(c['NetworkSettings']['Ports']['53589/tcp'][0]['HostPort'])
        # self.tc.uuid = os.getenv("TEST_UUID")
        self.tc.group = "Public"
        self.tc.username = "******"
        self.tc.client_cert = "taskc/fixture/pki/client.cert.pem"
        self.tc.client_key = "taskc/fixture/pki/client.key.pem"
        self.tc.cacert_file = "taskc/fixture/pki/ca.cert.pem"
        # Give taskd a moment to come up before the tests connect.
        time.sleep(2)

    def test_connect(self):
        self.tc._connect()
        # print self.tc.conn.getpeername()
        self.assertEqual(self.tc.conn.getpeername(),
                         ('127.0.0.1', self.tc.port))
        # make sure we're on TLS v2 per spec
        self.assertEqual(self.tc.conn.context.protocol, 2)
        self.tc.conn.close()
        # from IPython import embed
        # embed()

    def test_put(self):
        assert self.tc.uuid
        self.tc.put("")
        tasks = """{"description":"hang up posters","entry":"20141130T081652Z","status":"pending","uuid":"0037aa92-45e5-44a6-8f34-2f92989f173a"}
{"description":"make pb ramen","entry":"20141130T081700Z","status":"pending","uuid":"dd9b71db-f51c-4026-9e46-bb099df8dd3f"}
{"description":"fold clothes","entry":"20141130T081709Z","status":"pending","uuid":"d0f53865-2f01-42a8-9f9e-3652c63f216d"}"""
        resp = self.tc.put(tasks)
        self.assertEqual(resp.status_code, 200)  # might not be correct depends on state of taskd

    def tearDown(self):
        # Force-remove the container regardless of test outcome.
        self.docker.remove_container(self.container['Id'], force=True)
class DockerModule: def __init__(self): try: self.client = Client("tcp://127.0.0.1:2376") print "Conectou!" except Exception as e: print "Falhou ao conectar no docker: ",e def list_containers(self): containers = self.client.containers(all=True) return containers def stop_container(self,id): res = self.client.stop(container=id) print res return res def start_container(self,id): res = self.client.start(container=id) return res def create_container(self,**kwargs): ports = kwargs.get("port").split(":") print ports host_config = self.client.create_host_config(port_bindings={ports[1]:ports[0]}) print host_config res = self.client.create_container(name=kwargs.get("name"), image=kwargs.get("image"), command=kwargs.get("command"), ports=[ports[1]], host_config=host_config, stdin_open=True, detach=True, tty=True) return res def execute_command(self,id,cmd): res = self.client.exec_create(container=id,cmd=cmd) res = self.client.exec_start(res) return res def delete_container(self,container): res = self.stop_container(container) res = self.client.remove_container(container=container) return "Container removed successful!!!"
def create_container(docker_client: docker.Client, args, state: dict):
    """Create (but do not start) a container from parsed CLI `args`.

    '-' as the image reuses `state['last_image']`; the created container
    id is recorded in `state['last_container']`. Prints the id (with
    --id) or the container name, plus any creation warnings to stderr.
    """
    image = args.image
    if image == '-':
        image = state['last_image']
    state['last_image'] = image

    docker_args = {}
    # Fold in raw -o options first so the explicit flags below win.
    if 'option' in args:
        user_docker_options = cmd_to_json.parse_options(args.option)
        if user_docker_options:
            docker_args.update(user_docker_options)
    if args.volume:
        bind_volumes(args.volume, docker_args)
    if args.publish:
        bind_ports(args.publish, docker_args)
    if 'host_config' in docker_args:
        docker_args['host_config'] = docker_client.create_host_config(
            **docker_args['host_config'])
    # Do these last so that the -o options don't override them
    docker_args['image'] = image
    docker_args['command'] = args.cmd if args.cmd else None
    docker_args['name'] = args.name if args.name else None

    container = docker_client.create_container(**docker_args)
    state['last_container'] = container['Id']
    if container['Warnings']:
        print("WARNING:", container['Warnings'], file=sys.stderr)
    if args.id:
        print(container['Id'])
    else:
        container = docker_client.inspect_container(container)
        print(container['Name'][1:])
class Docker: def __init__(self): self.client = Client(base_url="tcp://192.168.0.2:2376") def listar_containers(self): for c in self.client.containers(all=True): print c def criar_container(self, nome="novosdad", imagem="ubuntu", comando="/bin/bash"): container = self.client.create_container( image=imagem, command=comando, name=nome, stdin_open=True, tty=True, detach=True, ports=[80, 80], host_config=self.client.create_host_config(port_bindings={80: 80})) return container def iniciar_container(self, id): self.client.start(container=id) print "Container iniciado!" def parar_container(self, id): self.client.stop(container=id) print "Container parado!" def remove_container(self, id): self.client.stop(container=id) self.client.remove_container(container=id) print "Container removido!" def executar_comando(self, id, comando): exec_id = self.client.exec_create(container=id, cmd=comando) resultado = self.client.exec_start(exec_id) return resultado def inspecionar_container(self, id): container = self.client.inspect_container(container=id) return container
def peer_vpn(regionId, vpnId, host=None):
    """Launch a customer-gateway container peered with the AWS VPN
    connection `vpnId` in `regionId`.

    The tunnel endpoints, pre-shared keys, inside addresses and BGP ASNs
    are extracted from the connection's CustomerGatewayConfiguration XML
    and passed to the container as environment variables. `host` selects
    the docker daemon (None = local default).
    """
    ec2 = boto3.client('ec2', region_name=regionId)
    vpn = ec2.describe_vpn_connections(
        VpnConnectionIds=[vpnId])['VpnConnections'][0]
    config = xmltodict.parse(vpn['CustomerGatewayConfiguration'])
    tunnels = config['vpn_connection']['ipsec_tunnel']
    first, second = tunnels[0], tunnels[1]
    cfg = {
        "VGW1": first['vpn_gateway']['tunnel_outside_address']['ip_address'],
        "VGW2": second['vpn_gateway']['tunnel_outside_address']['ip_address'],
        "PSK1": first['ike']['pre_shared_key'],
        "PSK2": second['ike']['pre_shared_key'],
        "VTI1_LOCAL":
            first['customer_gateway']['tunnel_inside_address']['ip_address'],
        "VTI2_LOCAL":
            second['customer_gateway']['tunnel_inside_address']['ip_address'],
        "VTI1_REMOTE":
            first['vpn_gateway']['tunnel_inside_address']['ip_address'],
        "VTI2_REMOTE":
            second['vpn_gateway']['tunnel_inside_address']['ip_address'],
        "REMOTE_ASN": first['vpn_gateway']['bgp']['asn'],
        "LOCAL_ASN": first['customer_gateway']['bgp']['asn'],
    }
    dock = Client(host)
    privileged = dock.create_host_config(privileged=True)
    container = dock.create_container(image='chandlerding/aws-cgw',
                                      hostname=vpn['VpnConnectionId'],
                                      detach=True,
                                      environment=cfg,
                                      name=vpn['VpnConnectionId'],
                                      host_config=privileged)
    dock.start(container=container.get('Id'))
def run():
    """Launch the 0mu-flask container on the logged-in user's docker host.

    Redirects to the docker view on success, or back to the index when no
    user is logged in.
    """
    # Guard clause: anonymous visitors go straight back to the index.
    if 'username' not in session:
        return redirect(url_for('main.index'))

    docker_cli = Client(base_url='tcp://{0}.docker:14438'.format(session['username']))
    bindings = {8510: 8510, 22: 2222}
    created = docker_cli.create_container(
        tty=True,
        detach=True,
        image='0muproject/0mu-flask',
        name='0mu-Flask-06',
        ports=['8510', '22'],
        host_config=docker_cli.create_host_config(port_bindings=bindings))
    docker_cli.start(created.get('Id'))
    return redirect(url_for('dockerview'))
def start_test():
    """Start a broker container on the registered baremetal host, record it
    in the database, and kick off the test tools.

    Bug fix: the original called ``db.close()`` *before* ``cursor.fetchone()``;
    results must be fetched while the connection is still open. The insert is
    also parameterized now instead of string-formatted (SQL-injection safe and
    quoting-correct).
    """
    localIPv4 = socket.gethostbyname(socket.getfqdn())
    subprocess.Popen('node ../brokermanager/brokermanager.js &', shell=True)

    # One connection serves both lookups; fetch before closing.
    db = MySQLdb.connect("127.0.0.1", "root", "dms123", "dmsDB", charset='utf8')
    try:
        cursor = db.cursor()
        cursor.execute("select public_ip from broker_baremetal")
        ip = str(cursor.fetchone()[0])
        cursor.execute("select id from broker_baremetal")
        id = int(cursor.fetchone()[0])
    finally:
        db.close()

    cli = DClient(base_url='tcp://' + ip + ':4243')
    bid = hashlib.sha256(str(random.random()).encode()).hexdigest()  # docker container name
    port = 55500
    port2 = 45500
    c = cli.create_container(image='broker:0.1',
                             detach=True,
                             environment={
                                 'cluster': 'tcp://' + localIPv4 + ':1883',
                                 'brokerid': bid,
                                 'dbhost': localIPv4
                             },
                             ports=[1883, 3000],
                             name=bid,
                             host_config=cli.create_host_config(
                                 port_bindings={
                                     1883: port,
                                     3000: port2}))
    cli.start(container=c)

    db = MySQLdb.connect("127.0.0.1", "root", "dms123", "dmsDB", charset='utf8')
    try:
        cursor = db.cursor()
        cursor.execute(
            "insert into broker_brokers(id, container_id, port, created, "
            "baremetal_id, scaled) values (%s, %s, %s, %s, %s, 0)",
            (bid, c['Id'], port, datetime.now(), id))
        db.commit()
    finally:
        db.close()

    requests.post("http://127.0.0.1:8080/startTools")
    return '테스트 시작'
class Docker:
    """Small convenience wrapper over docker-py ``Client`` (Python 2 era API)."""

    def __init__(self):
        # NOTE(review): daemon endpoint is hard-coded and unencrypted — confirm.
        self.client = Client(base_url='tcp://192.168.0.2:2376')

    def listar_containers(self):
        """Return every container, including stopped ones."""
        containers = self.client.containers(all=True)
        return containers

    def criar_container(self, nome='novo', imagem='ubuntu', comando='/bin/bash'):
        """Create (without starting) an interactive detached container,
        publishing container port 80 on host port 80."""
        container = self.client.create_container(
            image=imagem,
            command=comando,
            name=nome,
            stdin_open=True,
            tty=True,
            detach=True,
            ports=[80],
            host_config=self.client.create_host_config(port_bindings={80: 80}))
        return container

    def iniciar_container(self, id):
        """Start the container identified by ``id``."""
        self.client.start(container=id)
        print 'Container iniciado'

    def parar_container(self, id):
        """Stop the container identified by ``id``."""
        self.client.stop(container=id)
        print 'Container parado.'

    def rem_container(self, id):
        """Stop, then remove, the container identified by ``id``."""
        self.client.stop(container=id)
        self.client.remove_container(container=id)
        print 'Container removido.'

    def exec_comando(self, id, comando):
        """Run ``comando`` inside a running container and return its output."""
        exec_id = self.client.exec_create(container=id, cmd=comando)
        resultado = self.client.exec_start(exec_id)
        return resultado

    def inspec_container(self, id):
        """Return the low-level inspect data for the container."""
        container = self.client.inspect_container(container=id)
        return container
def execute(self, request, pk=None):
    """Run a function version inside a docker container built for its stack.

    Writes a small runner script plus the function body into a scratch
    directory, bind-mounts it read-only at /tmp/python, and starts the
    stack's image with ``python runner.py``. Returns HTTP 202.
    """
    from tempfile import mkdtemp

    function_version = self.get_object()
    # TODO: schedule the function to run on a suitable node based on the desired computing power
    # TODO: Make sure images are pulled when stacks are defined.
    cli = Client(base_url='unix://var/run/docker.sock')

    # Bug fix: use a real temporary directory (as the commented-out original
    # intended) instead of a hard-coded developer-machine path.
    temp_dir = mkdtemp()

    # NOTE: the py2 ``except Exception, e`` syntax targets the interpreter
    # inside the container image — left byte-for-byte as in the original.
    runner_content = """
try:
    module = __import__('function')
    module.handler({})
except Exception, e:
    print e
"""
    # Context managers guarantee the files are flushed/closed before the
    # container mounts the directory.
    with open(os.path.join(temp_dir, 'runner.py'), 'w') as runner_file:
        runner_file.write(runner_content)
    with open(os.path.join(temp_dir, 'function.py'), 'w') as function_file:
        function_file.write(function_version.body)

    bindings = {
        temp_dir: {
            'bind': '/tmp/python',
            'mode': 'ro'
        }
    }
    # TODO: Launch container on background
    # Dropped the pointless ``.format(temp_dir)`` — the command string has
    # no placeholders.
    container = cli.create_container(image=function_version.function.stack.docker_image,
                                     host_config=cli.create_host_config(binds=bindings),
                                     working_dir='/tmp/python',
                                     command='python runner.py')
    cli.start(container=container.get('Id'))
    return Response(status=status.HTTP_202_ACCEPTED, data=[])
def create_docker(self, docker_host, *create_kwargs):
    """Create and start a container on the daemon registered for ``docker_host``.

    ``create_kwargs`` is a fixed 8-tuple (interface preserved):
    (container_name, image, command, host_name, volume_name, docker_mount,
    volumes, ports) where ``ports`` is a ``;``-separated list of
    ``container:host`` pairs (may be empty/None).

    Returns whatever ``Client.start`` returns.
    """
    url = self.docker_base_url[docker_host]
    cli = Client(base_url=url)
    (container_name, image, command, host_name,
     volume_name, docker_mount, volumes, ports) = create_kwargs

    # Bug fix / cleanup: the original duplicated the parsing logic for the
    # single-pair case; one loop handles both, and the debug prints are gone.
    if ports:
        docker_ports_list = []
        port_bindings = {}
        for port_item in ports.split(";"):
            docker_port, host_port = port_item.split(":")
            port_bindings[docker_port] = host_port
            docker_ports_list.append(docker_port)
    else:
        docker_ports_list = None
        port_bindings = None

    if volume_name and docker_mount:
        binds = ["%s:%s" % (volume_name, docker_mount)]
    else:
        binds = None

    container = cli.create_container(
        name=container_name,
        image=image,
        command=command,
        tty=True,
        stdin_open=True,
        hostname=host_name,
        volumes=volumes,
        ports=docker_ports_list,
        host_config=cli.create_host_config(binds=binds,
                                           port_bindings=port_bindings),
    )
    ret = cli.start(container.get("Id"))
    return ret
def create_torrent_instance(base_dir, base_port):
    """(Re)create a transmission container serving ``base_dir`` on ``base_port``.

    If an instance already manages ``base_dir``, its old container is stopped
    and removed first; otherwise a new DB record is created. A fresh container
    is then created and started either way.
    """
    # Connect to docker
    docker = DockerClient(base_url='unix://var/run/docker.sock')
    instance = None
    try:
        # Do we have a container with the same base_path?
        instance = TorrentInstance.objects.get(base_dir=base_dir)
        # Since we already have an instance, we need to shut it down
        try:
            docker.stop(instance.docker_id)
            # and remove it
            docker.remove_container(instance.docker_id)
        except DockerErrors.NotFound:
            pass
    except TorrentInstance.DoesNotExist:
        # We don't have a name, so we need to generate one from the most
        # recent primary key.
        id = TorrentInstance.objects.order_by('-id')[0].id + 1
        name = 'roaringtide-%d' % id
        # Bug fix: Django's Model.save() returns None, so the original
        # ``TorrentInstance(...).save()`` left ``instance`` as None and the
        # ``finally`` block crashed on ``instance.id``. Keep the object,
        # then save it.
        instance = TorrentInstance(base_dir=base_dir,
                                   base_port=base_port,
                                   docker_id=name)
        instance.save()
    finally:
        # Now we need to create it
        # TODO: catch if the container of that name already exists
        container = docker.create_container(
            image="transmission",
            ports=[base_port, ],
            detach=True,
            environment={"LISTENPORT": base_port, },
            name="roaringtide-%s" % instance.id,
            volumes=['/var/lib/transmission', ],
            host_config=docker.create_host_config(
                port_bindings={base_port: base_port},
                binds={
                    base_dir: {
                        'mode': 'rw',
                        'bind': '/var/lib/transmission'
                    }
                }
            )
        )
        docker.start(container=container.get('Id'))
def driver(request, browser):
    """ Fixture for selenium Webdriver using docker

    Starts a standalone-browser selenium container, waits (max ~10s) for its
    log to announce readiness, and returns a Remote webdriver connected to it.
    The container is stopped via a pytest finalizer.
    """
    if browser == 'firefox':
        browser_image = 'selenium/standalone-firefox'
        browser_dc = DesiredCapabilities.FIREFOX
    elif browser == 'chrome':
        browser_image = 'selenium/standalone-chrome'
        browser_dc = DesiredCapabilities.CHROME
    # NOTE(review): any other ``browser`` value leaves browser_image/browser_dc
    # unbound and raises NameError below — confirm callers restrict the values.
    cli = Client(version='auto', tls=False)
    # Bind container port 4444 to an ephemeral port on localhost only.
    port_conf = cli.create_host_config(publish_all_ports=True,
                                       port_bindings={4444: ('127.0.0.1', )})
    container = cli.create_container(image=browser_image,
                                     ports=[4444],
                                     detach=True,
                                     host_config=port_conf)
    cli.start(container.get('Id'))
    request.addfinalizer(lambda: cli.stop(container.get('Id')))
    # Discover which host port docker actually assigned.
    port_info = cli.port(container['Id'], 4444)[0]
    selenium_host = 'http://{}:{}/wd/hub'.format(port_info['HostIp'],
                                                 port_info['HostPort'])
    # Stream the container log until it reports it is listening, or time out.
    timeout = time.time() + 10
    success = 'connect to {}'.format(selenium_host)
    for output in cli.logs(container['Id'], stream=True):
        if success in output or time.time() > timeout:
            break
        else:
            time.sleep(1)
    driver = webdriver.Remote(command_executor=selenium_host,
                              desired_capabilities=browser_dc)
    return driver
def peer_vpn(regionId, vpnId, host=None):
    """Extract the two IPsec tunnel definitions for an AWS VPN connection and
    run a privileged customer-gateway container configured with them."""
    ec2 = boto3.client('ec2', region_name=regionId)
    vpn = ec2.describe_vpn_connections(
        VpnConnectionIds=[vpnId])['VpnConnections'][0]
    doc = xmltodict.parse(vpn['CustomerGatewayConfiguration'])
    tunnel = doc['vpn_connection']['ipsec_tunnel']

    # Environment consumed by the chandlerding/aws-cgw image.
    cfg = {
        "VGW1": tunnel[0]['vpn_gateway']['tunnel_outside_address']['ip_address'],
        "VGW2": tunnel[1]['vpn_gateway']['tunnel_outside_address']['ip_address'],
        "PSK1": tunnel[0]['ike']['pre_shared_key'],
        "PSK2": tunnel[1]['ike']['pre_shared_key'],
        "VTI1_LOCAL": tunnel[0]['customer_gateway']['tunnel_inside_address']['ip_address'],
        "VTI2_LOCAL": tunnel[1]['customer_gateway']['tunnel_inside_address']['ip_address'],
        "VTI1_REMOTE": tunnel[0]['vpn_gateway']['tunnel_inside_address']['ip_address'],
        "VTI2_REMOTE": tunnel[1]['vpn_gateway']['tunnel_inside_address']['ip_address'],
        "REMOTE_ASN": tunnel[0]['vpn_gateway']['bgp']['asn'],
        "LOCAL_ASN": tunnel[0]['customer_gateway']['bgp']['asn'],
    }

    dock = Client(host)
    created = dock.create_container(
        image='chandlerding/aws-cgw',
        hostname=vpn['VpnConnectionId'],
        detach=True,
        environment=cfg,
        name=vpn['VpnConnectionId'],
        host_config=dock.create_host_config(privileged=True))
    dock.start(container=created.get('Id'))
def setup(self):
    """Create and start the appimager build container for the current directory.

    The working directory is bind-mounted read-write at /mnt/appimager and the
    container is named after the project path hash.
    """
    store = data.Data()
    work_path = store.get_work_path()  # retained from original; may have side effects
    client = Client()
    print('Setting up environment, please wait...')
    cwd = os.getcwd()
    name = store.get_path_hash()
    mount_spec = {
        'bind': '/mnt/appimager',
        'mode': 'rw',
    }
    client.create_container('nimbusoft/appimager',
                            tty=True,
                            command="/bin/bash",
                            name=name,
                            volumes=['/mnt/appimager'],
                            host_config=client.create_host_config(
                                binds={cwd: mount_spec}))
    client.start(name)
    print('Setup Complete')
def create_app_socket():
    """Pull the app image, create and start its container, streaming pull
    progress over the websocket; returns a JSON status message."""
    logs = []
    status = json.loads(container_status())['status']
    # If the container doesn't exist yet, start creating it.
    if status == 'pending':
        cli = Client(base_url=HOST+':5678')
        # Relay each pull-progress line to connected socket clients.
        for line in cli.pull(IMAGE, stream=True):
            socketio.emit('response', json.dumps({'resp': line}))
            logs.append(line)
        container = cli.create_container(
            image=IMAGE,
            ports=[PORT],
            # NOTE(review): binding key 3000 is hard-coded while the exposed
            # port is PORT — presumably 3000 is the in-container port; confirm
            # they are meant to differ.
            name=APP_NAME,
            host_config=cli.create_host_config(port_bindings={
                3000: (HOST, PORT)
            })
        )
        # Persist the container id on the app record.
        APPS.containerId = container.get('Id')
        db.session.add(APPS)
        db.session.commit()
        # NOTE(review): a non-None return from start() is treated as failure —
        # TODO confirm for this docker-py version.
        response = cli.start(container=container.get('Id'))
        if response is not None:
            return json.dumps({'msg': 'false'})
    return json.dumps({'msg': 'ok'})
def _start_docker_slave(name, properties):
    """Start a slave controller that runs as a Docker container.

    Creates and starts the container, records its address, state and id back
    into ``properties``, and points the heartbeat listener at it.
    """
    # Client for the docker engine named in the properties.
    client = Client(version='1.21', base_url=properties['engine_url'])

    heartbeat_port = properties['heartbeat_port']
    rpc_port = properties['rpc_port']
    components_binds = {
        os.getcwd() + '/components': {
            'bind': '/home/sdp/components/',
            'mode': 'rw',
        }
    }
    # Create a container and keep its id.
    created = client.create_container(
        image=properties['image'],
        command=['/home/sdp/docker_slave.py',
                 name,
                 heartbeat_port,
                 rpc_port],
        volumes=['/home/sdp/components/'],
        host_config=client.create_host_config(binds=components_binds))
    container_id = created['Id']

    # Start it and discover the address it was given.
    client.start(container_id)
    info = client.inspect_container(container_id)
    ip_address = info['NetworkSettings']['IPAddress']

    properties['address'] = ip_address
    properties['state'] = 'running'
    properties['container_id'] = container_id
    properties['timeout counter'] = properties['timeout']
    logger.info(name + ' started in container ' + container_id + ' at ' +
                ip_address)

    # Connect the heartbeat listener to the address it is sending heartbeats to.
    heartbeat_listener.connect(ip_address, heartbeat_port)
def ghost_container(cli, image="myghost:latest"):
    """Create (but do not start) a privileged ghost container; return its id.

    Bug fix: the original unconditionally overwrote the ``cli`` argument with
    a freshly constructed Client, so the injected client was silently ignored.
    Now a default client is only created when the caller passes None.

    :param cli: docker Client to use, or None to connect to the local socket.
    :param image: image to instantiate.
    :return: the new container id, or None when no client is available.
    """
    if cli is None:
        cli = Client(base_url='unix://var/run/docker.sock')
    cid = None
    if cli:
        container = cli.create_container(
            image=image,
            working_dir="/myghost",
            #command="/usr/bin/run.sh",
            stdin_open=True,
            tty=True,
            volumes=['/myghost'],
            host_config=cli.create_host_config(
                binds={
                    appdir + '/ghost': {
                        'bind': '/myghost',
                        'mode': 'rw',
                    },
                },
                privileged=True,
            ),
        )
        cid = container.get('Id')
    return cid
class Proxy(object):
    """Facade over a docker daemon (local unix socket or TCP endpoint)."""

    def __init__(self, server=None, port=2375, version="1.19"):
        if server is None:
            self.url = 'unix:///var/run/docker.sock'
        else:
            self.url = "tcp://%s:%s" % (server, port)
        self._cli = Client(base_url=self.url, version=version)
        # NOTE(review): assert is stripped under -O; kept for interface
        # compatibility (callers may rely on AssertionError).
        assert self._cli.ping() == "OK"

    def get_all_containers(self):
        """Return Container wrappers for all running containers, with
        lower-cased attribute keys and the daemon url attached."""
        containers = []  # renamed: the original shadowed the builtin ``all``
        for attrs in self._cli.containers():
            # Bug fix: iterate a snapshot of the keys — mutating a dict while
            # iterating ``.keys()`` raises RuntimeError on Python 3. pop+assign
            # also avoids deleting entries whose key is already lower-case.
            for key in list(attrs.keys()):
                attrs[key.lower()] = attrs.pop(key)
            attrs["url"] = self.url
            containers.append(Container(attrs))
        return containers

    def destroy_all_containers(self):
        """Force-remove every running container."""
        for c in self.get_all_containers():
            self._cli.remove_container(c.id, force=True)

    def start_containers(self, image, duplicate, ipgen, cmd=None):
        """Create and start ``duplicate`` privileged containers from ``image``,
        assigning each an address from ``ipgen`` via ``docker exec ifconfig``.

        :raises Exception: when the ifconfig exec returns a non-zero status.
        """
        for i in range(duplicate):
            host_config = self._cli.create_host_config(privileged=True)
            tmp = self._cli.create_container(image, host_config=host_config, command=cmd)
            cid = tmp.get("Id")
            LOG.debug("container %s created.", cid)
            self._cli.start(cid)
            LOG.debug("container %s started.", cid)
            ip = ipgen.alloc()
            # Argument-list form (no shell) avoids quoting/injection issues.
            command = ["docker", "-H", self.url, "exec", cid,
                       "ifconfig", "eth0", ip, "up"]
            ret = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            if ret.wait() != 0:
                # Bug fix: report the exec command that failed (the original
                # formatted the unrelated ``cmd`` variable here).
                raise Exception("[Error] set up ip for %s failed!cmd=%s" % (cid, command))
def start_selenium(request):
    """Starts selenium container, connects to it and returns a driver"""
    cli = Client(base_url='unix://var/run/docker.sock')

    # Clear out any leftover container from a previous run.
    try:
        cli.remove_container('selenium', force=True)
    except docker.errors.NotFound:
        print("Couldn't remove selenium container: not found")

    # docker-py doesn't automatically pull a container
    # you should run docker pull selenium/standalone-chrome-debug
    # before running the test
    bindings = {
        4444: ('127.0.0.1', 4444),
        5900: 5900
    }
    hosts = {'otone.local': docker_host_ip}
    cli.create_container(image=selenium_docker_image,
                         name='selenium',
                         ports=[4444, 5900],
                         host_config=cli.create_host_config(
                             port_bindings=bindings,
                             extra_hosts=hosts))
    cli.start('selenium')
    print('Starting Selenium container from {0}'.format(selenium_docker_image))
    time.sleep(2)

    driver = webdriver.Remote(
        'http://localhost:4444/wd/hub',
        desired_capabilities=webdriver.DesiredCapabilities.CHROME)

    def fin():
        # Teardown: quit the browser, then stop and remove the container.
        driver.quit()
        cli.stop('selenium')
        cli.remove_container(container='selenium')

    request.addfinalizer(fin)
    return driver
# append_to = str(random.randrange(1025, 65000, 2)) # # my_new_list = [x + '/' + proto + ':'+ str(random.randrange(1025, 65000, 2)) for x in config_ports] # # print my_new_list # # # my_dict_port_list = dict(map(str, x.split(':')) for x in my_new_list) # # print my_dict_port_list config_ports = parser.config_params('images')['test_internal_port'].split() print config_ports my_new_list = [x + ':'+ str(random.randrange(1025, 65000, 2)) for x in config_ports] print my_new_list my_dict_port_list = dict(map(str, x.split(':')) for x in my_new_list) print my_dict_port_list container_id = cli.create_container( image='eg_ngphp', hostname='wtfplm', ports=config_ports, host_config=cli.create_host_config(port_bindings=my_dict_port_list), name='wtf') cli.start(container=container_id.get('Id'))
from docker import Client

c = Client(base_url='unix:///var/run/docker.sock')

# Two identical privileged containers; only the names differ.
for name in ("test1", "test2"):
    container = c.create_container(image="ubuntu:test",
                                   name=name,
                                   command="/bin/bash",
                                   stdin_open=True,
                                   host_config=c.create_host_config(
                                       privileged=True,
                                       publish_all_ports=True))
    c.start(container=container.get('Id'))

# Launch the test server inside the first container.
exec_container = c.exec_create(container="test1", cmd="python /home/server.py")
response = c.exec_start(exec_id=exec_container.get('Id'), tty=True)
class Configurator(object):
    """ Configurator performs file operations

    Manages per-user SSH-to-VPN tunnel containers: creates configuration
    folders, starts/stops/restarts containers, and reports their status
    back through a chat ``msg_writer``.
    """

    def __init__(self, host_configuration_root, configuration_root):
        super(Configurator, self).__init__()
        # Config root as seen by the docker host (used for bind mounts).
        self.host_config_root = host_configuration_root
        # Config root as seen by this process.
        self.config_root = configuration_root
        self.cli = Client(base_url="unix://var/run/docker.sock")
        if not os.path.isdir(self.config_root):
            print "Error: the configuration folder `" + self.config_root + "` does not exist."
            return
        # make sure we have the latest ssh to vpn bridge image
        self.cli.pull(image_name)
        # images = cli.images(name=image_name)

    def create_configuration(self, user, channel, msg_writer):
        """create_configuration will create the base"""
        (success, target_dir, host_target_dir) = self._check_configuration(user, channel, msg_writer)
        if success:
            # Configuration folder already exists; nothing to create.
            return
        # create the folder for this user
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        base_dir = os.path.join(self.config_root, "base")
        if not os.path.exists(base_dir):
            msg_writer.send_message(
                channel,
                "Sorry, I'm missing the folder `base` that contains the base for each tunnel configuration. I can't create a tunnel for you. Please contact a human to investigate.",
            )
            return
        copy_tree(base_dir, target_dir)
        msg_writer.send_message(
            channel,
            "Ok, your base configuration exists. Please contact a human to put your `id_rsa.pub` and VPN credentials (`vpn.auth`) in place and to define the port you'll use.",
        )

    def start_container(self, user, channel, msg_writer):
        """start_container starts a container for the user"""
        (success, target_dir, host_target_dir) = self._check_configuration(user, channel, msg_writer)
        if not success:
            return "Failed to start tunnel."
        containers = self.cli.containers(all=True, filters={"name": user})
        action = ""
        if len(containers) == 0:
            msg_writer.send_message(
                channel, "Okay, I don't already have a tunnel for " + user + ", so I'll create one!"
            )
            target_port = self._target_port(user, channel, msg_writer)
            # NET_ADMIN + /dev/net/tun are granted so the VPN can run inside
            # the container; SSH (22) is published on the user's port.
            host_config = self.cli.create_host_config(
                cap_add=["NET_ADMIN"],
                devices=["/dev/net/tun"],
                port_bindings={22: target_port},
                binds={host_target_dir: {"bind": "/vpn", "mode": "rw"}},
            )
            container = self.cli.create_container(
                image=image_name, detach=True, volumes=["/vpn"], name=user, ports=[22], host_config=host_config
            )
            response = self.cli.start(container=container.get("Id"))
            # Re-fetch to get the started container's status/ports.
            container = self.cli.containers(all=True, filters={"name": user})[0]
            action = "Started"
        else:
            msg_writer.send_message(channel, "Because I already have a tunnel for " + user + ", I'll just restart it.")
            container = containers[0]
            msg_writer.send_message(channel, "Restarting tunnel for " + user + ".")
            self.cli.restart(container)
            container = self.cli.containers(all=True, filters={"name": user})[0]
            action = "Restarted"
        status = container.get("Status")
        ports = container.get("Ports")[0]
        port = str(ports.get("PublicPort"))
        return (
            action + " tunnel.\n> Port: " + port + "\n> Status: " + status + "\nPlease let me know when I should `stop` this tunnel."
        )

    def stop_container(self, user, channel, msg_writer):
        """stop_container stops a container for the user"""
        (success, target_dir, host_target_dir) = self._check_configuration(user, channel, msg_writer)
        if not success:
            return
        containers = self.cli.containers(all=True, filters={"name": user})
        if len(containers) == 0:
            msg_writer.send_message(
                channel, "Shucks, I don't have a tunnel for " + user + ". Here I am with nothing to do."
            )
        else:
            container = containers[0]
            status = container.get("Status")
            # A status containing exit code 137 means it was already stopped.
            if "137" in status:
                msg_writer.send_message(
                    channel,
                    "It looks like the tunnel for " + user + " was previously stopped.\nLet me know if you need me to `start` it again.",
                )
                return
            msg_writer.send_message(
                channel, "Okey doke, I'll stop the tunnel for " + user + ".\nHang on, I'll let you know when I'm done."
            )
            self.cli.stop(container)
            msg_writer.send_message(
                channel,
                "Done! The tunnel for " + user + " is now stopped.\nMake sure you tell me if I should `start` it again.",
            )

    def container_status(self, user, channel, msg_writer):
        """docstring for container_status"""
        (success, target_dir, host_target_dir) = self._check_configuration(user, channel, msg_writer)
        if not success:
            return
        self._status(user, channel, msg_writer)

    def _check_configuration(self, user, channel, msg_writer):
        """_check_configuration checks if we have a container for this user"""
        target_dir = os.path.join(self.config_root, user)
        host_target_dir = os.path.join(self.host_config_root, user)
        if os.path.isdir(target_dir):
            msg_writer.send_message(channel, "Sweet, I have the configuration directory for " + user + ".")
            return (True, target_dir, host_target_dir)
        else:
            msg_writer.send_message(
                channel,
                "Sorry, I'm missing the configuration folder for " + user + ".\nIf you have VPN credentials, you can tell me to `create` and I'll create most of your configuration. A human you trust will have to put the secure bits (`id_rsa.pub` and VPN credentials) in place and define the port you'll use...",
            )
            return (False, target_dir, host_target_dir)

    def _status(self, user, channel, msg_writer):
        """looks up and outputs the status of the container for the user"""
        containers = self.cli.containers(all=True, filters={"name": user})
        if len(containers) == 0:
            msg_writer.send_message(
                channel, "Sorry, I don't have an active tunnel for " + user + ".\nNo status to report."
            )
        else:
            container = containers[0]
            status = container.get("Status")
            ports = container.get("Ports")
            port = "n/a"
            if len(ports) > 0:
                port = str(ports[0].get("PublicPort"))
            msg_writer.send_message(channel, "Tunnel for " + user + ".\n> Port: " + port + "\n> Status: " + status)

    def _target_port(self, user, channel, msg_writer):
        """_target_port attempt to look up the port that should be used for this user"""
        msg_writer.send_message(channel, "Going to see if I can figure out which `port` you will use.")
        target_dir = os.path.join(self.config_root, user)
        port_file = os.path.join(target_dir, "port")
        lines = open(port_file).read().splitlines()
        if len(lines) == 0:
            msg_writer.send_message(
                channel,
                "Sorry, I wasn't able to look up the port number for " + user + ".\nPlease contact a human you trust to make sure that a port number is defined. i.e the file `port` contains a single port number (`30022`).",
            )
            # Fall back to the default SSH tunnel port.
            return "30022"
        port_string = lines[0]
        try:
            port = int(port_string)
            if port < 1024:
                msg_writer.send_message(
                    channel,
                    "Sorry, I the port number " + str(port) + " for " + user + " is in the restricted range.\nPlease contact a human you trust to make sure that a port between 1024 and 65535 is defined in the file `port`.",
                )
                return "30022"
        except ValueError:
            msg_writer.send_message(channel, "ValueError.")
            port = 30022
            port_string = str(port)
        msg_writer.send_message(channel, "I believe you will be connecting to port `" + port_string + "`.")
        return port_string
#Create a docker container image from the Dockerfile in this directory response = dockerclient.build(path=".", dockerfile="Dockerfile", tag="pyne/helloworld") #Show the output from the build process for line in response: print line #Show the images now avalible on the docker host print "Images on docker host:" pprint(dockerclient.images()) print "\nCreating container from hello world image:\n" #Create a config object containing the port mappings from the containers port to the host port #The container port should match the one that is exposed in the Dockerfile container_port = 5000 host_port = 4000 host_config=dockerclient.create_host_config(port_bindings={container_port : host_port}) #Create a container from the newly created image, expose the correct ports #and add the config that maps these ports to the host hw_container = dockerclient.create_container(name = "hello", image="pyne/helloworld:latest", ports = [container_port], host_config = host_config) #Start the container response = dockerclient.start(container=hw_container["Id"]) #Check that the container is serving correctlty r = requests.get("http://{}:{}".format(host,host_port)) print "The container said: {}".format(r.text)
class Docker_interface:
    """Wrapper around docker-py used by TosKer to manage containers, volumes,
    images and the bridge network for a deployment."""

    def __init__(self,
                 net_name='tosker_net',
                 tmp_dir='/tmp',
                 socket='unix://var/run/docker.sock'):
        self._log = Logger.get(__name__)
        self._net_name = net_name
        # DOCKER_HOST from the environment wins over the default socket.
        self._cli = Client(base_url=os.environ.get('DOCKER_HOST') or socket)
        self._tmp_dir = tmp_dir

    # TODO: add a parameter to remove the containers if they already exist!
    def create(self, con, cmd=None, entrypoint=None, saved_image=False):
        """Create the container for ``con`` (a Container node), building or
        pulling its image first as needed. Stores the new id on ``con.id``."""
        def create_container():
            # Per-container scratch dir, bind-mounted at /tmp/dt.
            tmp_dir = path.join(self._tmp_dir, con.name)
            try:
                os.makedirs(tmp_dir)
            except:
                pass
            saved_img_name = '{}/{}'.format(self._net_name, con.name)
            img_name = con.image
            # Prefer the committed (saved) image when requested and present.
            if saved_image and self.inspect(saved_img_name):
                img_name = saved_img_name
            self._log.debug('container: {}'.format(con.get_str_obj()))
            con.id = self._cli.create_container(
                name=con.name,
                image=img_name,
                entrypoint=entrypoint if entrypoint else con.entrypoint,
                command=cmd if cmd else con.cmd,
                environment=con.env,
                detach=True,
                # stdin_open=True,
                ports=[key for key in con.ports.keys()] if con.ports else None,
                volumes=['/tmp/dt'] + ([k for k, v in con.volume.items()]
                                       if con.volume else []),
                networking_config=self._cli.create_networking_config({
                    self._net_name: self._cli.create_endpoint_config(
                        links=con.link
                        # ,aliases=['db']
                    )
                }),
                host_config=self._cli.create_host_config(
                    port_bindings=con.ports,
                    # links=con.link,
                    binds=[tmp_dir + ':/tmp/dt'] +
                          ([v + ':' + k for k, v in con.volume.items()]
                           if con.volume else []),
                )).get('Id')

        assert isinstance(con, Container)
        if con.to_build:
            self._log.debug('start building..')
            # utility.print_json(
            self._cli.build(path='/'.join(con.dockerfile.split('/')[0:-1]),
                            dockerfile='./' + con.dockerfile.split('/')[-1],
                            tag=con.image,
                            pull=True,
                            quiet=True)
            # )
            self._log.debug('stop building..')
        elif not saved_image:
            # TODO: avoid this when a custom image must be used
            self._log.debug('start pulling.. {}'.format(con.image))
            utility.print_json(self._cli.pull(con.image, stream=True),
                               self._log.debug)
            self._log.debug('end pulling..')
        try:
            create_container()
        except errors.APIError as e:
            self._log.debug(e)
            # On a name clash (or similar API error) drop the old container
            # and retry once.
            # self.stop(con)
            self.delete(con)
            create_container()
            # raise e

    def stop(self, container):
        """Stop the container (by node or name); log and swallow NotFound."""
        name = self._get_name(container)
        try:
            return self._cli.stop(name)
        except errors.NotFound as e:
            self._log.error(e)

    def start(self, container, wait=False):
        """Start the container; when ``wait`` is True block until it exits
        and stream its logs to the debug log."""
        name = self._get_name(container)
        self._cli.start(name)
        if wait:
            self._log.debug('wait container..')
            self._cli.wait(name)
            utility.print_byte(self._cli.logs(name, stream=True),
                               self._log.debug)

    def delete(self, container):
        """Remove the container and its anonymous volumes; re-raises errors."""
        name = self._get_name(container)
        try:
            self._cli.remove_container(name, v=True)
        except (errors.NotFound, errors.APIError) as e:
            self._log.error(e)
            raise e

    def exec_cmd(self, container, cmd):
        """Run ``cmd`` in a running container; return True on apparent success."""
        name = self._get_name(container)
        if not self.is_running(name):
            return False
        try:
            exec_id = self._cli.exec_create(name, cmd)
            status = self._cli.exec_start(exec_id)
            # TODO: verify the reliability of this check!
            check = 'rpc error:' != status[:10].decode("utf-8")
            self._log.debug('check: {}'.format(check))
            return check
        except errors.APIError as e:
            self._log.error(e)
            return False
        except requests.exceptions.ConnectionError as e:
            # TODO: this error arrives after a 10-second timeout
            self._log.error(e)
            return False

    def create_volume(self, volume):
        """Create a named docker volume from a Volume node."""
        assert isinstance(volume, Volume)
        self._log.debug('volume opt: {}'.format(volume.get_all_opt()))
        return self._cli.create_volume(volume.name, volume.driver,
                                       volume.get_all_opt())

    def delete_volume(self, volume):
        """Remove the named volume."""
        name = self._get_name(volume)
        return self._cli.remove_volume(name)

    def get_containers(self, all=False):
        """List containers (optionally including stopped ones)."""
        return self._cli.containers(all=all)

    def get_volumes(self):
        """Return the list of volumes, or [] when the daemon reports none."""
        volumes = self._cli.volumes()
        return volumes['Volumes'] or []

    def inspect(self, item):
        """Inspect ``item`` as a container, then image, then volume;
        return the first match or None."""
        name = self._get_name(item)
        try:
            return self._cli.inspect_container(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_image(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_volume(name)
        except errors.NotFound:
            return None

    def remove_all_containers(self):
        """Stop and delete every container known to the daemon."""
        for c in self.get_containers(all=True):
            self.stop(c['Id'])
            self.delete(c['Id'])

    def remove_all_volumes(self):
        """Delete every volume known to the daemon."""
        for v in self.get_volumes():
            self.delete_volume(v['Name'])

    def create_network(self, name, subnet='172.25.0.0/16'):
        """Create the deployment bridge network (idempotent)."""
        # docker network create -d bridge --subnet 172.25.0.0/16 isolated_nw
        # self.delete_network(name)
        try:
            self._cli.create_network(name=name,
                                     driver='bridge',
                                     ipam={'subnet': subnet},
                                     check_duplicate=True)
        except errors.APIError:
            self._log.debug('network already exists!')

    def delete_network(self, name):
        """Remove the network; ignore it if absent."""
        assert isinstance(name, str)
        try:
            self._cli.remove_network(name)
        except errors.APIError:
            self._log.debug('network not exists!')

    def delete_image(self, name):
        """Remove the image; ignore it if absent."""
        assert isinstance(name, str)
        try:
            self._cli.remove_image(name)
        except errors.NotFound:
            pass

    # TODO: split this method in two, the semantics are not clear!
    def update_container(self, node, cmd, saved_image=True):
        """Run ``cmd`` once in a fresh container for ``node``, commit the
        result as the node's saved image, then recreate the container from it."""
        assert isinstance(node, Container)
        # self._log.debug('container_conf: {}'.format(node.host_container))
        stat = self.inspect(node.image)
        old_cmd = stat['Config']['Cmd'] or None
        old_entry = stat['Config']['Entrypoint'] or None
        if self.inspect(node):
            self.stop(node)
            self.delete(node)
        self.create(node, cmd=cmd, entrypoint='', saved_image=saved_image)
        self.start(node.id, wait=True)
        self.stop(node.id)
        name = '{}/{}'.format(self._net_name, node.name)
        self._cli.commit(node.id, name)
        self.stop(node)
        self.delete(node)
        self.create(node,
                    cmd=node.cmd or old_cmd,
                    entrypoint=node.entrypoint or old_entry,
                    saved_image=True)
        self._cli.commit(node.id, name)

    def is_running(self, container):
        """Return True when the container exists and its State is Running."""
        name = self._get_name(container)
        stat = self.inspect(name)
        stat = stat is not None and stat['State']['Running'] is True
        self._log.debug('State: {}'.format(stat))
        return stat

    def _get_name(self, name):
        """Accept either a name string or a Container/Volume node."""
        if isinstance(name, six.string_types):
            return name
        else:
            assert isinstance(name, (Container, Volume))
            return name.name
# Parse the configuration file that contains a dictionary with open(sys.argv[1]) as fin: config = eval(fin.read()) # Instantiate the client that will communicate with the Docker daemon cli = Client(base_url='unix://var/run/docker.sock') # Pull the lastest image for line in cli.pull(repository=config['image'], tag=config['tag'], stream=True): sys.stdout.write(line.decode(sys.stdout.encoding)) # Get the list of mountpoints and declare volume mappings volumes = [] for volume in config['volumes']: volumes.append(volume.split(':')[1]) host_config = cli.create_host_config(binds=config['volumes']) # Create a container and start it container = cli.create_container(image=config['image'] + ':' + config['tag'], command='tail -f /dev/null', detach=True, stdin_open=True, tty=True, environment=config['environment'], volumes=volumes, name=config['name'], host_config=host_config) cli.start(container=container.get('Id')) # Execute the commands for cmd in config['cmd']:
# CLI arguments: app name, container name, yunohost container id, dockerized flag.
app=sys.argv[1]
containername=sys.argv[2]
yunohostid=sys.argv[3]
dockerized=sys.argv[4]

#Get the hostname
hostname = socket.gethostname()
imagename = hostname+'/'+app

#Connect to docker socket
cli = Client(base_url='unix://docker.sock')

#Define port binding
# NOTE(review): ``dockerized`` comes from argv as a string, so this branch is
# taken for any non-empty value, including "0"/"false" — confirm intended.
if dockerized:
    # Share the network namespace of the yunohost container.
    config=cli.create_host_config(network_mode='container:'+yunohostid)
else:
    config=cli.create_host_config(port_bindings={8000: ('127.0.0.1',8000)})

#Build docker image with the Dockerfile and disply the output
for line in cli.build(path='../build/', tag=imagename):
    out=json.loads(line)
    #sys.stdout.write('\r')
    #print(out['stream'])
    #sys.stdout.flush()

#Create the container and display result
# NOTE(review): this call appears truncated in the source chunk.
container = cli.create_container(
    image=imagename,
    detach=True,
    tty=True,
def pg_server(unused_port_factory, docker: DockerClient, request):
    """Fixture: start (or reuse) a postgres container and yield its info.

    Yields the container dict augmented with ``host``, ``port`` and
    ``pg_params``; on teardown kills and removes the container unless
    ``--pg-reuse`` was given.
    """
    pg_name = request.config.getoption('--pg-name')
    pg_image = request.config.getoption('--pg-image')
    pg_reuse = request.config.getoption('--pg-reuse')
    container = None
    port = None
    if not pg_name:
        pg_name = 'db-{}'.format(str(uuid.uuid4()))
    if pg_name:
        # Reuse an existing container whose name matches pg_name.
        for item in docker.containers(all=True):
            for name in item['Names']:
                if pg_name in name:
                    container = item
                    break
    if not container:
        # No match: pull the image and create a fresh container on a free port.
        port = unused_port_factory()
        docker.pull(pg_image)
        container = docker.create_container(
            image=pg_image,
            name=pg_name,
            ports=[5432],
            host_config=docker.create_host_config(port_bindings={5432: port}),
            detach=True)
    docker.start(container=container['Id'])
    inspection = docker.inspect_container(container['Id'])
    host = inspection['NetworkSettings']['IPAddress']
    if not port:
        # Container was reused: discover the already-published host port.
        ports = inspection['NetworkSettings']['Ports']
        if '5432/tcp' in ports:
            port = ports['5432/tcp'][0]['HostPort']
    pg_params = {
        'database': 'postgres',
        'user': '******',
        'password': '******',
        'host': 'localhost',
        'port': port
    }
    # Poll with exponential backoff until the server accepts queries.
    delay = 0.001
    for i in range(100):
        try:
            with psycopg2.connect(**pg_params) as conn:
                with conn.cursor() as cursor:
                    cursor.execute('SELECT version();')
                    break
        except psycopg2.Error:
            time.sleep(delay)
            delay *= 2
    else:
        pytest.fail('Cannot start postgres server')
    container['host'] = host
    container['port'] = port
    container['pg_params'] = pg_params
    yield container
    if not pg_reuse:
        docker.kill(container=container['Id'])
        docker.remove_container(container['Id'])
class DockerOperator(BaseOperator):
    """
    Execute a command inside a docker container.

    A temporary directory is created on the host and mounted into a container to allow storing files
    that together exceed the default disk size of 10GB in a container. The path to the mounted
    directory can be accessed via the environment variable ``AIRFLOW_TMP_DIR``.

    :param image: Docker image from which to create the container.
    :type image: str
    :param api_version: Remote API version.
    :type api_version: str
    :param command: Command to be run in the container.
    :type command: str or list
    :param cpus: Number of CPUs to assign to the container.
        This value gets multiplied with 1024. See
        https://docs.docker.com/engine/reference/run/#cpu-share-constraint
    :type cpus: float
    :param docker_url: URL of the host running the docker daemon.
    :type docker_url: str
    :param environment: Environment variables to set in the container.
    :type environment: dict
    :param force_pull: Pull the docker image on every run.
    :type force_pull: bool
    :param mem_limit: Maximum amount of memory the container can use. Either a float value, which
        represents the limit in bytes, or a string like ``128m`` or ``1g``.
    :type mem_limit: float or str
    :param network_mode: Network mode for the container.
    :type network_mode: str
    :param tls_ca_cert: Path to a PEM-encoded certificate authority to secure the docker connection.
    :type tls_ca_cert: str
    :param tls_client_cert: Path to the PEM-encoded certificate used to authenticate docker client.
    :type tls_client_cert: str
    :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
    :type tls_client_key: str
    :param tls_hostname: Hostname to match against the docker server certificate or False to
        disable the check.
    :type tls_hostname: str or bool
    :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
    :type tls_ssl_version: str
    :param tmp_dir: Mount point inside the container to a temporary directory created on the host by
        the operator. The path is also made available via the environment variable
        ``AIRFLOW_TMP_DIR`` inside the container.
    :type tmp_dir: str
    :param user: Default user inside the docker container.
    :type user: int or str
    :param volumes: List of volumes to mount into the container, e.g.
        ``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
    :param xcom_push: Does the stdout will be pushed to the next step using XCom.
        The default is False.
    :type xcom_push: bool
    :param xcom_all: Push all the stdout or just the last line. The default is False (last line).
    :type xcom_all: bool
    """
    template_fields = ('command',)
    template_ext = ('.sh', '.bash',)

    @apply_defaults
    def __init__(
            self,
            image,
            api_version=None,
            command=None,
            cpus=1.0,
            docker_url='unix://var/run/docker.sock',
            environment=None,
            force_pull=False,
            mem_limit=None,
            network_mode=None,
            tls_ca_cert=None,
            tls_client_cert=None,
            tls_client_key=None,
            tls_hostname=None,
            tls_ssl_version=None,
            tmp_dir='/tmp/airflow',
            user=None,
            volumes=None,
            xcom_push=False,
            xcom_all=False,
            *args,
            **kwargs):
        super(DockerOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.command = command
        self.cpus = cpus
        self.docker_url = docker_url
        self.environment = environment or {}
        self.force_pull = force_pull
        self.image = image
        self.mem_limit = mem_limit
        self.network_mode = network_mode
        self.tls_ca_cert = tls_ca_cert
        self.tls_client_cert = tls_client_cert
        self.tls_client_key = tls_client_key
        self.tls_hostname = tls_hostname
        self.tls_ssl_version = tls_ssl_version
        self.tmp_dir = tmp_dir
        self.user = user
        self.volumes = volumes or []
        self.xcom_push = xcom_push
        self.xcom_all = xcom_all

        self.cli = None
        self.container = None

    def execute(self, context):
        """Pull the image if needed, run the command in a container and wait for it."""
        logging.info('Starting docker container from image ' + self.image)

        # TLS is only configured when all three certificate pieces are supplied.
        tls_config = None
        if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
            tls_config = tls.TLSConfig(
                ca_cert=self.tls_ca_cert,
                client_cert=(self.tls_client_cert, self.tls_client_key),
                verify=True,
                ssl_version=self.tls_ssl_version,
                assert_hostname=self.tls_hostname
            )
            self.docker_url = self.docker_url.replace('tcp://', 'https://')

        self.cli = Client(base_url=self.docker_url, version=self.api_version, tls=tls_config)

        if ':' not in self.image:
            image = self.image + ':latest'
        else:
            image = self.image

        if self.force_pull or len(self.cli.images(name=image)) == 0:
            logging.info('Pulling docker image ' + image)
            for l in self.cli.pull(image, stream=True):
                # BUG FIX: pull(stream=True) yields bytes under Python 3;
                # decode before JSON-parsing (consistent with the newer
                # DockerOperator revision in this file).
                output = json.loads(l.decode('utf-8'))
                logging.info("{}".format(output['status']))

        cpu_shares = int(round(self.cpus * 1024))

        with TemporaryDirectory(prefix='airflowtmp') as host_tmp_dir:
            self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
            self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))

            self.container = self.cli.create_container(
                command=self.get_command(),
                cpu_shares=cpu_shares,
                environment=self.environment,
                host_config=self.cli.create_host_config(binds=self.volumes,
                                                        network_mode=self.network_mode),
                image=image,
                mem_limit=self.mem_limit,
                user=self.user
            )
            self.cli.start(self.container['Id'])

            line = ''
            for line in self.cli.logs(container=self.container['Id'], stream=True):
                # BUG FIX: log lines are bytes under Python 3; strip and decode so
                # the logged/XCom value is text instead of a repr like "b'...'".
                line = line.strip()
                if hasattr(line, 'decode'):
                    line = line.decode('utf-8')
                logging.info("{}".format(line))

            exit_code = self.cli.wait(self.container['Id'])
            if exit_code != 0:
                raise AirflowException('docker container failed')

            if self.xcom_push:
                return self.cli.logs(container=self.container['Id']) if self.xcom_all else str(line)

    def get_command(self):
        """Return the command, parsing a string of the form ``"['cmd', ...]"`` into a list."""
        if self.command is not None and self.command.strip().find('[') == 0:
            commands = ast.literal_eval(self.command)
        else:
            commands = self.command
        return commands

    def on_kill(self):
        """Stop the running container when the task is killed."""
        if self.cli is not None:
            logging.info('Stopping docker container')
            self.cli.stop(self.container['Id'])
class DockerOperator(BaseOperator):
    """
    Execute a command inside a docker container.

    A temporary directory is created on the host and mounted into a container to allow storing files
    that together exceed the default disk size of 10GB in a container. The path to the mounted
    directory can be accessed via the environment variable ``AIRFLOW_TMP_DIR``.

    If a login to a private registry is required prior to pulling the image, a Docker connection
    needs to be configured in Airflow and the connection ID be provided with the parameter
    ``docker_conn_id``.

    :param image: Docker image from which to create the container.
    :type image: str
    :param api_version: Remote API version. Set to ``auto`` to automatically detect the server's
        version.
    :type api_version: str
    :param command: Command to be run in the container.
    :type command: str or list
    :param cpus: Number of CPUs to assign to the container.
        This value gets multiplied with 1024. See
        https://docs.docker.com/engine/reference/run/#cpu-share-constraint
    :type cpus: float
    :param docker_url: URL of the host running the docker daemon.
        Default is unix://var/run/docker.sock
    :type docker_url: str
    :param environment: Environment variables to set in the container.
    :type environment: dict
    :param force_pull: Pull the docker image on every run. Default is false.
    :type force_pull: bool
    :param mem_limit: Maximum amount of memory the container can use. Either a float value, which
        represents the limit in bytes, or a string like ``128m`` or ``1g``.
    :type mem_limit: float or str
    :param network_mode: Network mode for the container.
    :type network_mode: str
    :param tls_ca_cert: Path to a PEM-encoded certificate authority to secure the docker connection.
    :type tls_ca_cert: str
    :param tls_client_cert: Path to the PEM-encoded certificate used to authenticate docker client.
    :type tls_client_cert: str
    :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
    :type tls_client_key: str
    :param tls_hostname: Hostname to match against the docker server certificate or False to
        disable the check.
    :type tls_hostname: str or bool
    :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
    :type tls_ssl_version: str
    :param tmp_dir: Mount point inside the container to a temporary directory created on the host by
        the operator. The path is also made available via the environment variable
        ``AIRFLOW_TMP_DIR`` inside the container.
    :type tmp_dir: str
    :param user: Default user inside the docker container.
    :type user: int or str
    :param volumes: List of volumes to mount into the container, e.g.
        ``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
    :param working_dir: Working directory to set on the container (equivalent to the -w switch
        the docker client)
    :type working_dir: str
    :param xcom_push: Does the stdout will be pushed to the next step using XCom.
        The default is False.
    :type xcom_push: bool
    :param xcom_all: Push all the stdout or just the last line. The default is False (last line).
    :type xcom_all: bool
    :param docker_conn_id: ID of the Airflow connection to use
    :type docker_conn_id: str
    """
    template_fields = ('command', )
    template_ext = (
        '.sh',
        '.bash',
    )

    @apply_defaults
    def __init__(self,
                 image,
                 api_version=None,
                 command=None,
                 cpus=1.0,
                 docker_url='unix://var/run/docker.sock',
                 environment=None,
                 force_pull=False,
                 mem_limit=None,
                 network_mode=None,
                 tls_ca_cert=None,
                 tls_client_cert=None,
                 tls_client_key=None,
                 tls_hostname=None,
                 tls_ssl_version=None,
                 tmp_dir='/tmp/airflow',
                 user=None,
                 volumes=None,
                 working_dir=None,
                 xcom_push=False,
                 xcom_all=False,
                 docker_conn_id=None,
                 *args,
                 **kwargs):
        super(DockerOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.command = command
        self.cpus = cpus
        self.docker_url = docker_url
        self.environment = environment or {}
        self.force_pull = force_pull
        self.image = image
        self.mem_limit = mem_limit
        self.network_mode = network_mode
        self.tls_ca_cert = tls_ca_cert
        self.tls_client_cert = tls_client_cert
        self.tls_client_key = tls_client_key
        self.tls_hostname = tls_hostname
        self.tls_ssl_version = tls_ssl_version
        self.tmp_dir = tmp_dir
        self.user = user
        self.volumes = volumes or []
        self.working_dir = working_dir
        self.xcom_push_flag = xcom_push
        self.xcom_all = xcom_all
        self.docker_conn_id = docker_conn_id

        self.cli = None
        self.container = None

    def get_hook(self):
        """Build a DockerHook from the configured Airflow connection."""
        # BUG FIX: this previously passed ``self.base_url``, an attribute that is
        # never set (the constructor stores the daemon URL as ``self.docker_url``),
        # so any use of ``docker_conn_id`` raised AttributeError.
        return DockerHook(docker_conn_id=self.docker_conn_id,
                          base_url=self.docker_url,
                          version=self.api_version,
                          tls=self.__get_tls_config())

    def execute(self, context):
        """Pull the image if needed, run the command in a container and wait for it."""
        self.log.info('Starting docker container from image %s', self.image)

        tls_config = self.__get_tls_config()

        # A configured connection takes precedence over the raw docker_url/TLS params.
        if self.docker_conn_id:
            self.cli = self.get_hook().get_conn()
        else:
            self.cli = Client(base_url=self.docker_url,
                              version=self.api_version,
                              tls=tls_config)

        if ':' not in self.image:
            image = self.image + ':latest'
        else:
            image = self.image

        if self.force_pull or len(self.cli.images(name=image)) == 0:
            self.log.info('Pulling docker image %s', image)
            for l in self.cli.pull(image, stream=True):
                output = json.loads(l.decode('utf-8'))
                self.log.info("%s", output['status'])

        cpu_shares = int(round(self.cpus * 1024))

        with TemporaryDirectory(prefix='airflowtmp') as host_tmp_dir:
            self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
            self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))

            self.container = self.cli.create_container(
                command=self.get_command(),
                cpu_shares=cpu_shares,
                environment=self.environment,
                host_config=self.cli.create_host_config(
                    binds=self.volumes, network_mode=self.network_mode),
                image=image,
                mem_limit=self.mem_limit,
                user=self.user,
                working_dir=self.working_dir)
            self.cli.start(self.container['Id'])

            line = ''
            for line in self.cli.logs(container=self.container['Id'], stream=True):
                line = line.strip()
                if hasattr(line, 'decode'):
                    line = line.decode('utf-8')
                self.log.info(line)

            exit_code = self.cli.wait(self.container['Id'])
            if exit_code != 0:
                raise AirflowException('docker container failed')

            if self.xcom_push_flag:
                return self.cli.logs(container=self.container['Id']
                                     ) if self.xcom_all else str(line)

    def get_command(self):
        """Return the command, parsing a string of the form ``"['cmd', ...]"`` into a list."""
        if self.command is not None and self.command.strip().find('[') == 0:
            commands = ast.literal_eval(self.command)
        else:
            commands = self.command
        return commands

    def on_kill(self):
        """Stop the running container when the task is killed."""
        if self.cli is not None:
            self.log.info('Stopping docker container')
            self.cli.stop(self.container['Id'])

    def __get_tls_config(self):
        """Return a TLSConfig when all three cert pieces are set, else None.

        Side effect: rewrites ``self.docker_url`` from tcp:// to https:// when
        TLS is enabled.
        """
        tls_config = None
        if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
            tls_config = tls.TLSConfig(ca_cert=self.tls_ca_cert,
                                       client_cert=(self.tls_client_cert,
                                                    self.tls_client_key),
                                       verify=True,
                                       ssl_version=self.tls_ssl_version,
                                       assert_hostname=self.tls_hostname)
            self.docker_url = self.docker_url.replace('tcp://', 'https://')
        return tls_config
class TestRunner(object):
    """Badwolf test runner.

    Clones a Bitbucket repository, builds (or reuses) a Docker image for it,
    runs the project's test scripts inside a container, reports the build
    status back to Bitbucket and sends mail/Slack notifications.
    """

    def __init__(self, context, lock):
        # context: build context carrying repository/source/target info.
        # lock: held while building/inspecting Docker images (see get_docker_image).
        self.context = context
        self.lock = lock
        self.repo_full_name = context.repository
        self.repo_name = context.repository.split('/')[-1]
        self.task_id = str(uuid.uuid4())
        self.commit_hash = context.source['commit']['hash']
        self.build_status = BuildStatus(
            bitbucket,
            context.source['repository']['full_name'],
            self.commit_hash,
            'badwolf/test',
            url_for('log.build_log', sha=self.commit_hash, _external=True))

        self.docker = Client(
            base_url=current_app.config['DOCKER_HOST'],
            timeout=current_app.config['DOCKER_API_TIMEOUT'],
        )

    def run(self):
        """Run the whole pipeline: clone, validate, build image, test, lint, clean up."""
        start_time = time.time()
        self.branch = self.context.source['branch']['name']
        try:
            self.clone_repository()
        except git.GitCommandError as e:
            # Clone failure: report FAILED and comment on the PR or commit.
            logger.exception('Git command error')
            self.update_build_status('FAILED', 'Git clone repository failed')
            content = ':broken_heart: **Git error**: {}'.format(to_text(e))
            if self.context.pr_id:
                pr = PullRequest(bitbucket, self.repo_full_name)
                pr.comment(self.context.pr_id, content)
            else:
                cs = Changesets(bitbucket, self.repo_full_name)
                cs.comment(self.commit_hash, content)
            self.cleanup()
            return

        if not self.validate_settings():
            self.cleanup()
            return

        # Template context shared by notifications.
        context = {
            'context': self.context,
            'task_id': self.task_id,
            'build_log_url': url_for('log.build_log', sha=self.commit_hash, _external=True),
            'branch': self.branch,
            'scripts': self.spec.scripts,
        }
        if self.spec.scripts:
            self.update_build_status('INPROGRESS', 'Test in progress')
            docker_image_name, build_output = self.get_docker_image()
            context['build_logs'] = to_text(build_output)
            context.update({
                'build_logs': to_text(build_output),
                'elapsed_time': int(time.time() - start_time),
            })
            if not docker_image_name:
                self.update_build_status('FAILED', 'Build or get Docker image failed')
                context['exit_code'] = -1
                self.send_notifications(context)
                self.cleanup()
                return

            exit_code, output = self.run_tests_in_container(docker_image_name)
            if exit_code == 0:
                # Success
                logger.info('Test succeed for repo: %s', self.repo_full_name)
                self.update_build_status('SUCCESSFUL', '1 of 1 test succeed')
            else:
                # Failed
                logger.info('Test failed for repo: %s, exit code: %s',
                            self.repo_full_name, exit_code)
                self.update_build_status('FAILED', '1 of 1 test failed')

            context.update({
                'logs': to_text(output),
                'exit_code': exit_code,
                'elapsed_time': int(time.time() - start_time),
            })
            self.send_notifications(context)

        # Code linting (only for pull requests with linters configured)
        if self.context.pr_id and self.spec.linters:
            lint = LintProcessor(self.context, self.spec, self.clone_path)
            lint.process()

        self.cleanup()

    def clone_repository(self):
        """Clone the source repo; for PRs, fetch and merge the target branch."""
        self.clone_path = os.path.join(tempfile.gettempdir(), 'badwolf',
                                       self.task_id, self.repo_name)
        source_repo = self.context.source['repository']['full_name']
        # Use shallow clone to speed up
        bitbucket.clone(source_repo, self.clone_path, depth=50, branch=self.branch)
        gitcmd = git.Git(self.clone_path)
        if self.context.target:
            # Pull Request
            target_repo = self.context.target['repository']['full_name']
            target_branch = self.context.target['branch']['name']
            if source_repo == target_repo:
                target_remote = 'origin'
            else:
                # Pull Request across forks
                target_remote = target_repo.split('/', 1)[0]
                gitcmd.remote('add', target_remote, bitbucket.get_git_url(target_repo))
            gitcmd.fetch(target_remote, target_branch)
            gitcmd.checkout('FETCH_HEAD')
            gitcmd.merge('origin/{}'.format(self.branch))
        else:
            # Push to branch or ci retry comment on some commit
            logger.info('Checkout commit %s', self.commit_hash)
            gitcmd.checkout(self.commit_hash)

        gitmodules = os.path.join(self.clone_path, '.gitmodules')
        if os.path.exists(gitmodules):
            gitcmd.submodule('update', '--init', '--recursive')

    def validate_settings(self):
        """Parse the project config file; return True when there is work to do."""
        conf_file = os.path.join(self.clone_path,
                                 current_app.config['BADWOLF_PROJECT_CONF'])
        if not os.path.exists(conf_file):
            logger.warning('No project configuration file found for repo: %s',
                           self.repo_full_name)
            return False

        self.spec = spec = Specification.parse_file(conf_file)
        # Plain commits only run when the branch is whitelisted in the spec.
        if self.context.type == 'commit' and spec.branch and self.branch not in spec.branch:
            logger.info(
                'Ignore tests since branch %s test is not enabled. Allowed branches: %s',
                self.branch, spec.branch)
            return False

        if not spec.scripts and not spec.linters:
            logger.warning('No script(s) or linter(s) to run')
            return False
        return True

    def get_docker_image(self):
        """Build (or reuse) the per-repo Docker image.

        Returns ``(image_name, build_output)``; image_name is None when the
        build did not succeed.  The lock serializes image lookup and build.
        """
        docker_image_name = self.repo_full_name.replace('/', '-')
        output = []
        with self.lock:
            docker_image = self.docker.images(docker_image_name)
            if not docker_image or self.context.rebuild:
                dockerfile = os.path.join(self.clone_path, self.spec.dockerfile)
                build_options = {
                    'tag': docker_image_name,
                    'rm': True,
                }
                if not os.path.exists(dockerfile):
                    # Fall back to the stock runner image when the repo has no Dockerfile.
                    logger.warning(
                        'No Dockerfile: %s found for repo: %s, using simple runner image',
                        dockerfile, self.repo_full_name)
                    dockerfile_content = 'FROM messense/badwolf-test-runner\n'
                    fileobj = io.BytesIO(dockerfile_content.encode('utf-8'))
                    build_options['fileobj'] = fileobj
                else:
                    build_options['dockerfile'] = self.spec.dockerfile

                build_success = False
                logger.info('Building Docker image %s', docker_image_name)
                self.update_build_status('INPROGRESS', 'Building Docker image')
                res = self.docker.build(self.clone_path, **build_options)
                for line in res:
                    # Success is detected by scanning the raw build stream.
                    if b'Successfully built' in line:
                        build_success = True
                    log = to_text(json.loads(to_text(line))['stream'])
                    output.append(log)
                    logger.info('`docker build` : %s', log.strip())
                if not build_success:
                    return None, ''.join(output)

        return docker_image_name, ''.join(output)

    def run_tests_in_container(self, docker_image_name):
        """Run badwolf-run inside a container; return (exit_code, output text).

        exit_code is -1 on docker/timeout errors; the container is always removed.
        """
        command = '/bin/sh -c badwolf-run'
        environment = {}
        if self.spec.environments:
            # TODO: Support run in multiple environments
            environment = self.spec.environments[0]

        # TODO: Add more test context related env vars
        environment.update({
            'DEBIAN_FRONTEND': 'noninteractive',
            'CI': 'true',
            'CI_NAME': 'badwolf',
            'BADWOLF_BRANCH': self.branch,
            'BADWOLF_COMMIT': self.commit_hash,
            'BADWOLF_BUILD_DIR': '/mnt/src',
            'BADWOLF_REPO_SLUG': self.repo_full_name,
        })
        if self.context.pr_id:
            environment['BADWOLF_PULL_REQUEST'] = to_text(self.context.pr_id)

        # The clone is bind-mounted read-write at /mnt/src inside the container.
        container = self.docker.create_container(
            docker_image_name,
            command=command,
            environment=environment,
            working_dir='/mnt/src',
            volumes=['/mnt/src'],
            host_config=self.docker.create_host_config(
                privileged=self.spec.privileged,
                binds={
                    self.clone_path: {
                        'bind': '/mnt/src',
                        'mode': 'rw',
                    },
                }))
        container_id = container['Id']
        logger.info('Created container %s from image %s', container_id, docker_image_name)

        output = []
        try:
            self.docker.start(container_id)
            self.update_build_status('INPROGRESS', 'Running tests in Docker container')
            for line in self.docker.logs(container_id, stream=True):
                output.append(to_text(line))
            exit_code = self.docker.wait(container_id,
                                         current_app.config['DOCKER_RUN_TIMEOUT'])
        except (APIError, DockerException, ReadTimeout) as e:
            exit_code = -1
            output.append(to_text(e))
            logger.exception('Docker error')
        finally:
            try:
                self.docker.remove_container(container_id, force=True)
            except (APIError, DockerException):
                logger.exception('Error removing docker container')

        return exit_code, ''.join(output)

    def update_build_status(self, state, description=None):
        """Push the build state to Bitbucket; API errors are logged, not raised."""
        try:
            self.build_status.update(state, description=description)
        except BitbucketAPIError:
            logger.exception('Error calling Bitbucket API')

    def send_notifications(self, context):
        """Render the result e-mail/Slack message, persist the HTML log, and send."""
        exit_code = context['exit_code']
        template = 'test_success' if exit_code == 0 else 'test_failure'
        html = render_template('mail/' + template + '.html', **context)
        html = sanitize_sensitive_data(html)

        # Save log html
        log_dir = os.path.join(current_app.config['BADWOLF_LOG_DIR'], self.commit_hash)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        log_file = os.path.join(log_dir, 'build.html')
        with open(log_file, 'wb') as f:
            f.write(to_binary(html))

        if exit_code == 0:
            subject = 'Test succeed for repository {}'.format(self.repo_full_name)
        else:
            subject = 'Test failed for repository {}'.format(self.repo_full_name)

        notification = self.spec.notification
        emails = notification['emails']
        if emails:
            send_mail(emails, subject, html)

        slack_webhooks = notification['slack_webhooks']
        if slack_webhooks:
            message = render_template('slack_webhook/' + template + '.md', **context)
            trigger_slack_webhook(slack_webhooks, message)

    def cleanup(self):
        """Remove the whole per-task clone directory; ignore errors."""
        shutil.rmtree(os.path.dirname(self.clone_path), ignore_errors=True)