def add_entry_to_etc_hosts(self):
    """Append the project's VIRTUAL_HOST names to /etc/hosts, pointing at 127.0.0.1.

    Host names are read from the docker-compose 'web' (and optional
    'phpmyadmin') service environment. The entry is only written when the
    names are not already present; sudo is used to move the edited copy
    into place. Failures are reported as a warning, never raised.
    """
    try:
        with open('/etc/hosts', 'rt') as f:
            host_names = self.docker_get_compose_option_value(
                'web', 'VIRTUAL_HOST', 'environment').replace(',', ' ')
            phpmyadmin_host = self.docker_get_compose_option_value(
                'phpmyadmin', 'VIRTUAL_HOST', 'environment').replace(',', ' ')
            if phpmyadmin_host:
                host_names += ' ' + phpmyadmin_host
            s = f.read()
        if host_names not in s:
            s += "\n%s\t\t%s\n" % ('127.0.0.1', host_names)
            # Write a temp copy first; moving it into place needs sudo.
            with open('/tmp/etc_hosts.tmp', 'wt') as outf:
                outf.write(s)
            run_cmd('sudo mv /tmp/etc_hosts.tmp /etc/hosts')
            message(
                "Config entry with hosts: >>>%s<<< has been successfully added to /etc/hosts file."
                % host_names, 'info')
        else:
            message(
                "Config entry with hosts: >>>%s<<< was not added to /etc/hosts file because it already contains entry with this hosts"
                % host_names, 'info')
    except Exception:
        # Best effort: a missing /etc/hosts entry must not abort the build.
        message('Config entry was not added to /etc/hosts.', 'warning')
def get_nginx_proxy_ip(self):
    """Return the IP address of the running nginx-proxy container, or False."""
    proxy_running = run_cmd('docker ps -q -f name=nginx-proxy', return_output=True)
    if not proxy_running:
        return False
    return run_cmd(
        'docker inspect --format "{{ .NetworkSettings.IPAddress }}" nginx-proxy',
        return_output=True)
def setfacl(self, group='www-data'):
    """Grant *group* rwX access (current and default ACL) on APP_LOCATION.

    Generalized: the previously hard-coded 'www-data' group is now a
    parameter with the same default, matching the other setfacl variant.

    :param group: group name to grant access to (default 'www-data').
    """
    # Non-zero uid means a regular user -> apply ACLs through the container.
    if os.getuid():
        self.docker_run('setfacl -Rm g:%s:rwX %s' % (group, self.config.WEB['APP_LOCATION']))
        self.docker_run('setfacl -d -Rm g:%s:rwX %s' % (group, self.config.WEB['APP_LOCATION']))
    else:
        run_cmd('setfacl -Rm g:%s:rwX %s' % (group, self.config.WEB['APP_LOCATION']))
        run_cmd('setfacl -d -Rm g:%s:rwX %s' % (group, self.config.WEB['APP_LOCATION']))
def export_db(self):
    """Dump the project database with mysqldump and pack it into a .tar.gz.

    The plain .sql dump is removed after archiving, so only the
    compressed archive remains in DUMP_EXPORT_LOCATION.
    """
    msg('Export DB')
    filename = 'database_dump_' + self.config.TIME_STR + '.sql'
    dump_path = os.path.join(self.config.BUILD_PATH, self.DUMP_EXPORT_LOCATION, filename)
    run_cmd('mysqldump -h %s -u %s -p%s %s > %s'
            % (self.HOST, self.ROOT_USER, self.ROOT_PASS, self.NAME, dump_path))
    # Close the archive and remove the raw dump even if tar.add fails,
    # so no stale .sql file with data is left behind.
    tar = tarfile.open(dump_path + '.tar.gz', 'w:gz')
    try:
        tar.add(dump_path, filename)
    finally:
        tar.close()
        os.remove(dump_path)
def test_run_parallel(self):
    """Run codecept tests in parallel containers, split by suite or by group.

    When more than one suite is configured, one container is started per
    suite; otherwise robo splits tests into ``number_of_groups`` groups
    and one container is started per generated group file. Blocks until
    all containers finish, merges the HTML/XML reports, then tears down
    the selenium container.
    """
    self.robo_init()
    split_type = cmd_options.parallel_group_by
    number_of_groups = int(cmd_options.parallel_test_groups)
    suites = cmd_options.parallel_test_suites.split(',')
    self.docker_codecept('build')
    self.docker_codecept('clean')
    # Maps command container id -> {'finished': bool, 'suite'/'group': label}.
    command_containers_ids = {}
    # Run parallel testing of suites.
    if len(suites) > 1:
        number_of_groups = len(suites)
        for idx in range(len(suites)):
            suite = suites[idx]
            suite_number = idx + 1
            command_container_id = self.docker_codecept(
                'run tests/%s --html report_parallel_%s.html --xml report_parallel_%s.xml'
                % (suite, suite_number, suite_number),
                parallel=True)
            command_containers_ids[command_container_id] = {
                'finished': False,
                'suite': suite
            }
    # Run parallel testing of groups.
    else:
        self.docker_robo('parallel:split-by-%s %s' % (split_type, number_of_groups))
        for group in xrange(1, number_of_groups + 1):
            # robo only creates a group file when there are tests for it.
            group_file_path = os.path.join(
                self.config.BUILD_PATH, self.config.TESTS['LOCATION'],
                'tests/_output/parallel_group_%s' % group)
            if os.path.isfile(group_file_path):
                command_container_id = self.docker_codecept(
                    'run --group parallel_group_%s --html report_parallel_%s.html --xml report_parallel_%s.xml -vvv'
                    % (group, group, group),
                    parallel=True)
                command_containers_ids[command_container_id] = {
                    'finished': False,
                    'group': 'parallel_group_%s' % group
                }
    if len(command_containers_ids) > 0:
        # Reuse number_of_groups as the count actually started for merging.
        number_of_groups = len(command_containers_ids)
        message(
            '%s containers with parallel test groups has been started. '
            'We need to wait until all of them will finish.' % number_of_groups,
            'info')
        self.wait_for_all_containers_to_finish(command_containers_ids)
        self.docker_robo('parallel:merge-htmlresults %s' % (number_of_groups))
        self.docker_robo('parallel:merge-xmlresults %s' % (number_of_groups))
    else:
        message('Seems like there is no group files created.', 'info')
    # Tear down the selenium container started by docker_codecept.
    run_cmd('docker stop selenium-test-%s' % self.docker.base_alias)
    run_cmd('docker rm selenium-test-%s' % self.docker.base_alias)
def get_container_ip(self):
    """Return the web container's IP address, starting the project first if needed."""
    web_alias = self._container_alias("web:web").split(':')[0]
    container_running = run_cmd('docker ps -q -f name=%s' % web_alias, return_output=True)
    if not container_running:
        message("Docker is not up for this project and it will be started. It is required to add config entry to /etc/hosts.", 'info')
        self.docker_up()
    return run_cmd(
        'docker inspect --format "{{ .NetworkSettings.IPAddress }}" %s' % web_alias,
        return_output=True)
def docker_codecept(self, cmd='', parallel=False):
    """Run a codecept command in a one-off container linked to selenium.

    Ensures a selenium-test-<alias> container is running first (an
    exited leftover is removed and recreated). With ``parallel=True``
    the codecept container is started detached and its container id is
    returned; otherwise the command runs in the foreground with --rm.

    :param cmd: codecept sub-command and arguments.
    :param parallel: start detached and return the container id.
    """
    link_selenium_name = '--link selenium-test-%s:selenium' % self.docker.base_alias
    # 'ps -a' matches containers in any state; plain 'ps' only running ones.
    selenium_container_id = run_cmd('docker ps -a -q -f name=selenium-test-%s' % self.docker.base_alias, return_output=True)
    selenium_running_container_id = run_cmd('docker ps -q -f name=selenium-test-%s' % self.docker.base_alias, return_output=True)
    selenium_container_removed = False
    # Case when container is exited.
    if selenium_container_id and not selenium_running_container_id:
        run_cmd('docker rm selenium-test-%s' % self.docker.base_alias)
        selenium_container_removed = True
    if not selenium_container_id or selenium_container_removed:
        # /dev/shm mount avoids browser crashes from a too-small shm size.
        run_cmd('docker run -d -v /dev/shm:/dev/shm --name selenium-test-%s %s %s %s %s %s'
                % (self.docker.base_alias, self.docker.get_env_file(),
                   self.docker._get_extra_hosts(), self.docker._get_links(),
                   self.docker._get_hosts(),
                   self.config.TESTS['IMAGES']['selenium_image'][0]))
        # Give selenium time to come up before codecept connects to it.
        print "Waiting 5 sec."
        time.sleep(5)
    if parallel:
        command_container_id = run_cmd(
            'docker run -d %s %s %s %s %s %s -w %s %s codecept %s'
            % (self.docker.get_env_file(), self.docker._get_extra_hosts(),
               self.docker._get_volumes(), self.docker._get_links(),
               self.docker._get_hosts(), link_selenium_name,
               os.path.join('/app', self.config.TESTS['LOCATION']),
               self.config.TESTS['IMAGES']['codecept_image'][0], cmd),
            return_output=True)
        return command_container_id
    else:
        run_cmd('docker run --rm %s %s %s %s %s %s -w %s %s codecept %s'
                % (self.docker.get_env_file(), self.docker._get_volumes(),
                   self.docker._get_extra_hosts(), self.docker._get_links(),
                   self.docker._get_hosts(), link_selenium_name,
                   os.path.join('/app', self.config.TESTS['LOCATION']),
                   self.config.TESTS['IMAGES']['codecept_image'][0], cmd))
def docker_codecept(self, cmd='', parallel=False):
    """Run a codecept command in a one-off container linked to selenium.

    Ensures a selenium-test-<alias> container is running first (an
    exited leftover is removed and recreated). With ``parallel=True``
    the codecept container is started detached and its container id is
    returned; otherwise the command runs in the foreground with --rm.

    :param cmd: codecept sub-command and arguments.
    :param parallel: start detached and return the container id.
    """
    link_selenium_name = '--link selenium-test-%s:selenium' % self.docker.base_alias
    # 'ps -a' matches containers in any state; plain 'ps' only running ones.
    selenium_container_id = run_cmd('docker ps -a -q -f name=selenium-test-%s' % self.docker.base_alias, return_output=True)
    selenium_running_container_id = run_cmd('docker ps -q -f name=selenium-test-%s' % self.docker.base_alias, return_output=True)
    selenium_container_removed = False
    # Case when container is exited.
    if selenium_container_id and not selenium_running_container_id:
        run_cmd('docker rm selenium-test-%s' % self.docker.base_alias)
        selenium_container_removed = True
    if not selenium_container_id or selenium_container_removed:
        run_cmd('docker run -d --name selenium-test-%s %s %s %s %s'
                % (self.docker.base_alias, self.docker.get_env_file(),
                   self.docker._get_links(), self.docker._get_hosts(),
                   self.config.TESTS['IMAGES']['selenium_image'][0]))
        # Give selenium time to come up before codecept connects to it.
        print "Waiting 5 sec."
        time.sleep(5)
    if parallel:
        command_container_id = run_cmd(
            'docker run -d %s %s %s %s %s -w %s %s codecept %s'
            % (self.docker.get_env_file(), self.docker._get_volumes(),
               self.docker._get_links(), self.docker._get_hosts(),
               link_selenium_name,
               os.path.join('/app', self.config.TESTS['LOCATION']),
               self.config.TESTS['IMAGES']['codecept_image'][0], cmd),
            return_output=True)
        return command_container_id
    else:
        run_cmd('docker run --rm %s %s %s %s %s -w %s %s codecept %s'
                % (self.docker.get_env_file(), self.docker._get_volumes(),
                   self.docker._get_links(), self.docker._get_hosts(),
                   link_selenium_name,
                   os.path.join('/app', self.config.TESTS['LOCATION']),
                   self.config.TESTS['IMAGES']['codecept_image'][0], cmd))
def setfacl(self, group='www-data'):
    """Apply rwX ACLs (current and default) for *group* on APP_LOCATION.

    :param group: group name to grant access to (default 'www-data').
    """
    app_location = self.config.WEB['APP_LOCATION']
    acl_commands = [
        'setfacl -Rm g:%s:rwX %s' % (group, app_location),
        'setfacl -d -Rm g:%s:rwX %s' % (group, app_location),
    ]
    # Regular users (non-zero uid) apply the ACLs through the container;
    # root applies them directly on the host.
    if os.getuid():
        for acl_command in acl_commands:
            self.docker_run(acl_command)
    else:
        for acl_command in acl_commands:
            run_cmd(acl_command)
def export_db(self):
    """Dump the project database with mysqldump and pack it into a .tar.gz.

    The plain .sql dump is removed after archiving, so only the
    compressed archive remains in DUMP_EXPORT_LOCATION.
    """
    msg('Export DB')
    filename = 'database_dump_' + self.config.TIME_STR + '.sql'
    dump_path = os.path.join(self.config.BUILD_PATH, self.DUMP_EXPORT_LOCATION, filename)
    run_cmd('mysqldump -h %s -u %s -p%s %s > %s'
            % (self.HOST, self.ROOT_USER, self.ROOT_PASS, self.NAME, dump_path))
    # Close the archive and remove the raw dump even if tar.add fails,
    # so no stale .sql file with data is left behind.
    tar = tarfile.open(dump_path + '.tar.gz', 'w:gz')
    try:
        tar.add(dump_path, filename)
    finally:
        tar.close()
        os.remove(dump_path)
def get_container_ip(self):
    """Return the web container's IP address, starting the project first if needed."""
    web_alias = self._container_alias("web:web").split(':')[0]
    container_running = run_cmd('docker ps -q -f name=%s' % web_alias, return_output=True)
    if not container_running:
        message(
            "Docker is not up for this project and it will be started. It is required to add config entry to /etc/hosts.",
            'info')
        self.docker_up()
    return run_cmd(
        'docker inspect --format "{{ .NetworkSettings.IPAddress }}" %s' % web_alias,
        return_output=True)
def docker_rmi(self):
    """Remove all images referenced by the project's docker-compose file.

    Services with an explicit 'image' entry are removed by that name;
    locally built services ('build' entry) are removed by the
    <base_alias>_<service> name docker-compose assigns them.
    """
    # safe_load refuses arbitrary YAML object tags; 'with' closes the file
    # even if parsing fails.
    with open(self.compose_path) as docker_compose:
        docker_config = yaml.safe_load(docker_compose)
    images_to_remove = []
    for container in docker_config:
        config = docker_config[container]
        if 'image' in config:
            images_to_remove.append(config['image'])
        elif 'build' in config:
            images_to_remove.append('%s_%s' % (self.base_alias, container))
    run_cmd('docker rmi %s' % ' '.join(images_to_remove), cwd=self.config.BUILD_PATH)
def docker_rmi(self):
    """Remove all images referenced by the project's docker-compose file.

    Services with an explicit 'image' entry are removed by that name;
    locally built services ('build' entry) are removed by the
    <base_alias>_<service> name docker-compose assigns them.
    """
    # safe_load refuses arbitrary YAML object tags; 'with' closes the file
    # even if parsing fails.
    with open(self.compose_path) as docker_compose:
        docker_config = yaml.safe_load(docker_compose)
    images_to_remove = []
    for container in docker_config:
        config = docker_config[container]
        if 'image' in config:
            images_to_remove.append(config['image'])
        elif 'build' in config:
            images_to_remove.append('%s_%s' % (self.base_alias, container))
    run_cmd('docker rmi %s' % ' '.join(images_to_remove), cwd=self.config.BUILD_PATH)
def tests_run(self):
    """Run the codecept test suite, optionally restricted by extra CLI args.

    Requires 'selenium_image' and 'codecept_image' in the TESTS config;
    exits with a non-zero status when either is missing. After the run
    the selenium helper container is stopped and removed.
    """
    if ('selenium_image' not in self.config.TESTS['IMAGES']) or (
            'codecept_image' not in self.config.TESTS['IMAGES']):
        message('selenium_image or codecept_image is missing in TESTS config.', 'error')
        # Exit non-zero: the previous exit(0) reported success on a
        # configuration error, which hides the failure from callers/CI.
        exit(1)
    # Extra CLI args (beyond the sub-command) select specific tests.
    if len(self.config.args) > 2:
        args = 'tests/' + ' '.join(self.config.args[2:])
    else:
        args = ''
    self.docker_codecept('build')
    self.docker_codecept('clean')
    self.docker_codecept('run %s --html --xml' % args)
    run_cmd('docker stop selenium-test-%s' % self.docker.base_alias)
    run_cmd('docker rm selenium-test-%s' % self.docker.base_alias)
def wait_for_all_containers_to_finish(self, command_containers_ids):
    """Poll docker every 10 seconds until all given containers have exited.

    :param command_containers_ids: dict mapping container id -> status
        dict with a 'finished' flag and an optional 'group'/'suite' label
        (used only for progress output). Entries are marked finished in
        place as their containers stop.
    """
    parallel_tests_finished = False
    while not parallel_tests_finished:
        print "\n\nWaiting 10 sec for next test status checking."
        time.sleep(10)
        # Assume done; any still-running container resets this below.
        parallel_tests_finished = True
        for container_id in command_containers_ids:
            # Build a "(group: ...)" / "(suite: ...)" label for output.
            container_tests_info = ''
            for info_name in ['group', 'suite']:
                if info_name in command_containers_ids[container_id]:
                    container_tests_info = '(%s: %s)' % (
                        info_name, command_containers_ids[container_id][info_name])
                    break
            # Empty 'docker ps' output means the container has exited.
            test_running = run_cmd('docker ps -q -f id=%s' % container_id, return_output=True)
            if command_containers_ids[container_id]['finished']:
                print "Container %s finished %s" % (container_id, container_tests_info)
            elif not test_running:
                command_containers_ids[container_id]['finished'] = True
                print "Container %s finished right now %s" % (
                    container_id, container_tests_info)
            else:
                print "Container %s still running %s" % (
                    container_id, container_tests_info)
                parallel_tests_finished = False
    if parallel_tests_finished:
        message('All containers finished', 'info')
def wait_for_all_containers_to_finish(self, command_containers_ids):
    """Poll docker every 10 seconds until all given containers have exited.

    :param command_containers_ids: dict mapping container id -> status
        dict with a 'finished' flag and an optional 'group'/'suite' label
        (used only for progress output). Entries are marked finished in
        place as their containers stop.
    """
    parallel_tests_finished = False
    while not parallel_tests_finished:
        print "\n\nWaiting 10 sec for next test status checking."
        time.sleep(10)
        # Assume done; any still-running container resets this below.
        parallel_tests_finished = True
        for container_id in command_containers_ids:
            # Build a "(group: ...)" / "(suite: ...)" label for output.
            container_tests_info = ''
            for info_name in ['group', 'suite']:
                if info_name in command_containers_ids[container_id]:
                    container_tests_info = '(%s: %s)' % (info_name, command_containers_ids[container_id][info_name])
                    break
            # Empty 'docker ps' output means the container has exited.
            test_running = run_cmd('docker ps -q -f id=%s' % container_id, return_output=True)
            if command_containers_ids[container_id]['finished']:
                print "Container %s finished %s" % (container_id, container_tests_info)
            elif not test_running:
                command_containers_ids[container_id]['finished'] = True
                print "Container %s finished right now %s" % (container_id, container_tests_info)
            else:
                print "Container %s still running %s" % (container_id, container_tests_info)
                parallel_tests_finished = False
    if parallel_tests_finished:
        message('All containers finished', 'info')
def get_image_id(self, image): image_id = run_cmd('docker inspect --format "{{ .ID }}" --type "image" %s' % image, return_output=True) match = re.search('Error: No such image', image_id) print image_id if match is not None: return False return image_id
def get_image_id(self, image): image_id = run_cmd( 'docker inspect --format "{{ .ID }}" --type "image" %s' % image, return_output=True) match = re.search('Error: No such image', image_id) print image_id if match is not None: return False return image_id
def cleanup(self):
    """Remove exited containers, dangling images and orphaned docker volumes."""
    exited_containers = run_cmd('docker ps -a -q -f status=exited', return_output=True)
    if exited_containers:
        # remove exited containers
        run_cmd('docker rm -v $(docker ps -a -q -f status=exited)')
    dangling_images = run_cmd('docker images -f "dangling=true" -q', return_output=True)
    if dangling_images:
        # remove unwanted dangling images
        run_cmd('docker rmi $(docker images -f "dangling=true" -q)')
    # remove unwanted volumes
    run_cmd('docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes')
def cleanup(self):
    """Remove exited containers, dangling images and orphaned docker volumes."""
    exited_containers = run_cmd('docker ps -a -q -f status=exited', return_output=True)
    if exited_containers:
        # remove exited containers
        run_cmd('docker rm -v $(docker ps -a -q -f status=exited)')
    dangling_images = run_cmd('docker images -f "dangling=true" -q', return_output=True)
    if dangling_images:
        # remove unwanted dangling images
        run_cmd('docker rmi $(docker images -f "dangling=true" -q)')
    # remove unwanted volumes
    run_cmd(
        'docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes'
    )
def test_run_parallel(self):
    """Run codecept tests in parallel containers, split by suite or by group.

    When more than one suite is configured, one container is started per
    suite; otherwise robo splits tests into ``number_of_groups`` groups
    and one container is started per generated group file. Blocks until
    all containers finish, merges the HTML/XML reports, then tears down
    the selenium container.
    """
    self.robo_init()
    split_type = cmd_options.parallel_group_by
    number_of_groups = int(cmd_options.parallel_test_groups)
    suites = cmd_options.parallel_test_suites.split(',')
    self.docker_codecept('build')
    self.docker_codecept('clean')
    # Maps command container id -> {'finished': bool, 'suite'/'group': label}.
    command_containers_ids = {}
    # Run parallel testing of suites.
    if len(suites) > 1:
        number_of_groups = len(suites)
        for idx in range(len(suites)):
            suite = suites[idx]
            suite_number = idx + 1
            command_container_id = self.docker_codecept(
                'run tests/%s --html report_parallel_%s.html --xml report_parallel_%s.xml'
                % (suite, suite_number, suite_number),
                parallel=True)
            command_containers_ids[command_container_id] = {'finished': False, 'suite': suite}
    # Run parallel testing of groups.
    else:
        self.docker_robo('parallel:split-by-%s %s' % (split_type, number_of_groups))
        for group in xrange(1, number_of_groups + 1):
            # robo only creates a group file when there are tests for it.
            group_file_path = os.path.join(self.config.BUILD_PATH,
                                           self.config.TESTS['LOCATION'],
                                           'tests/_output/parallel_group_%s' % group)
            if os.path.isfile(group_file_path):
                command_container_id = self.docker_codecept(
                    'run --group parallel_group_%s --html report_parallel_%s.html --xml report_parallel_%s.xml -vvv'
                    % (group, group, group),
                    parallel=True)
                command_containers_ids[command_container_id] = {'finished': False, 'group': 'parallel_group_%s' % group}
    if len(command_containers_ids) > 0:
        # Reuse number_of_groups as the count actually started for merging.
        number_of_groups = len(command_containers_ids)
        message('%s containers with parallel test groups has been started. '
                'We need to wait until all of them will finish.' % number_of_groups,
                'info')
        self.wait_for_all_containers_to_finish(command_containers_ids)
        self.docker_robo('parallel:merge-htmlresults %s' % (number_of_groups))
        self.docker_robo('parallel:merge-xmlresults %s' % (number_of_groups))
    else:
        message('Seems like there is no group files created.', 'info')
    # Tear down the selenium container started by docker_codecept.
    run_cmd('docker stop selenium-test-%s' % self.docker.base_alias)
    run_cmd('docker rm selenium-test-%s' % self.docker.base_alias)
def add_entry_to_etc_hosts(self):
    """Append the project's VIRTUAL_HOST names to /etc/hosts, pointing at nginx-proxy.

    Resolves the nginx-proxy container IP first and aborts with a warning
    when the proxy is not running. The entry is only written when the
    host names are not already present; sudo is used to move the edited
    copy into place. Failures are reported as a warning, never raised.
    """
    try:
        nginx_proxy_ip = self.get_nginx_proxy_ip()
        if not nginx_proxy_ip:
            # No proxy container -> nothing sensible to point the hosts at.
            raise Exception('nginx-proxy container is not running')
        with open('/etc/hosts', 'rt') as f:
            host_names = self.docker_get_compose_option_value(
                'web', 'VIRTUAL_HOST', 'environment').replace(',', ' ')
            phpmyadmin_host = self.docker_get_compose_option_value(
                'phpmyadmin', 'VIRTUAL_HOST', 'environment').replace(',', ' ')
            if phpmyadmin_host:
                host_names += ' ' + phpmyadmin_host
            s = f.read()
        if host_names not in s:
            s += "\n%s\t\t%s\n" % (nginx_proxy_ip, host_names)
            # Write a temp copy first; moving it into place needs sudo.
            with open('/tmp/etc_hosts.tmp', 'wt') as outf:
                outf.write(s)
            run_cmd('sudo mv /tmp/etc_hosts.tmp /etc/hosts')
            message(
                "Config entry with hosts: >>>%s<<< has been successfully added to /etc/hosts file."
                % host_names, 'info')
        else:
            message(
                "Config entry with hosts: >>>%s<<< was not added to /etc/hosts file because it already contains entry with this hosts"
                % host_names, 'info')
    except Exception:
        # Best effort: a missing /etc/hosts entry must not abort the build.
        message('Config entry was not added to /etc/hosts.', 'warning')
def docker_codecept(self, cmd='', link_selenium=True): link_selenium_chrome_name = '' link_selenium_ff_name = '' if link_selenium: run_cmd('docker run -d --name selenium-test-chrome-%s %s %s %s' % (self.docker.base_alias, self.docker._get_links(), self.docker._get_hosts(), self.config.TESTS['IMAGES']['selenium_chrome_image'][0])) link_selenium_chrome_name = '--link selenium-test-chrome-%s:selenium_ch' % self.docker.base_alias print "Waitng 5 sec." time.sleep(5) run_cmd('docker run -d --name selenium-test-ff-%s %s %s %s' % (self.docker.base_alias, self.docker._get_links(), self.docker._get_hosts(), self.config.TESTS['IMAGES']['selenium_ff_image'][0])) link_selenium_ff_name = '--link selenium-test-ff-%s:selenium_ff' % self.docker.base_alias print "Waitng 5 sec." time.sleep(5) run_cmd('docker run --rm %s %s %s %s %s -w %s %s codecept %s' % (self.docker._get_volumes(), self.docker._get_links(), self.docker._get_hosts(), link_selenium_chrome_name, link_selenium_ff_name, os.path.join('/app', self.config.TESTS['LOCATION']), self.config.TESTS['IMAGES']['codecept_image'][0], cmd))
def docker_update_images(self):
    """Refresh all docker images used by the project.

    Pulls/rebuilds the docker-compose services, then updates every image
    listed in DEV_DOCKER_IMAGES and TESTS['IMAGES']. Custom images (those
    with a Dockerfile path) are rebuilt only when their Dockerfile or base
    image changed, tracked via md5 hashes persisted in
    docker/custom_images/hashes.yml.
    """
    run_cmd('docker-compose stop', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose rm -f', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose pull', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose build --pull', cwd=self.config.BUILD_PATH)
    # Collect (image_name, dockerfile_path_or_None) tuples from both configs.
    ALL_DEV_DOCKER_IMAGES = []
    for image in self.config.DEV_DOCKER_IMAGES:
        if isinstance(self.config.DEV_DOCKER_IMAGES[image], tuple):
            ALL_DEV_DOCKER_IMAGES.append(self.config.DEV_DOCKER_IMAGES[image])
        elif isinstance(self.config.DEV_DOCKER_IMAGES[image], list):
            ALL_DEV_DOCKER_IMAGES += self.config.DEV_DOCKER_IMAGES[image]
    for image in self.config.TESTS['IMAGES']:
        if isinstance(self.config.TESTS['IMAGES'][image], tuple):
            ALL_DEV_DOCKER_IMAGES.append(self.config.TESTS['IMAGES'][image])
        elif isinstance(self.config.TESTS['IMAGES'][image], list):
            ALL_DEV_DOCKER_IMAGES += self.config.TESTS['IMAGES'][image]
    custom_images_hashes_path = os.path.join(self.config.BUILD_PATH, 'docker', 'custom_images', 'hashes.yml')
    hashes_content = {}
    if os.path.exists(custom_images_hashes_path):
        custom_images_hashes = open(custom_images_hashes_path)
        hashes_content = yaml.load(custom_images_hashes)
        custom_images_hashes.close()
    # An empty/corrupt hashes file yields a non-dict -> start from scratch.
    if type(hashes_content) is not dict:
        hashes_content = {}
    for DEV_DOCKER_IMAGE, DEV_DOCKER_IMAGE_DOCKERFILE_PATH in ALL_DEV_DOCKER_IMAGES:
        if DEV_DOCKER_IMAGE_DOCKERFILE_PATH is not None:
            needs_rebuild = False
            # NOTE(review): this file handle is never closed, and image_id
            # below may be unbound when no FROM line matches — consider fixing.
            dockerfile = open(DEV_DOCKER_IMAGE_DOCKERFILE_PATH + '/Dockerfile')
            dockerfile_content = dockerfile.read()
            # Pull image that custom images inherits from and get ID of that image.
            base_image_match = re.search('FROM (.+)', dockerfile_content)
            images_id = ''
            if base_image_match is not None:
                base_image = base_image_match.group(1)
                message('Pulling image %s that is used as base image in %s custom image.'
                        % (base_image, DEV_DOCKER_IMAGE), 'info')
                run_cmd('docker pull %s' % base_image)
                image_id = self.get_image_id(base_image)
            # Create hash from base image ID and Dockerfile content and check if it has changed, if yes rebuild it.
            # This also handles the situation when base image will be updated manually
            # or from other project thanks to base image id checking.
            dockerfile_hash = hashlib.md5(image_id + dockerfile_content).hexdigest()
            if not needs_rebuild and DEV_DOCKER_IMAGE not in hashes_content:
                message('Hash for image %s not found in %s. '
                        'Custom image will be rebuilt as we can\'t check whether it is up to date or not.'
                        % (DEV_DOCKER_IMAGE, custom_images_hashes_path), 'info')
                needs_rebuild = True
            if not needs_rebuild and hashes_content[DEV_DOCKER_IMAGE] != dockerfile_hash:
                message('Docker file for image %s was changed or base image was updated so image will be rebuilt.' % DEV_DOCKER_IMAGE, 'info')
                needs_rebuild = True
            hashes_content[DEV_DOCKER_IMAGE] = dockerfile_hash
            if needs_rebuild:
                run_cmd('docker build --no-cache --pull -t %s %s'
                        % (DEV_DOCKER_IMAGE,
                           os.path.join(self.config.BUILD_PATH, DEV_DOCKER_IMAGE_DOCKERFILE_PATH)),
                        cwd=self.config.BUILD_PATH)
            else:
                message('Image %s is up to date and will not be rebuilt.' % DEV_DOCKER_IMAGE, 'info')
        else:
            # Plain published image (no Dockerfile path): just pull the latest.
            run_cmd('docker pull %s' % DEV_DOCKER_IMAGE)
    # Persist the updated hashes for the next run.
    if len(hashes_content.keys()) > 0:
        custom_images_hashes = open(custom_images_hashes_path, 'w')
        yaml.dump(hashes_content, custom_images_hashes, default_flow_style=False)
        custom_images_hashes.close()
def docker_update_images(self):
    """Refresh all docker images used by the project.

    Pulls/rebuilds the docker-compose services, then updates every image
    listed in DEV_DOCKER_IMAGES and TESTS['IMAGES']. Custom images (those
    with a Dockerfile path) are rebuilt only when their Dockerfile or base
    image changed, tracked via md5 hashes persisted in
    docker/custom_images/hashes.yml.
    """
    run_cmd('docker-compose stop', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose rm -f', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose pull', cwd=self.config.BUILD_PATH)
    run_cmd('docker-compose build --pull', cwd=self.config.BUILD_PATH)
    # Collect (image_name, dockerfile_path_or_None) tuples from both configs.
    ALL_DEV_DOCKER_IMAGES = []
    for image in self.config.DEV_DOCKER_IMAGES:
        if isinstance(self.config.DEV_DOCKER_IMAGES[image], tuple):
            ALL_DEV_DOCKER_IMAGES.append(
                self.config.DEV_DOCKER_IMAGES[image])
        elif isinstance(self.config.DEV_DOCKER_IMAGES[image], list):
            ALL_DEV_DOCKER_IMAGES += self.config.DEV_DOCKER_IMAGES[image]
    for image in self.config.TESTS['IMAGES']:
        if isinstance(self.config.TESTS['IMAGES'][image], tuple):
            ALL_DEV_DOCKER_IMAGES.append(
                self.config.TESTS['IMAGES'][image])
        elif isinstance(self.config.TESTS['IMAGES'][image], list):
            ALL_DEV_DOCKER_IMAGES += self.config.TESTS['IMAGES'][image]
    custom_images_hashes_path = os.path.join(self.config.BUILD_PATH, 'docker', 'custom_images', 'hashes.yml')
    hashes_content = {}
    if os.path.exists(custom_images_hashes_path):
        custom_images_hashes = open(custom_images_hashes_path)
        hashes_content = yaml.load(custom_images_hashes)
        custom_images_hashes.close()
    # An empty/corrupt hashes file yields a non-dict -> start from scratch.
    if type(hashes_content) is not dict:
        hashes_content = {}
    for DEV_DOCKER_IMAGE, DEV_DOCKER_IMAGE_DOCKERFILE_PATH in ALL_DEV_DOCKER_IMAGES:
        if DEV_DOCKER_IMAGE_DOCKERFILE_PATH is not None:
            needs_rebuild = False
            # NOTE(review): this file handle is never closed, and image_id
            # below may be unbound when no FROM line matches — consider fixing.
            dockerfile = open(DEV_DOCKER_IMAGE_DOCKERFILE_PATH + '/Dockerfile')
            dockerfile_content = dockerfile.read()
            # Pull image that custom images inherits from and get ID of that image.
            base_image_match = re.search('FROM (.+)', dockerfile_content)
            images_id = ''
            if base_image_match is not None:
                base_image = base_image_match.group(1)
                message(
                    'Pulling image %s that is used as base image in %s custom image.'
                    % (base_image, DEV_DOCKER_IMAGE), 'info')
                run_cmd('docker pull %s' % base_image)
                image_id = self.get_image_id(base_image)
            # Create hash from base image ID and Dockerfile content and check if it has changed, if yes rebuild it.
            # This also handles the situation when base image will be updated manually
            # or from other project thanks to base image id checking.
            dockerfile_hash = hashlib.md5(image_id + dockerfile_content).hexdigest()
            if not needs_rebuild and DEV_DOCKER_IMAGE not in hashes_content:
                message(
                    'Hash for image %s not found in %s. '
                    'Custom image will be rebuilt as we can\'t check whether it is up to date or not.'
                    % (DEV_DOCKER_IMAGE, custom_images_hashes_path), 'info')
                needs_rebuild = True
            if not needs_rebuild and hashes_content[
                    DEV_DOCKER_IMAGE] != dockerfile_hash:
                message(
                    'Docker file for image %s was changed or base image was updated so image will be rebuilt.'
                    % DEV_DOCKER_IMAGE, 'info')
                needs_rebuild = True
            hashes_content[DEV_DOCKER_IMAGE] = dockerfile_hash
            if needs_rebuild:
                run_cmd('docker build --no-cache --pull -t %s %s'
                        % (DEV_DOCKER_IMAGE,
                           os.path.join(self.config.BUILD_PATH, DEV_DOCKER_IMAGE_DOCKERFILE_PATH)),
                        cwd=self.config.BUILD_PATH)
            else:
                message(
                    'Image %s is up to date and will not be rebuilt.'
                    % DEV_DOCKER_IMAGE, 'info')
        else:
            # Plain published image (no Dockerfile path): just pull the latest.
            run_cmd('docker pull %s' % DEV_DOCKER_IMAGE)
    # Persist the updated hashes for the next run.
    if len(hashes_content.keys()) > 0:
        custom_images_hashes = open(custom_images_hashes_path, 'w')
        yaml.dump(hashes_content, custom_images_hashes, default_flow_style=False)
        custom_images_hashes.close()
def docker_run(self, cmd):
    """Execute *cmd* through the project's docker command wrapper."""
    full_command = '%s %s' % (self.docker_command(), cmd)
    run_cmd(full_command)
def docker_up(self):
    """Start all docker-compose services in detached mode."""
    compose_command = 'docker-compose up -d'
    run_cmd(compose_command, cwd=self.config.BUILD_PATH)
def gulp_debug(self):
    """Run 'gulp debug' in the droopler base theme and the subtheme."""
    print("GULP DEBUG")
    theme_dirs = [
        '/var/www/html/profiles/contrib/droopler/themes/custom/droopler_theme',
        '/var/www/html/themes/custom/droopler_subtheme',
    ]
    for theme_dir in theme_dirs:
        run_cmd("docker run --rm -v %s:/var/www/html -w %s droptica/gulptheme gulp debug"
                % (self.config.WEB['APP_ROOT'], theme_dir))
def gulp_dist(self):
    """Run 'gulp dist' (production assets) in the base theme and the subtheme."""
    print("GULP DIST (dumping prod assets)")
    theme_dirs = [
        '/var/www/html/profiles/contrib/droopler/themes/custom/droopler_theme',
        '/var/www/html/themes/custom/droopler_subtheme',
    ]
    for theme_dir in theme_dirs:
        run_cmd("docker run --rm -v %s:/var/www/html -w %s droptica/gulptheme gulp dist"
                % (self.config.WEB['APP_ROOT'], theme_dir))
def create_db(self):
    """Create the project database if it does not exist yet."""
    msg('Create DB')
    query = 'CREATE DATABASE IF NOT EXISTS %s;' % self.NAME
    run_cmd('mysql -h %s -u %s -p%s -e "%s"'
            % (self.HOST, self.ROOT_USER, self.ROOT_PASS, query))
def run(self, command):
    """Execute a drush *command* against this site's root and URI."""
    drush_command = "drush -r %s --uri=%s %s " % (self.path, self.uri, command)
    return run_cmd(drush_command)
def get_nginx_proxy_ip(self):
    """Return the IP address of the running nginx-proxy container, or False."""
    proxy_running = run_cmd('docker ps -q -f name=nginx-proxy', return_output=True)
    if not proxy_running:
        return False
    inspect_command = 'docker inspect --format "{{ .NetworkSettings.IPAddress }}" nginx-proxy'
    return run_cmd(inspect_command, return_output=True)
def gulp_build(self):
    """Compile frontend assets via the droptica/babel gulp build task."""
    print("GULP COMPILE")
    build_command = ("docker run --rm -v %s:/app -w /app droptica/babel npm run gulp -- build"
                     % self.config.WEB['APP_ROOT'])
    run_cmd(build_command)
def docker_robo(self, cmd):
    """Run a robo task inside a one-off codecept container."""
    workdir = os.path.join('/app', self.config.TESTS['LOCATION'])
    robo_image = self.config.TESTS['IMAGES']['codecept_image'][0]
    run_cmd('docker run --rm %s %s %s %s -w %s %s robo %s'
            % (self.docker._get_extra_hosts(), self.docker._get_volumes(),
               self.docker._get_links(), self.docker._get_hosts(),
               workdir, robo_image, cmd))
def chmod_private_files(self):
    """Make every site's private files directory world-writable."""
    for site in self.config.DRUPAL:
        private_dir = os.path.join(self.config.WEB['APP_ROOT'],
                                   self.config.DRUPAL[site]['PRIVATE_FILES_DST'],
                                   'private')
        run_cmd('chmod -Rf 777 %s' % private_dir)
def docker_stop(self):
    """Stop all docker-compose services for this project."""
    compose_command = 'docker-compose stop'
    run_cmd(compose_command, cwd=self.config.BUILD_PATH)
def chmod_files(self):
    """Make every site's public files directory world-writable."""
    for site in self.config.DRUPAL:
        files_dir = os.path.join(self.config.WEB['APP_ROOT'],
                                 self.config.DRUPAL[site]['FILES_DST'],
                                 'files')
        run_cmd('chmod -Rf 777 %s' % files_dir)
def docker_rm(self):
    """Remove all stopped docker-compose containers for this project."""
    compose_command = 'docker-compose rm -f'
    run_cmd(compose_command, cwd=self.config.BUILD_PATH)
def create_db(self):
    """Create the project database if it does not exist yet."""
    msg('Create DB')
    query = 'CREATE DATABASE IF NOT EXISTS %s;' % self.NAME
    run_cmd('mysql -h %s -u %s -p%s -e "%s"'
            % (self.HOST, self.ROOT_USER, self.ROOT_PASS, query))
def npm_install(self):
    """Install Node.js packages for the base theme and the subtheme."""
    print("INSTALL NPM PACKAGES (Node.js)")
    theme_dirs = [
        '/var/www/html/profiles/contrib/droopler/themes/custom/droopler_theme',
        '/var/www/html/themes/custom/droopler_subtheme',
    ]
    for theme_dir in theme_dirs:
        run_cmd("docker run --rm -v %s:/var/www/html -w %s droptica/gulptheme npm install"
                % (self.config.WEB['APP_ROOT'], theme_dir))
def import_file(self, file_path):
    """Import the SQL dump at *file_path* into the project database."""
    mysql_command = 'mysql -h %s -u %s -p%s %s < %s' % (
        self.HOST, self.ROOT_USER, self.ROOT_PASS, self.NAME, file_path)
    run_cmd(mysql_command)
def gulp_compile(self):
    """Run 'gulp compile' (dev assets) in the base theme and the subtheme."""
    print("GULP COMPILE (dumping dev assets)")
    theme_dirs = [
        '/var/www/html/profiles/contrib/droopler/themes/custom/droopler_theme',
        '/var/www/html/themes/custom/droopler_subtheme',
    ]
    for theme_dir in theme_dirs:
        run_cmd("docker run --rm -v %s:/var/www/html -w %s droptica/gulptheme gulp compile"
                % (self.config.WEB['APP_ROOT'], theme_dir))
def drop_db(self):
    """Drop the project database."""
    msg('Drop DB')
    query = 'DROP DATABASE %s;' % self.NAME
    run_cmd('mysql -h %s -u %s -p%s -e "%s"'
            % (self.HOST, self.ROOT_USER, self.ROOT_PASS, query))
def chmod_private_files(self):
    """Make every site's private files directory world-writable."""
    for site in self.config.DRUPAL:
        private_dir = os.path.join(self.config.WEB['APP_ROOT'],
                                   self.config.DRUPAL[site]['PRIVATE_FILES_DST'],
                                   'private')
        run_cmd('chmod -Rf 777 %s' % private_dir)
def import_file(self, file_path):
    """Import the SQL dump at *file_path* into the project database."""
    mysql_command = 'mysql -h %s -u %s -p%s %s < %s' % (
        self.HOST, self.ROOT_USER, self.ROOT_PASS, self.NAME, file_path)
    run_cmd(mysql_command)
def chmod_files(self):
    """Make every site's public files directory world-writable."""
    for site in self.config.DRUPAL:
        files_dir = os.path.join(self.config.WEB['APP_ROOT'],
                                 self.config.DRUPAL[site]['FILES_DST'],
                                 'files')
        run_cmd('chmod -Rf 777 %s' % files_dir)