def test_get_container_info_throws_exception():
    """Verify that asking for a container that does not exist raises
    ContainerUnavailableError rather than returning a value.
    """
    with pytest.raises(ContainerUnavailableError):
        Containers.get_container_info(
            project='test_project',
            service='test_service',
        )
def setup_kafka_tailer(self, pane_id):
    """Open a kafka console consumer inside the kafka container, tailing
    every topic from the beginning, in the given tmux pane.
    """
    project = self.containers.project
    kafka_id = Containers.get_container_info(project, 'kafka').get('Id')
    zk_ip = Containers.get_container_ip_address(project, 'zookeeper')
    # Drop into a shell inside the kafka container first, then start the
    # console consumer pointed at the zookeeper container.
    self._tmux_send_keys(pane_id, "docker exec -it {} bash".format(kafka_id))
    consumer_cmd = (
        "/opt/kafka_2.10-0.8.2.1/bin/kafka-console-consumer.sh "
        "--from-beginning --zookeeper {}:2181 --blacklist None"
    ).format(zk_ip)
    self._tmux_send_keys(pane_id, consumer_cmd)
def containers(compose_file, services, dbs, replhandler, rbrsource, schematracker):
    """Bring up all compose services and wait until they are healthy,
    then yield the Containers handle for the duration of the tests.
    """
    with Containers(compose_file, services) as containers:
        # Poll until the replication handler container reports an IP
        # address, which signals that all containers have spun up.
        while Containers.get_container_ip_address(
                containers.project, replhandler) is None:
            pass
        for db_name in dbs:
            db_health_check(containers, db_name, timeout_seconds)
        replication_handler_health_check(
            containers, rbrsource, schematracker, timeout_seconds)
        yield containers
def test_get_container_ip(containers):
    """Assert that the IP returned by Containers.get_container_ip_address
    matches what ``docker inspect`` reports for the same container.
    """
    actual_ip = Containers.get_container_ip_address(
        project=containers.project,
        service=ZOOKEEPER,
        timeout_seconds=5,
    )
    container_id = Containers.get_container_info(
        project=containers.project,
        service=ZOOKEEPER,
    )['Id']
    # The quadruple braces render as literal '{{ ... }}' after .format(),
    # which is the Go-template syntax docker inspect expects.
    command = (
        "docker inspect --format "
        "'{{{{ .NetworkSettings.IPAddress }}}}' {container_id}"
    ).format(container_id=container_id)
    # Pass the command as a single string: wrapping it in a one-element
    # list together with shell=True is a documented subprocess pitfall
    # (extra list elements become arguments to the shell itself).
    expected_ip = subprocess.check_output(command, shell=True)
    # check_output returns bytes on Python 3; decode before comparing
    # against the str returned by get_container_ip_address.
    assert expected_ip.decode('utf-8').rstrip() == actual_ip
def setup_containers(self):
    """Spin up the compose services, block until every database and the
    replication handler pass their health checks, then yield control.
    """
    with Containers(self.compose_file, self.services) as self.containers:
        for db in self.dbs:
            db_health_check(
                containers=self.containers,
                db_name=db,
                timeout_seconds=120,
            )
        replication_handler_health_check(
            containers=self.containers,
            rbrsource='rbrsource',
            schematracker='schematracker',
            timeout_seconds=120,
        )
        yield
def test_get_container_info(containers):
    """Assert that querying a running container returns its docker info
    labelled with the expected compose service name.

    The original ``container_info is not ContainerUnavailableError`` check
    was removed: an identity comparison against the exception class can
    never fail (get_container_info raises on failure), so it asserted
    nothing.
    """
    container_info = Containers.get_container_info(
        project=containers.project,
        service=ZOOKEEPER,
    )
    assert container_info is not None
    assert container_info['Labels'].get(
        'com.docker.compose.service') == ZOOKEEPER
def containers(compose_file, services, dbs, replhandler, rbrsource, schematracker):
    """Start every compose service, wait for all of them to become
    healthy, and yield the Containers context to the tests.
    """
    with Containers(compose_file, services) as containers:
        # Block until the replication handler container has an IP
        # address — a proxy for "all containers have spun up".
        handler_ip = None
        while handler_ip is None:
            handler_ip = Containers.get_container_ip_address(
                containers.project, replhandler
            )
        for db_name in dbs:
            db_health_check(containers, db_name, timeout_seconds)
        replication_handler_health_check(
            containers, rbrsource, schematracker, timeout_seconds)
        yield containers
def test_compose_prefix(test_container):
    """Assert compose_prefix builds the expected docker-compose command
    prefix (compose file path plus project name).
    """
    if test_container._is_envvar_set('OPEN_SOURCE_MODE'):
        file_name = "docker-compose-opensource.yml"
    else:
        file_name = "docker-compose.yml"
    project_name = test_container.project
    # Three levels up from this test file is the repository root.
    file_path = os.path.dirname(
        os.path.dirname(os.path.dirname(__file__)))
    expected_result = (
        "docker-compose "
        "--file={file_path}"
        "/data_pipeline/testing_helpers/{file_name} "
        "--project-name={project_name}"
    ).format(
        file_path=file_path,
        file_name=file_name,
        project_name=project_name,
    )
    assert Containers.compose_prefix() == expected_result
def mock_state_cluster_host(self, containers_without_repl_handler, rbrstate):
    """Return the IP address of the state-db container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, rbrstate)
def mock_tracker_cluster_host(self, containers_without_repl_handler, schematracker):
    """Return the IP address of the schema-tracker container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, schematracker)
def mock_source_cluster_host(self, containers_without_repl_handler):
    """Return the IP address of the rbrsource container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, 'rbrsource')
def containers():
    """Yield a default Containers context for the duration of the tests."""
    with Containers() as running_containers:
        yield running_containers
def zookeeper_ip(self, containers_without_repl_handler):
    """Return the IP address of the zookeeper container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, 'zookeeper')
def setup_rh_logs(self, pane_id):
    """Tail the replication handler's docker logs in the given tmux pane."""
    rh_info = Containers.get_container_info(
        self.containers.project, 'replicationhandler')
    self._tmux_send_keys(
        pane_id, 'docker logs -f {}'.format(rh_info.get('Id')))
def get_service_host(containers, service_name):
    """Return the IP address of the named service's container."""
    return Containers.get_container_ip_address(
        containers.project, service_name)
def schematizer_ip(self, containers_without_repl_handler):
    """Return the IP address of the schematizer container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, 'schematizer')
def kafka_ip(self, containers_without_repl_handler):
    """Return the IP address of the kafka container."""
    project = containers_without_repl_handler.project
    return Containers.get_container_ip_address(project, 'kafka')
def setup_mysql_shell(self, pane_id):
    """Open a mysql shell against the rbrsource container in the pane."""
    host_ip = Containers.get_container_ip_address(
        self.containers.project, 'rbrsource')
    self._tmux_send_keys(
        pane_id, 'mysql -uyelpdev -h{} --database=yelp'.format(host_ip))
def containers_without_repl_handler(compose_file, services_without_repl_handler, dbs):
    """Start the compose services minus the replication handler, wait for
    every database to pass its health check, then yield the handle.
    """
    with Containers(compose_file, services_without_repl_handler) as containers:
        for db_name in dbs:
            db_health_check(containers, db_name, timeout_seconds)
        yield containers
def test_container():
    """Provide a fresh default Containers instance."""
    return Containers()