Example No. 1
def started_mysql_8_0():
    try:
        mysql_8_0_node.start_and_wait()
        yield mysql_8_0_node
    finally:
        mysql_8_0_node.close()
        run_and_check(['docker-compose', '-p', cluster.project_name, '-f', mysql_8_0_docker_compose, 'down', '--volumes', '--remove-orphans'])
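
This generator is evidently a pytest fixture (the @pytest.fixture decorator falls outside the snippet). A minimal sketch of how a test would consume it, assuming the decorator is present and that query() executes SQL against the node, as in Example 6:

def test_mysql_is_reachable(started_mysql_8_0):
    # The fixture yields the running MySQL node; teardown (close() and
    # 'docker-compose down') runs automatically after the test.
    started_mysql_8_0.query("SELECT 1")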
Example No. 2
    def start_and_wait(self):
        run_and_check(['docker-compose',
                       '-p', cluster.project_name,
                       '-f', self.docker_compose,
                       'up', '--no-recreate', '-d',
                       ])
        self.wait_mysql_to_start(120)
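
None of these snippets show run_and_check itself. A minimal sketch of what such a helper typically looks like, assuming it wraps subprocess.run, raises on a nonzero exit code unless nothrow=True, and returns the captured stdout; the r.stdout access in Example 12 suggests some versions return the CompletedProcess instead:

import subprocess

def run_and_check(args, shell=False, stdout=subprocess.PIPE, nothrow=False, timeout=300):
    # stdout may be the default pipe or an open file, as in Example 8
    # where compose logs are redirected to disk.
    res = subprocess.run(args, shell=shell, stdout=stdout,
                         stderr=subprocess.PIPE, timeout=timeout)
    if res.returncode != 0 and not nothrow:
        raise Exception(f"Command {args} returned {res.returncode}: {res.stderr}")
    return res.stdout.decode() if res.stdout else ""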
Example No. 3
def dotnet_container():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH,
                                  'docker_compose_dotnet_client.yml')
    run_and_check([
        'docker-compose', '-p', cluster.project_name, '-f', docker_compose,
        'up', '--no-recreate', '-d', '--no-build'
    ])
    yield docker.from_env().containers.get(cluster.project_name + '_dotnet1_1')
Example No. 4
def golang_container():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH,
                                  'docker_compose_mysql_golang_client.yml')
    run_and_check([
        'docker-compose', '-p', cluster.project_name, '-f', docker_compose,
        'up', '--no-recreate', '-d', '--no-build'
    ])
    yield docker.DockerClient(
        base_url='unix:///var/run/docker.sock',
        version=cluster.docker_api_version,
        timeout=600).containers.get(cluster.project_name + '_golang1_1')
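
Examples 3 and 4 differ only in how the docker-py client is constructed: from_env() reads DOCKER_HOST and related environment variables, while DockerClient pins the socket, API version, and timeout explicitly. Either way the fixture yields a Container object; a usage sketch in which the /client binary and its flags are hypothetical:

def test_golang_client(golang_container):
    # exec_run returns (exit_code, output); demux=True splits the output
    # into a (stdout, stderr) pair.
    code, (out, err) = golang_container.exec_run(
        "/client --host clickhouse --port 9000", demux=True)
    assert code == 0, err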
Example No. 5
def started_mysql_8_0():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_8_0_for_materialize_mysql.yml')
    mysql_node = MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', 33308, docker_compose)

    try:
        mysql_node.start_and_wait()
        yield mysql_node
    finally:
        mysql_node.close()
        run_and_check(['docker-compose', '-p', cluster.project_name, '-f', docker_compose,
                       'down', '--volumes', '--remove-orphans'])
Example No. 6
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
    mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
    mysql_node.query(
        "CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;"
    )
    clickhouse_node.query(
        "CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializeMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse')"
        .format(service_name))
    check_query(clickhouse_node,
                "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV",
                'test\n')

    try:

        def insert(num):
            for i in range(num):
                query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format(
                    v=i + 1)
                mysql_node.query(query)

        t = threading.Thread(target=insert, args=(10000, ))
        t.start()

        run_and_check([
            'docker-compose', '-p', mysql_node.project_name, '-f',
            mysql_node.docker_compose, 'stop'
        ])
    finally:
        with pytest.raises(QueryRuntimeException) as exception:
            time.sleep(5)
            clickhouse_node.query(
                "SELECT count() FROM kill_mysql_while_insert.test")
        assert "Master maybe lost." in str(execption.value)

        run_and_check([
            'docker-compose', '-p', mysql_node.project_name, '-f',
            mysql_node.docker_compose, 'start'
        ])
        mysql_node.wait_mysql_to_start(120)

        clickhouse_node.query("DETACH DATABASE kill_mysql_while_insert")
        clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")

        result = mysql_node.query_and_get_data(
            "SELECT COUNT(1) FROM kill_mysql_while_insert.test")
        for row in result:
            res = str(row[0]) + '\n'
            check_query(clickhouse_node,
                        "SELECT count() FROM kill_mysql_while_insert.test",
                        res)

        mysql_node.query("DROP DATABASE kill_mysql_while_insert")
        clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
Example No. 7
    def wait_mysql_to_start(self, timeout=60):
        start = time.time()
        while time.time() - start < timeout:
            try:
                self.alloc_connection()
                print("Mysql Started")
                return
            except Exception as ex:
                print("Can't connect to MySQL " + str(ex))
                time.sleep(0.5)

        run_and_check(['docker-compose', 'ps', '--services', '--all'])
        raise Exception("Cannot wait for MySQL container to start")
Example No. 8
    def close(self):
        if self.mysql_connection is not None:
            self.mysql_connection.close()

        with open(self.docker_logs_path, "w+") as f:
            try:
                run_and_check([
                    'docker-compose',
                    '-p', cluster.project_name,
                    '-f', self.docker_compose, 'logs',
                ], stdout=f)
            except Exception as e:
                print("Unable to get logs from docker mysql: " + str(e))
Example No. 9
def dotnet_container():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH,
                                  "docker_compose_dotnet_client.yml")
    run_and_check([
        "docker-compose",
        "-p",
        cluster.project_name,
        "-f",
        docker_compose,
        "up",
        "--no-recreate",
        "-d",
        "--no-build",
    ])
    yield docker.from_env().containers.get(cluster.project_name + "_dotnet1_1")
Example No. 10
def java_container():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH,
                                  "docker_compose_postgresql_java_client.yml")
    run_and_check([
        "docker-compose",
        "-p",
        cluster.project_name,
        "-f",
        docker_compose,
        "up",
        "--force-recreate",
        "-d",
        "--build",
    ])
    yield docker.DockerClient(
        base_url="unix:///var/run/docker.sock",
        version=cluster.docker_api_version,
        timeout=600,
    ).containers.get(cluster.project_name + "_java_1")
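
Unlike the MySQL client fixtures above, this one rebuilds the image and recreates the container on every run (--force-recreate, --build). A usage sketch, assuming the container's entrypoint runs the Java test suite and then exits:

def test_java_client(java_container):
    # wait() blocks until the container exits and returns its status.
    status = java_container.wait()
    assert status["StatusCode"] == 0, java_container.logs().decode()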
Example No. 11
def nodejs_container():
    docker_compose = os.path.join(DOCKER_COMPOSE_PATH,
                                  "docker_compose_mysql_js_client.yml")
    run_and_check([
        "docker-compose",
        "--env-file",
        cluster.instances["node"].env_file,
        "-p",
        cluster.project_name,
        "-f",
        docker_compose,
        "up",
        "--force-recreate",
        "-d",
        "--no-build",
    ])
    yield docker.DockerClient(
        base_url="unix:///var/run/docker.sock",
        version=cluster.docker_api_version,
        timeout=600,
    ).containers.get(cluster.project_name + "_mysqljs1_1")
Example No. 12
def cleanup_environment():
    _NetworkManager.clean_all_user_iptables_rules()
    try:
        result = run_and_check(['docker ps | wc -l'], shell=True)
        if int(result) > 1:
            if int(os.environ.get("PYTEST_CLEANUP_CONTAINERS", 0)) != 1:
                logging.warning(f"Docker containers({int(result)}) are running before tests run. "
                                "They may be left over from a previous pytest run and cause test failures.\n"
                                "You can set env PYTEST_CLEANUP_CONTAINERS=1 or use runner with --cleanup-containers argument to enable automatic containers cleanup.")
            else:
                logging.debug("Trying to kill unstopped containers...")
                run_and_check(
                    ['docker kill $(docker container list --all --quiet)'],
                    shell=True,
                    nothrow=True)
                run_and_check(
                    ['docker rm $(docker container list --all --quiet)'],
                    shell=True,
                    nothrow=True)
                logging.debug("Unstopped containers killed")
                r = run_and_check(
                    ['docker-compose', 'ps', '--services', '--all'])
                logging.debug(f"Docker ps before start:{r.stdout}")
        else:
            logging.debug(f"No running containers")
    except Exception as e:
        logging.exception(f"cleanup_environment: {e}")

    yield
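
The trailing yield marks this as another generator fixture: everything above it is setup and there is no teardown. A sketch of how it is typically registered in conftest.py so that it runs once before any test; the decorator arguments are assumptions:

import pytest

@pytest.fixture(autouse=True, scope="session")
def cleanup_environment():
    # Same body as above; autouse means no test has to request it.
    yield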
Example No. 13
import os
import sys

from helpers.cluster import ClickHouseCluster, run_and_check

GRPC_PORT = 9100
# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
NODE_IP = "10.5.172.77"  # Never copy-paste this line
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"

# Use grpcio-tools to generate *pb2.py files from *.proto.

proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
    "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
    {proto_dir}/clickhouse_grpc.proto".format(proto_dir=proto_dir,
                                              gen_dir=gen_dir),
    shell=True,
)

sys.path.append(gen_dir)
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc
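
With the generated stubs importable, a query can be sent directly to the node's gRPC port. A minimal sketch, assuming the proto defines a ClickHouse service whose ExecuteQuery RPC takes a QueryInfo message with a query field and returns a message with an output payload (the field names are assumptions):

import grpc

def grpc_query(query_text):
    with grpc.insecure_channel(f"{NODE_IP}:{GRPC_PORT}") as channel:
        stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel)
        result = stub.ExecuteQuery(clickhouse_grpc_pb2.QueryInfo(query=query_text))
        return result.output.decode(DEFAULT_ENCODING)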

# Utilities

node_ip_with_grpc_port = NODE_IP + ":" + str(GRPC_PORT)
config_dir = os.path.join(SCRIPT_DIR, "./configs")
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    ipv4_address=NODE_IP,
Example No. 14
                                    "configs/ssl_conf.xml",
                                    "configs/dhparam.pem",
                                    "configs/server.crt", "configs/server.key"
                                ],
                                user_configs=["configs/default_passwd.xml"],
                                with_zookeeper=True)

LOADS_QUERY = "SELECT value FROM system.events WHERE event = 'MainConfigLoads'"

# Use grpcio-tools to generate *pb2.py files from *.proto.

proto_dir = Path(__file__).parent / "protos"
gen_dir = Path(__file__).parent / "_gen"
gen_dir.mkdir(exist_ok=True)
run_and_check(
    f"python3 -m grpc_tools.protoc -I{proto_dir!s} --python_out={gen_dir!s} --grpc_python_out={gen_dir!s} \
    {proto_dir!s}/clickhouse_grpc.proto",
    shell=True)

sys.path.append(str(gen_dir))
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc


@pytest.fixture(name="cluster", scope="module")
def fixture_cluster():
    try:
        cluster.add_zookeeper_startup_command(configure_ports_from_zk)
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
Example No. 15
def setup_dns_server(ip):
    domains_string = "test3.example.com test2.example.com test1.example.com"
    example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com'
    run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}",
                  shell=True)
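
A usage sketch: a test points the three domains at the server's IP and then resolves one of them from inside the container. That exec_in_container returns the command's stdout and that ch_server has an ip_address attribute are both assumptions:

def test_dns_record_is_served():
    setup_dns_server(ch_server.ip_address)
    output = ch_server.exec_in_container(["getent", "hosts", "test1.example.com"])
    assert ch_server.ip_address in output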