Example #1
def build_and_run(options, request):
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))
    run_containers(cmd, options)

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        try:
            # Used for when there are multiple filewriter instances
            # as the service is not called "filewriter"
            multiple_log_options = dict(options)
            multiple_log_options["SERVICE"] = ["filewriter1", "filewriter2"]
            cmd.logs(multiple_log_options)
        except Exception:
            log_options = dict(options)
            log_options["SERVICE"] = ["filewriter"]
            cmd.logs(log_options)
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
    # Return the start time so the filewriter knows when to start consuming data
    # from to get all data which was published
    return start_time
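
In the surrounding test suite this helper is evidently called from a pytest fixture; a minimal wiring sketch, assuming the `common_options` dict used elsewhere in these examples:

import pytest

@pytest.fixture(scope="module")
def filewriter_containers(request):
    # build_and_run registers its own finalizer on `request`,
    # so teardown happens even if the tests fail
    return build_and_run(common_options, request)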
Example #2
def up(project_path):
    up_options = {
        "-d": True,
        "--no-color": False,
        "--no-deps": False,
        "--build": False,
        "--abort-on-container-exit": False,
        "--remove-orphans": False,
        "--no-recreate": True,
        "--force-recreate": False,
        "--no-build": False,
        "SERVICE": "",
        "--scale": []
    }

    project = project_from_options(project_path, up_options)
    cmd = TopLevelCommand(project)
    cmd.up(up_options)

    ps_options = {"SERVICE": "", "-q": True}
    containers = sorted(
        project.containers(service_names=ps_options['SERVICE'], stopped=True) +
        project.containers(service_names=ps_options['SERVICE'],
                           one_off=OneOffFilter.only),
        key=attrgetter('name'))

    container_ids = []
    for container in containers:
        container_ids.append(container.id)

    return container_ids
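
A hedged usage sketch (the project path is illustrative):

# Bring the stack up (detached, no recreate) and keep the container ids
container_ids = up("./deploy")
print(container_ids)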
Example #3
    def __init__(self, path='./'):
        # 'path' is the directory containing the docker-compose file
        self.options = {
            "--file": ['docker-compose.dev.yaml', ],
            "--no-deps": False,
            "--abort-on-container-exit": False,
            "SERVICE": "",
            "--remove-orphans": False,
            "--no-recreate": True,
            "--force-recreate": False,
            "--build": False,
            '--no-build': False,
            '--no-color': False,
            "--rmi": "none",
            "--volumes": "",
            "--follow": False,
            "--timestamps": False,
            "--tail": "all",
            "-d": True,
            '--always-recreate-deps': False,
            '--scale': []
        }

        self.project = project_from_options(path, self.options)
        self.cli = TopLevelCommand(self.project)
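
The class this initializer belongs to is not shown in the excerpt; assuming a placeholder name such as `ComposeEnv`, usage might look like:

env = ComposeEnv()            # builds the project from docker-compose.dev.yaml
env.cli.up(env.options)       # roughly `docker-compose up -d`
env.cli.down(env.options)     # tear the stack down again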
Example #4
def docker_compose(request):
    """
    :type request: _pytest.python.FixtureRequest
    """

    options = {"--no-deps": False,
               "--abort-on-container-exit": False,
               "SERVICE": "",
               "--remove-orphans": False,
               "--no-recreate": True,
               "--force-recreate": False,
               "--build": False,
               '--no-build': False,
               '--no-color': False,
               "--rmi": "none",
               "--volumes": "",
               "--follow": False,
               "--timestamps": False,
               "--tail": "all",
               "-d": True,
               }

    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    cmd.up(options)

    def fin():
        cmd.logs(options)
        cmd.down(options)

    request.addfinalizer(fin)
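
A test can then depend on the fixture by name; a minimal sketch:

def test_services_are_up(docker_compose):
    # `docker-compose up -d` has already run; the finalizer prints the
    # logs and runs `docker-compose down` after the test finishes
    assert True  # replace with real assertions against the running services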
Example #5
def rm(ctx, clustername, volumes):
    cluster_dir = os.path.join(ctx.obj['etc'], clustername)
    if not os.path.exists(cluster_dir):
        click.echo(click.style('wrong cluster name', fg='red', bold=True))
        ctx.exit(-1)
    project = project_from_options(cluster_dir, {})
    project.down(remove_image_type=0, include_volumes=volumes)
    ensure_dir_absent(cluster_dir)

    click.echo(
        click.style('cluster {} removed'.format(clustername), fg='green'))
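
`ensure_dir_absent` is not defined in this excerpt; a plausible stand-in (an assumption, not necessarily the project's actual helper):

import shutil

def ensure_dir_absent(path):
    # Delete the directory tree if it exists; a missing path is not an error
    shutil.rmtree(path, ignore_errors=True)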
Example #6
    def __init__(self, docker_compose_file, options=None):
        self.docker_compose_file = docker_compose_file

        # Build the compose options from the class defaults plus any overrides.
        # Copy first so the shared default dict is not mutated in place.
        dockerenv_options = dict(self.options)
        if options:
            dockerenv_options.update(options)

        project = project_from_options(str(TESTS_DIR), dockerenv_options)
        self.cmd = TopLevelCommand(project)
        self.cmd.up(dockerenv_options)
Example #7
 def before(self, *args, **kwargs):
     """
     Will run `docker-compose up -d` only in case docker-compose is installed locally
     and there is a docker-compose.yml file in resources directory
     """
     compose = self.find_resource_file()
     if compose and is_package_installed('compose'):
         from compose.cli.main import TopLevelCommand, project_from_options
         info('Starting docker-compose. Please wait.')
         self._options['-f'] = [join(self._resources, compose)]  # '-f' expects a sequence of compose files
         self._cmd = TopLevelCommand(
             project_from_options(self._resources, self._options))
         self._cmd.up(self._options)
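
`is_package_installed` is not shown in the excerpt; a minimal sketch of such a check (assumed implementation):

import importlib.util

def is_package_installed(name):
    # True if the package is importable in the current environment
    return importlib.util.find_spec(name) is not None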
Example #8
def ps_one_project(ctx, cluster_name):
    cluster_dir = os.path.join(ctx.obj['etc'], cluster_name)
    if not os.path.exists(cluster_dir):
        click.echo(click.style('wrong cluster name', fg='red', bold=True))
        ctx.exit(-1)

    project = project_from_options(cluster_dir, {})
    # In practice there is only one matching container per service
    tidb_container = [
        c for s in project.services if 'tidb' in s.name
        for c in s.containers()
    ][0]
    tidb_port = tidb_container.inspect()['NetworkSettings']['Ports']
    tidb_address = 'localhost:{}'.format(tidb_port['4000/tcp'][0].get(
        'HostPort', 'unknown'))

    grafana_container = [
        c for s in project.services if 'grafana' in s.name
        for c in s.containers()
    ][0]
    grafana_port = grafana_container.inspect()['NetworkSettings']['Ports']
    grafana_address = 'localhost:{}'.format(grafana_port['3000/tcp'][0].get(
        'HostPort', 'unknown'))
    service_info = tabulate([
        ('tidb', tidb_address),
        ('grafana', grafana_address),
    ],
                            headers=['service type', 'service address'])

    body = []
    for service in project.services:
        containers = service.containers()
        containers_status = [c.inspect() for c in containers]
        running = len([
            c for c in containers_status if c['State']['Status'] == 'running'
        ])
        body.append(
            (service.name,
             '{}/{} running/total containers'.format(running,
                                                     len(containers_status))))
    service_status = tabulate(body,
                              headers=['service name', 'container status'])

    click.echo(
        tabulate(
            [(cluster_name, service_info, service_status)],
            ['cluster name', 'service info', 'service status'],
            tablefmt='fancy_grid',
        ))
Example #9
def parse_compose_project(project_dir, machine_name, name, compose_files):
    """ Parse the compose files """
    machine_options = get_machine_options(machine_name)
    compose_options = {
        **machine_options,
        "--file": compose_files,
        "--project-name": name,
        "--project-directory": project_dir,
    }

    # out = io.StringIO()
    # with redirect_stderr(out):
    project = project_from_options(project_dir, compose_options)
    return project
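
`get_machine_options` is not included in the excerpt; conceptually it maps a docker-machine name onto the top-level connection flags that docker-compose understands (`--host`, `--tlsverify`, ...). A hedged sketch with illustrative values:

def get_machine_options(machine_name):
    # Assumed helper: in practice these values would come from
    # something like `docker-machine env <machine_name>`
    if not machine_name:
        return {}
    return {
        "--host": "tcp://192.168.99.100:2376",  # illustrative address
        "--tlsverify": True,
    }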
Example #10
def rm(project_path):

    rm_options = {
        "--force": True,
        "--stop": True,
        "-v": False,
        "--rmi": "none",
        "--volumes": "/private",
        "--remove-orphans": False,
        "SERVICE": ""
    }

    project = project_from_options(project_path, rm_options)
    cmd = TopLevelCommand(project)
    cmd.down(rm_options)
Example #11
def start_ioc(request):
    options = common_options
    options["--project-name"] = "ioc"
    options["--file"] = [os.path.join("compose", "docker-compose-ioc.yml")]
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)

    cmd.up(options)

    def fin():
        print("Stopping zookeeper and kafka", flush=True)
        options["--timeout"] = 30
        options["--project-name"] = "ioc"
        options["--file"] = [os.path.join("compose", "docker-compose-ioc.yml")]
        cmd.down(options)

    request.addfinalizer(fin)
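
`common_options` is shared by several of these fixtures but never defined in the excerpt; judging by the explicit dicts in the other examples it is a docopt-style defaults mapping, roughly (an assumption):

common_options = {
    "--no-deps": False,
    "--always-recreate-deps": False,
    "--abort-on-container-exit": False,
    "SERVICE": "",
    "--remove-orphans": False,
    "--no-recreate": True,
    "--force-recreate": False,
    "--build": False,
    "--no-build": False,
    "--no-color": False,
    "--rmi": "none",
    "--volumes": "",
    "--follow": False,
    "--timestamps": False,
    "--tail": "all",
    "-d": True,
    "--scale": [],
}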
Example #12
def get_container_id(container_name):
    options = {
        "--quiet": True,
        "--filter": "status=running",
        "nginx": True,
        "--services": False,
        "SERVICE": [container_name],
    }

    project = project_from_options(os.path.dirname(os.path.abspath(__file__)),
                                   options)
    cmd = TopLevelCommand(project)
    result = io.StringIO()

    with redirect_stdout(result):
        cmd.ps(options)

    return result.getvalue()
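
The return value is whatever `docker-compose ps --quiet` printed, so callers will usually strip the trailing newline:

container_id = get_container_id("nginx").strip()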
Example #13
def build_and_run(options, request):
    build_forwarder_image()
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    run_containers(cmd, options)

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        log_options = dict(options)
        log_options["SERVICE"] = ["forwarder"]
        cmd.logs(log_options)
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
Example #14
def start_kafka(request):
    print("Starting zookeeper and kafka", flush=True)
    options = common_options
    options["--project-name"] = "kafka"
    options["--file"] = ["docker-compose-kafka.yml"]
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)

    cmd.up(options)
    print("Started kafka containers", flush=True)
    wait_until_kafka_ready(cmd, options)

    def fin():
        print("Stopping zookeeper and kafka", flush=True)
        options["--timeout"] = 30
        options["--project-name"] = "kafka"
        options["--file"] = ["docker-compose-kafka.yml"]
        cmd.down(options)
    request.addfinalizer(fin)
Example #15
def build_and_run(options, request):
    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    run_containers(cmd, options)

    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        log_options = dict(options)
        cmd.logs(log_options)
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)

    return start_time
Example #16
def dao_2048(service_name, config_file):
    options = {"--no-deps": False,
               "--always-recreate-deps": False,
               "--abort-on-container-exit": False,
               "SERVICE": service_name,
               "--remove-orphans": False,
               "--no-recreate": True,
               "--force-recreate": False,
               "--build": False,
               "--no-build": False,
               "--no-color": False,
               "--rmi": "none",
               "--volumes": "",
               "--follow": False,
               "--timestamps": False,
               "--tail": "all",
               "-d": True,
               "--scale": ["master=1"],  # a list of SERVICE=NUM strings, not a set
               "--file": config_file,
               }

    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    cmd.up(options)
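
A hedged usage example (service and file names are illustrative; note that `--file` is expected to be a list):

dao_2048(["master"], ["docker-compose-2048.yml"])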
Example #17
def docker_services(docker_compose_file, docker_allow_fallback,
                    docker_compose_project_name, docker_compose_project_dir):
    """Ensure all Docker-based services are up and running."""

    docker_compose = DockerComposeExecutor(docker_compose_file,
                                           docker_compose_project_name,
                                           docker_compose_project_dir)

    # If we are allowed to fall back to running without Docker,
    # first check that the Docker CLI is actually usable
    if docker_allow_fallback is True:
        try:
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(['docker', 'ps'],
                                      stdout=devnull,
                                      stderr=devnull)
        except Exception:
            # Docker binary missing or daemon unreachable: run without containers
            yield Services(None, docker_allow_fallback=True)
            return

    project = project_from_options(docker_compose._compose_project_dir,
                                   options=docker_compose.as_dict_options())
    cmd = TopLevelCommand(project)

    # Spawn containers.
    up_options = docker_compose.defaults_opts('up')
    up_options['-d'] = True
    up_options['--build'] = True
    cmd.up(up_options)

    # Let test(s) run.
    yield Services(project)

    # Clean up.
    down_option = docker_compose.defaults_opts('down')
    down_option['-v'] = True
    cmd.down(down_option)
Example #18
def test_environment(request):
    """
    :type request: _pytest.python.FixtureRequest
    """
    # Options must be given in their long form
    options = common_options
    options["--file"] = ["docker-compose.yml"]

    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    print("Running docker-compose up", flush=True)
    cmd.pull(options)
    cmd.up(options)
    print("\nFinished docker-compose up\n", flush=True)
    wait_until_kafka_ready(cmd, options)

    def fin():
        cmd.logs(options)
        # Stop the containers then remove them and their volumes (--volumes option)
        cmd.down(options)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
Example #19
 def __init__(self, yml_dir):
     self._output = io.StringIO()
     self.path_to_yml_dir = yml_dir
     self.options = GENERAL_COMPOSE_OPTIONS
     self.project = project_from_options(self.path_to_yml_dir, self.options)
     self._cmd = TopLevelCommand(self.project)
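
`GENERAL_COMPOSE_OPTIONS` is not reproduced here; it is presumably a docopt-style defaults dict like the `common_options` sketch under Example #11. Usage of the wrapper might then be (the class name is a placeholder, since the excerpt omits it):

env = ComposeTestEnv("./compose")
env._cmd.up(env.options)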
Example #20
 def top_level_command_from_options(self, options):
     """Creates a top level command from options
     :param options (str) docker compose options"""
     project = project_from_options(self.project_location, options)
     cmd = TopLevelCommand(project)
     return cmd
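
A hedged usage sketch, assuming `wrapper` is an instance of the surrounding class and mirroring the `ps` options from Example #12:

ps_options = {"--quiet": True, "--filter": "status=running",
              "--services": False, "SERVICE": []}
cmd = wrapper.top_level_command_from_options(ps_options)
cmd.ps(ps_options)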
Example #21
def create_cluster(ctx, base, network, cluster_name, pd_count, tikv_count,
                   tidb_version):
    cluster_dir = os.path.join(ctx.obj['etc'], cluster_name)
    # config
    pdservices = [{
        'name': 'pd_{}_{}'.format(cluster_name, index),
        'base': os.path.abspath(base),
        'image': 'pingcap/pd:latest',
    } for index in range(pd_count)]
    tikvservices = [{
        'name': 'tikv_{}_{}'.format(cluster_name, index),
        'base': os.path.abspath(base),
        'image': 'pingcap/tikv:latest',
    } for index in range(tikv_count)]
    tidb = {
        'base': os.path.abspath(base),
        'image': 'pingcap/tidb:{}'.format(tidb_version),
    }
    monitor = {'base': os.path.abspath(base)}

    # ensure dir exists
    ensure_dir(base)
    ensure_dir(os.path.join(base, 'data'))
    ensure_dir(os.path.join(base, 'logs'))
    ensure_dir(cluster_dir)

    # create config
    shutil.copytree('./config', os.path.join(base, 'config'))

    # edit some configs
    filenames = [
        os.path.join(
            base, 'config/grafana/provisioning/datasources/datasources.yaml')
    ]
    massedit.edit_files(filenames, [
        "re.sub(r'prometheus:9090', 'prometheus_{}:9090', line)".format(
            cluster_name)
    ],
                        dry_run=False)

    filenames = [os.path.join(base, 'prometheus.yml')]
    massedit.edit_files(filenames, [
        "re.sub(r'pushgateway:9091', 'pushgateway_{}:9091', line)".format(
            cluster_name)
    ],
                        dry_run=False)

    # keep docker-compose.yml
    tp = Jinja2(__name__)
    content = tp.render_template('/docker_compose.jinja',
                                 network=network,
                                 pdservices=pdservices,
                                 tikvservices=tikvservices,
                                 tidb=tidb,
                                 monitor=monitor,
                                 cluster_name=cluster_name)
    filename = os.path.join(cluster_dir, 'docker-compose.yml')
    with open(filename, 'w') as f:
        f.write(content)

    project = project_from_options(cluster_dir, {})
    project.up(detached=True)
    click.echo(
        click.style('cluster {} created'.format(cluster_name), fg='green'))
Example #22
 def __init__(self, docker_compose_file):
     self.docker_compose_file = docker_compose_file
     project = project_from_options(os.path.dirname(__file__), self.options)
     self.cmd = TopLevelCommand(project)
     self.cmd.up(self.options)
Example #23
def build_and_run(options, request, local_path=None, wait_for_debugger=False):
    if wait_for_debugger and local_path is None:
        warnings.warn(
            "Option specified to wait for debugger to attach, but this "
            "can only be used if a local build path is provided")

    project = project_from_options(os.path.dirname(__file__), options)
    cmd = TopLevelCommand(project)
    start_time = str(int(unix_time_milliseconds(datetime.utcnow())))
    run_containers(cmd, options)

    if local_path is not None:
        # Launch local build of file writer
        full_path_of_file_writer_exe = os.path.join(local_path, "bin",
                                                    "kafka-to-nexus")
        log_file = open("logs/file-writer-logs.txt", "w")
        proc = Popen(
            [
                full_path_of_file_writer_exe,
                "-c",
                "./config-files/local_file_writer_config.ini",
            ],
            stdout=log_file,
        )
        if wait_for_debugger:
            proc.send_signal(
                signal.SIGSTOP
            )  # Pause the file writer until we've had chance to attach a debugger
            input(
                f"\n"
                f"Attach a debugger to process id {proc.pid} now if you wish, then press enter to continue: "
            )
            print(
                "You'll need to tell the debugger to continue after it has attached, "
                'for example type "continue" if using gdb.')
            proc.send_signal(signal.SIGCONT)

    def fin():
        # Stop the containers then remove them and their volumes (--volumes option)
        print("containers stopping", flush=True)
        if local_path is None:
            try:
                # Used for when there are multiple filewriter instances
                # as the service is not called "filewriter"
                multiple_log_options = dict(options)
                multiple_log_options["SERVICE"] = [
                    "filewriter1", "filewriter2"
                ]
                cmd.logs(multiple_log_options)
            except Exception:
                log_options = dict(options)
                log_options["SERVICE"] = ["filewriter"]
                cmd.logs(log_options)
        else:
            proc.kill()
        options["--timeout"] = 30
        cmd.down(options)
        print("containers stopped", flush=True)

    # Using a finalizer rather than yield in the fixture means
    # that the containers will be brought down even if tests fail
    request.addfinalizer(fin)
    # Return the start time so the filewriter knows when to start consuming data
    # from to get all data which was published
    return start_time