Example #1
def update(site: str, restart_service: bool, ssh_user: str, private_key: str,
           python_interpreter: str, ssh_common_args: str) -> bool:
    ar = AnsibleRunner(
        inventory=[f'{playbook_path}/inventory.d/90_os_inventory.sh'])
    hosts = ar.inventory_manager.get_hosts()
    user_hosts = [get_host_info(h) for h in hosts if is_user_host(h, site)]
    infra_host = get_infra_host(hosts, site)

    if not infra_host:
        log.error(f'No r_infra host found for site {site}')
        return False

    rancher_host = get_rancher_host(hosts)
    rancher_host.vars['ansible_ssh_user'] = ssh_user
    rancher_host.vars['ansible_ssh_private_key_file'] = private_key
    rancher_host.vars['ansible_python_interpreter'] = python_interpreter
    rancher_host.vars['ansible_ssh_common_args'] = ssh_common_args

    success = ar.play(f'{playbook_path}/update_landscaper.yml',
                      extra_vars={
                          'INFRA_HOST': infra_host.vars['ansible_ssh_host'],
                          'USER_HOSTS': user_hosts
                      })

    if success and restart_service:
        recap_stack([
            'restart', site, '--infrastructures', 'r_infra', '--services',
            'landscaper'
        ])

    return success
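A minimal invocation sketch for update() above (every value below is a placeholder, not taken from the examples):

# Hypothetical call; all values are placeholders
ok = update(site='site-a',
            restart_service=True,
            ssh_user='core',
            private_key='~/.ssh/id_rsa',
            python_interpreter='/usr/bin/python3',
            ssh_common_args='-o StrictHostKeyChecking=no')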
Example #2
def install_couchbase_server(cluster_config, couchbase_server_config):

    log_info(cluster_config)
    log_info(couchbase_server_config)

    ansible_runner = AnsibleRunner(cluster_config)

    log_info(">>> Installing Couchbase Server")
    # Install Server
    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package()
    status = ansible_runner.run_ansible_playbook(
        "install-couchbase-server-package.yml",
        extra_vars={
            "couchbase_server_package_base_url": server_baseurl,
            "couchbase_server_package_name": server_package_name
        }
    )
    if status != 0:
        raise ProvisioningError("Failed to install Couchbase Server")

    # Wait for the server to reach a 'healthy' state
    log_info(">>> Waiting for server to be in 'healthy' state")
    cluster_keywords = ClusterKeywords()
    cluster_topology = cluster_keywords.get_cluster_topology(cluster_config)
    server_url = cluster_topology["couchbase_servers"][0]
    cb_server = CouchbaseServer(server_url)
    cb_server.wait_for_ready_state()
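cb_server.wait_for_ready_state() is provided by CouchbaseServer and is not shown here; a minimal sketch of the same idea, polling Couchbase's public /pools/default REST endpoint, might look like this (the helper name, credentials, and timeout are assumptions):

import time
import requests

def wait_for_healthy(server_url, timeout=300):
    # Poll /pools/default until every node reports status 'healthy'
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            resp = requests.get("{}/pools/default".format(server_url),
                                auth=("Administrator", "password"))  # placeholder credentials
            nodes = resp.json()["nodes"]
            if nodes and all(n["status"] == "healthy" for n in nodes):
                return
        except (requests.RequestException, ValueError, KeyError):
            pass  # server may not be accepting connections yet
        time.sleep(5)
    raise RuntimeError("Server did not reach 'healthy' state within {}s".format(timeout))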
Example #3
def install_deps(cluster_config):

    log_info("Installing dependencies for cluster_config: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook("os-level-modifications.yml")
    if status != 0:
        raise ProvisioningError("Failed to make os modifications")

    status = ansible_runner.run_ansible_playbook("install-common-tools.yml")
    if status != 0:
        raise ProvisioningError("Failed to install dependencies")
Example #4
def clean_cluster():
    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision")
        raise KeyError("CLUSTER_CONFIG not defined. Unable to provision cluster.")

    print("Cleaning cluster: {}".format(cluster_config))

    ansible_runner = AnsibleRunner()
    status = ansible_runner.run_ansible_playbook("remove-previous-installs.yml", stop_on_fail=False)
    assert status == 0
Example #5
def install_deps(cluster_config):

    log_info("Installing dependencies for cluster_config: {}".format(
        cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook("os-level-modifications.yml")
    if status != 0:
        raise ProvisioningError("Failed to make os modifications")

    status = ansible_runner.run_ansible_playbook("install-common-tools.yml")
    if status != 0:
        raise ProvisioningError("Failed to install dependencies")
Example #6
def clean_cluster(cluster_config):

    log_info("Cleaning cluster: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook("remove-previous-installs.yml")
    if status != 0:
        raise ProvisioningError("Failed to removed previous installs")

    # Clear firewall rules
    status = ansible_runner.run_ansible_playbook("flush-firewall.yml")
    if status != 0:
        raise ProvisioningError("Failed to flush firewall")
Example #7
def connect_sites(playbook_path: str,
                  inter_network: str,
                  inter_ports: List[str],
                  sites: List[str],
                  site_addresses: List[str],
                  name: str,
                  network: str,
                  router_ip: str) -> bool:
    p2p_network_name = _get_p2p_network_name(sites)
    p2p_subnet_name = _get_p2p_subnet_name(sites)

    network_name = _get_internal_network_name(name)
    subnet_name = _get_internal_subnet_name(name)
    router_name = _get_internal_router_name(name)

    site_routers = [_get_internal_router_name(s) for s in sites]

    ar = AnsibleRunner()

    print("network_name: " + network_name)
    print("subnet_name: " + subnet_name)
    print("network: " + network)
    print("router_name: " + router_name)
    print("router_ip: " + router_ip)
    return ar.play(
        playbook=f'{playbook_path}/connect_sites.yml',
        extra_vars={
            'INTER_NETWORK_NAME': p2p_network_name,
            'INTER_SUBNET_NAME': p2p_subnet_name,
            'INTER_NETWORK_ADDRESS': inter_network,
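            # Assumes exactly two sites: each SITES entry references the peer's port and network address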
            'SITES': [
                {
                    'INTER_PORT': inter_ports[0],
                    'ROUTER_NAME': site_routers[0],
                    'OTHER_NETWORK_ADDRESS': site_addresses[1],
                    'OTHER_PORT': inter_ports[1]
                },
                {
                    'INTER_PORT': inter_ports[1],
                    'ROUTER_NAME': site_routers[1],
                    'OTHER_NETWORK_ADDRESS': site_addresses[0],
                    'OTHER_PORT': inter_ports[0]
                }
            ],
            'NETWORK_NAME': network_name,
            'SUBNET_NAME': subnet_name,
            'NETWORK_ADDRESS': network,
            'ROUTER_NAME': router_name,
            'ROUTER_IP': router_ip
        }
    )
Example #8
def clean_cluster(cluster_config):

    log_info("Cleaning cluster: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "remove-previous-installs.yml")
    if status != 0:
        raise ProvisioningError("Failed to removed previous installs")

    # Clear firewall rules
    status = ansible_runner.run_ansible_playbook("flush-firewall.yml")
    if status != 0:
        raise ProvisioningError("Failed to flush firewall")
Example #9
def memory_dump(victim):
    """
    Wrapper function to create memory dump of victim vm with ansible
    args:
        victim - location of vagrantfile
    return:
        None
    """

    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/mem-dump-linux.yaml',
        hosts='./scenario_builder/forensics/hosts')
    runner.run()
    # 'victim' is the Vagrantfile directory; join paths robustly instead of concatenating
    tmpfile = os.path.join(victim, 'mem-image.lime')
    os.rename(tmpfile, './mem-image.lime')
Example #10
def install_aws_credentials(cluster_config, aws_access_key_id, aws_secret_access_key):

    log_info("Installing aws credentials for cluster_config: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)

    status = ansible_runner.run_ansible_playbook(
        "install-aws-credentials.yml",
        extra_vars={
            "aws_access_key_id": aws_access_key_id,
            "aws_secret_access_key": aws_secret_access_key,
        },
    )
    if status != 0:
        raise ProvisioningError("Failed to aws credentials")
Example #11
def disk_image(victim):
    """
    Wrapper function to create disk image of victim vm with ansible
    args:
        victim - location of vagrantfile
    return:
        None
    """

    # 'victim' is the Vagrantfile directory; join paths robustly instead of concatenating
    tmpfile = os.path.join(victim, 'filesystem.image.gz')
    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/diskimage-linux.yaml',
        hosts='./scenario_builder/forensics/hosts')
    runner.run()
    os.rename(tmpfile, './filesystem.image.gz')
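memory_dump() and disk_image() follow the same pattern: run a fixed playbook against the forensics inventory, then move the resulting artifact out of the victim directory. A usage sketch (the directory is a placeholder):

# Hypothetical usage; the Vagrantfile directory is a placeholder
memory_dump('./scenarios/victim')
disk_image('./scenarios/victim')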
Example #12
def init_master(playbook_path: str,
                site: str,
                public_key: str,
                private_key: str,
                rancher_sites: List[str]) -> bool:
    ar = AnsibleRunner()

    vm_name = _get_rancher_master_vm_name(site)
    network_name = _get_internal_network_name(site)
    router_name = _get_internal_router_name(site)

    host_available = ar.play(
        playbook=f'{playbook_path}/init_master.yml',
        extra_vars={
            'VM_NAME': vm_name,
            'NETWORK_NAME': network_name,
            'ROUTER_NAME': router_name,
            'SITE_NAME': site,
            'PUBLIC_KEY_FILE': public_key,
            'PRIVATE_KEY_FILE': private_key
        }
    )

    if not host_available:
        return False

    host_vars = ar.inventory_manager.get_host(vm_name).get_vars()
    host = host_vars.get('ansible_host')
    python_interpreter = host_vars.get('ansible_python_interpreter')
    username = host_vars.get('ansible_user')

    print(rancher_sites)
    return init_rancher([
        '--host', host,
        '--username', username,
        '--private-key-file', private_key,
        '--python-interpreter', python_interpreter,
        '--pip-executable', '/home/core/bin/pip',
        '--pip-as-non-root',
        '--update-config',
        '--rancher-username', 'recap',
        '--rancher-password', 'recap$123',
        '--rancher-env-name', 'recap',
        '--rancher-registry-url', 'omi-registry.e-technik.uni-ulm.de',
        '--rancher-registry-username', 'recap_pipeline',
        '--rancher-registry-password', '53qThb2ZDUaXc3L49bs8',
        '--rancher-sites', *rancher_sites
    ])
Example #13
def install_couchbase_server(couchbase_server_config):

    print(couchbase_server_config)

    ansible_runner = AnsibleRunner()

    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package()
    status = ansible_runner.run_ansible_playbook(
        "install-couchbase-server-package.yml",
        "couchbase_server_package_base_url={0} couchbase_server_package_name={1}".format(
            server_baseurl,
            server_package_name
        ),
        stop_on_fail=False
    )
    assert status == 0
Example #14
def install_nginx(cluster_config):
    """
    Deploys nginx to nodes with the load_balancer tag

    1. Get the sync_gateway endpoints from the cluster configuration
    2. Use the endpoints to render the nginx config (resources/nginx_configs/nginx.conf)
      to distribute load across the running sync_gateways.
      i.e. If your 'cluster_config' has 2 sync_gateways, nginx will be set up to forward
        requests to both of the sync_gateways using a weighted round robin distribution.
        If you have 3, it will split the load between 3, etc ...
    3. Deploy the config and install nginx on load_balancer nodes
    4. Start the nginx service
    """

    cluster = ClusterKeywords()
    # Set lb_enable to False to get the actual SG IPs for nginx.conf
    topology = cluster.get_cluster_topology(cluster_config, lb_enable=False)

    # Get sync_gateway endpoints from cluster_config
    #  and build a string of upstream server definitions
    # upstream sync_gateway {
    #   server 192.168.33.11:4984;
    #   server 192.168.33.12:4984;
    #  }
    upstream_definition = ""
    upstream_definition_admin = ""

    for sg in topology["sync_gateways"]:
        # strip http:// to adhere to expected format for nginx.conf
        ip_port = sg["public"].replace("http://", "")
        ip_port_admin = sg["admin"].replace("http://", "")
        upstream_definition += "server {};\n".format(ip_port)
        upstream_definition_admin += "server {};\n".format(ip_port_admin)

    log_info("Upstream definition: {}".format(upstream_definition))
    log_info("Upstream definition admin: {}".format(upstream_definition_admin))

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "install-nginx.yml",
        extra_vars={
            "upstream_sync_gatways": upstream_definition,
            "upstream_sync_gatways_admin": upstream_definition_admin
        })

    assert status == 0, "Failed to install nginx!"
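For a topology with two sync_gateways, the loop above produces strings like the following (addresses taken from the comment block; the admin variant is built the same way from the admin endpoints, which default to port 4985):

# upstream_definition == "server 192.168.33.11:4984;\nserver 192.168.33.12:4984;\n"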
Example #15
def provision_cluster(couchbase_server_config, sync_gateway_config, install_deps):

    print("\n>>> Host info:\n")

    with open(os.environ["CLUSTER_CONFIG"], "r") as ansible_hosts:
        print(ansible_hosts.read())

    print(couchbase_server_config)
    print(sync_gateway_config)

    if not sync_gateway_config.is_valid():
        print("Invalid sync_gateway provisioning configuration. Exiting ...")
        sys.exit(1)

    print(">>> Provisioning cluster...")

    # Get server base url and package name
    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package()

    print(">>> Server package: {0}/{1}".format(server_baseurl, server_package_name))
    print(">>> Using sync_gateway config: {}".format(sync_gateway_config.config_path))

    ansible_runner = AnsibleRunner()

    # Reset previous installs
    status = ansible_runner.run_ansible_playbook("remove-previous-installs.yml", stop_on_fail=False)
    assert status == 0

    if install_deps:
        # OS-level modifications
        status = ansible_runner.run_ansible_playbook("os-level-modifications.yml", stop_on_fail=False)
        assert status == 0

        # Install dependencies
        status = ansible_runner.run_ansible_playbook("install-common-tools.yml", stop_on_fail=False)
        assert status == 0

    # Clear firewall rules
    status = ansible_runner.run_ansible_playbook("flush-firewall.yml", stop_on_fail=False)
    assert status == 0

    # Install server package
    install_couchbase_server.install_couchbase_server(couchbase_server_config)

    # Install sync_gateway
    install_sync_gateway.install_sync_gateway(sync_gateway_config)
Example #16
def logs(logs_loc):
    """
    Wrapper function to pull logs off of the victim vm with ansible
    args:
        logs_loc - location of the log files on the victim vm
    return:
        None
    """
    logger = logging.getLogger('root')
    log_path = "log_path={}".format(logs_loc)
    logger.debug('Pulling logs from {}'.format(logs_loc))
    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/logs-linux.yaml',
        hosts='./scenario_builder/forensics/hosts',
        extra_var=log_path)
    runner.run()
    os.rename('./scenario_builder/forensics/playbooks/logs.zip', './logs.zip')
Example #17
def install_sync_gateway(cluster_config, sync_gateway_config):
    log_info(sync_gateway_config)

    if not sync_gateway_config.is_valid():
        raise ProvisioningError("Invalid sync_gateway provisioning configuration. Exiting ...")

    if sync_gateway_config.build_flags != "":
        log_warn("\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(sync_gateway_config.config_path)

    # Create buckets unless the user explicitly asked to skip this step
    if not sync_gateway_config.skip_bucketcreation:
        create_server_buckets(cluster_config, sync_gateway_config)

    # Install Sync Gateway via Source or Package
    if sync_gateway_config.commit is not None:
        # Install from source
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml",
            extra_vars={
                "sync_gateway_config_filepath": config_path,
                "commit": sync_gateway_config.commit,
                "build_flags": sync_gateway_config.build_flags
            }
        )
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway source")

    else:
        # Install from Package
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package()
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-package.yml",
            extra_vars={
                "couchbase_sync_gateway_package_base_url": sync_gateway_base_url,
                "couchbase_sync_gateway_package": sync_gateway_package_name,
                "couchbase_sg_accel_package": sg_accel_package_name,
                "sync_gateway_config_filepath": config_path
            }
        )
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway package")
Example #18
def install_sync_gateway(sync_gateway_config):
    print(sync_gateway_config)

    if not sync_gateway_config.is_valid():
        print "Invalid sync_gateway provisioning configuration. Exiting ..."
        sys.exit(1)

    if sync_gateway_config.build_flags != "":
        print("\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner()
    config_path = os.path.abspath(sync_gateway_config.config_path)

    if sync_gateway_config.commit is not None:
        # Install source
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml",
            "sync_gateway_config_filepath={0} commit={1} build_flags={2} skip_bucketflush={3}".format(
                config_path,
                sync_gateway_config.commit,
                sync_gateway_config.build_flags,
                sync_gateway_config.skip_bucketflush
            ),
            stop_on_fail=False
        )
        assert status == 0

    else:
        # Install build
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package()
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-package.yml",
            "couchbase_sync_gateway_package_base_url={0} couchbase_sync_gateway_package={1} couchbase_sg_accel_package={2} sync_gateway_config_filepath={3} skip_bucketflush={4}".format(
                sync_gateway_base_url,
                sync_gateway_package_name,
                sg_accel_package_name,
                config_path,
                sync_gateway_config.skip_bucketflush
            ),
            stop_on_fail=False
        )
        assert status == 0
Example #19
def add_site(playbook_path: str,
             name: str,
             network: str,
             router_ip: str) -> bool:
    ar = AnsibleRunner()

    network_name = _get_internal_network_name(name)
    subnet_name = _get_internal_subnet_name(name)
    router_name = _get_internal_router_name(name)

    return ar.play(
        playbook=f'{playbook_path}/add_site.yml',
        extra_vars={
            'NETWORK_NAME': network_name,
            'SUBNET_NAME': subnet_name,
            'NETWORK_ADDRESS': network,
            'ROUTER_NAME': router_name,
            'ROUTER_IP': router_ip
        }
    )
Example #20
def install_nginx(cluster_config):
    """
    Deploys nginx to nodes with the load_balancer tag

    1. Get the sync_gateway endpoints from the cluster configuration
    2. Use the endpoints to render the nginx config (resources/nginx_configs/nginx.conf)
      to distribute load across the running sync_gateways.
      i.e. If your 'cluster_config' has 2 sync_gateways, nginx will be set up to forward
        requests to both of the sync_gateways using a weighted round robin distribution.
        If you have 3, it will split the load between 3, etc ...
    3. Deploy the config and install nginx on load_balancer nodes
    4. Start the nginx service
    """

    cluster = ClusterKeywords()
    topology = cluster.get_cluster_topology(cluster_config)

    # Get sync_gateway endpoints from cluster_config
    #  and build a string of upstream server definitions
    # upstream sync_gateway {
    #   server 192.168.33.11:4984;
    #   server 192.168.33.12:4984;
    #  }
    upstream_definition = ""
    for sg in topology["sync_gateways"]:
        # strip http:// to adhere to expected format for nginx.conf
        ip_port = sg["public"].replace("http://", "")
        upstream_definition += "server {};\n".format(ip_port)

    log_info("Upstream definition: ")
    log_info(upstream_definition)

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "install-nginx.yml",
        extra_vars={
            "upstream_sync_gatways": upstream_definition
        }
    )

    assert status == 0, "Failed to install nginx!"
Example #21
def remove(host: Host,
           ansible_runner: AnsibleRunner,
           rancher_client: RancherClient) -> bool:
    ansible_runner.add_host(host)

    log.info(f'Removing {host} from environment')

    rancher_host = rancher_client.get_host(host.address)

    if rancher_host is None:
        log.warning(f'Host {host.address} was not found in rancher, skipping')
        return False

    deleted_from_rancher = rancher_client.delete_host(rancher_host)
    if not deleted_from_rancher:
        log.warning(
            f'Host {host} was not deleted from Rancher master, skipping')
        return False

    return ansible_runner.play(
        f'{_playbook_path}/remove_host.yml',
        targets=[host.address]
    )
Example #22
def add(host: Host,
        ansible_runner: AnsibleRunner,
        rancher: RancherClient,
        pip_executable: str = 'pip',
        pip_as_root: bool = True) -> bool:
    ansible_runner.add_host(host)

    log.info(f'Adding {host} to environment')

    return ansible_runner.play(
        f'{_playbook_path}/add_host.yml',
        targets=[host.address],
        extra_vars={
            'RANCHER_SERVER_URL': rancher.url,
            'RANCHER_ENV_ID': rancher.env_id,
            'RANCHER_REG_TOKEN': rancher.get_registration_token(),
            'HOST_ID': host.address,
            'HOST_TYPE': host.variables['type'],
            'HOST_SITE': host.variables['site'],
            'PIP_EXECUTABLE': pip_executable,
            'PIP_AS_ROOT': 'yes' if pip_as_root else 'no',
            'EXTRA_LABELS': host.variables['extra_labels']
        }
    )
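A usage sketch for add(), constructing the Host the same way Example #25 below does (the address, key path, and the pre-configured rancher_client are placeholders):

# Hypothetical usage; rancher_client is an already-configured RancherClient
host = Host(address='10.0.0.5',
            username='core',
            private_key_file='~/.ssh/id_rsa',
            variables={'type': 'r_user', 'site': 'site-a', 'extra_labels': ''})
success = add(host,
              ansible_runner=AnsibleRunner(),
              rancher=rancher_client,
              pip_executable='/home/core/bin/pip',
              pip_as_root=False)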
Example #23
def add_host(playbook_path: str,
             site: str,
             host_type: str,
             instance: int,
             public_key: str,
             private_key: str,
             vm_flavour: str,
             vm_availability_zone: str,
             extra_labels: Dict[str, str],
             is_master: bool = False) -> bool:
    ar = AnsibleRunner()

    vm_name = _get_host_vm_name(site, host_type, instance)
    network_name = _get_internal_network_name(site)
    master_host = _get_master_address()

    if "global" == host_type:
        current_playbook = f'{playbook_path}/add_host_fip.yml'

    else:
        current_playbook = f'{playbook_path}/add_host.yml'

    host_available = ar.play(
        playbook=current_playbook,
        extra_vars={
            'VM_NAME': vm_name,
            'NETWORK_NAME': network_name,
            'SITE_NAME': site,
            'TYPE': host_type,
            'MASTER_HOST': master_host,
            'PUBLIC_KEY_FILE': public_key,
            'PRIVATE_KEY_FILE': private_key,
            'VM_FLAVOUR': vm_flavour,
            'VM_AZ': vm_availability_zone
        }
    )

    if not host_available:
        return False

    host_vars = ar.inventory_manager.get_host(vm_name).get_vars()
    host = host_vars.get('ansible_host')
    python_interpreter = host_vars.get('ansible_python_interpreter')
    username = host_vars.get('ansible_user')

    if is_master:
        extra_labels['master'] = 'true'
    
    if "global" == host_type:
        env(['set', "GLOBAL_ADDRESS", host])

    agent_added = add_agent([
        'add', host,
        '--infrastructure', host_type,
        '--site', site,
        '--username', username,
        '--private-key-file', private_key,
        '--start-stack',
        '--python-interpreter', python_interpreter,
        '--pip-executable', '/home/core/bin/pip',
        '--pip-as-non-root',
        '--proxy-host', master_host,
        '--extra-labels', get_labels_string(extra_labels) or None
    ])

    services = {
        'admin': getenv('SERVICES_ADMIN').split(','),
        'global': getenv('SERVICES_GLOBAL').split(','),
        'r_infra': getenv('SERVICES_INFRA').split(','),
        'r_user': getenv('SERVICES_USER').split(',')
    }

    # if agent_added and host_type == 'r_user':
    #     result = update_landscaper(site=site,
    #                                restart_service=True,
    #                                ssh_user=username,
    #                                private_key=private_key,
    #                                python_interpreter=python_interpreter,
    #                                ssh_common_args='-o StrictHostKeyChecking=no')
    #     if not result:
    #         log.warning('There was an error updating the landscaper')

    return agent_added
Example #24
import os

from ansible_runner import AnsibleRunner

if __name__ == "__main__":
    usage = "usage: python stop_telegraf.py"

    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print(
            "Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to run against"
        )
        raise KeyError(
            "CLUSTER_CONFIG not defined. Unable to stop telegraf collectors.")

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook("stop-telegraf.yml")
    assert status == 0, "Failed to stop telegraf collectors"
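Per the usage string, the script takes no arguments and is driven entirely by the CLUSTER_CONFIG environment variable:

CLUSTER_CONFIG=path/to/cluster_config python stop_telegraf.py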
Example #25
def main(argv: List[str]) -> bool:
    arg_parser = ArgumentParser(
        prog=recap_module_name,
        description='Add or Remove Hosts from RECAP Environment',
        formatter_class=ArgumentDefaultsHelpFormatter
    )

    arg_parser.add_argument('action', choices=['add', 'remove'])
    arg_parser.add_argument('host')
    arg_parser.add_argument(
        '--infrastructure', choices=['r_user', 'r_infra', 'admin', 'global'], required=False)
    arg_parser.add_argument('--site', required=False)
    arg_parser.add_argument('--username', required=True)
    arg_parser.add_argument('--private-key-file', required=True)
    arg_parser.add_argument('--start-stack', action='store_true')
    arg_parser.add_argument('--python-interpreter', default='/usr/bin/python')
    arg_parser.add_argument('--pip-executable', default='pip')
    arg_parser.add_argument('--pip-as-non-root', action='store_true')
    arg_parser.add_argument('--proxy-host', required=False)
    arg_parser.add_argument('--extra-labels', required=False, type=labels_type)

    args = arg_parser.parse_args(argv)

    if args.action == 'add' and (args.infrastructure is None or args.site is None):
        arg_parser.error('add requires --infrastructure and --site')

    extra_labels = ''
    if args.extra_labels is not None:
        extra_labels = get_labels_string(args.extra_labels)

    if args.proxy_host is None:
        ssh_args = '-o StrictHostKeyChecking=no'
    else:
        ssh_args = f'-o StrictHostKeyChecking=no -o ProxyCommand="ssh -W %h:%p -q {args.username}@{args.proxy_host} -i {args.private_key_file} -o ControlMaster=auto -o ControlPersist=30m -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"'

    runner = AnsibleRunner()
    rancher_access_key_site = os.getenv('RANCHER_ACCESS_KEY_'+args.site)
    rancher_secret_key_site = os.getenv('RANCHER_SECRET_KEY_'+args.site)
    rancher_env_site = os.getenv('RANCHER_ENV_ID_'+args.site)
    rancher = RancherClient(
        rancher_env_site, rancher_access_key_site, rancher_secret_key_site)

    host = Host(address=args.host,
                username=args.username,
                private_key_file=args.private_key_file,
                variables={
                    'ansible_ssh_common_args': ssh_args,
                    'ansible_python_interpreter': args.python_interpreter,
                    'type': args.infrastructure,
                    'site': args.site,
                    'extra_labels': extra_labels
                })

    if args.action == 'add':
        success = add(host=host,
                      ansible_runner=runner,
                      rancher=rancher,
                      pip_executable=args.pip_executable,
                      pip_as_root=not args.pip_as_non_root)

        if success and args.start_stack:
            success = stack_cmd([
                'up', args.site, '--infrastructures', args.infrastructure
            ])

    else:
        success = remove(host=host,
                         ansible_runner=runner,
                         rancher_client=rancher)

    return success
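A sketch of invoking main() with the flags defined above (host, site, username, and key path are placeholders; RANCHER_ACCESS_KEY_<site>, RANCHER_SECRET_KEY_<site>, and RANCHER_ENV_ID_<site> must be set in the environment for the chosen site):

# Hypothetical invocation; all values are placeholders
main(['add', '10.0.0.5',
      '--infrastructure', 'r_user',
      '--site', 'site-a',
      '--username', 'core',
      '--private-key-file', '~/.ssh/id_rsa',
      '--start-stack',
      '--pip-as-non-root'])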
Example #26
import os

from ansible_runner import AnsibleRunner


if __name__ == "__main__":
    usage = "usage: python stop_telegraf.py"

    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to run against")
        raise KeyError("CLUSTER_CONFIG not defined. Unable to stop telegraf collectors.")

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook("stop-telegraf.yml")
    assert status == 0, "Failed to stop telegraf collectors"