Example #1
def update(site: str, restart_service: bool, ssh_user: str, private_key: str,
           python_interpreter: str, ssh_common_args: str) -> bool:
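    """Update the landscaper service on a site's r_user hosts.

    Builds the inventory from the OS inventory script, locates the site's
    r_infra host, injects the SSH connection variables into the rancher
    host, and runs the update_landscaper playbook; optionally restarts the
    landscaper service afterwards. (playbook_path is not a parameter here,
    so it is presumably a module-level value.)
    """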
    ar = AnsibleRunner(
        inventory=[f'{playbook_path}/inventory.d/90_os_inventory.sh'])
    hosts = ar.inventory_manager.get_hosts()
    user_hosts = [get_host_info(h) for h in hosts if is_user_host(h, site)]
    infra_host = get_infra_host(hosts, site)

    if not infra_host:
        log.error(f'No r_infra host found for site {site}')
        return False

    rancher_host = get_rancher_host(hosts)
    rancher_host.vars['ansible_ssh_user'] = ssh_user
    rancher_host.vars['ansible_ssh_private_key_file'] = private_key
    rancher_host.vars['ansible_python_interpreter'] = python_interpreter
    rancher_host.vars['ansible_ssh_common_args'] = ssh_common_args

    success = ar.play(f'{playbook_path}/update_landscaper.yml',
                      extra_vars={
                          'INFRA_HOST': infra_host.vars['ansible_ssh_host'],
                          'USER_HOSTS': user_hosts
                      })

    if success and restart_service:
        recap_stack([
            'restart', site, '--infrastructures', 'r_infra', '--services',
            'landscaper'
        ])

    return success
Example #2
def install_deps(cluster_config):
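    """Run the OS-level-modifications and common-tools playbooks against the cluster."""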

    log_info("Installing dependencies for cluster_config: {}".format(
        cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook("os-level-modifications.yml")
    if status != 0:
        raise ProvisioningError("Failed to make os modifications")

    status = ansible_runner.run_ansible_playbook("install-common-tools.yml")
    if status != 0:
        raise ProvisioningError("Failed to install dependencies")
Example #3
def connect_sites(playbook_path: str,
                  inter_network: str,
                  inter_ports: List[str],
                  sites: List[str],
                  site_addresses: List[str],
                  name: str,
                  network: str,
                  router_ip: str) -> bool:
    p2p_network_name = _get_p2p_network_name(sites)
    p2p_subnet_name = _get_p2p_subnet_name(sites)

    network_name = _get_internal_network_name(name)
    subnet_name = _get_internal_subnet_name(name)
    router_name = _get_internal_router_name(name)

    site_routers = [_get_internal_router_name(s) for s in sites]

    # The SITES payload below pairs two sites symmetrically, so exactly two
    # sites (with matching ports and addresses) are expected.
    if len(sites) != 2:
        raise ValueError('connect_sites expects exactly two sites')

    ar = AnsibleRunner()

    print("network_name: " + network_name)
    print("subnet_name: " + subnet_name)
    print("network: " + network)
    print("router_name: " + router_name)
    print("router_ip: " + router_ip)
    return ar.play(
        playbook=f'{playbook_path}/connect_sites.yml',
        extra_vars={
            'INTER_NETWORK_NAME': p2p_network_name,
            'INTER_SUBNET_NAME': p2p_subnet_name,
            'INTER_NETWORK_ADDRESS': inter_network,
            'SITES': [
                {
                    'INTER_PORT': inter_ports[0],
                    'ROUTER_NAME': site_routers[0],
                    'OTHER_NETWORK_ADDRESS': site_addresses[1],
                    'OTHER_PORT': inter_ports[1]
                },
                {
                    'INTER_PORT': inter_ports[1],
                    'ROUTER_NAME': site_routers[1],
                    'OTHER_NETWORK_ADDRESS': site_addresses[0],
                    'OTHER_PORT': inter_ports[0]
                }
            ],
            'NETWORK_NAME': network_name,
            'SUBNET_NAME': subnet_name,
            'NETWORK_ADDRESS': network,
            'ROUTER_NAME': router_name,
            'ROUTER_IP': router_ip
        }
    )
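
# The _get_*_name helpers used above are not shown in these examples. A
# minimal sketch of what they might look like, assuming purely
# convention-based naming (the exact prefixes and suffixes are assumptions):
from typing import List

def _get_p2p_network_name(sites: List[str]) -> str:
    # e.g. ['siteA', 'siteB'] -> 'p2p_siteA_siteB_net'
    return 'p2p_' + '_'.join(sites) + '_net'

def _get_p2p_subnet_name(sites: List[str]) -> str:
    return 'p2p_' + '_'.join(sites) + '_subnet'

def _get_internal_network_name(site: str) -> str:
    return f'{site}_net'

def _get_internal_subnet_name(site: str) -> str:
    return f'{site}_subnet'

def _get_internal_router_name(site: str) -> str:
    return f'{site}_router'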
Example #4
def clean_cluster(cluster_config):
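    """Remove previous installs and flush firewall rules on the cluster."""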

    log_info("Cleaning cluster: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "remove-previous-installs.yml")
    if status != 0:
        raise ProvisioningError("Failed to removed previous installs")

    # Clear firewall rules
    status = ansible_runner.run_ansible_playbook("flush-firewall.yml")
    if status != 0:
        raise ProvisioningError("Failed to flush firewall")
Example #5
def memory_dump(victim):
    """
    Wrapper function to create memory dump of victim vm with ansible
    args:
        victim - location of vagrantfile
    return:
        None
    """

    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/mem-dump-linux.yaml',
        hosts='./scenario_builder/forensics/hosts')
    runner.run()
    tmpfile = os.path.join(victim, 'mem-image.lime')
    os.rename(tmpfile, './mem-image.lime')
Example #6
def disk_image(victim):
    """
    Wrapper function to create disk image of victim vm with ansible
    args:
        victim - location of vagrantfile
    return:
        None
    """

    tmpfile = os.path.join(victim, 'filesystem.image.gz')
    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/diskimage-linux.yaml',
        hosts='./scenario_builder/forensics/hosts')
    runner.run()
    os.rename(tmpfile, './filesystem.image.gz')
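
# A sketch of how the two forensics wrappers above might be used together;
# the victim directory path is an assumption for illustration:
if __name__ == '__main__':
    victim_dir = './victims/ubuntu-victim/'
    memory_dump(victim_dir)  # leaves ./mem-image.lime in the working directory
    disk_image(victim_dir)   # leaves ./filesystem.image.gz in the working directory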
Example #7
def install_aws_credentials(cluster_config, aws_access_key_id,
                            aws_secret_access_key):
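    """Install AWS credentials on the cluster via the
    install-aws-credentials playbook."""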

    log_info("Installing aws credentials for cluster_config: {}".format(cluster_config))

    ansible_runner = AnsibleRunner(config=cluster_config)

    status = ansible_runner.run_ansible_playbook(
        "install-aws-credentials.yml",
        extra_vars={
            "aws_access_key_id": aws_access_key_id,
            "aws_secret_access_key": aws_secret_access_key,
        },
    )
    if status != 0:
        raise ProvisioningError("Failed to aws credentials")
Example #8
def init_master(playbook_path: str,
                site: str,
                public_key: str,
                private_key: str,
                rancher_sites: List[str]) -> bool:
    ar = AnsibleRunner()

    vm_name = _get_rancher_master_vm_name(site)
    network_name = _get_internal_network_name(site)
    router_name = _get_internal_router_name(site)

    host_available = ar.play(
        playbook=f'{playbook_path}/init_master.yml',
        extra_vars={
            'VM_NAME': vm_name,
            'NETWORK_NAME': network_name,
            'ROUTER_NAME': router_name,
            'SITE_NAME': site,
            'PUBLIC_KEY_FILE': public_key,
            'PRIVATE_KEY_FILE': private_key
        }
    )

    if not host_available:
        return False

    host_vars = ar.inventory_manager.get_host(vm_name).get_vars()
    host = host_vars.get('ansible_host')
    python_interpreter = host_vars.get('ansible_python_interpreter')
    username = host_vars.get('ansible_user')

    print(f'rancher_sites: {rancher_sites}')
    return init_rancher([
        '--host', host,
        '--username', username,
        '--private-key-file', private_key,
        '--python-interpreter', python_interpreter,
        '--pip-executable', '/home/core/bin/pip',
        '--pip-as-non-root',
        '--update-config',
        '--rancher-username', 'recap',
        '--rancher-password', 'recap$123',
        '--rancher-env-name', 'recap',
        '--rancher-registry-url', 'omi-registry.e-technik.uni-ulm.de',
        '--rancher-registry-username', 'recap_pipeline',
        '--rancher-registry-password', '53qThb2ZDUaXc3L49bs8',
        '--rancher-sites', *rancher_sites
    ])
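
# A minimal sketch of how init_master might be invoked; the paths and site
# names below are illustrative assumptions, not values from the original code:
if __name__ == '__main__':
    ok = init_master(playbook_path='./playbooks',
                     site='siteA',
                     public_key='~/.ssh/id_rsa.pub',
                     private_key='~/.ssh/id_rsa',
                     rancher_sites=['siteA', 'siteB'])
    print(f'init_master succeeded: {ok}')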
Example #9
def install_nginx(cluster_config):
    """
    Deploys nginx to nodes with the load_balancer tag

    1. Get the sync_gateway endpoints from the cluster configuration
    2. Use the endpoints to render the nginx config (resources/nginx_configs/nginx.conf)
      to distribute load across the running sync_gateways.
      i.e. If your 'cluster_config' has 2 sync_gateways, nginx will be set up to forward
        requests to both of the sync_gateways using a weighted round robin distribution.
        If you have 3, it will split the load between 3, etc ...
    3. Deploy the config and install nginx on load_balancer nodes
    4. Start the nginx service
    """

    cluster = ClusterKeywords()
    # Set lb_enable to False to get the actual SG IPs for nginx.conf
    topology = cluster.get_cluster_topology(cluster_config, lb_enable=False)

    # Get sync_gateway endpoints from cluster_config
    #  and build a string of upstream server definitions
    # upstream sync_gateway {
    #   server 192.168.33.11:4984;
    #   server 192.168.33.12:4984;
    #  }
    upstream_definition = ""
    upstream_definition_admin = ""

    for sg in topology["sync_gateways"]:
        # strip http:// to adhere to expected format for nginx.conf
        ip_port = sg["public"].replace("http://", "")
        ip_port_admin = sg["admin"].replace("http://", "")
        upstream_definition += "server {};\n".format(ip_port)
        upstream_definition_admin += "server {};\n".format(ip_port_admin)

    log_info("Upstream definition: {}".format(upstream_definition))
    log_info("Upstream definition admin: {}".format(upstream_definition_admin))

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "install-nginx.yml",
        extra_vars={
            # Note: these (misspelled) keys must match the variable names the
            # install-nginx.yml playbook expects, so they are left unchanged.
            "upstream_sync_gatways": upstream_definition,
            "upstream_sync_gatways_admin": upstream_definition_admin
        })

    assert status == 0, "Failed to install nginx!"
Example #10
def logs(logs_loc):
    """
    Wrapper function to pull logs off of the victim vm with ansible
    args:
        logs_loc - location of the log files on the victim vm
    return:
        None
    """
    logger = logging.getLogger('root')
    log_path = "log_path={}".format(logs_loc)
    logger.debug('Pulling logs from {}'.format(logs_loc))
    runner = AnsibleRunner(
        './scenario_builder/forensics/playbooks/logs-linux.yaml',
        hosts='./scenario_builder/forensics/hosts',
        extra_var=log_path)
    runner.run()
    os.rename('./scenario_builder/forensics/playbooks/logs.zip', './logs.zip')
Example #11
def add_site(playbook_path: str,
             name: str,
             network: str,
             router_ip: str) -> bool:
    ar = AnsibleRunner()

    network_name = _get_internal_network_name(name)
    subnet_name = _get_internal_subnet_name(name)
    router_name = _get_internal_router_name(name)

    return ar.play(
        playbook=f'{playbook_path}/add_site.yml',
        extra_vars={
            'NETWORK_NAME': network_name,
            'SUBNET_NAME': subnet_name,
            'NETWORK_ADDRESS': network,
            'ROUTER_NAME': router_name,
            'ROUTER_IP': router_ip
        }
    )
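
# Usage sketch for add_site; the network and router values below are
# illustrative assumptions, not taken from the original code:
site_created = add_site(playbook_path='./playbooks',
                        name='siteA',
                        network='10.0.1.0/24',
                        router_ip='10.0.1.1')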
Example #12
def main(argv: List[str]) -> bool:
    arg_parser = ArgumentParser(
        prog=recap_module_name,
        description='Add or Remove Hosts from RECAP Environment',
        formatter_class=ArgumentDefaultsHelpFormatter
    )

    arg_parser.add_argument('action', choices=['add', 'remove'])
    arg_parser.add_argument('host')
    arg_parser.add_argument(
        '--infrastructure', choices=['r_user', 'r_infra', 'admin', 'global'], required=False)
    arg_parser.add_argument('--site', required=False)
    arg_parser.add_argument('--username', required=True)
    arg_parser.add_argument('--private-key-file', required=True)
    arg_parser.add_argument('--start-stack', action='store_true')
    arg_parser.add_argument('--python-interpreter', default='/usr/bin/python')
    arg_parser.add_argument('--pip-executable', default='pip')
    arg_parser.add_argument('--pip-as-non-root', action='store_true')
    arg_parser.add_argument('--proxy-host', required=False)
    arg_parser.add_argument('--extra-labels', required=False, type=labels_type)

    args = arg_parser.parse_args(argv)

    if args.action == 'add' and (args.infrastructure is None or args.site is None):
        arg_parser.error('add requires --infrastructure and --site')

    if args.site is None:
        # --site is always needed below to look up the Rancher credentials
        arg_parser.error('--site is required')

    extra_labels = ''
    if args.extra_labels is not None:
        extra_labels = get_labels_string(args.extra_labels)

    if args.proxy_host is None:
        ssh_args = '-o StrictHostKeyChecking=no'
    else:
        # Tunnel through the proxy host using an SSH ProxyCommand
        ssh_args = (
            '-o StrictHostKeyChecking=no '
            f'-o ProxyCommand="ssh -W %h:%p -q {args.username}@{args.proxy_host} '
            f'-i {args.private_key_file} -o ControlMaster=auto '
            '-o ControlPersist=30m -o UserKnownHostsFile=/dev/null '
            '-o StrictHostKeyChecking=no"'
        )

    runner = AnsibleRunner()
    rancher_access_key_site = os.getenv('RANCHER_ACCESS_KEY_'+args.site)
    rancher_secret_key_site = os.getenv('RANCHER_SECRET_KEY_'+args.site)
    rancher_env_site = os.getenv('RANCHER_ENV_ID_'+args.site)
    rancher = RancherClient(
        rancher_env_site, rancher_access_key_site, rancher_secret_key_site)

    host = Host(address=args.host,
                username=args.username,
                private_key_file=args.private_key_file,
                variables={
                    'ansible_ssh_common_args': ssh_args,
                    'ansible_python_interpreter': args.python_interpreter,
                    'type': args.infrastructure,
                    'site': args.site,
                    'extra_labels': extra_labels
                })

    if args.action == 'add':
        success = add(host=host,
                      ansible_runner=runner,
                      rancher=rancher,
                      pip_executable=args.pip_executable,
                      pip_as_root=not args.pip_as_non_root)

        if success and args.start_stack:
            success = stack_cmd([
                'up', args.site, '--infrastructures', args.infrastructure
            ])

    else:
        success = remove(host=host,
                         ansible_runner=runner,
                         rancher_client=rancher)

    return success
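
# labels_type and get_labels_string are not defined in these examples. A
# plausible sketch, assuming labels are passed as 'key=value,key2=value2'
# (the exact format is an assumption):
from typing import Dict

def labels_type(raw: str) -> Dict[str, str]:
    # Parse 'a=1,b=2' into {'a': '1', 'b': '2'}; a ValueError here surfaces
    # as an argparse usage error.
    labels = {}
    for pair in raw.split(','):
        key, _, value = pair.partition('=')
        if not key or not value:
            raise ValueError(f'invalid label: {pair!r}')
        labels[key] = value
    return labels

def get_labels_string(labels: Dict[str, str]) -> str:
    # Inverse of labels_type: {'a': '1', 'b': '2'} -> 'a=1,b=2'
    return ','.join(f'{k}={v}' for k, v in labels.items())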
Example #13
import os

from ansible_runner import AnsibleRunner

if __name__ == "__main__":
    usage = "usage: python stop_telegraf.py"

    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError as ke:
        print(
            "Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to run against"
        )
        raise KeyError(
            "CLUSTER_CONFIG not defined. Unable to stop telegraf collectors."
        ) from ke

    ansible_runner = AnsibleRunner(cluster_config)
    status = ansible_runner.run_ansible_playbook("stop-telegraf.yml")
    assert status == 0, "Failed to stop telegraf collectors"
Example #14
def add_host(playbook_path: str,
             site: str,
             host_type: str,
             instance: int,
             public_key: str,
             private_key: str,
             vm_flavour: str,
             vm_availability_zone: str,
             extra_labels: Dict[str, str],
             is_master: bool = False) -> bool:
    ar = AnsibleRunner()

    vm_name = _get_host_vm_name(site, host_type, instance)
    network_name = _get_internal_network_name(site)
    master_host = _get_master_address()

    if "global" == host_type:
        current_playbook = f'{playbook_path}/add_host_fip.yml'

    else:
        current_playbook = f'{playbook_path}/add_host.yml'

    host_available = ar.play(
        playbook=current_playbook,
        extra_vars={
            'VM_NAME': vm_name,
            'NETWORK_NAME': network_name,
            'SITE_NAME': site,
            'TYPE': host_type,
            'MASTER_HOST': master_host,
            'PUBLIC_KEY_FILE': public_key,
            'PRIVATE_KEY_FILE': private_key,
            'VM_FLAVOUR': vm_flavour,
            'VM_AZ': vm_availability_zone
        }
    )

    if not host_available:
        return False

    host_vars = ar.inventory_manager.get_host(vm_name).get_vars()
    host = host_vars.get('ansible_host')
    python_interpreter = host_vars.get('ansible_python_interpreter')
    username = host_vars.get('ansible_user')

    if is_master:
        extra_labels['master'] = 'true'

    if host_type == "global":
        env(['set', "GLOBAL_ADDRESS", host])

    agent_args = [
        'add', host,
        '--infrastructure', host_type,
        '--site', site,
        '--username', username,
        '--private-key-file', private_key,
        '--start-stack',
        '--python-interpreter', python_interpreter,
        '--pip-executable', '/home/core/bin/pip',
        '--pip-as-non-root',
        '--proxy-host', master_host
    ]

    # Only pass --extra-labels when there is something to pass; appending
    # None to the argv list would break argument parsing.
    labels_string = get_labels_string(extra_labels)
    if labels_string:
        agent_args += ['--extra-labels', labels_string]

    agent_added = add_agent(agent_args)

    # Service lists per infrastructure type, read from the environment
    # (currently unused in this function).
    services = {
        'admin': getenv('SERVICES_ADMIN').split(','),
        'global': getenv('SERVICES_GLOBAL').split(','),
        'r_infra': getenv('SERVICES_INFRA').split(','),
        'r_user': getenv('SERVICES_USER').split(',')
    }

    # if agent_added and host_type == 'r_user':
    #     result = update_landscaper(site=site,
    #                                restart_service=True,
    #                                ssh_user=username,
    #                                private_key=private_key,
    #                                python_interpreter=python_interpreter,
    #                                ssh_common_args='-o StrictHostKeyChecking=no')
    #     if not result:
    #         log.warning('There was an error updating the landscaper')

    return agent_added