Example 1
def resultInfo():
    config = ansible_runner.run(playbook=os.path.join(playbookBasePath,
                                                      'ks-config.yaml'),
                                private_data_dir=privateDataDir,
                                artifact_dir=os.path.join(
                                    privateDataDir, 'ks-config'),
                                ident='ks-config',
                                quiet=True)

    if config.rc != 0:
        exit()

    result = ansible_runner.run(playbook=os.path.join(playbookBasePath,
                                                      'result-info.yaml'),
                                private_data_dir=privateDataDir,
                                artifact_dir=os.path.join(
                                    privateDataDir, 'result-info'),
                                ident='result',
                                quiet=True)

    if result.rc != 0:
        exit()

    with open('/kubesphere/playbooks/kubesphere_running', 'r') as f:
        info = f.read()
        print(info)
Example 2
    def setup_data_item(self, data_item, monkeyfs_path, home_dir_path):
        source_file = os.path.join(monkeyfs_path, "data")
        data_name = data_item["name"]
        data_path = data_item["path"]
        data_checksum = data_item["dataset_checksum"]
        dataset_filename = data_item["dataset_filename"]
        installation_location = os.path.join(home_dir_path, data_item["path"])
        dataset_full_path = os.path.join(monkeyfs_path, "data", data_name,
                                         data_checksum, dataset_filename)
        print("Copying dataset from", dataset_full_path, " to ",
              installation_location)

        runner = ansible_runner.run(
            host_pattern=self.name,
            private_data_dir="ansible",
            module="file",
            module_args="path={} state=directory".format(
                installation_location))

        runner = ansible_runner.run(
            host_pattern=self.name,
            private_data_dir="ansible",
            module="unarchive",
            module_args="src={} remote_src=True dest={} creates=yes".format(
                dataset_full_path, installation_location))
        print(runner.stats)
        if runner.status == "failed":
            return False, "Failed to extract archive"

        return True, "Successfully setup data item"
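The two calls above use ansible-runner's ad-hoc interface (host_pattern, module, module_args) rather than a playbook. A minimal, self-contained sketch of that same pattern, assuming an "ansible" private data dir whose inventory defines the target host (the host name and directory are placeholders, not part of the snippet above):

import ansible_runner

# Hypothetical ad-hoc module run; "mytarget" and "ansible" are placeholders.
runner = ansible_runner.run(host_pattern="mytarget",
                            private_data_dir="ansible",
                            module="ping")
if runner.status != "successful":
    print("Ad-hoc ping failed with rc", runner.rc)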
Example 3
def resultInfo(resultState=False):
    ks_config = ansible_runner.run(
        playbook=os.path.join(playbookBasePath, 'ks-config.yaml'),
        private_data_dir=privateDataDir,
        artifact_dir=os.path.join(privateDataDir, 'ks-config'),
        ident='ks-config',
        quiet=True)

    if ks_config.rc != 0:
        exit()

    result = ansible_runner.run(playbook=os.path.join(playbookBasePath,
                                                      'result-info.yaml'),
                                private_data_dir=privateDataDir,
                                artifact_dir=os.path.join(
                                    privateDataDir, 'result-info'),
                                ident='result',
                                quiet=True)

    if result.rc != 0:
        exit()

    config.load_incluster_config()
    api = client.CustomObjectsApi()

    resource = api.get_namespaced_custom_object(
        group="installer.kubesphere.io",
        version="v1alpha1",
        name="ks-installer",
        namespace="kubesphere-system",
        plural="clusterconfigurations",
    )

    if "migration" in resource['status']['core'] and resource['status'][
            'core']['migration'] and resultState == False:
        migration = ansible_runner.run(
            playbook=os.path.join(playbookBasePath, 'ks-migration.yaml'),
            private_data_dir=privateDataDir,
            artifact_dir=os.path.join(privateDataDir, 'ks-migration'),
            ident='ks-migration',
            quiet=False)
        if migration.rc != 0:
            exit()

    if resultState == False:
        with open('/kubesphere/playbooks/kubesphere_running', 'r') as f:
            info = f.read()
            print(info)

    telemeter = ansible_runner.run(
        playbook=os.path.join(playbookBasePath, 'telemetry.yaml'),
        private_data_dir=privateDataDir,
        artifact_dir=os.path.join(privateDataDir, 'telemetry'),
        ident='telemetry',
        quiet=True)

    if telemeter.rc != 0:
        exit()
Example 4
def _simple_ansible(playbook_path, ip, extravars=None):
    ansible_runner.run(
        playbook=playbook_path,
        verbosity=0,
        quiet=False,
        inventory=
        "{0} ansible_connection=ssh ansible_user=cc ansible_ssh_extra_args='-o StrictHostKeyChecking=no'"
        .format(ip),  # this assumes Chameleon!
        ssh_key=PRIVATE_KEY,
        extravars=extravars)
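The inline inventory string above is one option; ansible-runner also accepts a structured inventory. A sketch of the same idea with a dict instead of a string (host address, user, and playbook name are placeholders; this is an assumed variant for illustration, not the project's code):

import ansible_runner

inventory = {
    "all": {
        "hosts": {
            "203.0.113.10": {                      # placeholder address
                "ansible_connection": "ssh",
                "ansible_user": "cc",
                "ansible_ssh_extra_args": "-o StrictHostKeyChecking=no",
            }
        }
    }
}
ansible_runner.run(playbook="site.yml",            # placeholder playbook
                   inventory=inventory,
                   quiet=False)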
Example 5
def run_playbook(id=None,
                 private_data_dir=None,
                 artifact_dir=None,
                 playbook_name=None,
                 playbook_args=None,
                 module_name=None,
                 module_args=None,
                 module_hosts=None):
    """This function is responsible for running a playbook
    and returns the results of the queries that the playbook
    contains.
    """

    result = {}
    try:
        if playbook_name:
            log.info(u"Running playbook '{}' with ID {}".format(
                playbook_name, id))
            r = ansible_runner.run(ident=id,
                                   private_data_dir=private_data_dir,
                                   artifact_dir=artifact_dir,
                                   playbook=playbook_name,
                                   extravars=playbook_args)
        elif module_name:
            log.info(u"Running module '{}' with ID {}".format(module_name, id))
            r = ansible_runner.run(ident=id,
                                   private_data_dir=private_data_dir,
                                   artifact_dir=artifact_dir,
                                   module=module_name,
                                   module_args=module_args,
                                   host_pattern=module_hosts)

        for host in r.events:
            if sys.version_info[0] >= 3:
                detail = bytes(host.get('stdout', ''),
                               'utf-8').decode('unicode_escape')
            else:
                detail = host.get('stdout', '').decode('string_escape')

            if host.get('event', '').startswith('runner_on'):
                result[host['event_data']['host']] = {
                    'summary': r.status,
                    'detail': detail,
                }
            elif host.get('event', '') in ('verbose', 'error'):
                result[host['runner_ident']] = {
                    'summary': r.status,
                    'detail': detail,
                }

        return result

    except Exception as original_exception:
        raise ValueError(original_exception)
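A hypothetical call to the run_playbook helper above, just to show the shape of the returned dict (the identifier, directories, playbook name, and extra vars are placeholders):

results = run_playbook(id="nightly-001",
                       private_data_dir="/tmp/runner",
                       artifact_dir="/tmp/runner/artifacts",
                       playbook_name="site.yml",
                       playbook_args={"target_env": "staging"})
for host, outcome in results.items():
    print(host, outcome["summary"])
    print(outcome["detail"])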
Example 6
 def start_ansible_playbook(self):
     # print('start_ansible_playbook')
     ansible_runner.run(private_data_dir=self.temp_dir,
                        playbook="playbook.yml",
                        quiet=True,
                        debug=True,
                        ignore_logging=True,
                        cancel_callback=self.cancel_callback,
                        finished_callback=self.finished_callback,
                        event_handler=self.runner_process_message)
     # print('finished ansible runner')
     print(self.temp_dir)
Example 7
def resultInfo(resultState=False, api=None):
    ks_config = ansible_runner.run(
        playbook=os.path.join(playbookBasePath, 'ks-config.yaml'),
        private_data_dir=privateDataDir,
        artifact_dir=os.path.join(privateDataDir, 'ks-config'),
        ident='ks-config',
        quiet=True
    )

    if ks_config.rc != 0:
        print("Failed to ansible-playbook ks-config.yaml")
        exit()

    result = ansible_runner.run(
        playbook=os.path.join(playbookBasePath, 'result-info.yaml'),
        private_data_dir=privateDataDir,
        artifact_dir=os.path.join(privateDataDir, 'result-info'),
        ident='result',
        quiet=True
    )

    if result.rc != 0:
        print("Failed to ansible-playbook result-info.yaml")
        exit()

    resource = get_cluster_configuration(api)

    if "migration" in resource['status']['core'] and resource['status']['core']['migration'] and resultState == False:
        migration = ansible_runner.run(
            playbook=os.path.join(playbookBasePath, 'ks-migration.yaml'),
            private_data_dir=privateDataDir,
            artifact_dir=os.path.join(privateDataDir, 'ks-migration'),
            ident='ks-migration',
            quiet=False
        )
        if migration.rc != 0:
            exit()

    if not resultState:
        with open('/kubesphere/playbooks/kubesphere_running', 'r') as f:
            info = f.read()
            logging.info(info)

    telemeter = ansible_runner.run(
        playbook=os.path.join(playbookBasePath, 'telemetry.yaml'),
        private_data_dir=privateDataDir,
        artifact_dir=os.path.join(privateDataDir, 'telemetry'),
        ident='telemetry',
        quiet=True
    )

    if telemeter.rc != 0:
        exit()
Example 8
def test_missing_private_dir_transmit(tmpdir):
    outgoing_buffer = io.BytesIO()

    # Transmit
    with pytest.raises(ValueError) as excinfo:
        run(
            streamer='transmit',
            _output=outgoing_buffer,
            private_data_dir='/foo/bar/baz',
            playbook='debug.yml',
        )

    assert "private_data_dir path is either invalid or does not exist" in str(
        excinfo.value)
Example 9
def test_unparsable_private_dir_worker(tmpdir):
    worker_dir = str(tmpdir.mkdir('for_worker'))
    incoming_buffer = io.BytesIO(b'')
    outgoing_buffer = io.BytesIO()

    # Worker
    run(
        streamer='worker',
        _input=incoming_buffer,
        _output=outgoing_buffer,
        private_data_dir=worker_dir,
    )
    sent = outgoing_buffer.getvalue()
    assert b'"status": "error"' in sent
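The two tests above drive the transmit and worker stages with deliberately bad input. For context, a sketch of the full transmit → worker → process round trip that ansible-runner's streaming interface is built around (assumes a private data dir named "demo" containing project/debug.yml; names are placeholders):

import io
import ansible_runner

outgoing = io.BytesIO()
ansible_runner.run(streamer='transmit', _output=outgoing,
                   private_data_dir='demo', playbook='debug.yml')

outgoing.seek(0)
results = io.BytesIO()
ansible_runner.run(streamer='worker', _input=outgoing, _output=results,
                   private_data_dir='demo')

results.seek(0)
final = ansible_runner.run(streamer='process', _input=results,
                           private_data_dir='demo')
print(final.status)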
Example 10
def run_ansible_script(logFile):
  provider_data = read_txt_data(PROVIDER_EDGES_CONFIG_FILE)
  customer_data = read_txt_data(CUSTOMER_EDGES_CONFIG_FILE)
  network_data = read_yaml_data(ALLOWED_NETWORK_LIST_FILE)

  command_list = ["iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j ACCEPT",
                  "iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j LOG --log-prefix PING-DROP",
                  "iptables -A INPUT -p icmp -j DROP"]

  for nw in network_data["AllowedNetworks"]:
    allow_nw_cmd = "iptables -A INPUT -s "+nw+" -j ACCEPT"
    command_list.append(allow_nw_cmd)

  cmd_data = {"IPTableCommands": command_list}
  write_yaml_data(cmd_data, IP_ROUTE_COMMANDS_FILE)
  print(cmd_data)

  # Writing to log file
  for cmd in command_list:
    for key, provider in enumerate(provider_data["ProviderEdges"]):
      for pr in provider:
        providerEdgeList[provider[pr]["ip"]] = pr
        l = time.strftime("%Y%m%d-%H%M%S") + " PE IPTABLE UPDATE: " + pr + " COMMAND: " + cmd + "\n"
        logFile.write(l)
    for key, customer in enumerate(customer_data["CustomerEdges"]):
      for cr in customer:
        customerEdgeList[customer[cr]["ip"]] = cr
        l = time.strftime("%Y%m%d-%H%M%S") + " CE IPTABLE UPDATE: " + cr + " COMMAND: " + cmd + "\n"
        logFile.write(l)

  
  for key, provider in enumerate(provider_data["ProviderEdges"]):
    for pr in provider:
      provider_ip = provider[pr]["ip"]
      ip_data = {"host": provider_ip}
      write_yaml_data(ip_data, IP_FILE)
      
      r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)
      out = r.get_fact_cache(provider_ip)

  for key, customer in enumerate(customer_data["CustomerEdges"]):
    for cr in customer:
      customer_ip = customer[cr]["ip"]
      ip_data = {"host": customer_ip}
      write_yaml_data(ip_data, IP_FILE)

      r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)
      out = r.get_fact_cache(customer_ip)
Example 11
    def simulate(self, target, simulation_techniques):
        # check targets running vagrant
        # get ip address from machine
        self.check_targets_running_vagrant(target, self.log)
        target_ip = self.get_ip_address_from_machine(target)
        print("{} {}".format(self.config['art_repository'],
                             self.config['art_branch']))
        runner = ansible_runner.run(
            private_data_dir='.attack_range/',
            cmdline=str('-i ' + target_ip + ', '),
            roles_path="../ansible/roles",
            playbook='../ansible/playbooks/atomic_red_team.yml',
            extravars={
                'art_branch': self.config['art_branch'],
                'art_repository': self.config['art_repository'],
                'art_run_techniques': simulation_techniques,
                'ansible_user': '******',
                'ansible_password': '******',
                'ansible_port': 5985,
                'ansible_winrm_scheme': 'http'
            },
            verbosity=0)

        if runner.status == "successful":
            self.log.info(
                "successfully executed technique ID {0} against target: {1}".
                format(simulation_techniques, target))
        else:
            self.log.error(
                "failed to execute technique ID {0} against target: {1}".
                format(simulation_techniques, target))
            sys.exit(1)
Example 12
def preInstallTasks():
    preInstallTasks = collections.OrderedDict()
    preInstallTasks['preInstall'] = [
        os.path.join(playbookBasePath, 'preinstall.yaml'),
        os.path.join(privateDataDir, 'preinstall')
    ]
    preInstallTasks['metrics-server'] = [
        os.path.join(playbookBasePath, 'metrics_server.yaml'),
        os.path.join(privateDataDir, 'metrics_server')
    ]
    preInstallTasks['common'] = [
        os.path.join(playbookBasePath, 'common.yaml'),
        os.path.join(privateDataDir, 'common')
    ]
    preInstallTasks['ks-core'] = [
        os.path.join(playbookBasePath, 'ks-core.yaml'),
        os.path.join(privateDataDir, 'ks-core')
    ]

    for task, paths in preInstallTasks.items():
        pretask = ansible_runner.run(
            playbook=paths[0],                       # playbook to run, e.g. /kubesphere/playbooks/preinstall.yaml
            private_data_dir=privateDataDir,         # root directory for the whole run; metadata and output files live here (/kubesphere/results), with env/cmdline and env/extravars underneath
            artifact_dir=paths[1],                   # directory that holds this run's output, under private_data_dir, e.g. /kubesphere/results/preinstall
            ident=str(task),                         # identifier appended under artifact_dir for this run's results, e.g. /kubesphere/results/preinstall/preInstall
            quiet=False                              # True suppresses the ansible-playbook output; False prints it
        )
        if pretask.rc != 0:
            exit()
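As the comments above indicate, each run writes its artifacts under private_data_dir/artifact_dir/ident, and the returned Runner object carries the outcome. A short sketch of the attributes these examples rely on (paths mirror the comments above and are placeholders):

import ansible_runner

r = ansible_runner.run(playbook='/kubesphere/playbooks/preinstall.yaml',
                       private_data_dir='/kubesphere/results',
                       artifact_dir='/kubesphere/results/preinstall',
                       ident='preInstall',
                       quiet=True)
print(r.rc)        # ansible-playbook return code, 0 on success
print(r.status)    # e.g. 'successful' or 'failed'
print(r.stats)     # per-host ok/changed/failures summary
for event in r.events:
    print(event.get('event'))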
Example 13
def launch(**kwargs):
    extraVars = locals()
    print(
        "I'm deploying two EC2 instances. This can take several minutes. If you want to observe the process, please use the -v (verbose) key"
    )
    ssh_key_add(**kwargs)
    if kwargs.get('launch_mode_with_vcpu') is None:
        kwargs['launch_mode_with_vcpu'] = True
    r = ansible_runner.run(private_data_dir='./',
                           playbook='playbooks/playbook_aws_install.yml',
                           quiet=quietPlaybooks,
                           extravars={
                               **kwargs,
                               **awsCreds,
                               **sshCreds,
                               **gitSettings,
                               **firewallSettings,
                               **host2BindAdress
                           })
    ini_parser.writeVar('ready4test', str(not r.rc), 'connections')
    if r.rc == 0:
        vpc_id = r.get_fact_cache('localhost')['vpc_id']
        ini_parser.writeVar('ec2_server_id',
                            r.get_fact_cache('localhost')['server_ec2_id'],
                            'aws')
        ini_parser.writeVar('ec2_client_id',
                            r.get_fact_cache('localhost')['client_ec2_id'],
                            'aws')
        ini_parser.writeVar('vpc_id', vpc_id, 'aws')
        ini_parser.writeVar('iperf3_server_host',
                            r.get_fact_cache('localhost')['ext_server'],
                            'connections')
        ini_parser.writeVar('iperf3_int_server',
                            r.get_fact_cache('localhost')['int_server'],
                            'iperf3')
        ini_parser.writeVar('iperf3_client_host',
                            r.get_fact_cache('localhost')['ext_client'],
                            'connections')
        ini_parser.writeVar('iperf3_int_client',
                            r.get_fact_cache('localhost')['int_client'],
                            'iperf3')
        ini_parser.writeVar('region', kwargs['region'], 'aws')
        ini_parser.writeVar('launch_mode_with_vcpu',
                            str(kwargs['launch_mode_with_vcpu']), 'aws')
        ini_parser.writeVar('instance_type_client',
                            kwargs['instance_type_client'], 'aws')
        ini_parser.writeVar('instance_type_server',
                            kwargs['instance_type_server'], 'aws')
        if kwargs['launch_mode_with_vcpu']:
            ini_parser.writeVar('vcpu_server', str(kwargs['vcpu_server']),
                                'aws')
            ini_parser.writeVar('vcpu_client', str(kwargs['vcpu_client']),
                                'aws')

        result = "I've just deployed two EC2 instances; both are ready to run performance tests"

    else:
        result = "The Ansible playbook failed. The installation may be partly finished; please perform a manual cleanup using your AWS console. " \
                 "To understand why it happened, please launch the tool with the -v (verbose) key"
    return result
Example 14
    def update_ESCU_app(self):
        self.log.info("Update ESCU App. This can take some time")
        # upload package
        if self.config['cloud_provider'] == 'aws':
            splunk_ip = aws_service.get_single_instance_public_ip(
                'ar-splunk-' + self.config['range_name'] + '-' +
                self.config['key_name'], self.config)
        elif self.config['cloud_provider'] == 'azure':
            splunk_ip = azure_service.get_instance(
                self.config, "ar-splunk-" + self.config['range_name'] + "-" +
                self.config['key_name'], self.log)['public_ip']
        # Upload the replay logs to the Splunk server
        ansible_vars = {}
        ansible_vars['ansible_user'] = '******'
        ansible_vars['ansible_ssh_private_key_file'] = self.config[
            'private_key_path']
        ansible_vars['splunk_password'] = self.config['attack_range_password']
        ansible_vars['security_content_path'] = self.config[
            'security_content_path']

        cmdline = "-i %s, -u ubuntu" % (splunk_ip)
        runner = ansible_runner.run(
            private_data_dir=os.path.join(os.path.dirname(__file__), '../'),
            cmdline=cmdline,
            roles_path=os.path.join(os.path.dirname(__file__),
                                    '../ansible/roles'),
            playbook=os.path.join(os.path.dirname(__file__),
                                  '../ansible/playbooks/update_escu.yml'),
            extravars=ansible_vars)
Example 15
def main():
    #setup command line args
    setup_args()

    #get args back and reformat them to work with ansible
    arg_vars = unpack_list(parse_args())

    #get inventory
    inventory = get_inventory()

    #playbook
    args = parser.parse_args()
    playbook = args.playbook

    #runner settings (ansible output not suppressed)
    settings = {"suppress_ansible_output": False}

    #get private data dir
    directory = os.getcwd() + "/ansible"

    #run playbook with inventory
    r = ansible_runner.run(private_data_dir=directory,
                           playbook=playbook,
                           inventory=inventory,
                           extravars=arg_vars,
                           verbosity=0,
                           settings=settings)
    clean_up()
Example 16
    def setup_persist_folder(self, job_uid, monkeyfs_bucket_name,
                             home_dir_path, persist):
        print("Persisting folder: ", persist)
        persist_path = persist["path"]
        persist_name = "." + persist_path.replace("/", "_") + "_sync.sh"
        script_path = os.path.join(home_dir_path, persist_name)
        monkeyfs_output_folder = "gs://" + \
            os.path.join(monkeyfs_bucket_name, "jobs", job_uid, persist_path)
        persist_folder_path = os.path.join(home_dir_path, persist_path)

        print("Output folder: ", monkeyfs_output_folder)
        print("Input folder: ", persist_folder_path)
        runner = ansible_runner.run(
            host_pattern=self.name,
            private_data_dir="ansible",
            module="include_role",
            module_args="name=gcp/configure/persist_folder",
            extravars={
                "persist_folder_path": persist_folder_path,
                "persist_script_path": script_path,
                "bucket_path": monkeyfs_output_folder,
            })

        if runner.status == "failed":
            return False, "Failed to create persisted directory: " + persist_path
        return True, "Setup persist ran successfully"
Example 17
def preInstallTasks():
    preInstallTasks = collections.OrderedDict()
    preInstallTasks['preInstall'] = [
        os.path.join(playbookBasePath, 'preinstall.yaml'),
        os.path.join(privateDataDir, 'preinstall')
    ]
    preInstallTasks['metrics-server'] = [
        os.path.join(playbookBasePath, 'metrics_server.yaml'),
        os.path.join(privateDataDir, 'metrics_server')
    ]
    preInstallTasks['common'] = [
        os.path.join(playbookBasePath, 'common.yaml'),
        os.path.join(privateDataDir, 'common')
    ]
    preInstallTasks['ks-core'] = [
        os.path.join(playbookBasePath, 'ks-core.yaml'),
        os.path.join(privateDataDir, 'ks-core')
    ]

    for task, paths in preInstallTasks.items():
        pretask = ansible_runner.run(playbook=paths[0],
                                     private_data_dir=privateDataDir,
                                     artifact_dir=paths[1],
                                     ident=str(task),
                                     quiet=False)
        if pretask.rc != 0:
            exit()
Example 18
    def create_cluster(self):
        # Initialize if not already done
        if not self.content_folder:
            self.initialize()

        r = ansible_runner.run(
            private_data_dir=self.content_folder,
            playbook='cluster.yml',
            project_dir=self.content_folder,
            cmdline=self.__calculate_command_line_args(),
            inventory=self.current_inventory,
            ssh_key=self.context.ssh_key_manager.get_private_rsa_key_pem())

        if r.rc == 0 and r.status == 'successful':
            admin_file = os.path.join(self.content_folder,
                                      'inventory/artifacts/admin.conf')
            if os.path.exists(admin_file):
                shutil.copyfile(admin_file,
                                self.context.cluster_space.kubectl_file)
                self.__patch_admin_file()

                os.chmod(self.context.cluster_space.kubectl_file, 0o644)
            # TODO  Check if admin.conf is not in the folder and kubespray is configured to copy it to localhost
        else:
            raise KubesprayCLusterCreationException(
                'An error occurred while executing creation cluster.yml playbook'
            )
        logger.info(f'Kubespray run finished successfully. Time spent')
Example 19
def ansible_playbook(user_input):
    r = ansible_runner.run(private_data_dir=os.getcwd(), playbook=user_input)
    print("{}: {}".format(r.stats, r.rc))
    for each_host_event in r.events:
        print(each_host_event['event'])
    print("Final status: ")
    print(r.stats)
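Beyond printing every event name as ansible_playbook does above, a common variant is to pull out only the per-host failures; a sketch using the same event keys seen elsewhere in these examples (the playbook path and data dir are placeholders):

import ansible_runner

r = ansible_runner.run(private_data_dir='.', playbook='site.yml')
for event in r.events:
    if event.get('event') == 'runner_on_failed':
        data = event.get('event_data', {})
        print("FAILED:", data.get('host'), data.get('task'))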
Example 20
    def _run_ansible_role(
            self, **kwargs: Union[str, int, float]) -> Union[bool, Any]:
        """Worker func to run role"""

        try:
            r = ansible_runner.run(
                private_data_dir=ANSIBLE_LOC,
                limit=self.sut,
                role=kwargs["role"],
                rotate_artifacts=1,
                directory_isolation_base_path="/tmp/runner",
                extravars=kwargs["extra_vars"],
                cmdline=kwargs.get("cmdline", "--tags all"),
            )
            subprocess.call(["rm", "-f", ANSIBLE_LOC + "/project/main.json"])
            subprocess.call(["rm", "-f", ANSIBLE_LOC + "/env/extravars"])
        except Exception as e:
            logging.error(f"Ansible role run got error - {e}")
            # clean up
            subprocess.call(["rm", "-f", ANSIBLE_LOC + "/project/main.json"])
            subprocess.call(["rm", "-f", ANSIBLE_LOC + "/env/extravars"])
            return False
        if r.status == "successful" and r.rc == 0:
            return r
        else:
            return False
Example 21
    def cleanup_job(self, job, provider_info=dict()):
        job_uid = job["job_uid"]
        print("\n\nTerminating Machine:", job_uid, "\n\n")

        delete_instance_params = {
            "monkey_job_uid": job_uid,
            "zone": provider_info["zone"],
            "gcp_project": provider_info["project"],
            "gcp_cred_kind": "serviceaccount",
            "gcp_cred_file": provider_info["gcp_cred_file"],
        }

        print(provider_info)

        runner = ansible_runner.run(host_pattern="localhost",
                                    private_data_dir="ansible",
                                    module="include_role",
                                    module_args="name=gcp/delete",
                                    extravars=delete_instance_params)

        print(runner.stats)
        if runner.status == "failed":
            print("Failed Deletion of machine")
            return False, "Failed to cleanup job after completion"
        return True, "Successfully cleaned up job"
Example 22
def run(
    server: t.Union[Server, t.Dict],
    playbook: str,
    extravars: t.Dict = None,
    ident: str = None,
    **kwargs
):
    if not server:
        print("Server not found!")
        return
    if extravars is None:
        extravars = {}
    if isinstance(server, dict):
        priv_data_dir = prepare_priv_dir_dict(server)
        extravars["host"] = server["ansible_name"]
    else:
        priv_data_dir = prepare_priv_dir(server)
        extravars["host"] = server.ansible_name
    try:
        runner = ansible_runner.run(
            ident=uuid4() if ident is None else ident,
            private_data_dir=priv_data_dir,
            project_dir="ansible/project",
            playbook=playbook,
            extravars=extravars,
            **kwargs
        )
    except OSError:
        print(traceback.format_exc())
        return
    return runner
Example 23
def test_generate_and_lint_template():
    r = ansible_runner.run(
        private_data_dir=TEST_DIR,
        # inventory='local,', # Supplied in env/cmdline directly
        playbook=str(config.rootdir) +
        '/infrastructure/ansible/aws-security.yml',
        artifact_dir="/tmp/ansible",
        rotate_artifacts=5,
        quiet=False)
    for each_host_event in r.events:
        tasks_to_be_validated = [
            'Generate CloudFormation AWS Security templates'
        ]
        try:
            if each_host_event['event_data']['task'] in tasks_to_be_validated:
                for result in each_host_event['event_data']['res']['results']:
                    print("Changed: {}, Failed: {} \t{}".format(
                        result['changed'], result['failed'], result['item']))
                    assert result['failed'] == False
                    print(result)
                    if result['dest'].endswith('.yml'):
                        result_file_path = os.path.abspath(result['dest'])
                        print(result_file_path)
                        run_cfn_lint(result_file_path)
        except KeyError:
            pass
    assert r.status == "successful"
    print("Ansible playbook run status: {}".format(r.status))
Example 24
def test_tht_ansible_syntax(pytestconfig):
    role_path = ''
    mod_path = ''
    tht_root = str(pytestconfig.invocation_params.dir)
    tht_test_path = os.path.join(tht_root, 'tripleo_heat_templates/tests')

    for r in role_paths:
        role_path = append_path(role_path, os.path.join(tht_test_path, r))

    for m in module_paths:
        mod_path = append_path(mod_path, os.path.join(tht_test_path, m))

    play_path = os.path.join(tht_test_path, 'test_tht_ansible_syntax.yml')

    os.environ["ANSIBLE_ROLES_PATH"] = role_path
    os.environ["ANSIBLE_LIBRARY"] = mod_path
    # Some variables are generated by config-download and part of TripleO inventory,
    # absent from this testing
    os.environ["ANSIBLE_ERROR_ON_UNDEFINED_VARS"] = "False"

    run = ansible_runner.run(playbook=play_path,
                             extravars={'tht_root': tht_root},
                             verbosity=3)

    try:
        assert run.rc == 0
    finally:
        print("{}: {}".format(run.status, run.rc))
Example 25
def test_garbage_private_dir_worker(tmpdir):
    worker_dir = str(tmpdir.mkdir('for_worker'))
    incoming_buffer = io.BytesIO(
        b'{"kwargs": {"playbook": "debug.yml"}}\n{"zipfile": 5}\n\x01\x02\x03\x04\x05{"eof": true}\n'
    )
    outgoing_buffer = io.BytesIO()

    # Worker
    run(
        streamer='worker',
        _input=incoming_buffer,
        _output=outgoing_buffer,
        private_data_dir=worker_dir,
    )
    sent = outgoing_buffer.getvalue()
    assert b'"status": "error"' in sent
Example 26
def process(ctx, params):
    playbook_path = params[
        "playbook_path"] if "playbook_path" in params else "site.yml"
    private_data_dir = os.getcwd()
    inventory_path = (params["inventory_path"]
                      if "inventory_path" in params else "inventory/hosts")
    extra_vars = dict(webhook_payload=ctx)
    if "extra_vars" in params:
        extra_vars.update(params["extra_vars"])
        # Use the Python that we're running as by default, so dependencies are available
        if "ansible_python_interpreter" not in params["extra_vars"]:
            extra_vars.update(
                {"ansible_python_interpreter": "/opt/app-root/bin/python"})

    passwords = dict(vault_pass=params["vault_password"] if "vault_password" in
                     params else "")

    r = ansible_runner.run(
        private_data_dir=private_data_dir,
        inventory=inventory_path,
        playbook=playbook_path,
        extravars=extra_vars,
        passwords=passwords,
    )

    print("{}: {}".format(r.status, r.rc))
    print(f'Final status: {r.stats}')
Example 27
 def run(self):
     """
     Runs the task.
     """
     tmpdir = tempfile.mkdtemp()
     ssh_priv_key = open(os.path.expanduser('~/.ssh/id_rsa')).read()
     runner = ansible_runner.run(
         ident=self.task_id,
         private_data_dir=tmpdir,
         project_dir=self.ansible_dir,
         artifact_dir=os.path.abspath(
             os.path.join(self.ansible_dir, "artifacts")),
         playbook=os.path.abspath(os.path.join(self.ansible_dir,
                                               "full.yml")),
         inventory=[os.path.abspath(self.ansible_built_inventory_filepath)],
         ssh_key=ssh_priv_key,
         extravars=dict({
             "provision": True,
             "clean": True
         }),
         envvars=dict({"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"}),
         cmdline="-u root",
     )
     shutil.rmtree(tmpdir)
     if runner.status == "failed" or runner.rc != 0:
         raise RuntimeError(
             "task execution failed, ansible finished with {0}".format(
                 runner.rc))
Example 28
 def on_get(self, req, resp, run=None):
     doc = {}
     if run is None:
         if 'refresh' in req.params:
             self.refresh()
         doc['directories'] = []
         for i in range(len(self.directories)):
             doc['directories'].append({
                 'href':
                 "%s%s/%s" % (req.prefix, req.path, self.directories[i])
             })
         doc['message'] = 'Provide param refresh=true to update directory listing'
     elif 'playbook' in req.params:
         r = ansible_runner.run(private_data_dir=os.path.join(
             self.path, run),
                                rotate_artifacts=1,
                                **req.params)
         doc['status'] = {}
         doc['status']['status'] = r.status
         doc['status']['stdout'] = r.stdout.readlines()
         doc['status']['stats'] = r.stats
     else:
         doc['message'] = 'Params are passed to the runner eg playbook=testing.yml'
     resp.body = json.dumps(doc, ensure_ascii=False)
     resp.status = falcon.HTTP_200
Example 29
def check_aws_provider(yaml):
    provider_name = yaml.get("name")
    print("Checking integrity of", provider_name, "with type:",
          yaml.get("type"))

    cred_environment = aws_cred_file_environment(yaml["aws_cred_file"])

    runner = ansible_runner.run(playbook='aws_setup_checks.yml',
                                private_data_dir='ansible',
                                extravars={
                                    "access_key_id":
                                    cred_environment["AWS_ACCESS_KEY_ID"],
                                    "access_key_secret":
                                    cred_environment["AWS_SECRET_ACCESS_KEY"],
                                },
                                quiet=True)
    events = [e for e in runner.events]
    if runner.status == "failed":
        printout_ansible_events(events)

        print("Failed to mount the AWS S3 filesystem")
        return False
    print("Mount successful")

    return True
Example 30
    def create_instance(self, machine_params=dict()):

        runner = ansible_runner.run(playbook='gcp_create_job.yml',
                                    private_data_dir='ansible',
                                    extravars=machine_params)
        print(runner.stats)

        if runner.status == "failed":
            return None, False
        print(machine_params)
        retries = 4
        while retries > 0:
            loader = DataLoader()
            inventory = InventoryManager(loader=loader,
                                         sources="ansible/inventory")
            try:
                h = inventory.get_host(machine_params["monkey_job_uid"])
                host_vars = h.get_vars()
                inst = MonkeyInstanceGCP(ansible_info=host_vars)
                # TODO ensure machine is on
                if inst is not None:
                    return inst, True
            except Exception as e:
                print("Failed to get host", e)
                return None, False
            retries -= 1
            print("Retry inventory creation for machine")
            time.sleep(2)
        return None, False