Code Example #1
def test_older_ansible_version_fails(monkeypatch):
    logger = logging.getLogger(__name__)
    logger.info("Start")
    monkeypatch.setattr(subprocess, "Popen", PopenMockFailOutput)
    with pytest.raises(ImportError):
        TasksScheduler.check_ansible_version()
    logger.info("End")
Code Example #2
def test_openstack_not_installed_fails(monkeypatch):
    logger = logging.getLogger(__name__)
    logger.info("Start")
    monkeypatch.setattr(subprocess, "Popen", PopenMockFailRC)
    with pytest.raises(ImportError):
        TasksScheduler.check_openstack_version()
    logger.info("End")
Code Example #3
def manager_stack_0(template_path, rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="scheduler1.test",
        rabbit_host="127.0.0.1",
    )
    manager._skip_check_duplicates = True
    return manager
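These manager_* factories take other fixtures (template_path, rabbit_service) as arguments, so they read like pytest fixtures whose decorator was stripped during snippet extraction. A sketch of how manager_stack_0 would typically be registered and consumed; the decorator and the test below are assumptions, not part of the original source:

import pytest

@pytest.fixture
def manager_stack_0(template_path, rabbit_service):
    ...  # body as in Code Example #3

def test_stack_0_manager_created(manager_stack_0):  # hypothetical test name
    assert manager_stack_0 is not None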
Code Example #4
def manager_duplicate_schedulerB(template_path, rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="manager_duplicate_scheduler",
        ssh_user="******",
        ssh_password="******",
        rabbit_host="127.0.0.1",
    )

    return manager
Code Example #5
def manager_duplicate_schedulerA(template_path, ready_task_path,
                                 post_task_path, rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="manager_duplicate_scheduler",
        ssh_user="******",
        ssh_password="******",
        rabbit_host="127.0.0.1",
    )
    manager._skip_check_duplicates = True
    return manager
Code Example #6
def manager_stack_e2e(template_path, ready_task_path, post_task_path,
                      rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        ready_tasks_file_path=ready_task_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="scheduler2.test",
        ssh_user="******",
        ssh_password="******",
        job_done_cb=job_done_cb,
        delete_on_failure=True,
        rabbit_host="127.0.0.1",
    )
    return manager
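job_done_cb is referenced above but not defined in these snippets. A hypothetical callback; the real signature is dictated by TasksScheduler and may differ, this sketch only assumes the callback learns which job finished and whether it succeeded:

import logging

def job_done_cb(job_id, success=True):
    # Hypothetical signature -- the arguments TasksScheduler actually passes
    # to job_done_cb are not visible in these snippets.
    logging.getLogger(__name__).info("job %s finished, success=%s", job_id, success)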
Code Example #7
def manager_bad_flavour(template_path, ready_task_path, post_task_path,
                        rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        ready_tasks_file_path=ready_task_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="scheduler4.test",
        ssh_user="******",
        ssh_password="******",
        extra_stack_create_params={"default_flavor": "non_existing_flavor"},
        stack_limit=1,
        rabbit_host="127.0.0.1",
    )

    manager._skip_check_duplicates = True
    return manager
Code Example #8
def manager_stack_reuse(template_path, ready_task_path, post_task_path,
                        rabbit_service):
    manager = TasksScheduler.TasksScheduler(
        template_path=template_path,
        ready_tasks_file_path=ready_task_path,
        access_point_node_ref="server1_public_ip",
        name_prefix="scheduler6.test",
        ssh_user="******",
        ssh_password="******",
        stack_reuse=True,
        stack_limit=1,
        rabbit_host="127.0.0.1",
        job_done_cb_v2=reuse_call_back,
    )

    manager._skip_check_duplicates = True
    return manager
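reuse_call_back (passed as job_done_cb_v2) is likewise not shown. A minimal hypothetical sketch; the v2 parameters are an assumption:

import logging

def reuse_call_back(job_id, success=True, stack_name=None):
    # Hypothetical v2 callback -- the real parameters are defined by
    # TasksScheduler and do not appear in these snippets.
    logging.getLogger(__name__).info(
        "job %s done on stack %s, success=%s", job_id, stack_name, success)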
Code Example #9
def test_newer_openstack_version_pass(monkeypatch):
    logger = logging.getLogger(__name__)
    logger.info("Start")
    monkeypatch.setattr(subprocess, "Popen", PopenMockPass)
    TasksScheduler.check_openstack_version()
    logger.info("End")
Code Example #10
def main():
    global MANAGER
    logger = logging.getLogger(__name__)
    # We init our manager with definitions for:
    #   1. What our stack looks like.
    #   2. When it is ready to run scripts.
    #   3. Which node in it is responsible for declaring the stack ready.
    #   4. What user/password to use.
    #   5. What action to perform when the script has finished.
    manager = TasksScheduler.TasksScheduler(
        template_path="../tests/playbooks/test_stack.yml",
        ready_tasks_file_path="../tests/playbooks/test_stack_ready.yml",
        access_point_node_ref="server1_public_ip",
        name_prefix="schedexmpl",
        ssh_user="******",
        ssh_password="******",
        job_done_cb=job_done_cb,
        stack_reuse=True,
        stack_create_params={"private_net_name": "private_net_scheduler"},
        stack_limit=40,
    )

    MANAGER = manager
    # The local place where we expect script outputs to arrive (defined in test_stack_fin.yml)
    home = os.path.expanduser("~")
    test_out_folder = f"{home}/test_end_2_end/outputs"
    if os.path.exists(test_out_folder):
        shutil.rmtree(test_out_folder)

    os.makedirs(test_out_folder)

    jobs_2_create = 10
    # We create a simple script that represents each job.
    # Note:
    #   1. $1 is the first parameter passed to the job.
    #   2. $server2_public_ip will be resolved to the matching output value
    #      defined in the stack template file.
    script_path = create_script(
        "script.sh",  # Our script just sleep and echo their id and server2 IP
        [
            "sleep $((RANDOM % 60))",
            "echo Im Job $1 and server2_public_ip is $server2_public_ip > /var/tstdir/$1.out_os_scheduler",
            "exit 0",
        ],
    )

    # We generate jobs dynamically here; you probably have a fixed set of known jobs.
    for job_i in range(jobs_2_create):
        manager.add_script_job(
            "server1_public_ip",
            script_path,
            "../tests/playbooks/test_stack_fin.yml",
            parameters=[job_i],
        )

    # That's it; just wait for all jobs to finish.
    manager.wait_all()

    # Don't forget to do something useful with the jobs' outputs...
    for r, d, files in os.walk(test_out_folder):
        for file in files:
            path = os.path.join(r, file)
            with open(path) as f:
                logger.info("{}:".format(path))
                logger.info(f.read())
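create_script is referenced in main() but not shown in these snippets. A hypothetical helper consistent with how main() uses it, writing the given lines into an executable shell script and returning its path; the real project may implement this differently:

import os

def create_script(name, lines):
    # Hypothetical implementation: dump the lines into an executable bash
    # script in the current working directory and return its path.
    path = os.path.join(os.getcwd(), name)
    with open(path, "w") as f:
        f.write("#!/bin/bash\n" + "\n".join(lines) + "\n")
    os.chmod(path, 0o755)
    return path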