def test_stage_upstream_multijobs(mock_ssh, mock_upload):
    """
    Test if multiple calls are made to SSH and rsync.

    Four jobs are staged, so the SSH (mkdir) helper and the upload helper
    should each be invoked exactly once per job.
    """

    jobs = {
        "jobone": {
            "destdir": "/path/to/jobone12484",
            "resource": "test-machine"
        },
        "jobtwo": {
            "destdir": "/path/to/jobtwo12484",
            "resource": "test-machine"
        },
        "jobthree": {
            "destdir": "/path/to/jobthree12484",
            "resource": "test-machine"
        },
        "jobfour": {
            "destdir": "/path/to/jobfour12484",
            "resource": "test-machine"
        }
    }

    stage_upstream(jobs)

    # Fix: the original failure messages were copy-pasted from the
    # single-job test and wrongly claimed "only one job".
    assert mock_ssh.call_count == 4, \
        "There are four jobs, this should be called four times"
    assert mock_upload.call_count == 4, \
        "There are four jobs, this should be called four times"
def test_stage_upstream_sshexcept(mock_ssh):
    """
    Check that an SSHError from the SSH call propagates up unchanged.
    """

    # Make the mocked SSH call blow up with the library's SSH error.
    mock_ssh.side_effect = exceptions.SSHError("SSH Error", "output")

    singlejob = {
        "jobone": {
            "destdir": "/path/to/jobone12484",
            "resource": "test-machine"
        }
    }

    with pytest.raises(exceptions.SSHError):
        stage_upstream(singlejob)
def test_stage_upstream_singlejob(mock_ssh, mock_upload):
    """
    Test if a single call is made to SSH and rsync. Multiples here are bad.
    """

    onejob = {
        "jobone": {"destdir": "/path/to/jobone12484",
                   "resource": "test-machine"}
    }

    stage_upstream(onejob)

    # Each helper should fire exactly once for the single job.
    for mocked in (mock_ssh, mock_upload):
        assert mocked.call_count == 1, \
            "There is only one job, this should only be called once"
def test_stage_upstream_rsyncexcept(mock_ssh, mock_upload):
    """
    Test if staging exception is correctly raised if rsync exception happens.
    """

    # SSH (mkdir) succeeds; the subsequent upload fails with an rsync error,
    # which stage_upstream should translate into a StagingError.
    mock_ssh.return_value = None
    mock_upload.side_effect = exceptions.RsyncError("Rsync Error", "output")

    faultyjob = {
        "jobone": {
            "localworkdir": "/path/to/local/dir",
            "resource": "test-machine",
            "destdir": "/path/to/jobone12484"
        }
    }

    with pytest.raises(exceptions.StagingError):
        stage_upstream(faultyjob)
def test_stage_upstream_params(mock_ssh, mock_upload):
    """
    Test the correct arguments make it to the upload method.
    """

    jobinfo = {
        "destdir": "/path/to/jobone12484",
        "resource": "test-machine",
        "localworkdir": "/path/to/local/dir"
    }

    stage_upstream({"jobone": jobinfo})

    # Pull out the first positional argument(s) of each mocked call.
    uploadjob = mock_upload.call_args[0][0]
    sshjob = mock_ssh.call_args[0][0]
    sshcmd = mock_ssh.call_args[0][1]

    assert isinstance(uploadjob, dict)
    assert isinstance(sshjob, dict)
    assert isinstance(sshcmd, list)
    assert sshcmd[0] == "mkdir -p /path/to/jobone12484\n"
# Beispiel #6
# 0
def longbow(jobs, parameters):
    """Entry point at the top level of the Longbow library.

    Being the top level method that makes calls on the Longbow library.
    This is a good place to link against Longbow if a developer does not want
    to link against the executable, or if low level linking is not needed or is
    over-kill.

    Required inputs are:
    jobs (dictionary): A structure that is populated in-place with the
                       processed job configurations, so the caller still
                       holds the job data if an exception escalates.
    parameters (dictionary): A dictionary containing the parameters and
                             overrides from the command-line. Keys read
                             here: "hosts", "nochecks" and "disconnect".

    Raises:
    exceptions.DisconnectException: Raised deliberately after submission
                                    when parameters["disconnect"] is True,
                                    so the caller can exit without
                                    monitoring the jobs.
    """
    # A failure at this level will result in jobs being killed off before
    # escalating the exception to trigger graceful exit.

    # Load configurations and initialise Longbow data structures.
    jobparams = configuration.processconfigs(parameters)

    # Copy to jobs so when exceptions are raised the structure is available.
    for param in jobparams:

        jobs[param] = jobparams[param]

    # Test all connection/s specified in the job configurations
    shellwrappers.checkconnections(jobs)

    # Test the hosts listed in the jobs configuration file have their
    # scheduler environments listed, if not then test and save them.
    scheduling.checkenv(jobs, parameters["hosts"])

    # Test that for the applications listed in the job configuration
    # file are available and that the executable is present.
    if parameters["nochecks"] is False:

        applications.checkapp(jobs)

    # Process the jobs command line arguments and find files for
    # staging.
    applications.processjobs(jobs)

    # Create jobfile and add it to the list of files that needs
    # uploading.
    scheduling.prepare(jobs)

    # Stage all of the job files along with the scheduling script.
    staging.stage_upstream(jobs)

    # Submit all jobs.
    scheduling.submit(jobs)

    # Process the disconnect function.
    if parameters["disconnect"] is True:

        raise exceptions.DisconnectException

    # Monitor all jobs.
    scheduling.monitor(jobs)

    # Clean up all jobs
    staging.cleanup(jobs)