Example #1
def test_submit_multiplesame(mock_isdir, mock_lsf):
    """
    Test that for multiple jobs the correct number of submission calls happen.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test123"
        },
        "job-two": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        },
        "job-three": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test789"
        }
    }

    mock_isdir.return_value = False

    submit(jobs)

    assert mock_lsf.call_count == 3, \
        "For multiple jobs this method should be called once per job"
Example #2
def test_submit_queueinfo(mock_isdir, mock_submit, mock_savini):
    """
    Check that the queue information counters are getting used.
    """

    jobs = {
        "lbowconf": {
            "test-machine-queue-slots": 0,
            "test-machine-queue-max": 0
        },
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test123"
        },
        "job-two": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        },
        "job-three": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test789"
        }
    }

    mock_isdir.return_value = False
    mock_savini.return_value = None
    mock_submit.return_value = None

    submit(jobs)

    assert jobs["lbowconf"]["test-machine-queue-slots"] == "3"
    assert jobs["lbowconf"]["test-machine-queue-max"] == "3"
Example #3
def test_submit_multiplediff(mock_isdir, mock_lsf, mock_pbs, mock_slurm):
    """
    Test that for multiple jobs the correct number of submission calls happen.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "lsf-machine",
            "scheduler": "LSF",
            "jobid": "test123"
        },
        "job-two": {
            "resource": "pbs-machine",
            "scheduler": "pbs",
            "jobid": "test456"
        },
        "job-three": {
            "resource": "slurm-machine",
            "scheduler": "Slurm",
            "jobid": "test789"
        }
    }

    mock_isdir.return_value = False

    submit(jobs)

    assert mock_lsf.call_count == 1, \
        "For a single job this method should only be called once"
    assert mock_pbs.call_count == 1, \
        "For a single job this method should only be called once"
    assert mock_slurm.call_count == 1, \
        "For a single job this method should only be called once"
Example #4
def test_submit_single(mock_isdir, mock_submit):
    """
    Test that a single job only tries to submit something once.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        }
    }

    mock_isdir.return_value = False

    submit(jobs)

    assert mock_submit.call_count == 1, \
        "For a single job this method should only be called once"
    assert jobs["job-one"]["laststatus"] == "Queued"
Example #5
def test_submit_submitexcept(mock_isdir, mock_submit, mock_savini):
    """
    Test that submission failure errors are handled correctly.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        }
    }

    mock_isdir.return_value = False
    mock_savini.return_value = None
    mock_submit.side_effect = exceptions.JobsubmitError("Submit Error")

    submit(jobs)

    assert jobs["job-one"]["laststatus"] == "Submit Error"
Example #6
def test_submit_attrexcept(mock_isdir, mock_submit, mock_savini):
    """
    Test that errors with missing plugins are handled correctly.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        }
    }

    mock_isdir.return_value = False
    mock_savini.return_value = None
    mock_submit.side_effect = AttributeError

    with pytest.raises(exceptions.PluginattributeError):

        submit(jobs)
Example #7
def test_submit_fileexcept2(mock_isdir, mock_submit, mock_savini):
    """
    Test that if the recovery file write fails, it does so in a controlled way.
    """

    jobs = {
        "lbowconf": {
            "recoveryfile": "recovery-YYMMDD-HHMMSS"
        },
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456",
        }
    }

    mock_isdir.return_value = True
    mock_submit.return_value = None
    mock_savini.side_effect = IOError

    submit(jobs)
Example #8
def test_submit_queueexcept(mock_isdir, mock_submit, mock_savini):
    """
    Test that queue limit events are handled correctly if a plugin raises the
    queuemax exception.
    """

    jobs = {
        "lbowconf": {},
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456"
        }
    }

    mock_isdir.return_value = False
    mock_savini.return_value = None
    mock_submit.side_effect = exceptions.QueuemaxError("Submit Error")

    submit(jobs)

    assert jobs["job-one"]["laststatus"] == "Waiting Submission"
Example #9
def test_submit_fileuninit(mock_isdir, mock_submit, mock_savini):
    """
    Test that if the recovery file is uninitialised, no writing happens.
    """

    jobs = {
        "lbowconf": {
            "recoveryfile": ""
        },
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456",
        }
    }

    mock_isdir.return_value = True
    mock_submit.return_value = None

    submit(jobs)

    assert mock_savini.call_count == 0
Example #10
def test_submit_filewrite(mock_isdir, mock_submit, mock_savini):
    """
    Test that the recovery file write happens if everything is working.
    """

    jobs = {
        "lbowconf": {
            "recoveryfile": "recovery-YYMMDD-HHMMSS"
        },
        "job-one": {
            "resource": "test-machine",
            "scheduler": "LSF",
            "jobid": "test456",
        }
    }

    mock_isdir.return_value = True
    mock_submit.return_value = None

    submit(jobs)

    assert mock_savini.call_count == 1
Example #11
def longbow(jobs, parameters):
    """Entry point at the top level of the Longbow library.

    Being the top-level method that drives the Longbow library, this is a
    good place to link against Longbow if a developer does not want to link
    against the executable, or if low-level linking is not needed or would be
    overkill.

    Required inputs are:
    jobs (dictionary): An empty dictionary that this method populates with
                       the job data structures built from the configuration.
    parameters (dictionary): A dictionary containing the parameters and
                             overrides from the command-line.

    """
    # A failure at this level will result in jobs being killed off before
    # escalating the exception to trigger graceful exit.

    # Load configurations and initialise Longbow data structures.
    jobparams = configuration.processconfigs(parameters)

    # Copy to jobs so when exceptions are raised the structure is available.
    for param in jobparams:

        jobs[param] = jobparams[param]

    # Test all connections specified in the job configurations.
    shellwrappers.checkconnections(jobs)

    # Test that the hosts listed in the job configuration file have their
    # scheduler environments listed; if not, then test and save them.
    scheduling.checkenv(jobs, parameters["hosts"])

    # Test that the applications listed in the job configuration
    # file are available and that the executable is present.
    if parameters["nochecks"] is False:

        applications.checkapp(jobs)

    # Process the jobs command line arguments and find files for
    # staging.
    applications.processjobs(jobs)

    # Create the job file and add it to the list of files that need
    # uploading.
    scheduling.prepare(jobs)

    # Stage all of the job files along with the scheduling script.
    staging.stage_upstream(jobs)

    # Submit all jobs.
    scheduling.submit(jobs)

    # Process the disconnect function.
    if parameters["disconnect"] is True:

        raise exceptions.DisconnectException

    # Monitor all jobs.
    scheduling.monitor(jobs)

    # Clean up all jobs.
    staging.cleanup(jobs)
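
For context, here is a minimal, hedged sketch of driving this entry point from a script rather than through the Longbow executable. The import path and the contents of the parameters dictionary are assumptions; only the "hosts", "nochecks" and "disconnect" keys are read directly by the code above, with the rest of the configuration handled by processconfigs().

# Hedged usage sketch: calling the library entry point directly. Module
# paths are assumptions; only "hosts", "nochecks" and "disconnect" are keys
# the function above reads itself.
from longbow import exceptions             # assumed package layout
from longbow.entrypoints import longbow    # assumed module for longbow()

jobs = {}
parameters = {
    "hosts": "hosts.conf",   # host configuration file passed to checkenv()
    "nochecks": False,       # False -> run the application checks
    "disconnect": False,     # True -> raise DisconnectException after submit
}

try:
    longbow(jobs, parameters)
except exceptions.DisconnectException:
    # Raised deliberately when disconnect mode is enabled.
    pass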