Example #1
def _assert_pipeline_status(pl, flag):
    """ Assert, based on flag file presence, that a pipeline's completed. """
    flags = glob.glob(pipeline_filepath(pl.manager, filename=flag_name("*")))
    assert 1 == len(flags)
    exp_flag = pipeline_filepath(pl, suffix="_" + flag_name(flag))
    try:
        assert os.path.isfile(exp_flag)
    except AssertionError:
        print("FLAGS: {}".format(flags))
        raise
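
Later examples (#4, #11, #14, #15) call _assert_pipeline_completed and _assert_pipeline_halted without showing them. A minimal sketch of how they could wrap the helper above, assuming COMPLETE_FLAG and PAUSE_FLAG constants analogous to the RUN_FLAG used in Example #2 (hypothetical; the real test module may differ):

def _assert_pipeline_completed(pl):
    """ Assert that the pipeline's sole flag file is the completion flag. """
    # COMPLETE_FLAG is an assumed constant, not shown in these examples.
    _assert_pipeline_status(pl, COMPLETE_FLAG)


def _assert_pipeline_halted(pl):
    """ Assert that the pipeline's sole flag file is the halt/pause flag. """
    # PAUSE_FLAG is an assumed constant, not shown in these examples.
    _assert_pipeline_status(pl, PAUSE_FLAG)
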
Example #2
def _assert_pipeline_initialization(pl):
    """
    Assert that a test case begins with output folder in expected state.

    :param pypiper.Pipeline pl: Pipeline instance for test case.
    """
    # TODO: implement.
    suffices = {"_commands.sh", "_profile.tsv",
                "_{}".format(flag_name(RUN_FLAG))}
    exp_init_contents = \
            [pipeline_filepath(pl.manager, suffix=s) for s in suffices]
    obs_init_contents = [pipeline_filepath(pl.manager, filename=n)
                         for n in os.listdir(pl.outfolder)]
    assert len(exp_init_contents) == len(obs_init_contents)
    assert set(exp_init_contents) == set(obs_init_contents)
Example #3
    def test_skips_execution_if_in_unstarted_state(self, get_pipe_manager,
                                                   num_skips):
        """ Pipeline manager skips command execution if not in active state. """

        pm = get_pipe_manager(name="skip-execs")
        pm._active = False

        testfile = pipeline_filepath(pm, filename="output.txt")
        assert not os.path.isfile(testfile)

        cmd = "touch {}".format(testfile)
        num_calls = 0

        # Remain inactive for a parameterized number of call-skipping iterations,
        # then adopt active mode.
        while True:
            pm.run(cmd, target=testfile)
            num_calls += 1
            if num_calls == num_skips:
                pm._active = True
            elif num_calls > num_skips:
                break
            # If we're still looping, we've not yet made a call in active mode.
            assert not os.path.isfile(testfile)

        # We break the loop once we've made a call in active state.
        assert os.path.isfile(testfile)
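
Several of these tests (Examples #3, #6, #9, #10) depend on a get_pipe_manager fixture that is not shown. A minimal sketch of what such a fixture might look like, assuming pytest's built-in tmpdir fixture and the PipelineManager(name, outfolder=...) constructor used in Example #16; the real fixture may differ:

import pytest
import pypiper


@pytest.fixture
def get_pipe_manager(tmpdir):
    """ Hypothetical fixture: build a PipelineManager in a per-test temp folder. """
    def _build(name, **kwargs):
        # Extra keyword arguments (e.g., start_point in Example #9) pass through.
        return pypiper.PipelineManager(name, outfolder=tmpdir.strpath, **kwargs)
    return _build
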
Example #4
    def test_execution_allows_specific_starting_point(
            self, dummy_pipe, test_type, start_index, start_spec_type):
        """ A pipeline may be started from an arbitrary checkpoint. """
        _assert_pipeline_initialization(dummy_pipe)
        s = _parse_stage(BASIC_ACTIONS[start_index], start_spec_type)
        dummy_pipe.run(start_point=s)
        if test_type == "effects":
            exp_files = FILENAMES[start_index:]
            _assert_output(dummy_pipe, exp_files)
            fpaths = [pipeline_filepath(dummy_pipe.manager, filename=fn)
                      for fn in exp_files]
            for fp, content in zip(fpaths, CONTENTS[start_index:]):
                _assert_expected_content(fp, content)
        elif test_type == "checkpoints":
            # Ensure exact collection of checkpoint files (no more, no less).
            _assert_checkpoints(dummy_pipe, BASIC_ACTIONS[start_index:])
        elif test_type == "stage_labels":
            # Ensure match between skipped and executed stage expectations
            # and observations.
            _assert_stage_states(dummy_pipe, BASIC_ACTIONS[:start_index],
                                 BASIC_ACTIONS[start_index:])
        elif test_type == "pipe_flag":
            _assert_pipeline_completed(dummy_pipe)
        else:
            raise ValueError("Unknown test type: '{}'".format(test_type))
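
Examples #4, #11, #14, and #15 share test data named BASIC_ACTIONS, FILENAMES, CONTENTS, FILE_TEXT_PAIRS, and FILE1_NAME: parallel sequences in which each pipeline stage writes one file with known content. A hedged illustration of the shape of that data only; the actual names and values live in the test module and are not shown here:

# Hypothetical shapes with made-up values, purely to show how the constants relate.
FILE_TEXT_PAIRS = [("file1.txt", "hello"), ("file2.txt", "world"), ("file3.txt", "!")]
FILENAMES, CONTENTS = zip(*FILE_TEXT_PAIRS)
FILE1_NAME = FILENAMES[0]
# BASIC_ACTIONS would be the corresponding stage callables (or Stage objects),
# one per output file, in execution order.
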
Example #5
def test_suffix_is_appended_to_filename_if_both_are_provided(
        pl_mgr, filename, suffix):
    """ Suffix is appended to filename if both are provided. """
    expected = filename + suffix
    fullpath = pipeline_filepath(pl_mgr, filename=filename, suffix=suffix)
    _, observed = os.path.split(fullpath)
    assert expected == observed
Example #6
    def test_respects_halt(self, get_pipe_manager, halt_index):
        """ The pipeline manager skips execution if it's in halted state. """
        pm = get_pipe_manager(name="respects-halt")
        targets = ["file{}.txt".format(i) for i in range(1, 5)]
        for i, t in enumerate(targets):
            if i == halt_index:
                pm.halt(raise_error=False)
            target = pipeline_filepath(pm, filename=t)
            cmd = "touch {}".format(target)
            pm.run(cmd, target=target)
        for i, t in enumerate(targets):
            target = pipeline_filepath(pm, filename=t)
            if i < halt_index:
                assert os.path.isfile(target)
            else:
                assert not os.path.isfile(target)
Example #7
def _assert_output(pl, expected_filenames):
    """
    Assert that the expected collection of output files matches, with respect
    to presence only, the observed collection of a pipeline's output files.

    :param pypiper.Pipeline pl: pipeline for which output is to be checked
    :param Iterable[str] expected_filenames: names of the files expected
        among the pipeline's output
    """
    obs_outfiles = glob.glob(pipeline_filepath(
            pl.manager, "*{}".format(OUTPUT_SUFFIX)))
    assert len(expected_filenames) == len(obs_outfiles)
    expected_filepaths = []
    for fname in expected_filenames:
        fpath = fname if os.path.isabs(fname) else \
                pipeline_filepath(pl.manager, filename=fname)
        expected_filepaths.append(fpath)
    assert set(expected_filepaths) == set(obs_outfiles)
Example #8
def test_direct_filename(tmpdir, filename, pl_mgr, test_type):
    """ When given, filename is used instead of pipeline name. """
    fullpath = pipeline_filepath(pl_mgr, filename=filename)
    if test_type == "filename":
        _, observed = os.path.split(fullpath)
        assert filename == observed
    elif test_type == "filepath":
        expected = os.path.join(tmpdir.strpath, filename)
        assert expected == fullpath
    else:
        raise ValueError("Unrecognized test type: '{}'".format(test_type))
Example #9
    def test_skips_to_start(self, get_pipe_manager, start_point):
        """ The pipeline manager can skip to a starting point. """

        # Initialize the manager.
        pm = get_pipe_manager(name="StartTestPM", start_point=start_point)

        # Make a call that should be skipped on the basis of not yet
        # reaching the start point.
        pm.timestamp(checkpoint="merge_reads")
        path_merge_file = pipeline_filepath(pm, filename="merge.txt")
        assert not os.path.isfile(path_merge_file)
        cmd = "touch {}".format(path_merge_file)
        pm.run(cmd, target=path_merge_file)
        assert not os.path.isfile(path_merge_file)

        # Make a call that should also be skipped on the basis of not yet
        # reaching the designated starting/activation point.
        pm.timestamp(checkpoint="fastqc")
        fastqc_folder = os.path.join(pm.outfolder, "fastqc")
        os.makedirs(fastqc_folder)
        fastqc_zipfile = os.path.join(fastqc_folder, "qc.zip")
        fastqc_rawfile = os.path.join(fastqc_folder, "qc.txt")
        cmds = [
            "fastqc", "touch {}".format(fastqc_rawfile),
            "touch {}".format(fastqc_zipfile)
        ]
        pm.run(cmds, target=fastqc_zipfile)
        assert not os.path.isfile(fastqc_zipfile)
        assert not os.path.isfile(fastqc_rawfile)

        # Make a call that should be the first executed, on the basis of
        # being associated with the designated starting point.
        pm.timestamp(checkpoint=start_point)
        path_first_file = pipeline_filepath(pm, filename="outfile.bam")
        cmd = "touch {}".format(path_first_file)
        pm.run(cmd, target=path_first_file)
        assert os.path.isfile(path_first_file)
Example #10
    def test_respects_checkpoints(self, get_pipe_manager, num_skips):
        """ Manager can skip pipeline to where it's not yet checkpointed. """

        pm = get_pipe_manager(name="respect-checkpoints")

        # Control for possibility that skips are due to being in inactive mode.
        assert pm._active

        stages = ["merge", "qc", "filter", "align", "call"]

        # Create checkpoints.
        for s in stages[:num_skips]:
            pm.timestamp(checkpoint=s)

        # Go through the stages and see that we're skipping checkpoints
        # that exist, then proceeding to execute each subsequent stage.
        for i, s in enumerate(stages):
            outfile = pipeline_filepath(pm, s + ".txt")
            cmd = "touch {}".format(outfile)
            pm.timestamp(checkpoint=s)
            pm.run(cmd, target=outfile)

            if i < num_skips:
                # We should not have created the output file.
                try:
                    assert not os.path.isfile(outfile)
                except AssertionError:
                    print("Have run {} stage(s) of {} skip(s)".format(
                        i + 1, num_skips))
                    print("Current manager checkpoint: {}".format(
                        pm.curr_checkpoint))
                    raise
            else:
                # We should have created the output file.
                try:
                    assert os.path.isfile(outfile)
                except AssertionError:
                    print("Have run {} stage(s) of {} skip(s)".format(
                        i + 1, num_skips))
                    print("Current manager checkpoint: {}".format(
                        pm.curr_checkpoint))
                    print("Active? {}".format(pm._active))
                    raise
Example #11
    def test_skip_completed(self, dummy_pipe, test_type):
        """ Pre-completed stage(s) are skipped. """

        _assert_pipeline_initialization(dummy_pipe)

        first_stage = dummy_pipe.stages()[0]
        first_stage_chkpt_fpath = checkpoint_filepath(first_stage, dummy_pipe)
        open(first_stage_chkpt_fpath, 'w').close()
        assert os.path.isfile(first_stage_chkpt_fpath)

        exp_skips = [first_stage]
        exp_execs = dummy_pipe.stages()[1:]

        # This should neither exist nor be created.
        first_stage_outfile = pipeline_filepath(
                dummy_pipe.manager, filename=FILE1_NAME)
        assert not os.path.isfile(first_stage_outfile)
        
        # Do the action.
        dummy_pipe.run()
        
        if test_type == "effects":
            # We should not have generated the first stage's output file.
            # That notion is covered in the outfiles assertion.
            _assert_output(dummy_pipe, FILENAMES[1:])
            assert not os.path.isfile(first_stage_outfile)
            # We should have the correct content in files from stages run.
            for fname, content in FILE_TEXT_PAIRS[1:]:
                fpath = os.path.join(dummy_pipe.outfolder, fname)
                _assert_expected_content(fpath, content)

        elif test_type == "checkpoints":
            # We've manually created the first checkpoint file, and the
            # others should've been created by the run() call.
            _assert_checkpoints(dummy_pipe, exp_stages=dummy_pipe.stages())

        elif test_type == "stage_labels":
            _assert_stage_states(dummy_pipe, exp_skips, exp_execs)

        elif test_type == "pipe_flag":
            _assert_pipeline_completed(dummy_pipe)
        else:
            raise ValueError("Unknown test type: '{}'".format(test_type))
Example #12
def test_uses_pipeline_name_if_no_filename(
        pipe_name, suffix, test_type, pl_mgr, tmpdir):
    """ Pipeline name is proxy for filename if just suffix is given. """

    observed = pipeline_filepath(pl_mgr, suffix=suffix)

    # Allow test type to determine assertion.
    if test_type == "has_pipe_name":
        assert pipe_name in observed
    elif test_type == "has_suffix":
        assert observed.endswith(suffix)
    elif test_type == "full_path":
        try:
            expected = os.path.join(tmpdir.strpath, pipe_name + suffix)
            assert expected == observed
        except AssertionError:
            print("OUTFOLDER: {}".format(pl_mgr.outfolder))
            raise
    else:
        raise ValueError("Unrecognized test type: '{}'".format(test_type))
Example #13
def test_stage_completion_determination(dummy_pipe, spec_type, completed):
    """ Pipeline responds to variety of request forms of checkpoint status. """

    # Allow dummy stage definition and determination of filename.
    def dummy_test_func():
        pass

    chkpt_name = checkpoint_filename(
            dummy_test_func.__name__, pipeline_name=dummy_pipe.name)
    chkpt_fpath = checkpoint_filepath(chkpt_name, dummy_pipe.manager)

    # Determine how to request the checkpoint completion status.
    if spec_type == "filename":
        s = chkpt_name
    elif spec_type == "filepath":
        s = chkpt_fpath
    elif spec_type in ["stage", "stage_name"]:
        s = Stage(dummy_test_func)
        if spec_type == "stage_name":
            s = s.name
    else:
        raise ValueError("Unknown test spec type: {}".format(spec_type))

    # Touch the checkpoint file iff we're positively testing completion.
    if completed:
        open(chkpt_fpath, 'w').close()

    # Check the completion status for concordance with expectation.
    # Print a bit of info to inform hypotheses about the source of a
    # hypothetical test error/failure.
    outfolder_contents = os.listdir(dummy_pipe.outfolder)
    print("Pipeline outfolder contents: {}".format(outfolder_contents))
    print("Contents as pipeline files: {}".format(
        [pipeline_filepath(dummy_pipe.manager, f) for f in outfolder_contents]))
    print("Checking completion status: {} ({})".format(s, type(s)))
    observed_completion = dummy_pipe.completed_stage(s)
    if completed:
        assert observed_completion
    else:
        assert not observed_completion
Example #14
    def test_stop(self, dummy_pipe, test_type, stop_index, spec_type, stop_type):
        """ A pipeline is capable of halting at/after a specified stage. """

        # Negative control / pretest.
        _assert_pipeline_initialization(dummy_pipe)

        # Get the stop point in the correct format.
        stop = _parse_stage(BASIC_ACTIONS[stop_index], spec_type)

        # Make the call under test.
        dummy_pipe.run(**{stop_type: stop})

        # For forming expectations, indexing is exclusive.
        # So if the initial specification was inclusive, we need to
        # increment our expectation-indexing bound.
        if stop_type == "stop_after":
            stop_index += 1

        if test_type == "effects":
            exp_files = FILENAMES[:stop_index]
            _assert_output(dummy_pipe, exp_files)
            fpaths = [pipeline_filepath(dummy_pipe.manager, filename=fn)
                      for fn in exp_files]
            for fp, content in zip(fpaths, CONTENTS[:stop_index]):
                _assert_expected_content(fp, content)

        elif test_type == "checkpoints":
            _assert_checkpoints(dummy_pipe, BASIC_ACTIONS[:stop_index])
        elif test_type == "stage_labels":
            _assert_stage_states(
                    dummy_pipe, expected_skipped=BASIC_ACTIONS[stop_index:],
                    expected_executed=BASIC_ACTIONS[:stop_index])
        elif test_type == "pipe_flag":
            if (stop_index == len(BASIC_ACTIONS)) and \
                    (stop_type == "stop_after"):
                _assert_pipeline_completed(dummy_pipe)
            else:
                _assert_pipeline_halted(dummy_pipe)
        else:
            raise ValueError("Unknown test type: '{}'".format(test_type))
Example #15
    def test_runs_through_full(self, dummy_pipe, test_type):
        """ The entire basic pipeline should execute. """

        # Start with clean output folder.
        _assert_pipeline_initialization(dummy_pipe)

        # Make the call under test.
        dummy_pipe.run(start_point=None, stop_before=None, stop_after=None)
        
        if test_type == "effects":
            # We're interested in existence and content of targets.
            exp_files, _ = zip(*FILE_TEXT_PAIRS)
            _assert_output(dummy_pipe, exp_files)
            fpath_text_pairs = [(pipeline_filepath(dummy_pipe, fname), content)
                                for fname, content in FILE_TEXT_PAIRS]
            for fpath, content in fpath_text_pairs:
                _assert_expected_content(fpath, content)

        elif test_type == "checkpoints":
            # Interest is on checkpoint file existence.
            for stage in dummy_pipe.stages():
                chkpt_fpath = checkpoint_filepath(stage, dummy_pipe)
                try:
                    assert os.path.isfile(chkpt_fpath)
                except AssertionError:
                    print("Stage '{}' file doesn't exist: '{}'".format(
                        stage.name, chkpt_fpath))
                    raise

        elif test_type == "stage_labels":
            # Did the Pipeline correctly track its execution?
            _assert_stage_states(dummy_pipe, [], dummy_pipe.stages())

        elif test_type == "pipe_flag":
            # The final flag should be correctly set.
            _assert_pipeline_completed(dummy_pipe)

        else:
            raise ValueError("Unknown test type: {}".format(test_type))
Example #16
    def test_me(self):

        print("Testing initialization...")

        # Names
        self.assertEqual(self.pp.name, "sample_pipeline")
        self.assertEqual(self.pp2.name, "sample_pipeline2")

        # Outfolder creation
        self.assertTrue(os.path.isdir(self.pp.outfolder))

        print("Testing status flags...")
        self.pp.set_status_flag("testing")
        self._assertFile("sample_pipeline_testing.flag")
        self.pp.set_status_flag("running")
        self._assertNotFile("sample_pipeline_testing.flag")
        self._assertFile("sample_pipeline_running.flag")

        print("Testing waiting for locks...")
        self.pp2.wait = False
        self.pp.wait = False
        sleep_lock = pipeline_filepath(self.pp, filename="lock.sleep")
        subprocess.Popen("sleep .5; rm " + sleep_lock, shell=True)
        self.pp._create_file(sleep_lock)
        print("Putting lock file: " + sleep_lock)
        cmd = "echo hello"
        stamp = time.time()
        self.pp.run(cmd, lock_name="sleep")
        print("Elapsed: " + str(self.pp.time_elapsed(stamp)))
        self.assertTrue(self.pp.time_elapsed(stamp) > 1)

        print("Wait for subprocess...")
        for p in self.pp.procs.copy():
            self.pp._wait_for_process(self.pp.procs[p]["p"])
        self.pp2.wait = True
        self.pp.wait = True

        print("Make sure the pipeline respects files already existing...")
        target = pipeline_filepath(self.pp, filename="tgt")
        if os.path.isfile(target):  # for repeat runs.
            os.remove(target)

        self.pp.run("echo first > " + target, target, shell=True)
        # Should not run
        self.pp.run("echo second > " + target, target, shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["first"], lines)

        print("Execute a targetless command...")
        self.pp.run("echo third > " + target,
                    target=None,
                    lock_name="test",
                    shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["third"], lines)

        # Test reporting results
        self.pp.report_result("key1", "abc")
        self.pp.report_result("key2", "def", "shared")
        key1 = self.pp.get_stat("key1")
        self.assertEqual(key1, 'abc')

        key1 = self.pp2.get_stat("key1")  # should fail
        self.assertEqual(key1, None)
        key2 = self.pp2.get_stat("key2")  # should succeed
        self.assertEqual(key2, 'def')

        print("Test intermediate file cleanup...")
        tgt1 = pipeline_filepath(self.pp, filename="tgt1.temp")
        tgt2 = pipeline_filepath(self.pp, filename="tgt2.temp")
        tgt3 = pipeline_filepath(self.pp, filename="tgt3.temp")
        tgt4 = pipeline_filepath(self.pp, filename="tgt4.txt")
        tgt5 = pipeline_filepath(self.pp, filename="tgt5.txt")
        tgt6 = pipeline_filepath(self.pp, filename="tgt6.txt")
        tgt8 = pipeline_filepath(self.pp, filename="tgt8.cond")
        tgt9 = pipeline_filepath(self.pp, filename="tgt9.cond")
        tgt10 = pipeline_filepath(self.pp, filename="tgt10.txt")

        self.pp.run("touch " + tgt1 + " " + tgt2 + " " + tgt3 + " " + tgt4 +
                    " " + tgt5,
                    lock_name="test")
        self.pp.run("touch " + tgt8 + " " + tgt9, lock_name="test")

        # In global manual_clean mode, even non-manual clean files should not be deleted:
        self.pp.manual_clean = True
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.temp"))
        self.pp.clean_add(tgt4)
        self.pp.clean_add(tgt5, conditional=True)
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.cond"),
                          conditional=True)
        self.pp._cleanup()

        self.assertTrue(os.path.isfile(tgt1))
        self.assertTrue(os.path.isfile(tgt2))
        self.assertTrue(os.path.isfile(tgt3))
        self.assertTrue(os.path.isfile(tgt4))

        self.pp.report_figure("Test figure", os.path.join("fig", "fig.jpg"))

        # But in regular mode, they should be deleted:
        self.pp.manual_clean = False
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.temp"))
        self.pp.clean_add(tgt4)
        self.pp.clean_add(tgt5, conditional=True)
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.cond"),
                          conditional=True)
        self.pp._cleanup()

        self.assertFalse(os.path.isfile(tgt1))
        self.assertFalse(os.path.isfile(tgt2))
        self.assertFalse(os.path.isfile(tgt3))
        self.assertFalse(os.path.isfile(tgt4))

        tgt7 = pipeline_filepath(self.pp, filename="tgt7.txt")
        self.pp.run("touch " + tgt7, tgt7)
        self.pp.clean_add(tgt7, manual=True)

        self.pp.run("touch " + tgt10, target=tgt10, clean=True)

        # Conditional delete should not delete tgt5
        # while pp2 is running
        self.assertTrue(os.path.isfile(tgt5))
        self.assertTrue(os.path.isfile(tgt8))
        self.assertTrue(os.path.isfile(tgt9))
        self.assertTrue(os.path.isfile(tgt10))  # auto cleanup

        # Stopping pp2 should cause tgt5 to be deleted
        self.pp2.stop_pipeline()
        self.pp._cleanup()
        self.assertFalse(os.path.isfile(tgt5))
        self.assertFalse(os.path.isfile(tgt8))
        self.assertFalse(os.path.isfile(tgt9))
        self.assertFalse(os.path.isfile(tgt10))

        # Manual clean should not clean
        self.assertTrue(os.path.isfile(tgt7))

        # cleanup should run on termination:
        self.pp.run("touch " + tgt6, tgt6)
        self.pp.clean_add(tgt6, conditional=True)
        self.pp.stop_pipeline()
        self.assertFalse(os.path.isfile(tgt5))

        # Manual clean should not clean even after pipeline stops.
        self.assertTrue(os.path.isfile(tgt7))

        print("Test failure and nofail options...")
        self.pp3 = pypiper.PipelineManager("sample_pipeline3",
                                           outfolder=self.OUTFOLDER + "3",
                                           multi=True)

        cmd = "thiscommandisbad"

        # Should not raise an error
        self.pp.run(cmd, target=None, lock_name="badcommand", nofail=True)
        self.pp.callprint(cmd, nofail=True)

        # Should raise an error
        with self.assertRaises(OSError):
            self.pp.run(cmd, target=None, lock_name="badcommand")

        print("Test dynamic recovery...")
        # send sigint
        self.pp.locks.append("lock.sleep")
        with self.assertRaises(KeyboardInterrupt):
            self.pp._signal_int_handler(None, None)

        sleep_lock = pipeline_filepath(self.pp, filename="lock.sleep")
        #subprocess.Popen("sleep .5; rm " + sleep_lock, shell=True)
        self.pp._create_file(sleep_lock)
        cmd = "echo hello"
        self.pp.run(cmd, lock_name="sleep")
Example #17
def test_requires_filename_or_suffix(pl_mgr):
    """ Either filename or suffix is required to build a path. """
    with pytest.raises(TypeError):
        pipeline_filepath(pl_mgr)
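
Taken together, Examples #5, #8, #12, and #17 pin down the contract of pipeline_filepath: the result lives in the manager's output folder, an explicit filename takes precedence over the pipeline name, any suffix is appended, and omitting both arguments raises TypeError. A minimal sketch of that contract under those assumptions, not the library's actual implementation:

import os


def pipeline_filepath_sketch(pm, filename=None, suffix=None):
    """ Hypothetical reimplementation of the behavior the tests above describe. """
    if filename is None and suffix is None:
        raise TypeError("Provide a filename and/or a suffix to build a path.")
    # Fall back to the pipeline name when no filename is given (Example #12),
    # then append any suffix (Example #5) and root the result in the outfolder.
    base = (filename if filename is not None else pm.name) + (suffix or "")
    return os.path.join(pm.outfolder, base)
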
Example #18
    def test_me(self):

        print("Testing initialization...")

        # Names
        self.assertEqual(self.pp.name, "sample_pipeline")
        self.assertEqual(self.pp2.name, "sample_pipeline2")

        # Outfolder creation
        self.assertTrue(os.path.isdir(self.pp.outfolder))

        print("Testing status flags...")
        self.pp._set_status_flag("testing")
        self._assertFile("sample_pipeline_testing.flag")
        self.pp._set_status_flag("running")
        self._assertNotFile("sample_pipeline_testing.flag")
        self._assertFile("sample_pipeline_running.flag")

        print("Testing waiting for locks...")
        self.pp2.wait = False
        self.pp.wait = False
        sleep_lock = pipeline_filepath(self.pp, filename="lock.sleep")
        subprocess.Popen("sleep .5; rm " + sleep_lock, shell=True)
        self.pp._create_file(sleep_lock)
        print("Putting lock file: " + sleep_lock)
        cmd = "echo hello"
        stamp = time.time()
        self.pp.run(cmd, lock_name="sleep")
        print("Elapsed: " + str(self.pp.time_elapsed(stamp)))
        self.assertTrue(self.pp.time_elapsed(stamp) > 1)

        print("Wait for subprocess...")
        for p in self.pp.running_procs.copy():
            self.pp._wait_for_process(self.pp.running_procs[p]["p"])
        self.pp2.wait = True
        self.pp.wait = True

        print("Make sure the pipeline respects files already existing...")
        target = pipeline_filepath(self.pp, filename="tgt")
        if os.path.isfile(target):  # for repeat runs.
            os.remove(target)

        self.pp.run("echo first > " + target, target, shell=True)
        # Should not run
        self.pp.run("echo second > " + target, target, shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["first"], lines)

        print("Execute a targetless command...")
        self.pp.run("echo third > " + target,
                    target=None,
                    lock_name="test",
                    shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["third"], lines)

        # Test reporting results
        self.pp.report_result("key1", "abc")
        self.pp.report_result("key2", "def", "shared")
        key1 = self.pp.get_stat("key1")
        self.assertEqual(key1, 'abc')

        key1 = self.pp2.get_stat("key1")  # should fail
        self.assertEqual(key1, None)
        key2 = self.pp2.get_stat("key2")  # should succeed
        self.assertEqual(key2, 'def')

        print("Test intermediate file cleanup...")
        tgt1 = pipeline_filepath(self.pp, filename="tgt1.temp")
        tgt2 = pipeline_filepath(self.pp, filename="tgt2.temp")
        tgt3 = pipeline_filepath(self.pp, filename="tgt3.temp")
        tgt4 = pipeline_filepath(self.pp, filename="tgt4.txt")
        tgt5 = pipeline_filepath(self.pp, filename="tgt5.txt")
        tgt6 = pipeline_filepath(self.pp, filename="tgt6.txt")
        tgt8 = pipeline_filepath(self.pp, filename="tgt8.cond")
        tgt9 = pipeline_filepath(self.pp, filename="tgt9.cond")
        tgt10 = pipeline_filepath(self.pp, filename="tgt10.txt")

        self.pp.run("touch " + tgt1 + " " + tgt2 + " " + tgt3 + " " + tgt4 +
                    " " + tgt5 + " " + tgt6,
                    lock_name="test")
        self.pp.run("touch " + tgt8 + " " + tgt9, lock_name="test")

        # In global dirty mode, even non-manual clean files should not be deleted:
        self.pp.dirty = True
        self.pp.clean_add(tgt3)
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.temp"))
        self.pp.clean_add(tgt4)
        self.pp.clean_add(tgt5, conditional=True)
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.cond"),
                          conditional=True)
        self.pp._cleanup()

        self.assertTrue(os.path.isfile(tgt1))
        self.assertTrue(os.path.isfile(tgt2))
        self.assertTrue(os.path.isfile(tgt3))
        self.assertTrue(os.path.isfile(tgt4))

        # But, these files *should* have been added to the cleanup script

        # test from different wd:
        tgt6_abs = os.path.abspath(tgt6)
        cfile = self.pp.cleanup_file
        ofolder = self.pp.outfolder
        cwd = os.getcwd()
        self.pp.clean_add(tgt6_abs)

        os.chdir("pipeline_output")
        self.pp.outfolder = "../" + ofolder
        self.pp.cleanup_file = "../" + cfile
        self.pp.clean_add(tgt6_abs)
        os.chdir(cwd)
        self.pp.cleanup_file = cfile
        self.pp.outfolder = ofolder

        print("Test manual cleanup adds")
        with open(self.pp.cleanup_file) as f:
            lines = f.readlines()

        print(lines)

        self.assertTrue(lines[2] == 'rm tgt3.temp\n')
        self.assertTrue(lines[10] == 'rm tgt6.txt\n')
        self.assertTrue(lines[11] == 'rm tgt6.txt\n')

        self.pp.report_object("Test figure", os.path.join("fig", "fig.jpg"))

        # But in regular mode, they should be deleted:
        self.pp.dirty = False
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.temp"))
        self.pp.clean_add(tgt4)
        self.pp.clean_add(tgt5, conditional=True)
        self.pp.clean_add(pipeline_filepath(self.pp, filename="*.cond"),
                          conditional=True)
        self.pp._cleanup()

        self.assertFalse(os.path.isfile(tgt1))
        self.assertFalse(os.path.isfile(tgt2))
        self.assertFalse(os.path.isfile(tgt3))
        self.assertFalse(os.path.isfile(tgt4))

        tgt7 = pipeline_filepath(self.pp, filename="tgt7.txt")
        self.pp.run("touch " + tgt7, tgt7)
        self.pp.clean_add(tgt7, manual=True)

        self.pp.run("touch " + tgt10, target=tgt10, clean=True)

        # Conditional delete should not delete tgt5
        # while pp2 is running
        self.assertTrue(os.path.isfile(tgt5))
        self.assertTrue(os.path.isfile(tgt8))
        self.assertTrue(os.path.isfile(tgt9))
        self.assertTrue(os.path.isfile(tgt10))  # auto cleanup

        # Stopping pp2 should cause tgt5 to be deleted
        self.pp2.stop_pipeline()
        self.pp._cleanup()
        self.assertFalse(os.path.isfile(tgt5))
        self.assertFalse(os.path.isfile(tgt8))
        self.assertFalse(os.path.isfile(tgt9))
        self.assertFalse(os.path.isfile(tgt10))

        # Manual clean should not clean
        self.assertTrue(os.path.isfile(tgt7))

        # cleanup should run on termination:
        self.pp.run("touch " + tgt6, tgt6)
        self.pp.clean_add(tgt6, conditional=True)
        self.pp.stop_pipeline()
        self.assertFalse(os.path.isfile(tgt5))

        # Manual clean should not clean even after pipeline stops.
        self.assertTrue(os.path.isfile(tgt7))

        print("Test failure and nofail options...")

        cmd = "thiscommandisbad"

        # Should not raise an error
        self.pp.run(cmd, target=None, lock_name="badcommand", nofail=True)
        self.pp.callprint(cmd, shell=None, nofail=True)

        # Should raise an error
        with self.assertRaises(SubprocessError):
            self.pp.run(cmd, target=None, lock_name="badcommand")

        print("Test dynamic recovery...")
        # send sigint
        self.pp.locks.append("lock.sleep")
        with self.assertRaises(KeyboardInterrupt):
            self.pp._signal_int_handler(None, None)

        sleep_lock = pipeline_filepath(self.pp, filename="lock.sleep")
        #subprocess.Popen("sleep .5; rm " + sleep_lock, shell=True)
        self.pp._create_file(sleep_lock)
        cmd = "echo hello"
        self.pp.run(cmd, lock_name="sleep")

        #subprocess.Popen("sleep .5; rm " + sleep_lock, shell=True)

        print("Test new start")
        if os.path.isfile(target):  # for repeat runs.
            os.remove(target)
        self.pp.run("echo first > " + target, target, shell=True)
        # Should not run
        self.pp.run("echo second > " + target, target, shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["first"], lines)
        self.pp.new_start = True
        # Should run
        self.pp.run("echo third > " + target, target, shell=True)
        with open(target) as f:
            lines = f.readlines()
        self._assertLines(["third"], lines)

        print("Test dual target")
        self.pp.new_start = False
        if os.path.isfile(tgt1):
            os.remove(tgt1)
        self.pp.run("touch " + tgt6, tgt6)
        self.assertTrue(os.path.isfile(tgt6))
        # if target exists, should not run
        self.pp.run("touch " + tgt1, tgt6)
        self.assertFalse(os.path.isfile(tgt1))
        # if two targets, only one exists, should run
        self.assertFalse(os.path.isfile(tgt5))
        self.pp.run("touch " + tgt1, [tgt5, tgt6])
        self.assertTrue(os.path.isfile(tgt1))
        # if two targets, both exist, should not run
        self.assertFalse(os.path.isfile(tgt5))
        self.pp.run("touch " + tgt5, [tgt1, tgt6])
        self.assertFalse(os.path.isfile(tgt5))
Example #19
    def _isFile(self, filename):
        """ Determine if the first manager has this file. """
        filepath = pipeline_filepath(self.pp, filename=filename)
        return os.path.isfile(filepath)
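
Examples #16 and #18 also call _assertFile, _assertNotFile, and _assertLines helpers that are not shown. A hedged sketch of how the first two could be built on _isFile above (the real test class may define them differently):

    def _assertFile(self, filename):
        """ Assert that the first manager's output folder contains the file. """
        self.assertTrue(self._isFile(filename),
                        "Missing file: {}".format(filename))

    def _assertNotFile(self, filename):
        """ Assert that the first manager's output folder lacks the file. """
        self.assertFalse(self._isFile(filename),
                         "Unexpected file: {}".format(filename))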