def test_merge_output(self):
    """test multiple-input checksums"""
    # Scenario 1: one output's history entry is missing — simulates a
    # previous run that died before recording completion.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([split1], verbose=0,
                 checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    job_history = dbdict.open(get_default_history_file_name(),
                              picklevalues=True)
    del job_history[os.path.relpath(split1_outputs[0])]
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [merge2], verbose=5, checksum_level=level)
        text = log.getvalue()
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            # History-aware levels must notice the leftover output.
            self.assertIn('Job needs update:', text)
            self.assertIn('Previous incomplete run leftover', text)
        else:
            self.assertIn('Job up-to-date', text)

    # Scenario 2: a clean run through merge2 should leave everything
    # up-to-date at every checksum level.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([merge2], verbose=0,
                 checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [merge2], verbose=5, checksum_level=level)
        text = log.getvalue()
        self.assertIn('Job up-to-date', text)
        self.assertNotIn('Job needs update:', text)
        self.assertNotIn('Previous incomplete run leftover', text)
def test_newstyle_split_output(self):
    """test multiple-output checksums"""
    test_pipeline = self.create_pipeline()

    # Case 1: input touched after the run — every level must flag updates.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    test_pipeline.run([split1], verbose=0,
                      checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    time.sleep(.5)
    with open(input_file, 'w') as fh:
        fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        self.assertIn('Job needs update:', log.getvalue())

    # Case 2: outputs exist but were never recorded in the history db —
    # history-aware levels should treat them as failed-run leftovers.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    time.sleep(.5)
    for path in split1_outputs:
        with open(path, 'w') as fh:
            fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', log.getvalue())
            self.assertIn('left over from a failed run?', log.getvalue())
        # lower levels deliberately unchecked

    # Case 3: drop a single output's history entry after a good run.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    test_pipeline.run([split1], verbose=0,
                      checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    job_history = dbdict.open(get_default_history_file_name(),
                              picklevalues=True)
    del job_history[os.path.relpath(split1_outputs[0])]
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', log.getvalue())
            self.assertIn('left over from a failed run?', log.getvalue())
        # lower levels deliberately unchecked
def test_split_output(self):
    """test multiple-output checksums"""
    # Case 1: rewrite the input after running — outputs are out of date
    # regardless of checksum level.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([split1], verbose=0)
    time.sleep(.5)
    with open(input_file, 'w') as fh:
        fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [split1], verbose=5, checksum_level=level)
        self.assertIn('Job needs update:', log.getvalue())

    # Case 2: fabricate all outputs by hand (no history entries) —
    # history-aware levels must flag them as leftovers.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    time.sleep(.5)
    for path in split1_outputs:
        with open(path, 'w') as fh:
            fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [split1], verbose=5, checksum_level=level)
        text = log.getvalue()
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', text)
            self.assertIn('Previous incomplete run leftover', text)
        else:
            self.assertIn('Job up-to-date', text)

    # Case 3: run correctly, then delete one output's history entry.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([split1], verbose=0)
    job_history = dbdict.open(RUFFUS_HISTORY_FILE, picklevalues=True)
    del job_history[split1_outputs[0]]
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [split1], verbose=5, checksum_level=level)
        text = log.getvalue()
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', text)
            self.assertIn('Previous incomplete run leftover', text)
        else:
            self.assertIn('Job up-to-date', text)
def test_merge_output(self):
    """test multiple-input checksums"""
    # Scenario 1: remove one output's history record after a good run of
    # split1, then check how merge2's printout reports it per level.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([split1], verbose=0,
                 checksum_level=CHECKSUM_HISTORY_TIMESTAMPS,
                 pipeline="main")
    job_history = dbdict.open(get_default_history_file_name(),
                              picklevalues=True)
    del job_history[os.path.relpath(split1_outputs[0])]
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [merge2], verbose=6, checksum_level=level,
                          pipeline="main")
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', log.getvalue())
            self.assertIn('left over from a failed run?', log.getvalue())
        # lower levels deliberately unchecked

    # Scenario 2: a clean run of merge2 must report nothing to redo.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    pipeline_run([merge2], verbose=0,
                 checksum_level=CHECKSUM_HISTORY_TIMESTAMPS,
                 pipeline="main")
    for level in possible_chksms:
        log = StringIO()
        pipeline_printout(log, [merge2], verbose=6, checksum_level=level,
                          pipeline="main")
        self.assertNotIn('Job needs update:', log.getvalue())
        self.assertNotIn('left over from a failed run?', log.getvalue())
def test_newstyle_split_output(self):
    """test multiple-output checksums"""
    test_pipeline = self.create_pipeline()

    # Case 1: input modified after the run — all levels report updates.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    test_pipeline.run([split1], verbose=0,
                      checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    time.sleep(.5)
    with open(input_file, 'w') as fh:
        fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        self.assertIn('Job needs update:', log.getvalue())

    # Case 2: write every output by hand so none appears in the history
    # db — history-aware levels should call them failed-run leftovers.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    time.sleep(.5)
    for path in split1_outputs:
        with open(path, 'w') as fh:
            fh.write('testme')
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', log.getvalue())
            self.assertIn('left over from a failed run?', log.getvalue())
        # lower levels deliberately unchecked

    # Case 3: run correctly, then erase one output's history entry.
    cleanup_tmpdir()
    with open(input_file, 'w') as fh:
        fh.write('testme')
    test_pipeline.run([split1], verbose=0,
                      checksum_level=CHECKSUM_HISTORY_TIMESTAMPS)
    job_history = dbdict.open(get_default_history_file_name(),
                              picklevalues=True)
    del job_history[os.path.relpath(split1_outputs[0])]
    for level in possible_chksms:
        log = StringIO()
        test_pipeline.printout(log, [split1], verbose=6,
                               checksum_level=level)
        if level >= CHECKSUM_HISTORY_TIMESTAMPS:
            self.assertIn('Job needs update:', log.getvalue())
            self.assertIn('left over from a failed run?', log.getvalue())
        # lower levels deliberately unchecked