def test_expected_bytes_for_tree(self):
    """expected_bytes_for() sums every file under a directory tree."""
    tree = self.make_tmpdir()
    # Seed the tree with two copies of this test file.
    for leaf in ('one', 'two'):
        shutil.copyfile(__file__, os.path.join(tree, leaf))
    # The tree alone accounts for two copies' worth of bytes...
    self.assertEqual(self.TEST_SIZE * 2,
                     arv_put.expected_bytes_for([tree]))
    # ...and passing the file alongside the tree adds one more.
    self.assertEqual(self.TEST_SIZE * 3,
                     arv_put.expected_bytes_for([tree, __file__]))
def upload(source_dir, logger=None):
    """Upload the contents of source_dir via arv-put's collection writer.

    Retries failed attempts with exponential backoff (2s, 4s, ... up to
    256s) before giving up and re-raising the last error.  Returns the
    value of the writer's finish() call on success.

    :param source_dir: directory whose tree should be uploaded.
    :param logger: optional logger; defaults to the "arvados" logger.
    """
    if logger is None:
        logger = logging.getLogger("arvados")

    source_dir = os.path.abspath(source_dir)

    # When running inside a Crunch task (TASK_WORK set), keep the resume
    # checkpoint in the task's tmpdir; otherwise use arv-put's standard
    # cache path derived from the source directory.
    if 'TASK_WORK' in os.environ:
        checkpoint_path = os.path.join(arvados.current_task().tmpdir,
                                       "upload-output-checkpoint")
        resume_cache = put.ResumeCache(checkpoint_path)
    else:
        resume_cache = put.ResumeCache(
            put.ResumeCache.make_path(Args(source_dir)))

    progress = put.progress_writer(machine_progress)
    expected = put.expected_bytes_for([source_dir])

    delay = 1
    result = None
    while True:
        try:
            writer = put.ArvPutCollectionWriter.from_cache(
                resume_cache, progress, expected)
            # Flush any work resumed from the checkpoint, then upload.
            writer.do_queued_work()
            writer.write_directory_tree(source_dir, max_manifest_depth=0)
            result = writer.finish()
            break
        except KeyboardInterrupt as err:
            logger.critical("caught interrupt signal 2")
            raise err
        except Exception as err:
            logger.exception("caught exception:")
            delay *= 2
            if delay > 256:
                logger.critical("Too many upload failures, giving up")
                raise err
            else:
                logger.warning("Sleeping for %s seconds before trying again" % delay)
                time.sleep(delay)
    return result
def test_expected_bytes_for_device(self):
    """expected_bytes_for() returns None when a device file is included."""
    devnull = '/dev/null'
    self.assertIsNone(arv_put.expected_bytes_for([devnull]))
    # One unsized entry makes the whole total indeterminate.
    self.assertIsNone(arv_put.expected_bytes_for([__file__, devnull]))
def test_expected_bytes_for_file(self):
    """expected_bytes_for() reports a single regular file's size."""
    expected = self.TEST_SIZE
    self.assertEqual(expected, arv_put.expected_bytes_for([__file__]))