class TestSaveProcess(unittest.TestCase):
    """Exercise SaveProcess end-to-end against the 'disk' backup client.

    setUp spins up a real SaveProcess child process fed by a JoinableQueue;
    tearDown asserts every test drained the worker before returning.
    """

    def setUp(self):
        # Queues wiring the SaveProcess worker to the test.
        self.block_queue = multiprocessing.JoinableQueue()
        self.result_queue = multiprocessing.Queue()
        self.stat_queue = multiprocessing.Queue()
        self.volume_id = 'volume_id'
        # Scratch dir backing the on-disk backup store; removed in tearDown.
        self.scratch = mkdtemp()
        backup_path = os.path.join(self.scratch, 'backups')
        self.conf = LunrConfig({
            'backup': {'client': 'disk'},
            'disk': {'path': backup_path},
        })
        self.conn = get_conn(self.conf)
        self.conn.put_container(self.volume_id)
        self.process = SaveProcess(self.conf, self.volume_id,
                                   self.block_queue, self.result_queue,
                                   self.stat_queue)
        self.process.start()

    def tearDown(self):
        rmtree(self.scratch)
        # The worker must have exited before the test returned.
        self.assertFalse(self.process.is_alive())

    def test_upload(self):
        dev = '/dev/zero'
        salt = 'salt'
        block_count = 3
        # `range` (not the py2-only `xrange`) — identical for 3 iterations.
        for i in range(block_count):
            block = Block(dev, i, salt)
            # Lie about the hash so each block gets a distinct object name.
            block._hydrate()
            block._hash = "hash_%s" % i
            self.block_queue.put(block)
        # Sentinel tells the worker there is no more work.
        self.block_queue.put(None)
        # Block until the worker exits instead of busy-polling is_alive().
        self.process.join()
        stats, errors = self.result_queue.get()
        self.assertEqual(stats['uploaded'], block_count)
        self.assertEqual(len(errors), 0)
        headers, listing = self.conn.get_container(self.volume_id)
        self.assertEqual(len(listing), block_count)
class TestSaveProcess(unittest.TestCase):
    """Integration test for SaveProcess using the 'disk' backup client."""

    def setUp(self):
        # Work/result/stat queues shared with the SaveProcess child.
        self.block_queue = multiprocessing.JoinableQueue()
        self.result_queue = multiprocessing.Queue()
        self.stat_queue = multiprocessing.Queue()
        self.volume_id = 'volume_id'
        # Temp directory for the disk-backed store; cleaned up in tearDown.
        self.scratch = mkdtemp()
        backup_path = os.path.join(self.scratch, 'backups')
        self.conf = LunrConfig({
            'backup': {'client': 'disk'},
            'disk': {'path': backup_path},
        })
        self.conn = get_conn(self.conf)
        self.conn.put_container(self.volume_id)
        self.process = SaveProcess(self.conf, self.volume_id,
                                   self.block_queue, self.result_queue,
                                   self.stat_queue)
        self.process.start()

    def tearDown(self):
        rmtree(self.scratch)
        # Tests are responsible for draining the worker before finishing.
        self.assertFalse(self.process.is_alive())

    def test_upload(self):
        dev = '/dev/zero'
        salt = 'salt'
        block_count = 3
        # `range` works identically to the removed py2-only `xrange` here.
        for i in range(block_count):
            block = Block(dev, i, salt)
            # Lie about the hash so uploads land under distinct names.
            block._hydrate()
            block._hash = "hash_%s" % i
            self.block_queue.put(block)
        # None is the end-of-work sentinel for the worker.
        self.block_queue.put(None)
        # join() replaces the sleep-poll loop; same effect, no busy wait.
        self.process.join()
        stats, errors = self.result_queue.get()
        self.assertEqual(stats['uploaded'], block_count)
        self.assertEqual(len(errors), 0)
        headers, listing = self.conn.get_container(self.volume_id)
        self.assertEqual(len(listing), block_count)
def setUp(self):
    """Start a SaveProcess wired to a disk-backed backup store."""
    # Identify the volume and lay out the scratch backup directory first.
    self.volume_id = 'volume_id'
    self.scratch = mkdtemp()
    self.conf = LunrConfig({
        'backup': {'client': 'disk'},
        'disk': {'path': os.path.join(self.scratch, 'backups')},
    })
    self.conn = get_conn(self.conf)
    self.conn.put_container(self.volume_id)
    # Queues connecting the test to the worker process.
    self.block_queue = multiprocessing.JoinableQueue()
    self.result_queue = multiprocessing.Queue()
    self.stat_queue = multiprocessing.Queue()
    self.process = SaveProcess(self.conf, self.volume_id,
                               self.block_queue, self.result_queue,
                               self.stat_queue)
    self.process.start()
def setUp(self):
    """Spin up a SaveProcess backed by an on-disk backup client."""
    self.block_queue = multiprocessing.JoinableQueue()
    self.result_queue = multiprocessing.Queue()
    self.stat_queue = multiprocessing.Queue()
    self.volume_id = 'volume_id'
    # Scratch area for the disk client's backup tree.
    self.scratch = mkdtemp()
    backup_dir = os.path.join(self.scratch, 'backups')
    conf_dict = {
        'backup': {'client': 'disk'},
        'disk': {'path': backup_dir},
    }
    self.conf = LunrConfig(conf_dict)
    self.conn = get_conn(self.conf)
    self.conn.put_container(self.volume_id)
    args = (self.conf, self.volume_id, self.block_queue,
            self.result_queue, self.stat_queue)
    self.process = SaveProcess(*args)
    self.process.start()