def test_lock_fails_if_db_already_locked(self):
    """While one connection holds the lock in a background thread, a
    second KV on the same file must fail fast with 'database is locked'
    both when locking and when writing."""
    import sqlite3
    from threading import Thread
    from Queue import Queue

    db_path = self.tmp / "kv.sqlite"
    lock_acquired = Queue()
    release_lock = Queue()
    # Short timeout so the contender gives up quickly instead of waiting
    # the sqlite default 5 seconds.
    contender = KV(db_path, timeout=0.1)

    def hold_lock():
        # Take the lock on a separate connection, announce it, then hold
        # it until the main thread signals us to let go.
        holder = KV(db_path)
        with holder.lock():
            lock_acquired.put(None)
            release_lock.get()

    worker = Thread(target=hold_lock)
    worker.start()
    try:
        lock_acquired.get()  # wait until the worker actually holds the lock
        with self.assertRaises(sqlite3.OperationalError) as err_lock:
            with contender.lock():
                pass
        self.assertEqual(err_lock.exception.message, "database is locked")
        with self.assertRaises(sqlite3.OperationalError) as err_write:
            contender["a"] = "b"
        self.assertEqual(err_write.exception.message, "database is locked")
    finally:
        release_lock.put(None)
        worker.join()
def test_lock_fails_if_db_already_locked(self):
    """Locking or writing through a second connection raises
    OperationalError while another connection owns the database lock."""
    import sqlite3
    from threading import Thread
    from Queue import Queue

    db_path = self.tmp / 'kv.sqlite'
    locked_signal = Queue()
    done_signal = Queue()
    kv_blocked = KV(db_path, timeout=0.1)

    def locker():
        # Own the lock and keep it until the main thread says we're done.
        kv_owner = KV(db_path)
        with kv_owner.lock():
            locked_signal.put(None)
            done_signal.get()

    thread = Thread(target=locker)
    thread.start()
    try:
        locked_signal.get()
        with self.assertRaises(sqlite3.OperationalError) as ctx_lock:
            with kv_blocked.lock():
                pass
        self.assertEqual(ctx_lock.exception.message, 'database is locked')
        with self.assertRaises(sqlite3.OperationalError) as ctx_set:
            kv_blocked['a'] = 'b'
        self.assertEqual(ctx_set.exception.message, 'database is locked')
    finally:
        done_signal.put(None)
        thread.join()
class Airship(object):
    """
    The airship object implements most operations performed by airship.
    It acts as container for deployments.
    """

    def __init__(self, config):
        # `config['home']` must be a path-like object supporting the `/`
        # join operator and `mkdir_p()` — TODO confirm exact path type.
        self.home_path = config['home']
        self.var_path = self.home_path / 'var'
        self.log_path = self.var_path / 'log'
        self.deploy_path = self.var_path / 'deploy'
        self.config = config
        etc = self.home_path / 'etc'
        etc.mkdir_p()
        # Both KV stores share one sqlite file, separated by table name.
        self.buckets_db = KV(etc / 'buckets.db', table='bucket')
        self.meta_db = KV(etc / 'buckets.db', table='meta')
        self.daemons = Supervisor(etc)

    @property
    def cfg_links_folder(self):
        # Created lazily on first access.
        folder = self.home_path / CFG_LINKS_FOLDER
        if not folder.isdir():
            folder.makedirs()
        return folder

    def initialize(self):
        """Create the runtime folder layout and the supervisor config."""
        self.var_path.mkdir_p()
        self.log_path.mkdir_p()
        (self.var_path / 'run').mkdir_p()
        self.deploy_path.mkdir_p()
        self.generate_supervisord_configuration()

    def generate_supervisord_configuration(self):
        self.daemons.configure(self.home_path)

    def _get_bucket_by_id(self, bucket_id):
        # Raises KeyError if `bucket_id` is not registered in buckets_db.
        config = self.buckets_db[bucket_id]
        return Bucket(bucket_id, self, config)

    def get_bucket(self, name=_newest):
        """Return the bucket called `name`, or the newest one by default.

        "Newest" means the greatest bucket id, which works because ids
        come from a monotonically increasing counter.
        """
        if name is _newest:
            name = max(self.buckets_db)
        return self._get_bucket_by_id(name)

    def _bucket_folder(self, id_):
        return self.deploy_path / id_

    def _generate_bucket_id(self):
        """Allocate a fresh bucket id and create its deploy folder.

        The counter update runs under the meta DB lock so concurrent
        callers cannot allocate the same id.
        """
        with self.meta_db.lock():
            next_id = self.meta_db.get('next_bucket_id', 1)
            self.meta_db['next_bucket_id'] = next_id + 1
        id_ = 'd%d' % (next_id,)
        self._bucket_folder(id_).mkdir()
        return id_

    def new_bucket(self, config=None):
        """Create, register and return a new empty bucket.

        `config` used to be a mutable default argument (`{}`); it is kept
        for backward compatibility but is not stored — the bucket always
        starts with an empty configuration.
        TODO(review): confirm whether `config` should be persisted.
        """
        bucket_id = self._generate_bucket_id()
        self.buckets_db[bucket_id] = {}
        bucket = self._get_bucket_by_id(bucket_id)
        return bucket

    def list_buckets(self):
        """Return a JSON-serializable listing of all bucket ids."""
        return {'buckets': [{'id': id_} for id_ in self.buckets_db]}
def test_lock_during_lock_still_saves_value(self):
    """A write made under a re-entered (nested) lock must persist."""
    store = KV()
    with store.lock():
        with store.lock():
            store["a"] = "b"
    self.assertEqual(store["a"], "b")
def locker():
    # Take the database lock on a fresh connection, tell the main thread
    # it is held, then keep holding it until asked to release.
    holder = KV(db_path)
    with holder.lock():
        q1.put(None)
        q2.get()
def test_lock_during_lock_still_saves_value(self):
    """Nested lock() contexts must not swallow a value assignment."""
    kv = KV()
    with kv.lock():
        with kv.lock():
            kv['a'] = 'b'
    self.assertEqual(kv['a'], 'b')
def locker():
    """Hold kv1's lock until the main thread signals via q2."""
    kv1 = KV(db_path)
    with kv1.lock():
        q1.put(None)  # announce: the lock is now held
        q2.get()      # block here until told to release