def test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk(self):
    # set expiry pager interval
    ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
    url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
    bucket = Bucket(url, username="******", password="******")
    for docid in ['customer123', 'customer1234', 'customer12345']:
        bucket.insert(docid, {'some': 'value'})
    body = self.create_save_function_body(self.function_name, self.handler_code, dcp_stream_boundary="from_now")
    # deploy eventing function
    self.deploy_function(body)
    # upsert a new sub-document
    bucket.mutate_in('customer123', SD.upsert('fax', '775-867-5309'))
    # insert a sub-document
    bucket.mutate_in('customer1234', SD.insert('purchases.complete', [42, True, None], create_parents=True))
    # create and populate an array sub-document
    bucket.mutate_in('customer12345', SD.array_append('purchases.complete', ['Hello'], create_parents=True))
    self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
    for docid in ['customer123', 'customer1234', 'customer12345']:
        # set expiry on all the docs created using the sub-doc API
        bucket.touch(docid, ttl=5)
    self.sleep(10, "wait for expiry of the documents")
    # wait for eventing to catch up with all the expiry mutations and verify results
    self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
    self.undeploy_and_delete_function(body)
def post(self, request, group_id):
    c = Bucket('couchbase://localhost/nihongo')
    success = 'dunno'
    constgroup = group_id.rsplit('_', 1)[0]
    print(constgroup)
    print('adding new deck')
    try:
        description = request.POST['description']
        print(description)
        ckey = 'deck_' + str(uuid4()).replace('-', '_')
        newdeck = {'doc_type': 'deck', 'description': description, 'deck_name': description}
        newdeck['cards_list'] = []
        newdeck['doc_channels'] = [group_id]
        c.insert(ckey, newdeck)
        group = c.get(group_id).value
        print(group.get('decks_list'))
        group.get('decks_list').append(ckey)
        c.upsert(group_id, group)
        success = 'success'
    except (BaseException, CouchbaseError) as e:
        success = 'error'
        print(e)
    group = c.get(group_id).value
    group_decks = group.get('decks_list')
    decks_list = []
    for d in group_decks:
        try:
            deck = c.get(d)
            decks_list.append(deck)
        except CouchbaseError:
            pass
    return HttpResponseRedirect(reverse('tutor:group_decks', kwargs={'group_id': group_id}))
class MatchHistoryUpdater(object):
    def __init__(self):
        self.riot = riotwatcher.RiotWatcher(
            default_region=riotwatcher.EUROPE_WEST,
            key=os.environ.get('RIOT_API_KEY'))
        self.bucket = Bucket('couchbase://{}/{}'.format(
            os.environ.get('DB_HOST', 'localhost'),
            os.environ.get('DB_BUCKET_MATCH_HISTORY', 'match_history')))
        self.players = self.get_players()
        logger_datapop.info('Setup complete')
        while True:
            for player in self.players:
                self.update_recent_games(player['id'])
                time.sleep(SLEEP_TIME)
            self.players = self.get_players()

    def get_players(self):
        players = [
            row.doc.value
            for row in self.bucket.query(
                'player', 'all_players', stale=False, include_docs=True)
        ]
        return players

    def update_recent_games(self, player_id):
        api_matches = self.riot.get_recent_games(player_id)['games']
        for match in api_matches:
            match['summonerId'] = player_id
            key = 'Match::{}::{}'.format(player_id, match['gameId'])
            try:
                self.bucket.insert(key, match)
            except cb_exceptions.KeyExistsError:
                break
            try:
                full_match = self.riot.get_match(match_id=match['gameId'], include_timeline=True)
                time.sleep(SLEEP_TIME)
            except Exception:
                continue
            else:
                full_match_key = 'Match::{}'.format(match['gameId'])
                self.bucket.upsert(full_match_key, full_match)
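A minimal sketch of how this updater might be started; note that the polling loop lives in __init__, so instantiating the class is what runs it. The entry-point guard below is an illustration, not part of the original module.

# Hypothetical entry point (assumption): RIOT_API_KEY, DB_HOST and
# DB_BUCKET_MATCH_HISTORY are expected in the environment, SLEEP_TIME at module level.
if __name__ == '__main__':
    MatchHistoryUpdater()  # blocks forever, refreshing each player's match history every SLEEP_TIME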
class TaskService(object):
    def __init__(self):
        self._bucket = Bucket("couchbase://localhost/Tasks", password="******")

    def get_tasks(self):
        rows = self._bucket.query("tasks", "all", stale=False)
        tasks = []
        for row in rows:
            result = self._bucket.get(row.key)
            task = result.value
            task["id"] = row.key
            tasks.append(task)
        return tasks

    def get_task(self, key):
        task = self._bucket.get(key)
        return task.value

    def create_task(self, task):
        key = uuid.uuid1()
        task["type"] = "task"
        task["isComplete"] = False
        self._bucket.insert(str(key), task)

    def update_task(self, key, task):
        saved_task = self.get_task(key)
        saved_task["title"] = task["title"]
        saved_task["description"] = task["description"]
        saved_task["dueDate"] = task["dueDate"]
        saved_task["isComplete"] = task["isComplete"]
        saved_task["parentId"] = task["parentId"]
        self._bucket.upsert(key, saved_task)

    def delete_task(self, key):
        self._bucket.remove(key)

    def complete_task(self, key):
        saved_task = self.get_task(key)
        saved_task["isComplete"] = True
        self._bucket.upsert(key, saved_task)
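A minimal usage sketch for TaskService, assuming a reachable Couchbase node with the "Tasks" bucket and a "tasks/all" view already deployed; the task fields shown mirror the keys update_task reads and are otherwise placeholders.

# Hypothetical usage (assumptions noted above), not part of the original service.
if __name__ == "__main__":
    service = TaskService()
    service.create_task({"title": "Write report",
                         "description": "Quarterly numbers",
                         "dueDate": "2024-01-31",
                         "parentId": None})
    for task in service.get_tasks():
        print(task["id"], task["title"], task["isComplete"])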
def test_indistinguishable_mutation(self):
    body = self.create_save_function_body(
        self.function_name,
        HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION,
        worker_count=3)
    self.deploy_function(body)
    url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
    bucket = Bucket(url, username="******", password="******")
    bucket.insert('customer123', {'some': 'value'})
    self.verify_eventing_results(self.function_name, 2, skip_stats_validation=True)
    self.pause_function(body)
    bucket.replace('customer123', {'some': 'value1'})
    bucket.replace('customer123', {'some': 'value'})
    self.bucket_compaction()
    self.resume_function(body)
    self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
def _insert_document(statement):
    logger.info('insert')
    try:
        # bucket = _authenticate()
        bucket = Bucket(URL)
        result = bucket.insert('document_name', {'some': 'value'})
        logger.info(result)
    except (CouchbaseError, CouchbaseTransientError, CouchbaseNetworkError) as err:
        logger.error(err)
        sys.exit(1)
    return result
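A sketch of what the commented-out _authenticate() helper above might look like, assuming the 2.x Python SDK's Cluster/PasswordAuthenticator API; URL, USERNAME, PASSWORD and BUCKET_NAME are placeholder module-level settings, not names taken from the original code.

# Hypothetical helper (assumptions noted above).
from couchbase.cluster import Cluster, PasswordAuthenticator

def _authenticate():
    cluster = Cluster(URL)  # e.g. 'couchbase://localhost'
    cluster.authenticate(PasswordAuthenticator(USERNAME, PASSWORD))
    return cluster.open_bucket(BUCKET_NAME)  # returns a Bucket, usable like Bucket(URL)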
def initialize_game_rules_bucket(bucket=None):
    if bucket is None:
        bucket = Bucket('couchbase://localhost/game_rules')

    # Create the game document for Super Mario Bros.
    smb = {
        'file_name': '/home/mcsmash/dev/nestopia/smb.nes',
        'system': 'NES',
        'name': 'Super Mario Brothers and Duck Hunt',
    }
    try:
        bucket.insert('game:1', smb)
    except KeyExistsError:
        pass

    sprite_list = []
    for fn in glob('/home/mcsmash/dev/data/game_playing/sprites/*'):
        # splitext() returns a (root, ext) tuple; the id is the extensionless file name
        extensionless = os.path.splitext(os.path.basename(fn))[0]
        sprite_list.append({
            'id': extensionless,
            'path': os.path.abspath(fn)
        })
    try:
        bucket.insert('sprites:1', sprite_list)
    except KeyExistsError:
        pass

    # Reset the counters used to number games and plays
    try:
        bucket.remove('game_number')
    except NotFoundError:
        pass
    try:
        bucket.remove('play_number:1')
    except NotFoundError:
        pass
    bucket.counter('game_number', initial=2)
    bucket.counter('play_number:1', initial=1)
class SDKClient(object): """Python SDK Client Implementation for testrunner - master branch Implementation""" def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase", ssl_path = None, uhm_options = None, password=None, quiet=True, certpath = None, transcoder = None): self.connection_string = \ self._createString(scheme = scheme, bucket = bucket, hosts = hosts, certpath = certpath, uhm_options = uhm_options) self.password = password self.quiet = quiet self.transcoder = transcoder self.default_timeout = 0 self._createConn() def _createString(self, scheme ="couchbase", bucket = None, hosts = ["localhost"], certpath = None, uhm_options = ""): connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ","")) if bucket != None: connection_string = "{0}/{1}".format(connection_string, bucket) if uhm_options != None: connection_string = "{0}?{1}".format(connection_string, uhm_options) if scheme == "couchbases": if "?" in connection_string: connection_string = "{0},certpath={1}".format(connection_string, certpath) else: connection_string = "{0}?certpath={1}".format(connection_string, certpath) return connection_string def _createConn(self): try: self.cb = CouchbaseBucket(self.connection_string, password = self.password, quiet = self.quiet, transcoder = self.transcoder) self.default_timeout = self.cb.timeout except BucketNotFoundError as e: raise def reconnect(self): self.cb.close() self._createConn() def close(self): self.cb._close() def counter_in(self, key, path, delta, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.counter_in(key, path, delta, create_parents= create_parents, cas= cas, ttl= ttl, persist_to= persist_to, replicate_to= replicate_to) except CouchbaseError as e: raise def arrayappend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.arrayappend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def arrayprepend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.arrayprepend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def arrayaddunique_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.addunique_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def arrayinsert_in(self, key, path, value, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.arrayinsert_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def remove_in(self, key, path, cas=0, ttl=0): try: self.cb.remove_in(key, path, cas = cas, ttl = ttl) except CouchbaseError as e: raise def mutate_in(self, key, *specs, **kwargs): try: self.cb.mutate_in(key, *specs, **kwargs) except CouchbaseError as e: raise def lookup_in(self, key, *specs, **kwargs): try: self.cb.lookup_in(key, *specs, **kwargs) except CouchbaseError as e: raise def get_in(self, key, path): try: result = self.cb.get_in(key, path) return self.__translate_get(result) except CouchbaseError as e: raise def exists_in(self, key, path): try: self.cb.exists_in(key, path) except CouchbaseError as e: raise def replace_in(self, key, 
path, value, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.replace_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def insert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.insert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def upsert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0): try: return self.cb.upsert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def append(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def append_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def prepend(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def prepend_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def replace(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.replace( key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.replace( key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def replace_multi(self, keys, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def cas(self, key, value, cas=0, ttl=0, format=None): return self.cb.replace(key, value, cas=cas,format=format) def delete(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0): self.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) def remove(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0): try: return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) return 
self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def delete(self, keys, quiet=True, persist_to=0, replicate_to=0): return self.remove(self, keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) def remove_multi(self, keys, quiet=True, persist_to=0, replicate_to=0): try: self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): return self.upsert(key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) except CouchbaseError as e: raise def set_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): return self.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def insert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: try: time.sleep(10) self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def touch(self, key, ttl = 0): try: self.cb.touch(key, ttl=ttl) except CouchbaseError as e: try: time.sleep(10) self.cb.touch(key, ttl=ttl) except CouchbaseError as e: raise def touch_multi(self, keys, ttl = 0): try: self.cb.touch_multi(keys, ttl=ttl) except CouchbaseError as e: try: time.sleep(10) self.cb.touch_multi(keys, ttl=ttl) except CouchbaseError as e: raise def decr(self, key, delta=1, initial=None, ttl=0): self.counter(key, delta=-delta, initial=initial, ttl=ttl) def decr_multi(self, keys, delta=1, initial=None, ttl=0): self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl) def incr(self, key, delta=1, initial=None, ttl=0): self.counter(key, delta=delta, initial=initial, ttl=ttl) def incr_multi(self, keys, delta=1, initial=None, ttl=0): self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl) def counter(self, key, delta=1, initial=None, ttl=0): try: self.cb.counter(key, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: try: time.sleep(10) self.cb.counter(key, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: raise def counter_multi(self, keys, 
delta=1, initial=None, ttl=0): try: self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: try: time.sleep(10) self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: raise def get(self, key, ttl=0, quiet=True, replica=False, no_format=False): try: rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get(rv) except CouchbaseError as e: try: time.sleep(10) rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get(rv) except CouchbaseError as e: raise def rget(self, key, replica_index=None, quiet=True): try: data = self.rget(key, replica_index=replica_index, quiet=None) return self.__translate_get(data) except CouchbaseError as e: try: time.sleep(10) data = self.rget(key, replica_index=replica_index, quiet=None) return self.__translate_get(data) except CouchbaseError as e: raise def get_multi(self, keys, ttl=0, quiet=True, replica=False, no_format=False): try: data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get_multi(data) except CouchbaseError as e: try: time.sleep(10) data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get_multi(data) except CouchbaseError as e: raise def rget_multi(self, key, replica_index=None, quiet=True): try: data = self.cb.rget_multi(key, replica_index=None, quiet=quiet) return self.__translate_get_multi(data) except CouchbaseError as e: try: time.sleep(10) data = self.cb.rget_multi(key, replica_index=None, quiet=quiet) return self.__translate_get_multi(data) except CouchbaseError as e: raise def stats(self, keys=None): try: stat_map = self.cb.stats(keys = keys) return stat_map except CouchbaseError as e: try: time.sleep(10) return self.cb.stats(keys = keys) except CouchbaseError as e: raise def errors(self, clear_existing=True): try: rv = self.cb.errors(clear_existing = clear_existing) return rv except CouchbaseError as e: raise def observe(self, key, master_only=False): try: return self.cb.observe(key, master_only = master_only) except CouchbaseError as e: try: time.sleep(10) return self.cb.observe(key, master_only = master_only) except CouchbaseError as e: raise def observe_multi(self, keys, master_only=False): try: data = self.cb.observe_multi(keys, master_only = master_only) return self.__translate_observe_multi(data) except CouchbaseError as e: try: time.sleep(10) data = self.cb.observe_multi(keys, master_only = master_only) return self.__translate_observe_multi(data) except CouchbaseError as e: raise def endure(self, key, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010): try: self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: try: time.sleep(10) self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: raise def endure_multi(self, keys, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010): try: self.cb.endure(keys, persist_to=persist_to, replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: try: time.sleep(10) self.cb.endure(keys, persist_to=persist_to, 
replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: raise def lock(self, key, ttl=0): try: data = self.cb.lock(key, ttl = ttl) return self.__translate_get(data) except CouchbaseError as e: try: time.sleep(10) data = self.cb.lock(key, ttl = ttl) return self.__translate_get(data) except CouchbaseError as e: raise def lock_multi(self, keys, ttl=0): try: data = self.cb.lock_multi(keys, ttl = ttl) return self.__translate_get_multi(data) except CouchbaseError as e: try: time.sleep(10) data = self.cb.lock_multi(keys, ttl = ttl) return self.__translate_get_multi(data) except CouchbaseError as e: raise def unlock(self, key, ttl=0): try: return self.cb.unlock(key) except CouchbaseError as e: try: time.sleep(10) return self.cb.unlock(key) except CouchbaseError as e: raise def unlock_multi(self, keys): try: return self.cb.unlock_multi(keys) except CouchbaseError as e: try: time.sleep(10) return self.cb.unlock_multi(keys) except CouchbaseError as e: raise def n1ql_query(self, statement, prepared=False): try: return N1QLQuery(statement, prepared) except CouchbaseError as e: raise def n1ql_request(self, query): try: return N1QLRequest(query, self.cb) except CouchbaseError as e: raise def __translate_get_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = [result.flags, result.cas, result.value] return map def __translate_get(self, data): return data.flags, data.cas, data.value def __translate_delete(self, data): return data def __translate_observe(self, data): return data def __translate_observe_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = result.value return map def __translate_upsert_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = result return map def __translate_upsert_op(self, data): return data.rc, data.success, data.errstr, data.key
class TestStandardCouchDB(unittest.TestCase): def setup_class(self): """ Clear all spans before a test run """ self.recorder = tracer.recorder self.cluster = Cluster('couchbase://%s' % testenv['couchdb_host']) self.bucket = Bucket('couchbase://%s/travel-sample' % testenv['couchdb_host'], username=testenv['couchdb_username'], password=testenv['couchdb_password']) def setup_method(self): self.bucket.upsert('test-key', 1) time.sleep(0.5) self.recorder.clear_spans() def test_vanilla_get(self): res = self.bucket.get("test-key") assert (res) def test_pipeline(self): pass def test_upsert(self): res = None with tracer.start_active_span('test'): res = self.bucket.upsert("test_upsert", 1) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert') def test_upsert_multi(self): res = None kvs = dict() kvs['first_test_upsert_multi'] = 1 kvs['second_test_upsert_multi'] = 1 with tracer.start_active_span('test'): res = self.bucket.upsert_multi(kvs) assert (res) self.assertTrue(res['first_test_upsert_multi'].success) self.assertTrue(res['second_test_upsert_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert_multi') def test_insert_new(self): res = None try: self.bucket.remove('test_insert_new') except NotFoundError: pass with tracer.start_active_span('test'): res = self.bucket.insert("test_insert_new", 1) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'insert') def test_insert_existing(self): res = None try: self.bucket.insert("test_insert", 1) except KeyExistsError: pass try: with tracer.start_active_span('test'): res = self.bucket.insert("test_insert", 1) except KeyExistsError: pass self.assertIsNone(res) spans = 
self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertEqual(cb_span.ec, 1) # Just search for the substring of the exception class found = cb_span.data["couchbase"]["error"].find("_KeyExistsError") self.assertFalse(found == -1, "Error substring not found.") self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'insert') def test_insert_multi(self): res = None kvs = dict() kvs['first_test_upsert_multi'] = 1 kvs['second_test_upsert_multi'] = 1 try: self.bucket.remove('first_test_upsert_multi') self.bucket.remove('second_test_upsert_multi') except NotFoundError: pass with tracer.start_active_span('test'): res = self.bucket.insert_multi(kvs) assert (res) self.assertTrue(res['first_test_upsert_multi'].success) self.assertTrue(res['second_test_upsert_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'insert_multi') def test_replace(self): res = None try: self.bucket.insert("test_replace", 1) except KeyExistsError: pass with tracer.start_active_span('test'): res = self.bucket.replace("test_replace", 2) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'replace') def test_replace_non_existent(self): res = None try: self.bucket.remove("test_replace") except NotFoundError: pass try: with tracer.start_active_span('test'): res = self.bucket.replace("test_replace", 2) except NotFoundError: pass self.assertIsNone(res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertEqual(cb_span.ec, 1) # Just 
search for the substring of the exception class found = cb_span.data["couchbase"]["error"].find("NotFoundError") self.assertFalse(found == -1, "Error substring not found.") self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'replace') def test_replace_multi(self): res = None kvs = dict() kvs['first_test_replace_multi'] = 1 kvs['second_test_replace_multi'] = 1 self.bucket.upsert('first_test_replace_multi', "one") self.bucket.upsert('second_test_replace_multi', "two") with tracer.start_active_span('test'): res = self.bucket.replace_multi(kvs) assert (res) self.assertTrue(res['first_test_replace_multi'].success) self.assertTrue(res['second_test_replace_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'replace_multi') def test_append(self): self.bucket.upsert("test_append", "one") res = None with tracer.start_active_span('test'): res = self.bucket.append("test_append", "two") assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'append') def test_append_multi(self): res = None kvs = dict() kvs['first_test_append_multi'] = "ok1" kvs['second_test_append_multi'] = "ok2" self.bucket.upsert('first_test_append_multi', "one") self.bucket.upsert('second_test_append_multi', "two") with tracer.start_active_span('test'): res = self.bucket.append_multi(kvs) assert (res) self.assertTrue(res['first_test_append_multi'].success) self.assertTrue(res['second_test_append_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'append_multi') def test_prepend(self): self.bucket.upsert("test_prepend", "one") res 
= None with tracer.start_active_span('test'): res = self.bucket.prepend("test_prepend", "two") assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend') def test_prepend_multi(self): res = None kvs = dict() kvs['first_test_prepend_multi'] = "ok1" kvs['second_test_prepend_multi'] = "ok2" self.bucket.upsert('first_test_prepend_multi', "one") self.bucket.upsert('second_test_prepend_multi', "two") with tracer.start_active_span('test'): res = self.bucket.prepend_multi(kvs) assert (res) self.assertTrue(res['first_test_prepend_multi'].success) self.assertTrue(res['second_test_prepend_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend_multi') def test_get(self): res = None with tracer.start_active_span('test'): res = self.bucket.get("test-key") assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'get') def test_rget(self): res = None try: with tracer.start_active_span('test'): res = self.bucket.rget("test-key", replica_index=None) except CouchbaseTransientError: pass self.assertIsNone(res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertEqual(cb_span.ec, 1) # Just search for the substring of the exception class found = cb_span.data["couchbase"]["error"].find( "CouchbaseTransientError") self.assertFalse(found == -1, "Error substring not found.") 
self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'rget') def test_get_not_found(self): res = None try: self.bucket.remove('test_get_not_found') except NotFoundError: pass try: with tracer.start_active_span('test'): res = self.bucket.get("test_get_not_found") except NotFoundError: pass self.assertIsNone(res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertEqual(cb_span.ec, 1) # Just search for the substring of the exception class found = cb_span.data["couchbase"]["error"].find("NotFoundError") self.assertFalse(found == -1, "Error substring not found.") self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'get') def test_get_multi(self): res = None self.bucket.upsert('first_test_get_multi', "one") self.bucket.upsert('second_test_get_multi', "two") with tracer.start_active_span('test'): res = self.bucket.get_multi( ['first_test_get_multi', 'second_test_get_multi']) assert (res) self.assertTrue(res['first_test_get_multi'].success) self.assertTrue(res['second_test_get_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'get_multi') def test_touch(self): res = None self.bucket.upsert("test_touch", 1) with tracer.start_active_span('test'): res = self.bucket.touch("test_touch") assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'touch') def test_touch_multi(self): res = None self.bucket.upsert('first_test_touch_multi', "one") self.bucket.upsert('second_test_touch_multi', "two") with tracer.start_active_span('test'): res = self.bucket.touch_multi( ['first_test_touch_multi', 'second_test_touch_multi']) assert (res) self.assertTrue(res['first_test_touch_multi'].success) 
self.assertTrue(res['second_test_touch_multi'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'touch_multi') def test_lock(self): res = None self.bucket.upsert("test_lock_unlock", "lock_this") with tracer.start_active_span('test'): rv = self.bucket.lock("test_lock_unlock", ttl=5) assert (rv) self.assertTrue(rv.success) # upsert automatically unlocks the key res = self.bucket.upsert("test_lock_unlock", "updated", rv.cas) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(3, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "lock" cb_lock_span = get_first_span_by_filter(spans, filter) assert (cb_lock_span) filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "upsert" cb_upsert_span = get_first_span_by_filter(spans, filter) assert (cb_upsert_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_lock_span.t) self.assertEqual(test_span.t, cb_upsert_span.t) self.assertEqual(cb_lock_span.p, test_span.s) self.assertEqual(cb_upsert_span.p, test_span.s) assert (cb_lock_span.stack) self.assertIsNone(cb_lock_span.ec) assert (cb_upsert_span.stack) self.assertIsNone(cb_upsert_span.ec) self.assertEqual(cb_lock_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_lock_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock') self.assertEqual(cb_upsert_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_upsert_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_upsert_span.data["couchbase"]["type"], 'upsert') def test_lock_unlock(self): res = None self.bucket.upsert("test_lock_unlock", "lock_this") with tracer.start_active_span('test'): rv = self.bucket.lock("test_lock_unlock", ttl=5) assert (rv) self.assertTrue(rv.success) # upsert automatically unlocks the key res = self.bucket.unlock("test_lock_unlock", rv.cas) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(3, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "lock" cb_lock_span = get_first_span_by_filter(spans, filter) assert (cb_lock_span) filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "unlock" cb_unlock_span = get_first_span_by_filter(spans, filter) assert (cb_unlock_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_lock_span.t) self.assertEqual(test_span.t, cb_unlock_span.t) self.assertEqual(cb_lock_span.p, test_span.s) 
self.assertEqual(cb_unlock_span.p, test_span.s) assert (cb_lock_span.stack) self.assertIsNone(cb_lock_span.ec) assert (cb_unlock_span.stack) self.assertIsNone(cb_unlock_span.ec) self.assertEqual(cb_lock_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_lock_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock') self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_unlock_span.data["couchbase"]["type"], 'unlock') def test_lock_unlock_muilti(self): res = None self.bucket.upsert("test_lock_unlock_multi_1", "lock_this") self.bucket.upsert("test_lock_unlock_multi_2", "lock_this") keys_to_lock = ("test_lock_unlock_multi_1", "test_lock_unlock_multi_2") with tracer.start_active_span('test'): rv = self.bucket.lock_multi(keys_to_lock, ttl=5) assert (rv) self.assertTrue(rv['test_lock_unlock_multi_1'].success) self.assertTrue(rv['test_lock_unlock_multi_2'].success) res = self.bucket.unlock_multi(rv) assert (res) spans = self.recorder.queued_spans() self.assertEqual(3, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "lock_multi" cb_lock_span = get_first_span_by_filter(spans, filter) assert (cb_lock_span) filter = lambda span: span.n == "couchbase" and span.data["couchbase"][ "type"] == "unlock_multi" cb_unlock_span = get_first_span_by_filter(spans, filter) assert (cb_unlock_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_lock_span.t) self.assertEqual(test_span.t, cb_unlock_span.t) self.assertEqual(cb_lock_span.p, test_span.s) self.assertEqual(cb_unlock_span.p, test_span.s) assert (cb_lock_span.stack) self.assertIsNone(cb_lock_span.ec) assert (cb_unlock_span.stack) self.assertIsNone(cb_unlock_span.ec) self.assertEqual(cb_lock_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_lock_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock_multi') self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_unlock_span.data["couchbase"]["type"], 'unlock_multi') def test_remove(self): res = None self.bucket.upsert("test_remove", 1) with tracer.start_active_span('test'): res = self.bucket.remove("test_remove") assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'remove') def test_remove_multi(self): res = None self.bucket.upsert("test_remove_multi_1", 1) self.bucket.upsert("test_remove_multi_2", 1) 
keys_to_remove = ("test_remove_multi_1", "test_remove_multi_2") with tracer.start_active_span('test'): res = self.bucket.remove_multi(keys_to_remove) assert (res) self.assertTrue(res['test_remove_multi_1'].success) self.assertTrue(res['test_remove_multi_2'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'remove_multi') def test_counter(self): res = None self.bucket.upsert("test_counter", 1) with tracer.start_active_span('test'): res = self.bucket.counter("test_counter", delta=10) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'counter') def test_counter_multi(self): res = None self.bucket.upsert("first_test_counter", 1) self.bucket.upsert("second_test_counter", 1) with tracer.start_active_span('test'): res = self.bucket.counter_multi( ("first_test_counter", "second_test_counter")) assert (res) self.assertTrue(res['first_test_counter'].success) self.assertTrue(res['second_test_counter'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'counter_multi') def test_mutate_in(self): res = None self.bucket.upsert( 'king_arthur', { 'name': 'Arthur', 'email': '*****@*****.**', 'interests': ['Holy Grail', 'African Swallows'] }) with tracer.start_active_span('test'): res = self.bucket.mutate_in( 'king_arthur', SD.array_addunique('interests', 'Cats'), SD.counter('updates', 1)) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) 
self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'mutate_in') def test_lookup_in(self): res = None self.bucket.upsert( 'king_arthur', { 'name': 'Arthur', 'email': '*****@*****.**', 'interests': ['Holy Grail', 'African Swallows'] }) with tracer.start_active_span('test'): res = self.bucket.lookup_in('king_arthur', SD.get('email'), SD.get('interests')) assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'lookup_in') def test_stats(self): res = None with tracer.start_active_span('test'): res = self.bucket.stats() assert (res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'stats') def test_ping(self): res = None with tracer.start_active_span('test'): res = self.bucket.ping() assert (res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'ping') def test_diagnostics(self): res = None with tracer.start_active_span('test'): res = self.bucket.diagnostics() assert (res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) 
self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'diagnostics') def test_observe(self): res = None self.bucket.upsert('test_observe', 1) with tracer.start_active_span('test'): res = self.bucket.observe('test_observe') assert (res) self.assertTrue(res.success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'observe') def test_observe_multi(self): res = None self.bucket.upsert('test_observe_multi_1', 1) self.bucket.upsert('test_observe_multi_2', 1) keys_to_observe = ('test_observe_multi_1', 'test_observe_multi_2') with tracer.start_active_span('test'): res = self.bucket.observe_multi(keys_to_observe) assert (res) self.assertTrue(res['test_observe_multi_1'].success) self.assertTrue(res['test_observe_multi_2'].success) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'observe_multi') def test_raw_n1ql_query(self): res = None with tracer.start_active_span('test'): res = self.bucket.n1ql_query("SELECT 1") assert (res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query') self.assertEqual(cb_span.data["couchbase"]["sql"], 'SELECT 1') def test_n1ql_query(self): res = None with tracer.start_active_span('test'): res = self.bucket.n1ql_query( N1QLQuery( 'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"' )) assert (res) spans = self.recorder.queued_spans() self.assertEqual(2, len(spans)) test_span = get_first_span_by_name(spans, 'sdk') assert (test_span) self.assertEqual(test_span.data["sdk"]["name"], 'test') cb_span = get_first_span_by_name(spans, 'couchbase') assert (cb_span) # Same traceId and parent relationship self.assertEqual(test_span.t, cb_span.t) self.assertEqual(cb_span.p, test_span.s) assert (cb_span.stack) 
self.assertIsNone(cb_span.ec) self.assertEqual(cb_span.data["couchbase"]["hostname"], "%s:8091" % testenv['couchdb_host']) self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample') self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query') self.assertEqual( cb_span.data["couchbase"]["sql"], 'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"' )
class DB(object): def __init__(self, bucket): self.bucket = bucket self.db = Bucket(bucket, lockmode=LOCKMODE_WAIT) def doc_exists(self, docId): try: result = self.db.get(docId) except CouchbaseError as e: return False return result def insert_build_history(self, build, update=False): try: docId = build['version']+"-"+str(build['build_num']) if update: result = self.db.upsert(docId, build) else: result = self.db.insert(docId, build) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.warning("Couldn't create build history {0} due to error: {1}".format(docId, e)) docId = None return docId def insert_distro_history(self, distro, update=False): try: docId = distro['version']+"-"+str(distro['build_num'])+"-"+distro['distro']+"-"+distro['edition'] if update: result = self.db.upsert(docId, distro) else: result = self.db.insert(docId, distro) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.warning("Couldn't create distro history {0} due to error: {1}".format(docId, e)) docId = None return docId def insert_test_history(self, unit, test_type='unit', update=False): try: if test_type == 'unit': docId = unit['version']+"-"+str(unit['build_num'])+"-"+unit['distro']+"-"+unit['edition']+'-tests' elif test_type == 'build_sanity': docId = unit['version']+"-"+str(unit['build_num'])+"-"+unit['distro']+"-"+unit['edition']+'-sanity-tests' if update: result = self.db.upsert(docId, unit) else: result = self.db.insert(docId, unit) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.warning("Couldn't create test history {0} due to error: {1}".format(docId, e)) docId = None return docId def insert_commit(self, commit): docId = commit['repo']+"-"+str(commit['sha']) inb = commit['in_build'][0] try: result = self.db.get(docId) val = result.value if not inb in val['in_build']: val['in_build'].append(inb) result = self.db.upsert(docId, val) except CouchbaseError as e: if e.rc == 13: try: result = self.db.insert(docId, commit) logger.debug("{0}".format(result)) except CouchbaseError as e: print e.rc if e.rc == 12: logger.error("Couldn't create commit history {0} due to error: {1}".format(docId, e)) docId = None return docId def update_distro_result(self, docId, distroId, result): try: ret = self.db.get(docId).value if not distroId in ret[result]: ret[result].append(distroId) if result != 'incomplete': if distroId in ret['incomplete']: ret['incomplete'].remove(distroId) self.db.upsert(docId, ret) logger.debug("{0}".format(result)) except CouchbaseError as e: logger.warning("Couldn't update distro result on {0} due to error: {1}".format(docId, e)) docId = None return def get_incomplete_builds(self): q = N1QLQuery("select url from `build-history` where result is NULL") urls = [] for row in self.db.n1ql_query(q): urls.append(row['url']) return urls def get_incomplete_sanity_runs(self): q = N1QLQuery("select sanity_url from `build-history` where type = 'top_level_build' and sanity_result = 'INCOMPLETE'") urls = [] for row in self.db.n1ql_query(q): urls.append(row['sanity_url']) return urls def get_incomplete_unit_runs(self): q = N1QLQuery("select unit_urls from `build-history` where type = 'top_level_build' and unit_result = 'INCOMPLETE'") urls = [] for row in self.db.n1ql_query(q): ulist = row['unit_urls'] for u in ulist: if u['result'] == 'INCOMPLETE': urls.append(u['url']) return urls
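A hedged usage sketch for the DB helper above; the host in the connection string is a placeholder, while the bucket name is taken from the N1QL queries inside the class.

# Minimal sketch, assuming a reachable `build-history` bucket and a build dict
# with the fields the docId is built from.
db = DB('couchbase://localhost/build-history')

build = {'version': '7.2.0', 'build_num': 1234}
doc_id = db.insert_build_history(build)            # stored under "7.2.0-1234"
if doc_id and db.doc_exists(doc_id):
    db.insert_build_history(build, update=True)    # upsert the same document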
records[record[0]] = record[1]

teamspeak = TeamSpeak(records["host"], records["port"])
parser = QueryParser()
teamspeak.authenticate(records["username"], records["password"])
teamspeak.select_server(records["serverid"])

channels = parser.parse(teamspeak.query(b"channellist"))
clients = parser.parse(teamspeak.query(b"clientlist"))
output_data = {"channels": channels, "clients": clients}

bucket = Bucket("couchbase://localhost/default")
# print(json.dumps(output_data))
bucket.insert(str(int(time.time())), output_data)

# reply_data = parser.parse(teamspeak.query(b"clientlist"))
# reply_data = parser.parse(teamspeak.query(b"channellist"))
# pprint.pprint(reply_data)
# raw_users = reply_data.split(b"|")
# visited_channels = set()
# for data in raw_users:
#     userdata = data.split(b" ")
#     for client_data in userdata:
#         if b"cid=" in client_data:
#             channel_id = client_data.split(b"=")[1]
# If the voter joined and left the party more than once, the record is not
# considered (usually one voluntary disaffiliation and one by court order).
chave = "-".join((
    linha['NUMERO DA INSCRICAO'],
    linha['DATA DA FILIACAO'],
    linha['SITUACAO DO REGISTRO'],
))
dados = {
    'type': 'filiado',
    'titulo_eleitor': linha['NUMERO DA INSCRICAO'],
    'nome': linha['NOME DO FILIADO'],
    'sigla_partido': linha['SIGLA DO PARTIDO'],
    'situacao_registro': linha['SITUACAO DO REGISTRO'],
    'tipo_registro': linha['TIPO DO REGISTRO'],
    'zona_eleitoral': linha['ZONA ELEITORAL'],
    'secao_eleitoral': linha['SECAO ELEITORAL'],
    'codigo_municipio': linha['CODIGO DO MUNICIPIO'],
    'uf': linha['UF'],
    'data_filiacao': linha['DATA DA FILIACAO'],
}
if linha['DATA DO CANCELAMENTO']:
    dados['data_cancelamento'] = linha['DATA DO CANCELAMENTO']
if linha['DATA DA DESFILIACAO']:
    dados['data_desfiliacao'] = linha['DATA DA DESFILIACAO']
try:
    bucket.insert(chave, dados)
except KeyExistsError:
    # Duplicate key: skip this record.
    continue
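The fragment above is the body of a loop over rows of an affiliation CSV; a minimal sketch of the surrounding setup, assuming a csv.DictReader with the file name, delimiter, encoding, and bucket name all placeholders:

import csv

from couchbase.bucket import Bucket
from couchbase.exceptions import KeyExistsError

# Assumed connection string and input file; the original only shows the loop body.
bucket = Bucket('couchbase://localhost/filiados')

with open('filiados.csv', encoding='latin-1') as fp:
    for linha in csv.DictReader(fp, delimiter=';'):
        ...  # loop body shown above: build `chave` and `dados`, then bucket.insert(chave, dados)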
class CouchbaseMemcacheMirror(object): def __init__(self, couchbase_uri, memcached_hosts, primary=PRIMARY_COUCHBASE): """ :param couchbase_uri: Connection string for Couchbase :param memcached_hosts: List of Memcached nodes :param primary: Determines which datastore is authoritative. This affects how get operations are performed and which datastore is used for CAS operations. PRIMARY_COUCHBASE: Couchbase is authoritative PRIMARY_MEMCACHED: Memcached is authoritative By default, Couchbase is the primary store :return: """ self.cb = CbBucket(couchbase_uri) self.mc = McClient(memcached_hosts) self._primary = primary @property def primary(self): return self._primary def _cb_get(self, key): try: return self.cb.get(key).value except NotFoundError: return None def get(self, key, try_alternate=True): """ Gets a document :param key: The key to retrieve :param try_alternate: Whether to try the secondary data source if the item is not found in the primary. :return: The value as a Python object """ if self._primary == PRIMARY_COUCHBASE: order = [self._cb_get, self.mc.get] else: order = [self.mc.get, self._cb_get] for meth in order: ret = meth(key) if ret or not try_alternate: return ret return None def _cb_mget(self, keys): """ Internal method to execute a Couchbase multi-get :param keys: The keys to retrieve :return: A tuple of {found_key:found_value, ...}, [missing_key1,...] """ try: ok_rvs = self.cb.get_multi(keys) bad_rvs = {} except NotFoundError as e: ok_rvs, bad_rvs = e.split_results() ok_dict = {k: (v.value, v.cas) for k, v in ok_rvs} return ok_dict, bad_rvs.keys() def get_multi(self, keys, try_alternate=True): """ Gets multiple items from the server :param keys: The keys to fetch as an iterable :param try_alternate: Whether to fetch missing items from alternate store :return: A dictionary of key:value. Only contains keys which exist and have values """ if self._primary == PRIMARY_COUCHBASE: ok, err = self._cb_get(keys) if err and try_alternate: ok.update(self.mc.get_many(err)) return ok else: ok = self.mc.get_many(keys) if len(ok) < len(keys) and try_alternate: keys_err = set(keys) - set(ok) ok.update(self._cb_mget(list(keys_err))[0]) return ok def gets(self, key): """ Get an item with its CAS. The item will always be fetched from the primary data store. 
:param key: the key to get :return: the value of the key, or None if no such value """ if self._primary == PRIMARY_COUCHBASE: try: rv = self.cb.get(key) return key, rv.cas except NotFoundError: return None, None else: return self.mc.gets(key) def gets_multi(self, keys): if self._primary == PRIMARY_COUCHBASE: try: rvs = self.cb.get_multi(keys) except NotFoundError as e: rvs, _ = e.split_results() return {k: (v.value, v.cas) for k, v in rvs} else: # TODO: I'm not sure if this is implemented in HasClient :( return self.mc.gets_many(keys) def delete(self, key): st = Status() try: self.cb.remove(key) except NotFoundError as e: st.cb_error = e st.mc_status = self.mc.delete(key) return st def delete_multi(self, keys): st = Status() try: self.cb.remove_multi(keys) except NotFoundError as e: st.cb_error = e st.mc_status = self.mc.delete_many(keys) def _do_incrdecr(self, key, value, is_incr): cb_value = value if is_incr else -value mc_meth = self.mc.incr if is_incr else self.mc.decr st = Status() try: self.cb.counter(key, delta=cb_value) except NotFoundError as e: st.cb_error = e st.mc_status = mc_meth(key, value) def incr(self, key, value): return self._do_incrdecr(key, value, True) def decr(self, key, value): return self._do_incrdecr(key, value, False) def touch(self, key, expire=0): st = Status() try: self.cb.touch(key, ttl=expire) except NotFoundError as e: st.cb_error = st st.mc_status = self.mc.touch(key) def set(self, key, value, expire=0): """ Write first to Couchbase, and then to Memcached :param key: Key to use :param value: Value to use :param expire: If set, the item will expire in the given amount of time :return: Status object if successful (will always be success). on failure an exception is raised """ self.cb.upsert(key, value, ttl=expire) self.mc.set(key, value, expire=expire) return Status() def set_multi(self, values, expire=0): """ Set multiple items. :param values: A dictionary of key, value indicating values to store :param expire: If present, expiration time for all the items :return: """ self.cb.upsert_multi(values, ttl=expire) self.mc.set_many(values, expire=expire) return Status() def replace(self, key, value, expire=0): """ Replace existing items :param key: key to replace :param value: new value :param expire: expiration for item :return: Status object. Will be OK """ status = Status() try: self.cb.replace(key, value, ttl=expire) except NotFoundError as e: status.cb_error = e status.mc_status = self.mc.replace(key, value, expire=expire) return status def add(self, key, value, expire=0): status = Status() try: self.cb.insert(key, value, ttl=expire) except KeyExistsError as e: status.cb_error = e status.mc_status = self.mc.add(key, value, expire=expire) return status def _append_prepend(self, key, value, is_append): cb_meth = self.cb.append if is_append else self.cb.prepend mc_meth = self.mc.append if is_append else self.mc.prepend st = Status() try: cb_meth(key, value, format=FMT_UTF8) except (NotStoredError, NotFoundError) as e: st.cb_error = e st.mc_status = mc_meth(key, value) def append(self, key, value): return self._append_prepend(key, value, True) def prepend(self, key, value): return self._append_prepend(key, value, False) def cas(self, key, value, cas, expire=0): if self._primary == PRIMARY_COUCHBASE: try: self.cb.replace(key, value, cas=cas, ttl=expire) self.mc.set(key, value, ttl=expire) return True except KeyExistsError: return False except NotFoundError: return None else: return self.mc.cas(key, value, cas)
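A short usage sketch for the mirror class above; host names and the memcached node list are placeholders, and PRIMARY_COUCHBASE is the module-level default already used by the constructor.

# Hypothetical hosts; the class needs a Couchbase connection string,
# a list of memcached nodes, and which store is authoritative.
mirror = CouchbaseMemcacheMirror(
    'couchbase://cb-host/default',
    [('mc-host', 11211)],
    primary=PRIMARY_COUCHBASE)

mirror.set('greeting', {'msg': 'hello'}, expire=60)  # writes to both stores
print(mirror.get('greeting'))                        # reads from the primary first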
from couchbase.bucket import Bucket
import couchbase.exceptions as E

cb = Bucket('couchbase://10.0.0.31/default')

# This always works!
print('Upserting')
cb.upsert('docid', {'property': 'value'})
print('Getting item back. Value is:', cb.get('docid').value)
print('...')

print('Will try to insert the document. Should fail because the item already exists..')
try:
    cb.insert('docid', {'property': 'value'})
except E.KeyExistsError:
    print('Insert failed because item already exists!')
print('...')

print('Replacing the document. This should work because the item already exists')
cb.replace('docid', {'property': 'new_value'})
print('Getting document again. Should contain the new contents:', cb.get('docid').value)
print('...')

print('Removing document.')
# Remove the item, then try to replace it!
cb.remove('docid')

print('Replacing document again. Should fail because document no longer exists')
try:
    # Expected to fail with NotFoundError, since the document was just removed.
    cb.replace('docid', {'property': 'value'})
except E.NotFoundError:
    print('Replace failed because the document no longer exists!')
def put(self, object, id):
    bucket = Bucket(self._bucketUrl)
    # `id` is a uuid.UUID; id.urn is "urn:uuid:<hex>", so [9:] keeps only the hex form.
    bucket.insert(id.urn[9:], json.loads(object))
def putJson(self, json, id):
    bucket = Bucket(self._bucketUrl)
    bucket.insert(str(id), json)
    'interests': ['Braids', 'Hunting']
})
print('Done...')

# get non-existent key
print('Getting non-existent key. Should fail..')
try:
    cb.get('get-non-existent')
except NotFoundError:
    print('Got exception for missing doc')

print('Inserting...')
cb.insert(
    'u:queen_liz', {
        'name': 'Liz',
        'email': '*****@*****.**',
        'type': 'Royales',
        'interests': ['Holy Grail', 'Kingdoms']
    })
print('Done...')

# get an existing key
print('Getting an existent key. Should pass...')
try:
    print("Value for key 'queen_liz'\n")
    val = cb.get('u:queen_liz').value
    print("Value for key 'queen_liz'\n%s" % (val))
except NotFoundError as e:
    print('Got exception for missing doc with error %s' % (e))

# create primary index
class Db(object): def __init__(self, couchbase_sup=False, mongo_sup=False): self.cfg = election[os.getenv("TARGET_PLATFORM")] self.vt = None self.mysql = None self.connect_mysql() if couchbase_sup: cb_config = self.cfg.COUCHBASE_PARAM self.cb = Bucket("couchbase://{0}/{1}".format(cb_config[0], cb_config[1]), username=cb_config[2], password=cb_config[3]) if mongo_sup: mongo_cfg = self.cfg.MONGO_PARAM self.mongodb_client = MongoClient(host=mongo_cfg[0], port=int(mongo_cfg[1]), username=mongo_cfg[2], password=mongo_cfg[3], authSource=mongo_cfg[4], authMechanism=mongo_cfg[5]) def connect_mysql(self): mysql_config = self.cfg.MYSQL_PARAM self.mysql = mdb.connect(host=mysql_config[0], user=mysql_config[1], passwd=mysql_config[2], db=mysql_config[3], port=int(mysql_config[4])) self.mysql.autocommit(False) self.vt = self.mysql.cursor() def write_mysql(self, query): try: self.vt.execute(query) return True except mdb.OperationalError: self.connect_mysql() self.vt.execute(query) return True def count_mysql(self, query): try: self.vt.execute(query) return self.vt.rowcount except mdb.OperationalError: self.connect_mysql() self.vt.execute(query) return self.vt.rowcount def readt_mysql(self, query): try: self.vt.execute(query) self.mysql_commit() return self.vt.fetchall() except mdb.OperationalError: self.connect_mysql() self.vt.execute(query) self.mysql_commit() return self.vt.fetchall() def mysql_commit(self): self.mysql.commit() def mysql_rollback(self): self.mysql.rollback() def write_couchbase(self, arg): key = calculate_hash(arg.keys()[0]) values = [] for i in arg.values(): if isinstance(i, str): values.append(calculate_hash(i)) continue if isinstance(i, list): for e in i: values.append(calculate_hash(e)) continue values.append(i) try: self.cb.insert(key, values) except couchbase.exceptions.KeyExistsError: self.cb.replace(key, values) return True def readt_couchbase(self, key): try: return True, self.cb.get(calculate_hash(key)).value except couchbase.exceptions.NotFoundError: return False, 0 def delete_key_couchbase(self, key): try: self.cb.delete(calculate_hash(key), quiet=True) except couchbase.exceptions.NotFoundError: pass finally: return True
class SDKClient(object): """Python SDK Client Implementation for testrunner - master branch Implementation""" def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase", ssl_path = None, uhm_options = None, password=None, quiet=False, certpath = None, transcoder = None): self.connection_string = \ self._createString(scheme = scheme, bucket = bucket, hosts = hosts, certpath = certpath, uhm_options = uhm_options) self.password = password self.quiet = quiet self.transcoder = transcoder self._createConn() def _createString(self, scheme ="couchbase", bucket = None, hosts = ["localhost"], certpath = None, uhm_options = ""): connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ","")) if bucket != None: connection_string = "{0}/{1}".format(connection_string, bucket) if uhm_options != None: connection_string = "{0}?{1}".format(connection_string, uhm_options) if scheme == "couchbases": if "?" in connection_string: connection_string = "{0},certpath={1}".format(connection_string, certpath) else: connection_string = "{0}?certpath={1}".format(connection_string, certpath) return connection_string def _createConn(self): try: self.cb = CouchbaseBucket(self.connection_string, password = self.password, quiet = self.quiet, transcoder = self.transcoder) except BucketNotFoundError as e: raise def reconnect(self): self.cb.close() self._createConn() def close(self): self.cb._close() def append(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def append_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def prepend(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def prepend_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0): try: self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def replace(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.replace( key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def replace_multi(self, keys, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def cas(self, key, value, cas=0, ttl=0, format=None): return self.cb.replace(key, value, cas=cas,format=format) def delete(self,key, cas=0, quiet=None, persist_to=0, replicate_to=0): self.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) def remove(self,key, cas=0, quiet=None, persist_to=0, replicate_to=0): try: return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def delete(self, keys, quiet=None, persist_to=0, replicate_to=0): return self.remove(self, keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to) def remove_multi(self, keys, quiet=None, persist_to=0, replicate_to=0): try: self.cb.remove_multi(keys, quiet=quiet, 
persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): return self.upsert(key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) except CouchbaseError as e: raise def set_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): return self.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def insert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0): try: self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to) except CouchbaseError as e: raise def touch(self, key, ttl = 0): try: self.cb.touch(key, ttl=ttl) except CouchbaseError as e: raise def touch_multi(self, keys, ttl = 0): try: self.cb.touch_multi(keys, ttl=ttl) except CouchbaseError as e: raise def decr(self, key, delta=1, initial=None, ttl=0): self.counter(key, delta=-delta, initial=initial, ttl=ttl) def decr_multi(self, keys, delta=1, initial=None, ttl=0): self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl) def incr(self, key, delta=1, initial=None, ttl=0): self.counter(key, delta=delta, initial=initial, ttl=ttl) def incr_multi(self, keys, delta=1, initial=None, ttl=0): self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl) def counter(self, key, delta=1, initial=None, ttl=0): try: self.cb.counter(key, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: raise def counter_multi(self, keys, delta=1, initial=None, ttl=0): try: self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl) except CouchbaseError as e: raise def get(self, key, ttl=0, quiet=None, replica=False, no_format=False): try: rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get(rv) except CouchbaseError as e: raise def rget(self, key, replica_index=None, quiet=None): try: data = self.rget(key, replica_index=replica_index, quiet=None) return self.__translate_get(data) except CouchbaseError as e: raise def get_multi(self, keys, ttl=0, quiet=None, replica=False, no_format=False): try: data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format) return self.__translate_get_multi(data) except CouchbaseError as e: raise def rget_multi(self, key, replica_index=None, quiet=None): try: data = self.cb.rget_multi(key, replica_index=None, quiet=None) return self.__translate_get_multi(data) except CouchbaseError as e: raise def stats(self, keys=None): try: stat_map = self.cb.stats(keys = keys) return stat_map except CouchbaseError as e: raise def errors(self, clear_existing=True): try: rv = self.cb.errors(clear_existing = clear_existing) return rv except CouchbaseError as e: raise def observe(self, key, master_only=False): try: return self.cb.observe(key, master_only = master_only) except 
CouchbaseError as e: raise def observe_multi(self, keys, master_only=False): try: data = self.cb.observe_multi(keys, master_only = master_only) return self.__translate_observe_multi(data) except CouchbaseError as e: raise def endure(self, key, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010): try: self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: raise def endure_multi(self, keys, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010): try: self.cb.endure(keys, persist_to=persist_to, replicate_to=replicate_to, cas=cas, check_removed=check_removed, timeout=timeout, interval=interval) except CouchbaseError as e: raise def lock(self, key, ttl=0): try: data = self.cb.lock(key, ttl = ttl) return self.__translate_get(data) except CouchbaseError as e: raise def lock_multi(self, keys, ttl=0): try: data = self.cb.lock_multi(keys, ttl = ttl) return self.__translate_get_multi(data) except CouchbaseError as e: raise def unlock(self, key, ttl=0): try: return self.cb.unlock(key) except CouchbaseError as e: raise def unlock_multi(self, keys): try: return self.cb.unlock_multi(keys) except CouchbaseError as e: raise def __translate_get_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = [result.flags, result.cas, result.value] return map def __translate_get(self, data): return data.flags, data.cas, data.value def __translate_delete(self, data): return data def __translate_observe(self, data): return data def __translate_observe_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = result.value return map def __translate_upsert_multi(self, data): map = {} if data == None: return map for key, result in data.items(): map[key] = result return map def __translate_upsert_op(self, data): return data.rc, data.success, data.errstr, data.key
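A brief sketch of how the SDKClient wrapper above is constructed and used; host, bucket, and password are placeholders.

# SDKClient builds the connection string from these arguments and exposes
# thin wrappers over the underlying SDK calls.
client = SDKClient(bucket='default', hosts=['127.0.0.1'], password='password')

client.upsert('k1', {'v': 1})
flags, cas, value = client.get('k1')   # get() returns a (flags, cas, value) tuple
client.touch('k1', ttl=60)
client.remove('k1')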
    'interests': ['crawling', 'Hunting Unicorns']
})
print('Done...')

# get non-existent key
print('Getting non-existent key. Should fail..')
try:
    cb.get('get-non-existent')
except NotFoundError:
    print('Got exception for missing doc')

print('Inserting...')
cb.insert(
    'u:babyliz_liz', {
        'name': 'Baby Liz',
        'email': '*****@*****.**',
        'type': 'Royales',
        'interests': ['Holy Grail', 'Kingdoms and Dungeons']
    })
print('Done...')

# get an existing key
print('Getting an existent key. Should pass...')
try:
    print("Value for key 'babyliz_liz'\n")
    val = cb.get('u:babyliz_liz').value
    print("Value for key 'babyliz_liz'\n%s" % (val))
except NotFoundError as e:
    print('Got exception for missing doc with error %s' % (e))

# create primary index
class buildDB(object): def __init__(self, bucket): self.bucket = bucket self.db = Bucket(bucket, lockmode=LOCKMODE_WAIT) def insert_job_history(self, job): # # param: job # type: dict # try: docId = job['branch'] + "-" + str( job['buildNum']) + "-" + job['platform'] + "-" + job['edition'] result = self.db.insert(docId, job) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.warning( "Couldn't create job history {0} due to error: {1}".format( docId, e)) return docId def update_job_history(self, job): # # param: job # type: dict # try: docId = job['branch'] + "-" + str( job['buildNum']) + "-" + job['platform'] + "-" + job['edition'] result = self.db.replace(docId, job) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 13: logger.error( "Couldn't update job history. {0} does not exist {1}". format(docId, e)) def insert_build_history(self, bldHistory): # # param: bldHistory # type: dict # # Job history should be inserted prior to this # try: docId = bldHistory['branch'] + "-" + str(bldHistory['buildNum']) result = self.db.insert(docId, bldHistory) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.warning( "Couldn't create build history {0} due to error: {1}". format(docId, e)) return docId def update_build_history(self, bldHistory): try: docId = bldHistory['branch'] + "-" + str(bldHistory['buildNum']) result = self.db.replace(docId, bldHistory) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 13: logger.error( "Couldn't update build history {0} does not exist {1}". format(docId, e)) def insert_commit(self, commit): try: docId = commit['repo'] + "-" + str(commit['commitId']) result = self.db.insert(docId, commit) logger.debug("{0}".format(result)) except CouchbaseError as e: if e.rc == 12: logger.error( "Couldn't create commit history {0} due to error: {1}". format(docId, e)) return docId def query_commit(self, commitId): readResult = self.db.get(commitId) return readResult.value def find_prev_build(self, dashboard_name, criteria="undefined"): logger.debug("Not implemented") bldNum = 0 return bldNum def retrieve_incomplete_builds(self, dashboard_name): # Get previously incomplete builds logger.debug("{0}...not implemented".format(dashboard_name)) def query_buildHistory(self, bldHistory): docId = bldHistory['branch'] + "-" + str(bldHistory['buildNum']) readResult = self.db.get(docId) return readResult.value def __repr__(self): return ("buildDB(history, num_jobs)".format(self))
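A hedged usage sketch for buildDB above; the connection string is a placeholder and the job dict shows only the fields the docId is composed from.

db = buildDB('couchbase://localhost/server')

job = {'branch': 'master', 'buildNum': 101, 'platform': 'centos7', 'edition': 'enterprise'}
doc_id = db.insert_job_history(job)      # docId becomes "master-101-centos7-enterprise"
db.update_job_history(job)               # replace() the same document later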
class EQL(Db): def __init__(self, logger, router_mod_statistic=False, router_mod=False, watcher=False, clustered=False, with_static=False): self.logger = logger self.config = ConfigParser.ConfigParser() if router_mod: if watcher or clustered or with_static: raise RuntimeError( "router_mod açıkken diğer modlar kullanılamaz.") self.config.read("/EQL/source/cdn.cfg") self.edge_locations = self.config.get("env", "edge_locations").split(",") self.default_edge = self.config.get("env", "default_edge") continent_db = self.config.get("env", "continent_db") lb_db = self.config.get("env", "lb_db") self.cc_db = Db(continent_db) if os.path.exists(lb_db): os.remove(lb_db) Db.__init__(self, lb_db) self.write( "CREATE TABLE edge_status(SERVER VARCHAR(200) PRIMARY KEY,STATUS VARCHAR(50), REGION VARCHAR(5))" ) check_interval = int(self.config.get("env", "edge_check_interval")) p = Process(target=self._health_check_edge_server, name="EQL_Watcher", kwargs={"check_interval": check_interval}) p.start() self.router_mod = True if router_mod_statistic: self.request_statistic = Bucket("couchbase://{0}/{1}".format( self.config.get("env", "cbhost"), self.config.get("env", "statistic_bucket")), lockmode=2) self.with_statistic = True if not router_mod: if router_mod_statistic: raise RuntimeError( "Bu özellik sadece router_mod ile birlikte kullanılabilir." ) self.router_mod = False self.config.read("/EQL/source/config.cfg") self.cache_bucket = Bucket("couchbase://{0}/{1}".format( self.config.get("env", "cbhost"), self.config.get("env", "cache_bucket")), lockmode=2) self.statistic_bucket = Bucket("couchbase://{0}/{1}".format( self.config.get("env", "cbhost"), self.config.get("env", "statistic_bucket")), lockmode=2) self.server = self.config.get("env", "server") self.clustered = clustered self.timeout = float(self.config.get("env", "timeout")) self.img_file_expire = int( self.config.get("env", "img_file_expire")) * 24 * 60 * 60 if clustered: lb_db = self.config.get("env", "lb_db") if os.path.exists(lb_db): os.remove(lb_db) Db.__init__(self, lb_db) self.write( "CREATE TABLE lb(HOST VARCHAR(100) PRIMARY KEY, STATUS VARCHAR(20), WEIGHT INT(3) DEFAULT '0')" ) self.clustered = True self._health_check_cluster(first=True) if with_static: self.mime_type = { "css": "text/css", "js": "application/javascript" } self.root_directory = str(self.config.get("env", "root_directory")) self.static_file_expire = int( self.config.get("env", "static_file_expire")) * 24 * 60 * 60 if watcher: if not clustered: raise RuntimeError( "clustered parametresi açılmadan watcher kullanılamaz.") check_interval = int(self.config.get("env", "check_interval")) p = Process(target=self._health_check_cluster, name="EQL_Watcher", kwargs={"check_interval": check_interval}) p.start() def _health_check_cluster(self, first=False, check_interval=3): if first: cluster = self.config.get("env", "cluster").split(",") cluster.append(self.server) url = self.config.get("env", "health_check_url") weight = 1 for server in cluster: status = None try: req = requests.get("http://{0}{1}".format(server, url), timeout=self.timeout) status = "up" if req.status_code == 200 else "down" except requests.exceptions.Timeout: status = "down" except requests.exceptions.ConnectionError: status = "down" finally: if status == "down": self.logger.log_save( "EQL", "ERROR", "{0} Sunucusu down.".format(server)) self.write( "INSERT INTO lb VALUES ('{0}', '{1}', '{2}')".format( server, status, weight)) weight += 1 return True while True: cluster = [i[0] for i in self.readt("SELECT HOST FROM lb")] 
url = self.config.get("env", "health_check_url") weight = 1 for server in cluster: status = None try: req = requests.get("http://{0}{1}".format(server, url), timeout=self.timeout) status = "up" if req.status_code == 200 else "down" except requests.exceptions.Timeout: status = "down" except requests.exceptions.ConnectionError: status = "down" finally: try: if status == "down": self.logger.log_save( "EQL", "ERROR", "{0} Sunucusu down.".format(server)) self.write( "INSERT INTO lb VALUES ('{0}', '{1}', '{2}')". format(server, status, weight)) except sqlite3.IntegrityError: self.write( "UPDATE lb SET STATUS='{0}', WEIGHT='{1}' WHERE HOST='{2}'" .format(status, weight, server)) weight += 1 time.sleep(int(check_interval)) def _is_cached(self, url): urls = h.md5(url).hexdigest() try: values = self.cache_bucket.get(urls).value type_ = self._statistic(urls, r_turn=True) if type_ is None: raise ValueError() return True, values, type_ except (couchbase.exceptions.NotFoundError, ValueError): try: req = requests.get("http://{0}/{1}".format(self.server, url), timeout=self.timeout) except requests.exceptions.Timeout: if not self.clustered: self.logger.log_save( "EQL", "CRITIC", "Backend server timeout hatası aldı.") return False, int(500) while True: pool = self._get_server() try: try: self.server = pool.next() req = requests.get("http://{0}/{1}".format( self.server, url), timeout=self.timeout) if req.status_code == 200: break except (requests.exceptions.Timeout, requests.exceptions.ConnectionError): pass except StopIteration: self.logger.log_save( "EQL", "CRITIC", "Tüm backend serverlar timeout hatası aldı.") return False, int(500) if req.status_code == 200: self._cache_item(urls, req.content) self._statistic(urls, req.headers.get('content-type')) return True, req.content, req.headers.get('content-type') else: return False, int(req.status_code) def _cache_item(self, url, img, static_file=False): try: if static_file: self.cache_bucket.insert(url, img, format=couchbase.FMT_BYTES, ttl=self.static_file_expire) else: self.cache_bucket.insert(url, img, format=couchbase.FMT_BYTES, ttl=self.img_file_expire) except couchbase.exceptions.KeyExistsError: pass finally: return True def _statistic(self, url, type_=None, r_turn=False, static_file=False): if static_file: expire = self.static_file_expire else: expire = self.img_file_expire try: values = self.statistic_bucket.get(url).value count, timestamp, type_ = values[0], values[1], values[2] count += 1 obj = [count, timestamp, type_] self.statistic_bucket.replace(url, obj) except couchbase.exceptions.NotFoundError: if r_turn: if type_ is None: return False count = 1 obj = [ count, datetime.datetime.now().strftime("%Y-%m-%d %H:%I:%S"), type_ ] self.statistic_bucket.insert(url, obj, ttl=int(expire)) finally: if r_turn: return type_ return True def _get_server(self): cluster = self.readt( "SELECT HOST,WEIGHT FROM lb WHERE STATUS='up' ORDER BY WEIGHT ASC") itr = 1 while len(cluster) >= itr: yield cluster[itr - 1][0] itr += 1 def route_request(self, url, from_file=False): # return status, data, mime type if self.router_mod: raise RuntimeError("router_mod açıkken bu özellik kullanılamaz.") if from_file: urls = h.md5(url).hexdigest() try: values = self.cache_bucket.get(urls).value type_ = self._statistic(urls, r_turn=True) return True, values, type_ except couchbase.exceptions.NotFoundError: try: file_ = open(self.root_directory + str(url)) except IOError: return False, int(500) data = file_.read() type_ = self.mime_type[url.split(".")[-1]] self._cache_item(urls, data, 
static_file=True) self._statistic(urls, type_, static_file=True) return True, data, type_ return self._is_cached(url) # router_mod işlemleri başlangıcı def _health_check_edge_server(self, check_interval=3): while True: for edge_location in self.edge_locations: health_check_url = self.config.get(edge_location, "health_check_url") timeout = self.config.get(edge_location, "timeout") cluster = self.config.get(edge_location, "servers").split(",") for server in cluster: status = None try: req = requests.get("http://{0}{1}".format( server, health_check_url), timeout=float(timeout)) status = "up" if req.status_code == 200 else "down" except requests.exceptions.Timeout: status = "down" except requests.exceptions.ConnectionError: status = "down" finally: try: if status == "down": self.logger.log_save( "EQL", "ERROR", "{0} Sunucusu down.".format(server)) self.write( "INSERT INTO edge_status VALUES ('{0}', '{1}', '{2}')" .format(server, status, edge_location)) except sqlite3.IntegrityError: self.write( "UPDATE edge_status SET STATUS='{0}', REGION='{2}' WHERE SERVER='{1}'" .format(status, server, edge_location)) time.sleep(int(check_interval)) def _put_statistic(self, country_code, url): urls = h.md5(url) try: values = self.request_statistic.get(urls).value count, timestamp, countries = values[0], values[1], list(values[2]) countries.append(country_code) countries = list(set(countries)) count += 1 obj = [count, timestamp, countries] self.request_statistic.replace(urls, obj) except couchbase.exceptions.NotFoundError: count = 1 countries = [country_code] obj = [ count, datetime.datetime.now().strftime("%Y-%m-%d %H:%I:%S"), countries ] self.request_statistic.insert(url, obj) finally: return True def _get_best_edge(self, country_code): request_from = self.cc_db.readt( "SELECT CONTINENT FROM country_code WHERE CC='{0}'".format( country_code))[0][0] region = request_from if request_from in self.edge_locations else self.default_edge return self.readt( "SELECT SERVER FROM edge_status WHERE STATUS='up' AND REGION='{0}'" .format(region))[0][0] def route_to_best_edge(self, url, origin_ip): origin = geolite2.lookup(origin_ip) if self.with_statistic: self._put_statistic(origin, url) if origin is not None: return True, "http://{0}/{1}".format( self._get_best_edge(origin.country), url) return True, "http://{0}/{1}".format( self._get_best_edge(self.default_edge), url)
class SessionStore(SessionBase):
    """
    A couchbase-based session store.
    """

    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)
        host = settings.COUCHBASE_HOST
        bucket = settings.COUCHBASE_BUCKET
        self.server = Bucket('couchbase://' + host + '/' + bucket)

    @property
    def cache_key(self):
        return self._get_or_create_session_key()

    def load(self):
        try:
            session_data = self.server.get(self._get_or_create_session_key())
            return session_data.value
        except:
            self._session_key = None
            return {}

    def create(self):
        while True:
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            self.modified = True
            return

    def save(self, must_create=False):
        if self.session_key is None:
            return self.create()
        if must_create and self.exists(self._get_or_create_session_key()):
            raise CreateError
        data = self._get_session(no_load=must_create)
        if must_create:
            self.server.insert(self._get_or_create_session_key(), data)
        else:
            self.server.replace(self._get_or_create_session_key(), data)

    def exists(self, session_key):
        rv = self.server.get(session_key, quiet=True)
        return rv.success

    def delete(self, session_key=None):
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            self.server.remove(session_key)
        except:
            pass

    @classmethod
    def clear_expired(cls):
        pass
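Wiring this store into Django is a settings change; a minimal sketch, assuming the class lives in a module such as myapp.couchbase_sessions (module path, host, and bucket values are placeholders):

# settings.py -- placeholder module path and connection details
SESSION_ENGINE = 'myapp.couchbase_sessions'   # module that defines SessionStore
COUCHBASE_HOST = '127.0.0.1'
COUCHBASE_BUCKET = 'sessions'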