def parallel_process(objects, method, *args):
    """Invoke ``method`` on every object in ``objects`` concurrently in worker processes.

    Each object's bound method is submitted to a ProcessPoolExecutor sized by
    settings.MAX_REQUEST_WORKERS. Results are only logged, not returned (use
    ``in_parallel`` when the return values are needed). Exceptions raised by a
    worker are logged and swallowed so the remaining objects still complete.

    :param objects: iterable of objects to call the method on
    :param method: name (str) of the method to invoke on each object
    :param args: positional arguments forwarded to every method call
    """
    with concurrent.futures.ProcessPoolExecutor(max_workers=settings.MAX_REQUEST_WORKERS) as executor:
        futures = {executor.submit(getattr(obj, method), *args): obj for obj in objects}
        for future in concurrent.futures.as_completed(futures):
            # NOTE: the original guarded this body with
            # "if concurrent.futures.as_completed(futures):" — that call always
            # returns a (truthy) iterator, so the check was a no-op that built a
            # throwaway iterator per completed future. Removed.
            obj = futures[future]
            try:
                # future.result() re-raises any exception from the worker
                log_debug("Object {} method {} output {}".format(obj, method, future.result()))
            except Exception as exception:
                log_info('Generated an exception : {} : {}'.format(obj, exception))
def in_parallel(objects, method, *args):
    """Invoke ``method`` on every object in ``objects`` concurrently in threads.

    Uses a ThreadPoolExecutor sized by settings.MAX_REQUEST_WORKERS (threads,
    not processes — appropriate for the I/O-bound calls this harness makes).

    :param objects: iterable of objects to call the method on
    :param method: name (str) of the method to invoke on each object
    :param args: positional arguments forwarded to every method call
    :returns: dict mapping each object to its method's return value
    :raises ValueError: if any object's method raises; the first failure aborts
        the collection (remaining futures are cancelled/joined by the executor
        context manager on exit)
    """
    result = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_REQUEST_WORKERS) as executor:
        futures = {executor.submit(getattr(obj, method), *args): obj for obj in objects}
        for future in concurrent.futures.as_completed(futures):
            # NOTE: removed the original "if concurrent.futures.as_completed(futures):"
            # guard — as_completed() always returns a truthy iterator, so the
            # condition was a no-op that allocated a fresh iterator every pass.
            obj = futures[future]
            try:
                # future.result() re-raises any exception from the worker thread
                result[obj] = future.result()
                log_debug("Object {} method {} output {}".format(obj, method, result[obj]))
            except Exception as exception:
                log_info('Generated an exception : {} : {}'.format(obj, exception))
                raise ValueError('in_parallel: got exception', exception, obj)
    return result
def delete_couchbase_server_cached_rev_bodies(self, bucket):
    """
    Deletes docs that follow the below format
    _sync:rev:att_doc:34:1-e7fa9a5e6bb25f7a40f36297247ca93e

    :param bucket: name of the Couchbase Server bucket to scan and clean
    """
    b = Bucket("couchbase://{}/{}".format(self.host, bucket))
    # Create the primary index idempotently. The original issued a raw
    # "CREATE PRIMARY INDEX" N1QL statement, which raises if the index already
    # exists (e.g. on a second call against the same bucket). Using the bucket
    # manager with ignore_exists=True matches the sibling implementation of
    # this method and makes repeated calls safe.
    b.bucket_manager().n1ql_index_create_primary(ignore_exists=True)
    cached_rev_doc_ids = []
    for row in b.n1ql_query("SELECT meta(`{}`) FROM `{}`".format(bucket, bucket)):
        # meta() results come back under the synthetic "$1" projection key
        if row["$1"]["id"].startswith("_sync:rev"):
            cached_rev_doc_ids.append(row["$1"]["id"])
    log_info("Found temp rev docs: {}".format(cached_rev_doc_ids))
    for doc_id in cached_rev_doc_ids:
        log_debug("Removing: {}".format(doc_id))
        b.remove(doc_id)
def delete_couchbase_server_cached_rev_bodies(self, bucket):
    """
    Deletes docs that follow the below format
    _sync:rev:att_doc:34:1-e7fa9a5e6bb25f7a40f36297247ca93e

    :param bucket: name of the Couchbase Server bucket to scan and clean
    """
    sdk_bucket = Bucket("couchbase://{}/{}".format(self.host, bucket), password='******')
    # Primary index is required for the N1QL scan; ignore_exists makes this
    # safe to call more than once against the same bucket.
    sdk_bucket.bucket_manager().n1ql_index_create_primary(ignore_exists=True)
    meta_query = "SELECT meta(`{}`) FROM `{}`".format(bucket, bucket)
    # meta() rows are projected under the synthetic "$1" key; keep only the
    # temporary cached-rev doc ids.
    cached_rev_doc_ids = [
        row["$1"]["id"]
        for row in sdk_bucket.n1ql_query(meta_query)
        if row["$1"]["id"].startswith("_sync:rev")
    ]
    log_info("Found temp rev docs: {}".format(cached_rev_doc_ids))
    for doc_id in cached_rev_doc_ids:
        log_debug("Removing: {}".format(doc_id))
        sdk_bucket.remove(doc_id)
def wait_for_ready_state(self):
    """
    Verify all server nodes are in a "healthy" state to avoid sync_gateway
    startup failures.
    Work around for this - https://github.com/couchbase/sync_gateway/issues/1745

    Polls {self.url}/pools/nodes once per second until every node reports
    status "healthy".

    :raises Exception: if the nodes are not all healthy within
        keywords.constants.CLIENT_REQUEST_TIMEOUT seconds
    """
    start = time.time()
    while True:
        # BUG FIX: the original did "elapsed = time.time()" and compared
        # "elapsed - start", so the timeout message printed the absolute epoch
        # timestamp (e.g. "1469...s") instead of the elapsed seconds.
        elapsed = time.time() - start
        if elapsed > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise Exception(
                "Timeout: Server not in ready state! {}s".format(elapsed))
        # Verify the server is in a "healthy", not "warmup" state
        try:
            resp = self._session.get("{}/pools/nodes".format(self.url))
            log_r(resp)
        except ConnectionError:
            # If bringing a server online, there may be some connection issues.
            # Continue and try again.
            time.sleep(1)
            continue
        resp_obj = resp.json()
        all_nodes_healthy = True
        for node in resp_obj["nodes"]:
            if node["status"] != "healthy":
                all_nodes_healthy = False
                log_info(
                    "Node is still not healthy. Status: {} Retrying ...".
                    format(node["status"]))
                time.sleep(1)
        if not all_nodes_healthy:
            continue
        log_info("All nodes are healthy")
        log_debug(resp_obj)
        # All nodes are healthy if it made it to here
        break
def wait_for_ready_state(self):
    """
    Verify all server nodes are in a "healthy" state to avoid sync_gateway
    startup failures.
    Work around for this - https://github.com/couchbase/sync_gateway/issues/1745

    Polls {self.url}/pools/nodes once per second until every node reports
    status "healthy".

    :raises Exception: if the nodes are not all healthy within
        keywords.constants.CLIENT_REQUEST_TIMEOUT seconds
    """
    start = time.time()
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            # BUG FIX: the original raised "Verify Docs Present: TIMEOUT" — a
            # message copy-pasted from an unrelated doc-verification routine
            # that would mislead anyone triaging this failure.
            raise Exception("Timeout: Server not in ready state!")
        # Verify the server is in a "healthy", not "warmup" state
        try:
            resp = self._session.get("{}/pools/nodes".format(self.url))
            log_r(resp)
        except ConnectionError:
            # If bringing a server online, there may be some connection issues.
            # Continue and try again.
            time.sleep(1)
            continue
        resp_obj = resp.json()
        all_nodes_healthy = True
        for node in resp_obj["nodes"]:
            if node["status"] != "healthy":
                all_nodes_healthy = False
                log_info("Node is still not healthy. Status: {} Retrying ...".format(node["status"]))
                time.sleep(1)
        if not all_nodes_healthy:
            continue
        log_info("All nodes are healthy")
        log_debug(resp_obj)
        # All nodes are healthy if it made it to here
        break