def configure(kwargs):
    """Apply settings from ``kwargs`` onto this module's globals.

    A key is accepted only when it is fully upper-case AND already defined
    at module level; anything else is skipped with a warning.
    """
    # Plain dict.items() behaves identically on Python 2 and 3 here, so the
    # compat shim is unnecessary.
    for k, v in kwargs.items():
        # Both rejection cases (lower-case name, unknown name) were duplicated
        # branches with identical bodies — fold them into a single condition.
        if k.upper() != k or k not in globals():
            warnings.warn('Invalid setting, \'%s\' which is not defined by Nydus' % k)
        else:
            globals()[k] = v
def join(self):
    """Run every worker to completion and merge their result maps.

    Each worker exposes a ``results`` mapping of key -> list; the lists
    from all workers are concatenated per key and returned as a
    ``defaultdict(list)``.
    """
    # Kick every worker off first so they all run concurrently.
    for worker in self.workers:
        worker.start()

    merged = defaultdict(list)
    for worker in self.workers:
        worker.join()
        for key, values in iteritems(worker.results):
            merged[key].extend(values)
    return merged
def execute(self, cluster, commands):
    """Dispatch each pending command as its own thread-pool job.

    ``commands`` maps db_num -> list of commands; every command is resolved
    against the connection for its database.  Returns the pool's
    consolidated results as a plain dict.
    """
    pool = self.get_pool(commands)

    for db_num, command_list in iteritems(commands):
        for command in command_list:
            # Clone before resolving: an EventualCommand proxy may only
            # resolve once, and the original must stay untouched.
            pool.add(command, command.clone().resolve, [cluster[db_num]])

    return dict(pool.join())
def execute(self, cluster, commands):
    """Execute queued commands through one pipeline per database.

    For every database referenced by ``commands`` a pipeline is built, the
    (cloned) commands are queued on it, and a single ``execute`` per
    pipeline is dispatched through the thread pool.  Results are then
    regrouped per original command.  If a pipeline's execute raised, the
    exception object becomes the recorded result of every command that was
    queued on that database.
    """
    # db_num -> pipeline holding that database's cloned commands
    pipes = {}
    pool = self.get_pool(commands)

    for db_num, command_list in iteritems(commands):
        pipe = cluster[db_num].get_pipeline()
        pipes[db_num] = pipe
        for command in command_list:
            # Clone so the original EventualCommand proxy stays untouched
            # (a proxy may only resolve once).
            pipe.add(command.clone())

    # One pool job per pipeline: a single execute() flushes that batch.
    for db_num, pipe in iteritems(pipes):
        pool.add(db_num, pipe.execute, (), {})

    results = defaultdict(list)
    for db_num, db_results in iteritems(pool.join()):
        # A pipeline runs against exactly one database, so the pool must
        # report exactly one entry per db_num.
        assert len(db_results) == 1
        outcome = db_results[0]
        if isinstance(outcome, Exception):
            # pipe.execute failed inside nydus; attribute the exception to
            # every command queued for that database.
            for command in commands[db_num]:
                results[command].append(outcome)
        else:
            for command, result in iteritems(outcome):
                results[command].append(result)
    return results
def _route(self, attr, args, kwargs, **fkwargs):
    """
    The first argument is assumed to be the ``key`` for routing.

    Returns the list of db_nums whose host identifier matches the node
    selected from the hash ring for the key.  Raises ``HostListExhausted``
    when no node was found while some connections are marked down.
    """
    key = get_key(args, kwargs)
    selected = self._hash.get_node(key)
    # No node while connections are down means the ring has been drained
    # of live hosts entirely.
    if not selected and len(self._down_connections) > 0:
        raise self.HostListExhausted()
    return [
        db_num
        for db_num, host in compat.iteritems(self.cluster.hosts)
        if host.identifier == selected
    ]
def _setup_router(self, args, kwargs, **fkwargs):
    """Initialize the Ketama hash ring from the cluster's host identifiers."""
    # Keep db_num -> identifier around so identifiers coming back from the
    # ring can be translated into database numbers later.
    self._db_num_id_map = dict(
        (db_num, host.identifier)
        for db_num, host in compat.iteritems(self.cluster.hosts)
    )
    self._hash = Ketama(self._db_num_id_map.values())
    return True
def iter_hosts(hosts):
    """Yield ``(db_num, host)`` pairs from ``hosts``.

    ``hosts`` may be a mapping (keys act as the numeric index) or a sorted
    sequence (positions act as the index).
    """
    # collections.Mapping was removed in Python 3.10; collections.abc.Mapping
    # has existed since 3.3.  Fall back for Python 2, where only the
    # collections location exists.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(hosts, Mapping):
        return iteritems(hosts)
    return enumerate(hosts)
def reload(self):
    """Recreate every configured cluster from the config callback.

    Marks the container as ready once all clusters have been (re)built.
    """
    # Imported lazily to avoid a circular import with nydus.db.
    from nydus.db import create_cluster

    for alias, settings in iteritems(self.conf_callback()):
        self[alias] = create_cluster(settings)
    self._is_ready = True
def apply_defaults(host, defaults):
    """Fill missing keys in ``host`` from ``defaults``.

    Mutates ``host`` in place and returns it; keys already present in
    ``host`` are never overwritten.
    """
    # dict.setdefault is the idiomatic membership-check-then-assign; plain
    # .items() behaves identically on Python 2 and 3 for this read-only pass.
    for key, value in defaults.items():
        host.setdefault(key, value)
    return host