def connect(endpoints, groups, **kw):
    """Create an elliptics node connected to *endpoints* and return a session bound to *groups*."""
    remotes = [elliptics.Address.from_host_port_family(endpoint) for endpoint in endpoints]

    def remap(mapping, src, dst):
        # Move a value from key *src* to key *dst*, if present.
        if src in mapping:
            mapping[dst] = mapping.pop(src)

    # Defensively strip keyword arguments that elliptics.create_node() would reject.
    for stale in ('elog', 'cfg', 'remotes'):
        kw.pop(stale, None)

    # Translate the friendly option names into the ones create_node() expects.
    remap(kw, 'logfile', 'log_file')
    remap(kw, 'loglevel', 'log_level')

    node = elliptics.create_node(**kw)
    node.add_remotes(remotes)

    session = elliptics.Session(node)
    session.add_groups(groups)
    return session
def connect(endpoints, groups, **kw):
    """Build a client session: resolve endpoints, sanitise kwargs, attach groups."""
    def move_key(old, new):
        # Carry kw[old] over to kw[new] when the caller used the old spelling.
        if old in kw:
            kw[new] = kw.pop(old)

    remotes = list(map(elliptics.Address.from_host_port_family, endpoints))

    # These keys are not accepted by elliptics.create_node(); drop them just in case.
    kw.pop('elog', None)
    kw.pop('cfg', None)
    kw.pop('remotes', None)

    # create_node() wants log_file/log_level rather than logfile/loglevel.
    move_key('logfile', 'log_file')
    move_key('loglevel', 'log_level')

    node = elliptics.create_node(**kw)
    node.add_remotes(remotes)

    session = elliptics.Session(node)
    session.add_groups(groups)
    return session
def connect(endpoints, groups, **kw):
    """Create a node from "host:port" endpoint strings and return a session for *groups*."""
    def remap(mapping, old, new):
        # Rename key *old* to *new* inside *mapping* when present.
        if old in mapping:
            mapping[new] = mapping.pop(old)

    parsed = []
    for endpoint in endpoints:
        pieces = endpoint.split(":")
        parsed.append((pieces[0], int(pieces[1])))

    # Defensively drop kwargs that elliptics.create_node() does not accept.
    for unwanted in ('elog', 'cfg', 'remotes'):
        kw.pop(unwanted, None)

    # Map the convenient option spellings onto the ones the API requires.
    remap(kw, 'logfile', 'log_file')
    remap(kw, 'loglevel', 'log_level')

    node = elliptics.create_node(**kw)
    for host, port in parsed:
        # Best effort: an unreachable remote must not abort the whole connect.
        try:
            node.add_remote(host, port)
        except Exception:
            pass

    session = elliptics.Session(node)
    session.add_groups(groups)
    return session
def connect(endpoints, groups, **kw):
    """Create an elliptics node/session pair for *endpoints* and *groups*.

    Args:
        endpoints: iterable of "host:port" (optionally "host:port:family") strings.
        groups: iterable of elliptics group ids the session will work with.
        **kw: forwarded to elliptics.create_node() after normalisation.

    Returns:
        PassthroughWrapper binding the node and the session together, so the
        node stays alive for the lifetime of the session.
    """
    remotes = []
    for endpoint in endpoints:
        # Only host and port are used; a trailing ":family" part is ignored here.
        parts = endpoint.split(":")
        remotes.append((parts[0], int(parts[1])))

    def rename(new, old):
        # Accept the friendly spelling *old* and pass it on to create_node() as *new*.
        if old in kw:
            kw[new] = kw.pop(old)

    # Drop kwargs that elliptics.create_node() would reject, just in case.
    kw.pop('elog', None)
    kw.pop('cfg', None)
    kw.pop('remotes', None)

    rename('log_file', 'logfile')
    rename('log_level', 'loglevel')

    node = elliptics.create_node(**kw)
    for host, port in remotes:
        # Best effort: skip remotes that cannot be reached right now.
        try:
            node.add_remote(host, port)
        except Exception:
            pass

    session = elliptics.Session(node)
    session.add_groups(groups)
    # XXX: Is it time to drop PassthroughWrapper binder?
    return PassthroughWrapper(node, session)
def upload_app(self):
    """Upload the srw test application and its profile via cocaine-tool, then start it."""
    if self.without_cocaine:
        return
    import subprocess

    self.__create_manifest__()
    self.__create_profile__()

    # Push the application package to the cocaine cloud.
    uploader = subprocess.Popen(['cocaine-tool', 'app', 'upload',
                                 '--manifest', self.name + '/dnet_cpp_srw_test_app.manifest',
                                 '--package', '../dnet_cpp_srw_test_app.tar',
                                 '--name', 'dnet_cpp_srw_test_app',
                                 '--host={0}'.format(self.addr),
                                 '--port={0}'.format(self.locator_port)])
    uploader.wait()
    assert uploader.returncode == 0

    # Push the runtime profile for the application.
    profile_proc = subprocess.Popen(['cocaine-tool', 'profile', 'upload',
                                     '--profile', self.name + '/dnet_cpp_srw_test_app.profile',
                                     '--name', 'dnet_cpp_srw_test_app',
                                     '--host={0}'.format(self.addr),
                                     '--port={0}'.format(self.locator_port)])
    profile_proc.wait()
    assert profile_proc.returncode == 0

    # Kick the freshly uploaded application through an elliptics exec call.
    import elliptics
    node = elliptics.create_node(remotes=[self.get_addr()], log_level=self.log_level)
    session = elliptics.Session(node)
    session.groups = [self.group]
    session.cflags = elliptics.command_flags.nolock
    session.set_filter(elliptics.filters.all_with_ack)
    session.exec_(id=None, event='dnet_cpp_srw_test_app@start-task').wait()
def prepare_session(testdir, remote, group_id):
    """Initialize and return session for specified remote

    Args:
        testdir: fixture for test's temporary dirs/files
        remote(str): address of the node
        group_id(int): id of the group to work with

    Returns:
        elliptics.newapi.Session: session configured for working with the backend
    """
    client_log = str(testdir.join("client.log"))
    node = elliptics.create_node(log_file=client_log,
                                 log_level=elliptics.log_level.debug,
                                 remotes=[remote])
    session = elliptics.newapi.Session(node)
    session.groups = [group_id]
    return session
def connect(endpoints, groups, **kw):
    """Return an elliptics session connected to *endpoints* and bound to *groups*."""
    def adopt_alias(alias, target):
        # Translate a caller-friendly kwarg name into the one create_node() needs.
        if alias in kw:
            kw[target] = kw.pop(alias)

    # Remove kwargs that elliptics.create_node() would reject, just in case.
    for key in ('elog', 'cfg', 'remotes'):
        kw.pop(key, None)

    adopt_alias('logfile', 'log_file')
    adopt_alias('loglevel', 'log_level')

    node = elliptics.create_node(**kw)
    node.add_remote(endpoints)

    session = elliptics.Session(node)
    session.add_groups(groups)
    return session
def process_uncommitted(ctx, results):
    '''
    Removes uncommitted keys. If a key has any committed replicas, then this key is appended to
    the file containing committed keys. If an uncommitted key's replica hasn't exceeded prepare timeout,
    then skip recovering of the key, because the key is under writing and can be committed
    in the nearest future.
    '''
    # Nothing to remove when running in dry-run or safe mode.
    if ctx.dry_run or ctx.safe:
        return

    # Dedicated lightweight client node: route list disabled, single net/io thread.
    node = elliptics.create_node(log_file=ctx.log_file,
                                 log_level=int(ctx.log_level),
                                 log_watched=True,
                                 wait_timeout=ctx.wait_timeout,
                                 flags=elliptics.config_flags.no_route_list,
                                 net_thread_num=1,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)
    session = elliptics.newapi.Session(node)
    session.trace_id = ctx.trace_id
    # Errors are reported via result statuses, not raised as exceptions.
    session.exceptions_policy = elliptics.exceptions_policy.no_exceptions
    session.set_filter(elliptics.filters.all_final)
    # cas_timestamp + timestamp = prepare_timeout: replicas newer than the prepare
    # timeout are rejected with EBADFD by the server (still being written).
    session.ioflags |= elliptics.io_flags.cas_timestamp
    session.timestamp = ctx.prepare_timeout

    stats = ctx.stats['recover']
    stats_cmd = ctx.stats['commands']

    for r in results:
        with open(r.filename, 'ab') as f:
            # Process uncommitted keys in batches of ctx.batch_size.
            # NOTE: integer '/' in the groupby key is Python 2 semantics — intentional here.
            for _, batch in itertools.groupby(
                    enumerate(load_key_data(r.uncommitted_filename)),
                    key=lambda x: x[0] / ctx.batch_size):
                batch = [item[1] for item in batch]
                tasks = []
                statuses = {}  # (key, group_id) -> status
                # Collect removal tasks for every uncommitted replica of every key.
                for key, key_infos in batch:
                    for info in key_infos:
                        if info.flags & elliptics.record_flags.uncommitted:
                            if info.group_id in ctx.ro_groups:
                                # Read-only groups are never written to; pretend removal succeeded.
                                stats.counter('skip_remove_uncommitted_key_from_ro_group', 1)
                                statuses[(key, info.group_id)] = 0  # mark status as successful
                                continue
                            tasks.append((key, info.group_id, info.size))
                # Try to remove the uncommitted replicas, retrying failed ones.
                for attempt in range(ctx.attempts):
                    if not tasks:
                        break
                    if attempt > 0:
                        stats.counter('remove_retries', len(tasks))
                    batch_sizes = defaultdict(int)  # group_id -> batch_size
                    for _, group_id, key_size in tasks:
                        batch_sizes[group_id] += key_size
                    # Per-group timeout scaled by the amount of data, floored at 60s.
                    # NOTE: .iteritems() — this code targets Python 2.
                    timeouts = {
                        group_id: max(60, batch_size / ctx.data_flow_rate)
                        for group_id, batch_size in batch_sizes.iteritems()
                    }
                    responses = []
                    for key, group_id, _ in tasks:
                        session.groups = [group_id]
                        session.timeout = timeouts[group_id]
                        responses.append(session.remove(key))
                    failed_tasks = []
                    for i, r in enumerate(responses):
                        key, group_id, _ = tasks[i]
                        status = r.get()[0].status
                        log.info(
                            'Removed uncommitted key: %s, group: %s, status: %s, attempts: %s/%s',
                            key, group_id, status, attempt, ctx.attempts)
                        statuses[(key, group_id)] = status
                        if status == 0:
                            stats.counter('removed_uncommitted_keys', 1)
                        else:
                            stats_cmd.counter('remove.{0}'.format(status), 1)
                        # ENOENT: already gone; EBADFD: still within prepare timeout — neither is retried.
                        if status not in (0, -errno.ENOENT, -errno.EBADFD):
                            failed_tasks.append(tasks[i])
                    tasks = failed_tasks
                for key, key_infos in batch:
                    # Filter uncommitted replicas, then append a key to the 'merged' file for recovery.
                    # If an uncommitted replica hasn't exceeded prepare timeout,
                    # then removal status is EBADFD and this key must be skipped.
                    infos = []
                    for info in key_infos:
                        if info.flags & elliptics.record_flags.uncommitted:
                            status = statuses[(key, info.group_id)]
                            if status == -errno.EBADFD:
                                # Replica is still being written — skip the whole key for now.
                                stats.counter('skipped_uncommitted_keys', 1)
                                infos = None
                                break
                        else:
                            # Committed replica — keep it for recovery.
                            infos.append(info)
                    if infos:
                        dump_key_data((key, infos), f)