def gmaster_builder():
    """produce the GMaster class variant corresponding to sync mode"""
    this = sys.modules[__name__]
    modemixin = gconf.special_sync_mode
    if not modemixin:
        modemixin = 'normal'
    logging.info('setting up master for %s sync mode' % modemixin)
    modemixin = getattr(this, modemixin.capitalize() + 'Mixin')
    sendmarkmixin = boolify(
        gconf.use_rsync_xattrs) and SendmarkRsyncMixin or SendmarkNormalMixin
    purgemixin = boolify(
        gconf.ignore_deletes) and PurgeNoopMixin or PurgeNormalMixin

    class _GMaster(GMasterBase, modemixin, sendmarkmixin, purgemixin):
        pass
    return _GMaster
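# A minimal, self-contained sketch of the mixin-selection pattern used by
# gmaster_builder() above: pick mixin classes by name from the current
# module based on configuration, then compose a one-off class from them.
# The Base/Noisy/Quiet names are hypothetical and only illustrate the idea.
import sys


class Base(object):
    def greet(self):
        return 'hello'


class NoisyMixin(object):
    def greet(self):
        return 'HELLO!'


class QuietMixin(object):
    pass


def builder(noisy):
    this = sys.modules[__name__]
    # resolve the mixin class by name, just as gmaster_builder() does with
    # getattr(this, modemixin.capitalize() + 'Mixin')
    mixin = getattr(this, (noisy and 'Noisy' or 'Quiet') + 'Mixin')

    class _Composed(mixin, Base):
        pass
    return _Composed


print(builder(True)().greet())   # HELLO!
print(builder(False)().greet())  # hello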
def service_loop(self):
    """start a RePCe server serving self's server

    stop servicing if a timeout is configured and got no
    keep-alive in that interval
    """
    if boolify(gconf.use_rsync_xattrs) and not privileged():
        raise GsyncdError(
            "using rsync for extended attributes is not supported")
    repce = RepceServer(
        self.server, sys.stdin, sys.stdout, int(gconf.sync_jobs))
    t = syncdutils.Thread(target=lambda: (repce.service_loop(),
                                          syncdutils.finalize()))
    t.start()
    logging.info("slave listening")
    if gconf.timeout and int(gconf.timeout) > 0:
        while True:
            lp = self.server.last_keep_alive
            time.sleep(int(gconf.timeout))
            if lp == self.server.last_keep_alive:
                logging.info(
                    "connection inactive for %d seconds, stopping" %
                    int(gconf.timeout))
                break
    else:
        select((), (), ())
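# A minimal sketch of the keep-alive watchdog loop in service_loop() above:
# sample a counter, sleep for the whole timeout, and stop if the counter has
# not moved, i.e. no keep-alive arrived during the interval. The Server
# class here is hypothetical.
import time


class Server(object):
    last_keep_alive = 0


def watchdog(server, timeout):
    while True:
        lp = server.last_keep_alive
        time.sleep(timeout)
        if lp == server.last_keep_alive:
            # no keep-alive arrived for `timeout` seconds; give up
            return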
def connect_remote(self, rargs=[], **opts):
    """connects to a remote slave

    Invoke an auxiliary utility (slave gsyncd, possibly wrapped)
    which sets up the connection and set up a RePCe client to
    communicate through its stdio.
    """
    slave = opts.get("slave", self.url)
    extra_opts = []
    so = getattr(gconf, "session_owner", None)
    if so:
        extra_opts += ["--session-owner", so]
    if boolify(gconf.use_rsync_xattrs):
        extra_opts.append("--use-rsync-xattrs")
    po = Popen(
        rargs + gconf.remote_gsyncd.split() + extra_opts +
        ["-N", "--listen", "--timeout", str(gconf.timeout), slave],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    gconf.transport = po
    return self.start_fd_client(po.stdout, po.stdin, **opts)
def gmaster_builder(excrawl=None):
    """produce the GMaster class variant corresponding to sync mode"""
    this = sys.modules[__name__]
    modemixin = gconf.special_sync_mode
    if not modemixin:
        modemixin = 'normal'
    changemixin = isinstance(excrawl, str) and excrawl or gconf.change_detector
    logging.info('setting up %s change detection mode' % changemixin)
    modemixin = getattr(this, modemixin.capitalize() + 'Mixin')
    crawlmixin = getattr(this, 'GMaster' + changemixin.capitalize() + 'Mixin')
    sendmarkmixin = boolify(
        gconf.use_rsync_xattrs) and SendmarkRsyncMixin or SendmarkNormalMixin
    purgemixin = boolify(
        gconf.ignore_deletes) and PurgeNoopMixin or PurgeNormalMixin

    class _GMaster(crawlmixin, modemixin, sendmarkmixin, purgemixin):
        pass
    return _GMaster
def rsync(self, files, *args):
    """invoke rsync"""
    if not files:
        raise GsyncdError("no files to sync")
    logging.debug("files: " + ", ".join(files))
    argv = gconf.rsync_command.split() + \
        ['-aR', '--super', '--numeric-ids', '--no-implied-dirs'] + \
        gconf.rsync_options.split() + \
        (boolify(gconf.use_rsync_xattrs) and ['--xattrs'] or []) + \
        files + list(args)
    po = Popen(argv, stderr=subprocess.PIPE)
    po.wait()
    po.terminate_geterr(fail_on_err=False)
    return po
def rsync(self, files, *args):
    """invoke rsync"""
    if not files:
        raise GsyncdError("no files to sync")
    logging.debug("files: " + ", ".join(files))
    argv = gconf.rsync_command.split() + \
        ['-aR0', '--files-from=-', '--super',
         '--numeric-ids', '--no-implied-dirs'] + \
        gconf.rsync_options.split() + \
        (boolify(gconf.use_rsync_xattrs) and ['--xattrs'] or []) + \
        ['.'] + list(args)
    po = Popen(argv, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    for f in files:
        po.stdin.write(f)
        po.stdin.write('\0')
    po.stdin.close()
    po.wait()
    po.terminate_geterr(fail_on_err=False)
    return po
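# A minimal sketch of the -0/--files-from=- technique introduced in the
# variant above: instead of passing file names as arguments (which hits
# command-line length limits and chokes on odd characters), feed rsync a
# NUL-separated list on stdin. The paths and destination are hypothetical;
# plain subprocess.Popen stands in for gsyncd's Popen wrapper, and the
# bytes handling follows the Python 2 conventions of the surrounding code.
import subprocess


def rsync_files(files, dest):
    argv = ['rsync', '-aR0', '--files-from=-', '.', dest]
    po = subprocess.Popen(argv, stdin=subprocess.PIPE)
    # -0 makes rsync split the stdin list on NUL bytes, so file names
    # containing spaces or newlines are transferred safely
    po.stdin.write('\0'.join(files) + '\0')
    po.stdin.close()
    return po.wait()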
def rsync(self, files, *args):
    """invoke rsync"""
    if not files:
        raise GsyncdError("no files to sync")
    logging.debug("files: " + ", ".join(files))
    argv = gconf.rsync_command.split() + \
        ['-aR0', '--files-from=-', '--super', '--stats',
         '--numeric-ids', '--no-implied-dirs'] + \
        gconf.rsync_options.split() + \
        (boolify(gconf.use_rsync_xattrs) and ['--xattrs'] or []) + \
        ['.'] + list(args)
    po = Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    for f in files:
        po.stdin.write(f)
        po.stdin.write('\0')
    po.stdin.close()
    po.wait()
    po.terminate_geterr(fail_on_err=False)
    return po
def rsync(self, files, *args):
    """invoke rsync"""
    if not files:
        raise GsyncdError("no files to sync")
    logging.debug("files: " + ", ".join(files))
    argv = gconf.rsync_command.split() + \
        ['-avR0', '--inplace', '--files-from=-', '--super', '--stats',
         '--numeric-ids', '--no-implied-dirs'] + \
        gconf.rsync_options.split() + \
        (boolify(gconf.use_rsync_xattrs) and ['--xattrs'] or []) + \
        ['.'] + list(args)
    po = Popen(argv, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    for f in files:
        po.stdin.write(f)
        po.stdin.write('\0')
    po.stdin.close()
    po.wait()
    po.terminate_geterr(fail_on_err=False)
    return po
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    rconf = {'go_daemon': 'should'}

    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val

    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(
            o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>",
                      version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR', default='')
    op.add_option('--gluster-log-file', metavar='LOGF',
                  default=os.devnull, type=str, action='callback',
                  callback=store_abs)
    op.add_option('--gluster-log-level', metavar='LVL')
    op.add_option('--changelog-log-level', metavar='LVL', default="INFO")
    op.add_option('--gluster-params', metavar='PRMS', default='')
    op.add_option('--glusterd-uuid', metavar='UUID', type=str, default='',
                  help=SUPPRESS_HELP)
    op.add_option('--gluster-cli-options', metavar='OPTS',
                  default='--log-file=-')
    op.add_option('--mountbroker', metavar='LABEL')
    op.add_option('-p', '--pid-file', metavar='PIDF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('-l', '--log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--iprefix', metavar='LOGD', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--changelog-log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--log-file-mbr', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-detail-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--georep-session-working-dir', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes', default=False, action='store_true')
    op.add_option('--isolated-slave', default=False, action='store_true')
    op.add_option('--use-rsync-xattrs', default=False, action='store_true')
    op.add_option('--sync-xattrs', default=True, action='store_true')
    op.add_option('--sync-acls', default=True, action='store_true')
    op.add_option('--log-rsync-performance', default=False,
                  action='store_true')
    op.add_option('--max-rsync-retries', type=int, default=10)
    op.add_option('--pause-on-start', default=False, action='store_true')
    op.add_option('-L', '--log-level', metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',
                  default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id', metavar='UUID')
    op.add_option('--slave-id', metavar='ID')
    op.add_option('--session-owner', metavar='ID')
    op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='')
    op.add_option('--local-path', metavar='PATH', help=SUPPRESS_HELP,
                  default='')
    op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
    op.add_option('--ssh-port', metavar='PORT', type=int, default=22)
    op.add_option('--ssh-command-tar', metavar='CMD', default='ssh')
    op.add_option('--rsync-command', metavar='CMD', default='rsync')
    op.add_option('--rsync-options', metavar='OPTS', default='')
    op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress')
    op.add_option('--timeout', metavar='SEC', type=int, default=120)
    op.add_option('--connection-timeout', metavar='SEC', type=int,
                  default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs', metavar='N', type=int, default=3)
    op.add_option('--replica-failover-interval', metavar='N',
                  type=int, default=1)
    op.add_option('--changelog-archive-format', metavar='N',
                  type=str, default="%Y%m")
    op.add_option('--use-meta-volume', default=False, action='store_true')
    op.add_option('--meta-volume-mnt', metavar='N', type=str,
                  default="/var/run/gluster/shared_storage")
    op.add_option('--turns', metavar='N', type=int, default=0,
                  help=SUPPRESS_HELP)
    op.add_option('--allow-network', metavar='IPS', default='')
    op.add_option('--socketdir', metavar='DIR')
    op.add_option('--state-socket-unencoded', metavar='SOCKF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--checkpoint', metavar='LABEL', default='0')

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option('--change-detector', metavar='MODE', type=str,
                  default='xtime')
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option('--change-interval', metavar='SEC', type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option('--working-dir', metavar='DIR', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--use-tarssh', default=False, action='store_true')

    op.add_option('-c', '--config-file', metavar='CONF', type=str,
                  action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--agent', dest='agent', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--resource-local', dest='resource_local', type=str,
                  action='callback', callback=store_local)
    op.add_option('--resource-remote', dest='resource_remote', type=str,
                  action='callback', callback=store_local)
    op.add_option('--feedback-fd', dest='feedback_fd', type=int,
                  help=SUPPRESS_HELP, action='callback',
                  callback=store_local)
    op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP)
    op.add_option('--subvol-num', dest='subvol_num', type=str,
                  help=SUPPRESS_HELP)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,
                  action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",
                  action='callback', callback=store_local_curry('dont'))
    op.add_option('--verify', type=str, dest="verify",
                  action='callback', callback=store_local)
    op.add_option('--slavevoluuid-get', type=str, dest="slavevoluuid_get",
                  action='callback', callback=store_local)
    op.add_option('--create', type=str, dest="create",
                  action='callback', callback=store_local)
    op.add_option('--delete', dest='delete', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--path-list', dest='path_list', action='callback',
                  type=str, callback=store_local)
    op.add_option('--reset-sync-time', default=False, action='store_true')
    op.add_option('--status-get', dest='status_get', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--debug', dest="go_daemon", action='callback',
                  callback=lambda *a: (store_local_curry('dont')(*a),
                                       setattr(a[-1].values,
                                               'log_file', '-'),
                                       setattr(a[-1].values,
                                               'log_level', 'DEBUG'),
                                       setattr(a[-1].values,
                                               'changelog_log_file', '-')))
    op.add_option('--path', type=str, action='append')

    for a in ('check', 'get'):
        op.add_option('--config-' + a, metavar='OPT', type=str,
                      dest='config', action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback',
                  callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m, metavar='OPT VAL', type=str,
                          nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {
                              'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m, metavar='OPT', type=str,
                          dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {
                              'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon_esc'))
    op.add_option('--is-hottier', default=False, action='store_true')

    tunables = [norm(o.get_opt_string()[2:])
                for o in op.option_list
                if (o.callback in (store_abs, 'store_true', None) and
                    o.get_opt_string() not in ('--version', '--help'))]
    remote_tunables = ['listen', 'go_daemon', 'timeout',
                       'session_owner', 'config_file', 'use_rsync_xattrs']
    rq_remote_tunables = {'listen': True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())

    # slave url cleanup, if input comes with vol uuid as follows
    # 'ssh://fvm1::gv2:07dfddca-94bb-4841-a051-a7e582811467'
    temp_args = []
    for arg in args:
        # Split based on ::
        data = arg.split("::")
        if len(data) > 1:
            slavevol_name = data[1].split(":")[0]
            temp_args.append("%s::%s" % (data[0], slavevol_name))
        else:
            temp_args.append(data[0])
    args = temp_args
    args_orig = args[:]

    voluuid_get = rconf.get('slavevoluuid_get')
    if voluuid_get:
        slave_host, slave_vol = voluuid_get.split("::")
        svol_uuid = slave_vol_uuid_get(slave_host, slave_vol)
        print svol_uuid
        return

    r = rconf.get('resource_local')
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get('resource_remote')
    if r:
        if len(args) == 0:
            raise GsyncdError('local resource unspecified')
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get('config')
    if not (len(args) == 2 or
            (len(args) == 1 and rconf.get('listen')) or
            (len(args) <= 2 and confdata) or
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    verify = rconf.get('verify')
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with '
                              'restricted SSH invocation: ' +
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required '
                                  'for restricted SSH invocation' % (k, v))

    confrx = getattr(confdata, 'rx', None)

    def makersc(aa, check=True):
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" %
                              (local.path, remote and remote.path))
        return (ra, local, remote)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = ['\A' + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get('url_print')
        rscs, local, remote = makersc(args_orig, not dc)
        if dc:
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True,
                                                 'escaped': True}}[dc]))
            return
        pa = ([], [], [])
        urlprms = ({}, {'canonical': True},
                   {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of
        # referring to / representing peers to be fillable in config
        # templates
        mods = (lambda x: x,
                lambda x: x[0].upper() + x[1:],
                lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
        else:
            rmap = {local: ('local', 'slave')}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + 'vol'] = x.volume
                if name == 'remote':
                    namedict['remotehost'] = x.remotehost
    if not 'config_file' in rconf:
        rconf['config_file'] = TMPL_CONFIG_FILE
    upgrade_config_file(rconf['config_file'], confdata)
    gcnf = GConffile(rconf['config_file'], canon_peers, confdata,
                     defaults.__dict__, opts.__dict__, namedict)

    checkpoint_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if its just config invocation
        if confdata.opt == 'checkpoint' and confdata.op in ('set', 'del') and \
           not confdata.rx:
            checkpoint_change = True
        if not checkpoint_change:
            return

    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    delete = rconf.get('delete')
    if delete:
        logging.info('geo-replication delete')
        # remove the stime xattr from all the brick paths so that
        # a re-create of a session will start sync all over again
        stime_xattr_name = getattr(gconf, 'master.stime_xattr_name', None)

        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, 'pid_file', None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, 'state_file', None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, 'state_detail_file', None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, 'state_socket_unencoded', None):
            cleanup_paths.append(gconf.state_socket_unencoded)

        cleanup_paths.append(rconf['config_file'][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, 'working_dir', None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError):
                if sys.exc_info()[1].errno == ENOENT:
                    pass
                else:
                    raise GsyncdError('Error while removing working dir: %s' %
                                      gconf.working_dir)

        for path in cleanup_paths:
            # To delete temp files
            for f in glob.glob(path + "*"):
                _unlink(f)

        reset_sync_time = boolify(gconf.reset_sync_time)
        if reset_sync_time and stime_xattr_name:
            path_list = rconf.get('path_list')
            paths = []
            for p in path_list.split('--path='):
                stripped_path = p.strip()
                if stripped_path != "":
                    # set stime to (0,0) to trigger full volume content resync
                    # to slave on session recreation
                    # look at master.py::Xcrawl   hint: zero_zero
                    Xattr.lsetxattr(stripped_path, stime_xattr_name,
                                    struct.pack("!II", 0, 0))
        return

    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [IPNetwork(a) for a in
                                gconf.allow_network.split(',')]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    if checkpoint_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
            if confdata.op == 'set':
                logging.info('checkpoint %s set' % confdata.val)
            elif confdata.op == 'del':
                logging.info('checkpoint info was reset')
        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    create = rconf.get('create')
    if create:
        if getattr(gconf, 'state_file', None):
            set_monitor_status(gconf.state_file, create)
        return

    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')
    be_agent = rconf.get('agent')

    rscs, local, remote = makersc(args)

    status_get = rconf.get('status_get')
    if status_get:
        for brick in gconf.path:
            brick_status = GeorepStatus(gconf.state_file, brick,
                                        getattr(gconf, "pid_file", None))
            checkpoint_time = int(getattr(gconf, "checkpoint", "0"))
            brick_status.print_status(checkpoint_time=checkpoint_time)
        return

    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif be_agent:
        label = 'agent'
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    resource.Popen.init_errhandler()

    if be_agent:
        os.setsid()
        logging.debug('rpc_fd: %s' % repr(gconf.rpc_fd))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    logging.info("syncing: %s" % " -> ".join(r.url for r in rscs))
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    rconf = {'go_daemon': 'should'}

    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val

    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(
            o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>",
                      version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR', default='')
    op.add_option('--gluster-log-file', metavar='LOGF',
                  default=os.devnull, type=str, action='callback',
                  callback=store_abs)
    op.add_option('--gluster-log-level', metavar='LVL')
    op.add_option('--changelog-log-level', metavar='LVL', default="INFO")
    op.add_option('--gluster-params', metavar='PRMS', default='')
    op.add_option('--glusterd-uuid', metavar='UUID', type=str, default='',
                  help=SUPPRESS_HELP)
    op.add_option('--gluster-cli-options', metavar='OPTS',
                  default='--log-file=-')
    op.add_option('--mountbroker', metavar='LABEL')
    op.add_option('-p', '--pid-file', metavar='PIDF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('-l', '--log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--iprefix', metavar='LOGD', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--changelog-log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--log-file-mbr', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-detail-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--georep-session-working-dir', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes', default=False, action='store_true')
    op.add_option('--isolated-slave', default=False, action='store_true')
    op.add_option('--use-rsync-xattrs', default=False, action='store_true')
    op.add_option('--sync-xattrs', default=True, action='store_true')
    op.add_option('--sync-acls', default=True, action='store_true')
    op.add_option('--log-rsync-performance', default=False,
                  action='store_true')
    op.add_option('--max-rsync-retries', type=int, default=10)
    op.add_option('--pause-on-start', default=False, action='store_true')
    op.add_option('-L', '--log-level', metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',
                  default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id', metavar='UUID')
    op.add_option('--slave-id', metavar='ID')
    op.add_option('--session-owner', metavar='ID')
    op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='')
    op.add_option('--local-node', metavar='NODE', help=SUPPRESS_HELP,
                  default='')
    op.add_option('--local-node-id', metavar='NODEID', help=SUPPRESS_HELP,
                  default='')
    op.add_option('--local-path', metavar='PATH', help=SUPPRESS_HELP,
                  default='')
    op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
    op.add_option('--ssh-port', metavar='PORT', type=int, default=22)
    op.add_option('--ssh-command-tar', metavar='CMD', default='ssh')
    op.add_option('--rsync-command', metavar='CMD', default='rsync')
    op.add_option('--rsync-options', metavar='OPTS', default='')
    op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress')
    op.add_option('--timeout', metavar='SEC', type=int, default=120)
    op.add_option('--connection-timeout', metavar='SEC', type=int,
                  default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs', metavar='N', type=int, default=3)
    op.add_option('--replica-failover-interval', metavar='N',
                  type=int, default=1)
    op.add_option('--changelog-archive-format', metavar='N',
                  type=str, default="%Y%m")
    op.add_option('--use-meta-volume', default=False, action='store_true')
    op.add_option('--meta-volume-mnt', metavar='N', type=str,
                  default="/var/run/gluster/shared_storage")
    op.add_option('--turns', metavar='N', type=int, default=0,
                  help=SUPPRESS_HELP)
    op.add_option('--allow-network', metavar='IPS', default='')
    op.add_option('--socketdir', metavar='DIR')
    op.add_option('--state-socket-unencoded', metavar='SOCKF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--checkpoint', metavar='LABEL', default='0')

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option('--change-detector', metavar='MODE', type=str,
                  default='xtime')
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option('--change-interval', metavar='SEC', type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option('--working-dir', metavar='DIR', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--use-tarssh', default=False, action='store_true')

    op.add_option('-c', '--config-file', metavar='CONF', type=str,
                  action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--agent', dest='agent', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--resource-local', dest='resource_local', type=str,
                  action='callback', callback=store_local)
    op.add_option('--resource-remote', dest='resource_remote', type=str,
                  action='callback', callback=store_local)
    op.add_option('--feedback-fd', dest='feedback_fd', type=int,
                  help=SUPPRESS_HELP, action='callback',
                  callback=store_local)
    op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP)
    op.add_option('--subvol-num', dest='subvol_num', type=str,
                  help=SUPPRESS_HELP)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,
                  action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",
                  action='callback', callback=store_local_curry('dont'))
    op.add_option('--verify', type=str, dest="verify",
                  action='callback', callback=store_local)
    op.add_option('--slavevoluuid-get', type=str, dest="slavevoluuid_get",
                  action='callback', callback=store_local)
    op.add_option('--create', type=str, dest="create",
                  action='callback', callback=store_local)
    op.add_option('--delete', dest='delete', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--path-list', dest='path_list', action='callback',
                  type=str, callback=store_local)
    op.add_option('--reset-sync-time', default=False, action='store_true')
    op.add_option('--status-get', dest='status_get', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--debug', dest="go_daemon", action='callback',
                  callback=lambda *a: (store_local_curry('dont')(*a),
                                       setattr(a[-1].values,
                                               'log_file', '-'),
                                       setattr(a[-1].values,
                                               'log_level', 'DEBUG'),
                                       setattr(a[-1].values,
                                               'changelog_log_file', '-')))
    op.add_option('--path', type=str, action='append')

    for a in ('check', 'get'):
        op.add_option('--config-' + a, metavar='OPT', type=str,
                      dest='config', action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback',
                  callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m, metavar='OPT VAL', type=str,
                          nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {
                              'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m, metavar='OPT', type=str,
                          dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {
                              'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon_esc'))
    op.add_option('--is-hottier', default=False, action='store_true')

    tunables = [norm(o.get_opt_string()[2:])
                for o in op.option_list
                if (o.callback in (store_abs, 'store_true', None) and
                    o.get_opt_string() not in ('--version', '--help'))]
    remote_tunables = ['listen', 'go_daemon', 'timeout',
                       'session_owner', 'config_file', 'use_rsync_xattrs']
    rq_remote_tunables = {'listen': True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())

    # slave url cleanup, if input comes with vol uuid as follows
    # 'ssh://fvm1::gv2:07dfddca-94bb-4841-a051-a7e582811467'
    temp_args = []
    for arg in args:
        # Split based on ::
        data = arg.split("::")
        if len(data) > 1:
            slavevol_name = data[1].split(":")[0]
            temp_args.append("%s::%s" % (data[0], slavevol_name))
        else:
            temp_args.append(data[0])
    args = temp_args
    args_orig = args[:]

    voluuid_get = rconf.get('slavevoluuid_get')
    if voluuid_get:
        slave_host, slave_vol = voluuid_get.split("::")
        svol_uuid = slave_vol_uuid_get(slave_host, slave_vol)
        print svol_uuid
        return

    r = rconf.get('resource_local')
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get('resource_remote')
    if r:
        if len(args) == 0:
            raise GsyncdError('local resource unspecified')
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get('config')
    if not (len(args) == 2 or
            (len(args) == 1 and rconf.get('listen')) or
            (len(args) <= 2 and confdata) or
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    verify = rconf.get('verify')
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with '
                              'restricted SSH invocation: ' +
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required '
                                  'for restricted SSH invocation' % (k, v))

    confrx = getattr(confdata, 'rx', None)

    def makersc(aa, check=True):
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" %
                              (local.path, remote and remote.path))
        return (ra, local, remote)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = ['\A' + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get('url_print')
        rscs, local, remote = makersc(args_orig, not dc)
        if dc:
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True,
                                                 'escaped': True}}[dc]))
            return
        pa = ([], [], [])
        urlprms = ({}, {'canonical': True},
                   {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of
        # referring to / representing peers to be fillable in config
        # templates
        mods = (lambda x: x,
                lambda x: x[0].upper() + x[1:],
                lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
        else:
            rmap = {local: ('local', 'slave')}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + 'vol'] = x.volume
                if name == 'remote':
                    namedict['remotehost'] = x.remotehost
    if not 'config_file' in rconf:
        rconf['config_file'] = TMPL_CONFIG_FILE

    # Upgrade Config File only if it is session conf file
    if rconf['config_file'] != TMPL_CONFIG_FILE:
        upgrade_config_file(rconf['config_file'], confdata)

    gcnf = GConffile(rconf['config_file'], canon_peers, confdata,
                     defaults.__dict__, opts.__dict__, namedict)

    conf_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if its just config invocation
        if confdata.op in ('set', 'del') and not confdata.rx:
            conf_change = True

        if not conf_change:
            return

    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    delete = rconf.get('delete')
    if delete:
        logging.info('geo-replication delete')
        # remove the stime xattr from all the brick paths so that
        # a re-create of a session will start sync all over again
        stime_xattr_name = getattr(gconf, 'master.stime_xattr_name', None)

        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, 'pid_file', None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, 'state_file', None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, 'state_detail_file', None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, 'state_socket_unencoded', None):
            cleanup_paths.append(gconf.state_socket_unencoded)

        cleanup_paths.append(rconf['config_file'][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, 'working_dir', None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError):
                if sys.exc_info()[1].errno == ENOENT:
                    pass
                else:
                    raise GsyncdError('Error while removing working dir: %s' %
                                      gconf.working_dir)

        for path in cleanup_paths:
            # To delete temp files
            for f in glob.glob(path + "*"):
                _unlink(f)

        reset_sync_time = boolify(gconf.reset_sync_time)
        if reset_sync_time and stime_xattr_name:
            path_list = rconf.get('path_list')
            paths = []
            for p in path_list.split('--path='):
                stripped_path = p.strip()
                if stripped_path != "":
                    # set stime to (0,0) to trigger full volume content resync
                    # to slave on session recreation
                    # look at master.py::Xcrawl   hint: zero_zero
                    Xattr.lsetxattr(stripped_path, stime_xattr_name,
                                    struct.pack("!II", 0, 0))
        return

    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [IPNetwork(a) for a in
                                gconf.allow_network.split(',')]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    if conf_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
            gconf.log_exit = False

            if confdata.op == 'set':
                if confdata.opt == 'checkpoint':
                    logging.info("Checkpoint Set: %s" % (
                        human_time_utc(confdata.val)))
                else:
                    logging.info("Config Set: %s = %s" % (
                        confdata.opt, confdata.val))
            elif confdata.op == 'del':
                if confdata.opt == 'checkpoint':
                    logging.info("Checkpoint Reset")
                else:
                    logging.info("Config Reset: %s" % confdata.opt)
        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    create = rconf.get('create')
    if create:
        if getattr(gconf, 'state_file', None):
            set_monitor_status(gconf.state_file, create)

        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='monitor')
            gconf.log_exit = False
            logging.info("Monitor Status: %s" % create)
        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # If log dir not present
                pass
            else:
                raise
        return

    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')
    be_agent = rconf.get('agent')

    rscs, local, remote = makersc(args)

    status_get = rconf.get('status_get')
    if status_get:
        master_name, slave_data = get_master_and_slave_data_from_args(args)
        for brick in gconf.path:
            brick_status = GeorepStatus(gconf.state_file,
                                        gconf.local_node,
                                        brick,
                                        gconf.local_node_id,
                                        master_name,
                                        slave_data,
                                        getattr(gconf, "pid_file", None))
            checkpoint_time = int(getattr(gconf, "checkpoint", "0"))
            brick_status.print_status(checkpoint_time=checkpoint_time)
        return

    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif be_agent:
        label = gconf.local_path
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    resource.Popen.init_errhandler()

    if be_agent:
        os.setsid()
        logging.debug('rpc_fd: %s' % repr(gconf.rpc_fd))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
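# A minimal sketch of the optparse-callback pattern main_i() relies on:
# options routed through a callback land in a side dict (rconf) rather than
# the parser's values object, which is why such options must still name a
# dest (see the "duh." comment above). The option names here are
# hypothetical; only the mechanism mirrors main_i().
import optparse

rconf = {}


def store_local(opt, optstr, val, parser):
    rconf[opt.dest] = val


def store_local_curry(val):
    # one-shot closure so flag-style options can store a constant
    return lambda o, oo, vx, p: store_local(o, oo, val, p)


op = optparse.OptionParser()
op.add_option('--mode', dest='mode', type=str,
              action='callback', callback=store_local)
op.add_option('--listen', dest='listen', action='callback',
              callback=store_local_curry(True))
opts, args = op.parse_args(['--mode', 'xtime', '--listen'])
print(rconf)  # -> {'mode': 'xtime', 'listen': True}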
def crawl(self, path='.', xtl=None):
    """crawling...

        Standing around
        All the right people
        Crawling
        Tennis on Tuesday
        The ladder is long
        It is your nature
        You've gotta suntan
        Football on Sunday
        Society boy

    Recursively walk the master side tree and check if updates are
    needed due to xtime differences. One invocation of crawl checks
    children of @path and do a recursive enter only on
    those directory children where there is an update needed.

    Way of updates depend on file type:
    - for symlinks, sync them directly and synchronously
    - for regular children, register jobs for @path (cf. .add_job) to start
      and wait on their rsync
    - for directory children, register a job for @path which waits (.wait)
      on jobs for the given child
    (other kind of filesystem nodes are not considered)

    Those slave side children which do not exist on master are simply
    purged (see Server.purge).

    Behavior is fault tolerant, synchronization is adaptive: if some action
    fails, just go on relentlessly, adding a fail job (see .add_failjob)
    which will prevent the .sendmark on @path, so when the next crawl
    will arrive to @path it will not see it as up-to-date and will try to
    sync it again. While this semantics can be supported by funky design
    principles (http://c2.com/cgi/wiki?LazinessImpatienceHubris), the
    ultimate reason which excludes other possibilities is simply transience:
    we cannot assert that the file systems (master / slave) underneath do
    not change and actions taken upon some condition will not lose their
    context by the time they are performed.
    """
    if path == '.':
        if self.start:
            self.crawls += 1
            logging.debug("... crawl #%d done, took %.6f seconds" %
                          (self.crawls, time.time() - self.start))
        time.sleep(1)
        self.start = time.time()
        should_display_info = self.start - self.lastreport['time'] >= 60
        if should_display_info:
            logging.info("completed %d crawls, %d turns",
                         self.crawls - self.lastreport['crawls'],
                         self.turns - self.lastreport['turns'])
            self.lastreport.update(crawls=self.crawls,
                                   turns=self.turns,
                                   time=self.start)
        volinfo_sys = self.get_sys_volinfo()
        self.volinfo_state, state_change = self.volinfo_state_machine(
            self.volinfo_state, volinfo_sys)
        if self.inter_master:
            self.volinfo = volinfo_sys[self.KFGN]
        else:
            self.volinfo = volinfo_sys[self.KNAT]
        if state_change == self.KFGN or (
                state_change == self.KNAT and not self.inter_master):
            logging.info('new master is %s', self.uuid)
            if self.volinfo:
                logging.info("%s master with volume id %s ..." %
                             (self.inter_master and "intermediate" or
                              "primary", self.uuid))
        if state_change == self.KFGN:
            gconf.configinterface.set('volume_id', self.uuid)
        if self.volinfo:
            if self.volinfo['retval']:
                raise GsyncdError("master is corrupt")
        else:
            if should_display_info or self.crawls == 0:
                if self.inter_master:
                    logging.info("waiting for being synced from %s ..." %
                                 self.volinfo_state[self.KFGN]['uuid'])
                else:
                    logging.info("waiting for volume info ...")
            return
    logging.debug("entering " + path)
    if not xtl:
        xtl = self.xtime(path)
        if isinstance(xtl, int):
            self.add_failjob(path, 'no-local-node')
            return
    xtr0 = self.xtime(path, self.slave)
    if isinstance(xtr0, int):
        if xtr0 != ENOENT:
            self.slave.server.purge(path)
        try:
            self.slave.server.mkdir(path)
        except OSError:
            self.add_failjob(path, 'no-remote-node')
            return
        xtr = URXTIME
    else:
        xtr = xtr0
        if xtr > xtl:
            raise GsyncdError("timestamp corruption for " + path)
        if xtl == xtr:
            if path == '.' and self.change_seen:
                self.turns += 1
                self.change_seen = False
                if self.total_turns:
                    logging.info("finished turn #%s/%s" %
                                 (self.turns, self.total_turns))
                    if self.turns == self.total_turns:
                        logging.info("reached turn limit")
                        self.terminate = True
            return
    if path == '.':
        self.change_seen = True
    try:
        dem = self.master.server.entries(path)
    except OSError:
        self.add_failjob(path, 'local-entries-fail')
        return
    random.shuffle(dem)
    try:
        des = self.slave.server.entries(path)
    except OSError:
        self.slave.server.purge(path)
        try:
            self.slave.server.mkdir(path)
            des = self.slave.server.entries(path)
        except OSError:
            self.add_failjob(path, 'remote-entries-fail')
            return
    dd = set(des) - set(dem)
    if dd and not boolify(gconf.ignore_deletes):
        self.slave.server.purge(path, dd)
    chld = []
    for e in dem:
        e = os.path.join(path, e)
        xte = self.xtime(e)
        if isinstance(xte, int):
            logging.warn("irregular xtime for %s: %s" %
                         (e, errno.errorcode[xte]))
        elif xte > xtr:
            chld.append((e, xte))

    def indulgently(e, fnc, blame=None):
        if not blame:
            blame = path
        try:
            return fnc(e)
        except (IOError, OSError):
            ex = sys.exc_info()[1]
            if ex.errno == ENOENT:
                logging.warn("salvaged ENOENT for " + e)
                self.add_failjob(blame, 'by-indulgently')
                return False
            else:
                raise
    for e, xte in chld:
        st = indulgently(e, lambda e: os.lstat(e))
        if st == False:
            continue
        mo = st.st_mode
        adct = {'own': (st.st_uid, st.st_gid)}
        if stat.S_ISLNK(mo):
            if indulgently(e, lambda e: self.slave.server.symlink(
                    os.readlink(e), e)) == False:
                continue
            self.sendmark(e, xte, adct)
        elif stat.S_ISREG(mo):
            logging.debug("syncing %s ..." % e)
            pb = self.syncer.add(e)

            def regjob(e, xte, pb):
                if pb.wait():
                    logging.debug("synced " + e)
                    self.sendmark(e, xte)
                    return True
                else:
                    logging.warn("failed to sync " + e)
            self.add_job(path, 'reg', regjob, e, xte, pb)
        elif stat.S_ISDIR(mo):
            adct['mode'] = mo
            if indulgently(e, lambda e: (self.add_job(path, 'cwait',
                                                      self.wait,
                                                      e, xte, adct),
                                         self.crawl(e, xte),
                                         True)[-1], blame=e) == False:
                continue
        else:
            # ignore fifos, sockets and special files
            pass
    if path == '.':
        self.wait(path, xtl)
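# A minimal, self-contained sketch of the xtime idea driving crawl() above:
# each directory carries a "last change" mark, and the walk descends only
# where the master-side mark is newer than the slave-side one. The dicts
# stand in for the real per-filesystem xtime xattrs and are hypothetical.
master_xtime = {'.': 5, './a': 5, './b': 2}
slave_xtime = {'.': 2, './a': 1, './b': 2}
children = {'.': ['./a', './b'], './a': [], './b': []}


def crawl(path='.'):
    if master_xtime[path] == slave_xtime.get(path):
        return  # subtree up-to-date, skip it entirely
    for child in children[path]:
        crawl(child)
    # only after every child synced may the slave-side mark be advanced
    # (cf. sendmark); a failed child would leave the mark behind, so the
    # next crawl revisits this directory
    slave_xtime[path] = master_xtime[path]


crawl()
print(slave_xtime)  # {'.': 5, './a': 5, './b': 2}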