def start_fd_client(self, i, o, **opts):
    """Set up the RePCe client over fd pair (i, o) and handshake.

    Queries the remote server's version dict and compares its major
    version numbers against the locally expected ones.

    :param i: input file descriptor for the RePCe channel
    :param o: output file descriptor for the RePCe channel
    :raises RuntimeError: on RePCe major version mismatch
    """
    self.server = RepceClient(i, o)
    rv = self.server.__version__()
    exrv = {'proto': repce.repce_version, 'object': Server.version()}
    # Compare versions numerically (major part only, via int()).
    # Built without an index loop: the original `for i in range(2)`
    # clobbered the fd parameter `i`.
    remote = dict((k, int(v)) for k, v in rv.items())
    local = dict((k, int(v)) for k, v in exrv.items())
    if remote != local:
        raise RuntimeError(
            "RePCe major version mismatch: local %s, remote %s" %
            (exrv, rv))
def start_fd_client(self, i, o, **opts):
    """set up RePCe client, handshake with server

    It's cut out as a separate method to let
    subclasses hook into client startup

    :param i: input file descriptor for the RePCe channel
    :param o: output file descriptor for the RePCe channel
    :raises GsyncdError: on RePCe major version mismatch
    """
    self.server = RepceClient(i, o)
    rv = self.server.__version__()
    exrv = {'proto': repce.repce_version, 'object': Server.version()}
    # Compare versions numerically (major part only, via int()).
    # Built without an index loop: the original `for i in range(2)`
    # clobbered the fd parameter `i`.
    remote = dict((k, int(v)) for k, v in rv.items())
    local = dict((k, int(v)) for k, v in exrv.items())
    if remote != local:
        raise GsyncdError(
            "RePCe major version mismatch: local %s, remote %s" %
            (exrv, rv))
def service_loop(self, *args):
    """enter service loop

    - if slave given, instantiate GMaster and
      pass control to that instance, which implements
      master behavior
    - else do that's what's inherited
    """
    if args:
        slave = args[0]
        if gconf.local_path:
            # Local brick: serve filesystem operations directly from
            # the brick path instead of going through the remote.
            class brickserver(FILE.FILEServer):
                local_path = gconf.local_path
                aggregated = self.server

                @classmethod
                def entries(cls, path):
                    e = super(brickserver, cls).entries(path)
                    # on the brick don't mess with /.glusterfs
                    if path == '.':
                        try:
                            e.remove('.glusterfs')
                        except ValueError:
                            pass
                    return e

                @classmethod
                def lstat(cls, e):
                    """ path based backend stat """
                    return super(brickserver, cls).lstat(e)

                @classmethod
                def gfid(cls, e):
                    """ path based backend gfid fetch """
                    return super(brickserver, cls).gfid(e)

                @classmethod
                def linkto_check(cls, e):
                    return super(brickserver, cls).linkto_check(e)
            if gconf.slave_id:
                # define {,set_}xtime in slave, thus preempting
                # the call to remote, so that it takes data from
                # the local brick
                slave.server.xtime = types.MethodType(
                    lambda _self, path, uuid: (
                        brickserver.xtime(path,
                                          uuid + '.' + gconf.slave_id)
                    ),
                    slave.server)
                slave.server.stime = types.MethodType(
                    lambda _self, path, uuid: (
                        brickserver.stime(path,
                                          uuid + '.' + gconf.slave_id)
                    ),
                    slave.server)
                slave.server.set_stime = types.MethodType(
                    lambda _self, path, uuid, mark: (
                        brickserver.set_stime(path,
                                              uuid + '.' + gconf.slave_id,
                                              mark)
                    ),
                    slave.server)
            # Three gmaster instances sharing the brick-backed server.
            (g1, g2, g3) = self.gmaster_instantiate_tuple(slave)
            g1.master.server = brickserver
            g2.master.server = brickserver
            g3.master.server = brickserver
        else:
            # NOTE(review): `gmaster` here looks like the module, not a
            # gmaster instance — `gmaster.master.server` would fail at
            # runtime if this branch is ever taken; confirm intent.
            (g1, g2, g3) = self.gmaster_instantiate_tuple(slave)
            g1.master.server.aggregated = gmaster.master.server
            g2.master.server.aggregated = gmaster.master.server
            g3.master.server.aggregated = gmaster.master.server
        # bad bad bad: bad way to do things like this
        # need to make this elegant
        # register the crawlers and start crawling
        # g1 ==> Xsync, g2 ==> config.change_detector(changelog by default)
        # g3 ==> changelog History
        # rpc_fd carries four fds: our in/out plus the agent's, which we
        # must close on this side to avoid leaking them.
        (inf, ouf, ra, wa) = gconf.rpc_fd.split(',')
        os.close(int(ra))
        os.close(int(wa))
        changelog_agent = RepceClient(int(inf), int(ouf))
        rv = changelog_agent.version()
        if int(rv) != CHANGELOG_AGENT_CLIENT_VERSION:
            raise GsyncdError(
                "RePCe major version mismatch(changelog agent): "
                "local %s, remote %s" %
                (CHANGELOG_AGENT_CLIENT_VERSION, rv))
        g1.register()
        try:
            (workdir, logfile) = g2.setup_working_dir()
            # register with the changelog library
            # 9 == log level (DEBUG)
            # 5 == connection retries
            changelog_agent.register(gconf.local_path,
                                     workdir, logfile, 9, 5)
            g2.register(changelog_agent)
            g3.register(changelog_agent)
        except ChangelogException as e:
            # best-effort: failure here only disables changelog-based
            # crawling; the xsync fallback below still runs
            logging.debug("Changelog register failed: %s - %s" %
                          (e.errno, e.strerror))
        # oneshot: Try to use changelog history api, if not
        # available switch to FS crawl
        # Note: if config.change_detector is xsync then
        # it will not use changelog history api
        try:
            g3.crawlwrap(oneshot=True)
        except (ChangelogException, NoPurgeTimeAvailable,
                PartialHistoryAvailable) as e:
            if isinstance(e, ChangelogException):
                logging.debug('Changelog history crawl failed, failback '
                              'to xsync: %s - %s' % (e.errno, e.strerror))
            elif isinstance(e, NoPurgeTimeAvailable):
                logging.debug('Using xsync crawl since no purge time '
                              'available')
            elif isinstance(e, PartialHistoryAvailable):
                logging.debug('Using xsync crawl after consuming history '
                              'till %s' % str(e))
            # fall back to a one-shot filesystem (xsync) crawl
            g1.crawlwrap(oneshot=True)

        # crawl loop: Try changelog crawl, if failed
        # switch to FS crawl
        try:
            g2.crawlwrap()
        except ChangelogException as e:
            logging.debug('Changelog crawl failed, failback to xsync: '
                          '%s - %s' % (e.errno, e.strerror))
            g1.crawlwrap()
    else:
        # no slave argument: defer to the inherited service loop
        sup(self, *args)