def distribute(master, slave):
    """Build the per-brick worker assignments for a geo-rep session.

    Resolves master and slave volume info (from gconf or via gluster CLI /
    SSH depending on ``rconf.args.use_gconf_volinfo``), persists the
    session's stime xattr prefix, and pairs each locally-hosted master
    brick with a slave node.

    Returns:
        (workerspex, suuid, slave_vol, slave_host, master, slavenodes)
        where workerspex is a list of
        (master_brick, (slave_user@host, slave_uuid), subvol_num, is_hot)
        tuples for bricks hosted on this node.
    """
    if rconf.args.use_gconf_volinfo:
        mvol = VolinfoFromGconf(master.volume, master=True)
    else:
        mvol = Volinfo(master.volume, master.host, master=True)
    logging.debug('master bricks: ' + repr(mvol.bricks))

    # SSH command prelude used to query the slave volume remotely.
    prelude = [gconf.get("ssh-command")] + \
        gconf.get("ssh-options").split() + \
        ["-p", str(gconf.get("ssh-port"))] + \
        [slave.remote_addr]

    logging.debug('slave SSH gateway: ' + slave.remote_addr)

    if rconf.args.use_gconf_volinfo:
        svol = VolinfoFromGconf(slave.volume, master=False)
    else:
        svol = Volinfo(slave.volume, "localhost", prelude, master=False)

    sbricks = svol.bricks
    suuid = svol.uuid
    slave_host = slave.remote_addr.split('@')[-1]
    slave_vol = slave.volume

    # Save this xattr prefix for the session delete command.  The prefix is
    # always a non-empty string, so a falsy old value already fails the
    # inequality check on its own.
    old_stime_xattr_prefix = gconf.get("stime-xattr-prefix", None)
    new_stime_xattr_prefix = "trusted.glusterfs." + mvol.uuid + "." + \
        svol.uuid
    if old_stime_xattr_prefix != new_stime_xattr_prefix:
        gconf.setconfig("stime-xattr-prefix", new_stime_xattr_prefix)

    logging.debug('slave bricks: ' + repr(sbricks))

    # One (host, uuid) entry per distinct slave node; each master brick is
    # mapped onto a slave node round-robin by brick index.
    slavenodes = set((b['host'], b["uuid"]) for b in sbricks)
    rap = SSH.parse_ssh_address(slave)
    slaves = [(rap['user'] + '@' + h[0], h[1]) for h in slavenodes]

    workerspex = []
    for idx, brick in enumerate(mvol.bricks):
        # Only spawn workers for bricks hosted on this node.
        if rconf.args.local_node_id == brick['uuid']:
            is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
            workerspex.append((brick,
                               slaves[idx % len(slaves)],
                               get_subvol_num(idx, mvol, is_hot),
                               is_hot))
    logging.debug('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master, slavenodes
def distribute(*resources):
    """Build per-brick worker assignments for a geo-rep session (legacy
    resource-object variant).

    Accepts ``(master, slave)`` resources, where the slave may be a FILE
    resource, a GLUSTER resource, or either wrapped in an SSH resource.
    Computes the slave brick list and a slave URL per slave node, then
    pairs each locally-hosted master brick with a slave URL round-robin.

    NOTE(review): another ``distribute(master, slave)`` definition exists
    in this file; if both live in one module the later one shadows the
    earlier — confirm which is intended to be active.

    Returns:
        (workerspex, suuid, slave_vol, slave_host, master)

    Raises:
        GsyncdError: if the slave resource is neither FILE nor GLUSTER.
    """
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))
    prelude = []
    si = slave
    slave_host = None
    slave_vol = None

    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug('slave SSH gateway: ' + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {'host': 'localhost', 'dir': si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, slave.remote_addr.split('@')[-1])
        sbricks = svol.bricks
        suuid = svol.uuid
        slave_host = slave.remote_addr.split('@')[-1]
        slave_vol = si.volume

        # save this xattr for the session delete command
        old_stime_xattr_name = getattr(gconf, "master.stime_xattr_name",
                                       None)
        new_stime_xattr_name = "trusted.glusterfs." + mvol.uuid + "." + \
            svol.uuid + ".stime"
        if not old_stime_xattr_name or \
           old_stime_xattr_name != new_stime_xattr_name:
            gconf.configinterface.set("master.stime_xattr_name",
                                      new_stime_xattr_name)
    else:
        raise GsyncdError("unknown slave type " + slave.url)
    logging.debug('slave bricks: ' + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b['host'] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave)
            slaves = ['ssh://' + rap['user'] + '@' + h + ':' + si.url
                      for h in slavenodes]
        else:
            slavevols = [h + ':' + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                # BUGFIX: this branch previously read ``rap.remote_addr``,
                # but ``rap`` is only bound in the non-isolated branch above
                # (and is a dict there, not an object), so this always raised
                # NameError.  With an isolated slave every connection goes
                # through the single SSH gateway, i.e. ``slave.remote_addr``.
                slaves = ['ssh://' + slave.remote_addr + ':' + v
                          for v in slavevols]
            else:
                slaves = slavevols
    workerspex = []
    for idx, brick in enumerate(mvol.bricks):
        # Only spawn workers for bricks hosted on this node.
        if is_host_local(brick['uuid']):
            is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
            workerspex.append((brick, slaves[idx % len(slaves)],
                               get_subvol_num(idx, mvol, is_hot),
                               is_hot))
    logging.debug('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master