Example #1
def subcmd_worker(args):
    import os
    import fcntl
    import logging

    from resource import GLUSTER, SSH, Popen

    Popen.init_errhandler()
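    # Don't let the feedback fd leak into processes the worker execs.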
    fcntl.fcntl(args.feedback_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    local = GLUSTER("localhost", args.master)
    slavehost, slavevol = args.slave.split("::")
    remote = SSH(slavehost, slavevol)
    remote.connect_remote()
    local.connect()
    logging.info("Worker spawn successful. Acknowledging back to monitor")
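    # Closing the feedback fd is the acknowledgment the monitor waits for.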
    os.close(args.feedback_fd)
    local.service_loop(remote)
Example #2
def distribute(*resources):
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))
    prelude = []
    si = slave
    slave_host = None
    slave_vol = None

    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug('slave SSH gateway: ' + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {'host': 'localhost', 'dir': si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, slave.remote_addr.split('@')[-1])
        sbricks = svol.bricks
        suuid = svol.uuid
        slave_host = slave.remote_addr.split('@')[-1]
        slave_vol = si.volume

        # save this xattr for the session delete command
        old_stime_xattr_name = getattr(gconf, "master.stime_xattr_name", None)
        new_stime_xattr_name = "trusted.glusterfs." + mvol.uuid + "." + \
            svol.uuid + ".stime"
        if not old_stime_xattr_name or \
           old_stime_xattr_name != new_stime_xattr_name:
            gconf.configinterface.set("master.stime_xattr_name",
                                      new_stime_xattr_name)
    else:
        raise GsyncdError("unknown slave type " + slave.url)
    logging.debug('slave bricks: ' + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b['host'] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave)
            slaves = ['ssh://' + rap['user'] + '@' + h + ':' + si.url
                      for h in slavenodes]
        else:
            slavevols = [h + ':' + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                # Isolated slave: route through the SSH gateway address.
                slaves = ['ssh://' + slave.remote_addr + ':' + v
                          for v in slavevols]
            else:
                slaves = slavevols

    workerspex = []
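    # One spec per master brick hosted on this node; slave endpoints
    # are assigned round-robin.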
    for idx, brick in enumerate(mvol.bricks):
        if is_host_local(brick['uuid']):
            is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
            workerspex.append((brick,
                               slaves[idx % len(slaves)],
                               get_subvol_num(idx, mvol, is_hot),
                               is_hot))
    logging.debug('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master
Example #3
def subcmd_monitor(args):
    import monitor
    from resource import GLUSTER, SSH, Popen
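    # Stay in the foreground when args.debug is set; otherwise daemonize.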
    go_daemon = not args.debug

    monitor.startup(go_daemon)
    Popen.init_errhandler()
    local = GLUSTER("localhost", args.master)
    slavehost, slavevol = args.slave.split("::")
    remote = SSH(slavehost, slavevol)
    return monitor.monitor(local, remote)
Example #4
def distribute(*resources):
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))
    prelude = []
    si = slave
    slave_host = None
    slave_vol = None

    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug('slave SSH gateway: ' + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {'host': 'localhost', 'dir': si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, slave.remote_addr.split('@')[-1])
        sbricks = svol.bricks
        suuid = svol.uuid
        slave_host = slave.remote_addr.split('@')[-1]
        slave_vol = si.volume
    else:
        raise GsyncdError("unknown slave type " + slave.url)
    logging.info('slave bricks: ' + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b['host'] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave)
            slaves = [
                'ssh://' + rap['user'] + '@' + h + ':' + si.url
                for h in slavenodes
            ]
        else:
            slavevols = [h + ':' + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                slaves = [
                    'ssh://' + slave.remote_addr + ':' + v for v in slavevols
                ]
            else:
                slaves = slavevols

    # One (dir, slave, subvolume number, is_cold) spec per local brick.
    workerspex = [(brick['dir'], slaves[idx % len(slaves)],
                   get_subvol_num(idx, mvol.replica_count,
                                  mvol.disperse_count),
                   mvol.is_cold(":".join([brick['host'], brick['dir']])))
                  for idx, brick in enumerate(mvol.bricks)
                  if is_host_local(brick['host'])]
    logging.info('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master
Example #5
def distribute(master, slave):
    if rconf.args.use_gconf_volinfo:
        mvol = VolinfoFromGconf(master.volume, master=True)
    else:
        mvol = Volinfo(master.volume, master.host, master=True)
    logging.debug('master bricks: ' + repr(mvol.bricks))

    prelude = [gconf.get("ssh-command")] + \
        gconf.get("ssh-options").split() + \
        ["-p", str(gconf.get("ssh-port"))] + \
        [slave.remote_addr]

    logging.debug('slave SSH gateway: ' + slave.remote_addr)

    if rconf.args.use_gconf_volinfo:
        svol = VolinfoFromGconf(slave.volume, master=False)
    else:
        svol = Volinfo(slave.volume, "localhost", prelude, master=False)

    sbricks = svol.bricks
    suuid = svol.uuid
    slave_host = slave.remote_addr.split('@')[-1]
    slave_vol = slave.volume

    # save this xattr for the session delete command
    old_stime_xattr_prefix = gconf.get("stime-xattr-prefix", None)
    new_stime_xattr_prefix = "trusted.glusterfs." + mvol.uuid + "." + \
                             svol.uuid
    if not old_stime_xattr_prefix or \
       old_stime_xattr_prefix != new_stime_xattr_prefix:
        gconf.setconfig("stime-xattr-prefix", new_stime_xattr_prefix)

    logging.debug('slave bricks: ' + repr(sbricks))

    slavenodes = set((b['host'], b["uuid"]) for b in sbricks)
    rap = SSH.parse_ssh_address(slave)
    slaves = [(rap['user'] + '@' + h[0], h[1]) for h in slavenodes]

    workerspex = []
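    # Only bricks whose uuid matches this node's id get a worker.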
    for idx, brick in enumerate(mvol.bricks):
        if rconf.args.local_node_id == brick['uuid']:
            is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
            workerspex.append((brick,
                               slaves[idx % len(slaves)],
                               get_subvol_num(idx, mvol, is_hot),
                               is_hot))
    logging.debug('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master, slavenodes
Example #6
def distribute(master, slave):
    if rconf.args.use_gconf_volinfo:
        mvol = VolinfoFromGconf(master.volume, master=True)
    else:
        mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))

    prelude = [gconf.get("ssh-command")] + \
        gconf.get("ssh-options").split() + \
        ["-p", str(gconf.get("ssh-port"))] + \
        [slave.remote_addr]

    logging.debug('slave SSH gateway: ' + slave.remote_addr)

    if rconf.args.use_gconf_volinfo:
        svol = VolinfoFromGconf(slave.volume, master=False)
    else:
        svol = Volinfo(slave.volume, "localhost", prelude)

    sbricks = svol.bricks
    suuid = svol.uuid
    slave_host = slave.remote_addr.split('@')[-1]
    slave_vol = slave.volume

    # save this xattr for the session delete command
    old_stime_xattr_prefix = gconf.get("stime-xattr-prefix", None)
    new_stime_xattr_prefix = "trusted.glusterfs." + mvol.uuid + "." + \
                             svol.uuid
    if not old_stime_xattr_prefix or \
       old_stime_xattr_prefix != new_stime_xattr_prefix:
        gconf.setconfig("stime-xattr-prefix", new_stime_xattr_prefix)

    logging.debug('slave bricks: ' + repr(sbricks))

    slavenodes = set((b['host'], b["uuid"]) for b in sbricks)
    rap = SSH.parse_ssh_address(slave)
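    # Pair each slave host (keeping the SSH user prefix) with its node uuid.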
    slaves = [(rap['user'] + '@' + h[0], h[1]) for h in slavenodes]

    workerspex = []
    for idx, brick in enumerate(mvol.bricks):
        if rconf.args.local_node_id == brick['uuid']:
            is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
            workerspex.append((brick,
                               slaves[idx % len(slaves)],
                               get_subvol_num(idx, mvol, is_hot),
                               is_hot))
    logging.debug('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master, slavenodes
Example #7
def distribute(*resources):
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))
    prelude = []
    si = slave
    slave_host = None
    slave_vol = None

    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug('slave SSH gateway: ' + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {'host': 'localhost', 'dir': si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, slave.remote_addr.split('@')[-1])
        sbricks = svol.bricks
        suuid = svol.uuid
        slave_host = slave.remote_addr.split('@')[-1]
        slave_vol = si.volume
    else:
        raise GsyncdError("unknown slave type " + slave.url)
    logging.info('slave bricks: ' + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b['host'] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave)
            slaves = ['ssh://' + rap['user'] + '@' + h + ':' + si.url
                      for h in slavenodes]
        else:
            slavevols = [h + ':' + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                slaves = ['ssh://' + slave.remote_addr + ':' + v
                          for v in slavevols]
            else:
                slaves = slavevols

    workerspex = [(brick['dir'], slaves[idx % len(slaves)],
                   get_subvol_num(idx, mvol.replica_count,
                                  mvol.disperse_count),
                   mvol.is_cold(":".join([brick['host'], brick['dir']])))
                  for idx, brick in enumerate(mvol.bricks)
                  if is_host_local(brick['host'])]
    logging.info('worker specs: ' + repr(workerspex))
    return workerspex, suuid, slave_vol, slave_host, master
Example #8
def distribute(*resources):
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug("master bricks: " + repr(mvol.bricks))
    prelude = []
    si = slave
    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug("slave SSH gateway: " + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {"host": "localhost", "dir": si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, si.host)
        sbricks = svol.bricks
        suuid = svol.uuid
    else:
        raise GsyncdError("unkown slave type " + slave.url)
    logging.info("slave bricks: " + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b["host"] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave)
            slaves = ["ssh://" + rap["user"] + "@" + h + ":" + si.url for h in slavenodes]
        else:
            slavevols = [h + ":" + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                slaves = ["ssh://" + rap.remote_addr + ":" + v for v in slavevols]
            else:
                slaves = slavevols

    workerspex = [
        (brick["dir"], slaves[idx % len(slaves)])
        for idx, brick in enumerate(mvol.bricks)
        if is_host_local(brick["host"])
    ]
    logging.info("worker specs: " + repr(workerspex))
    return workerspex, suuid
Example #9
def distribute(*resources):
    master, slave = resources
    mvol = Volinfo(master.volume, master.host)
    logging.debug('master bricks: ' + repr(mvol.bricks))
    locmbricks = [b['dir'] for b in mvol.bricks if is_host_local(b['host'])]
    prelude = []
    si = slave
    if isinstance(slave, SSH):
        prelude = gconf.ssh_command.split() + [slave.remote_addr]
        si = slave.inner_rsc
        logging.debug('slave SSH gateway: ' + slave.remote_addr)
    if isinstance(si, FILE):
        sbricks = {'host': 'localhost', 'dir': si.path}
        suuid = uuid.uuid5(uuid.NAMESPACE_URL, slave.get_url(canonical=True))
    elif isinstance(si, GLUSTER):
        svol = Volinfo(si.volume, si.host, prelude)
        sbricks = svol.bricks
        suuid = svol.uuid
    else:
        raise GsyncdError("unkown slave type " + slave.url)
    logging.info('slave bricks: ' + repr(sbricks))
    if isinstance(si, FILE):
        slaves = [slave.url]
    else:
        slavenodes = set(b['host'] for b in sbricks)
        if isinstance(slave, SSH) and not gconf.isolated_slave:
            rap = SSH.parse_ssh_address(slave.remote_addr)
            slaves = ['ssh://' + rap['user'] + '@' + h + ':' + si.url
                      for h in slavenodes]
        else:
            slavevols = [h + ':' + si.volume for h in slavenodes]
            if isinstance(slave, SSH):
                slaves = ['ssh://' + slave.remote_addr + ':' + v
                          for v in slavevols]
            else:
                slaves = slavevols
    locmbricks.sort()
    slaves.sort()
    workerspex = []
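    # Deterministic assignment: sorted local bricks onto sorted slaves,
    # round-robin.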
    for i, brick in enumerate(locmbricks):
        workerspex.append((brick, slaves[i % len(slaves)]))
    logging.info('worker specs: ' + repr(workerspex))
    return workerspex, suuid