Example #1
def test_read_lockspace_4k_invalid_sector_size(sanlock_daemon, user_4k_path):
    sanlock.write_lockspace(
        "name", user_4k_path, iotimeout=1, sector=SECTOR_SIZE_4K)

    with pytest.raises(sanlock.SanlockException) as e:
        sanlock.read_lockspace(user_4k_path, sector=SECTOR_SIZE_512)
    assert e.value.errno == errno.EINVAL
Example #2
def test_write_lockspace_4k(user_4k_path, sanlock_daemon, align):

    # Poison the lockspace area, ensuring that previous tests will not break
    # this test, and that sanlock does not write beyond the lockspace area.
    with io.open(user_4k_path, "rb+") as f:
        f.write(align * b"x")
    util.write_guard(user_4k_path, align)

    sanlock.write_lockspace(
        "name", user_4k_path, iotimeout=1, align=align, sector=SECTOR_SIZE_4K)

    ls = sanlock.read_lockspace(
        user_4k_path, align=align, sector=SECTOR_SIZE_4K)

    assert ls == {"iotimeout": 1, "lockspace": b"name"}

    acquired = sanlock.inq_lockspace("name", 1, user_4k_path, wait=False)
    assert acquired is False

    # Verify that lockspace was written.
    with io.open(user_4k_path, "rb") as f:
        magic, = struct.unpack("< I", f.read(4))
        assert magic == constants.DELTA_DISK_MAGIC

    # Check that sanlock did not write beyond the lockspace area.
    util.check_guard(user_4k_path, align)
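The util.write_guard and util.check_guard helpers used here are not shown on this page. Conceptually they place a known byte pattern just past the area sanlock is allowed to touch and later verify it is still intact; a minimal sketch of that idea (the guard pattern and size are assumptions, the real helpers in the sanlock test suite may differ):

import io

GUARD = b"X" * 4096  # assumed guard pattern of one 4k block


def write_guard(path, offset):
    # Write the guard pattern immediately after the area under test.
    with io.open(path, "rb+") as f:
        f.seek(offset)
        f.write(GUARD)


def check_guard(path, offset):
    # Fail if sanlock overwrote anything past the area under test.
    with io.open(path, "rb") as f:
        f.seek(offset)
        assert f.read(len(GUARD)) == GUARD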
Example #3
def test_add_rem_lockspace(tmpdir, sanlock_daemon, size, offset):
    path = str(tmpdir.join("ls_name"))
    util.create_file(path, size)

    sanlock.write_lockspace("ls_name", path, offset=offset, iotimeout=1)

    # Since the lockspace is not acquired, we expect to get False.
    acquired = sanlock.inq_lockspace("ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is False

    sanlock.add_lockspace("ls_name", 1, path, offset=offset, iotimeout=1)

    # Once the lockspace is acquired, we expect to get True.
    acquired = sanlock.inq_lockspace("ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is True

    sanlock.rem_lockspace("ls_name", 1, path, offset=offset)

    # Once the lockspace is released, we expect to get False.
    acquired = sanlock.inq_lockspace("ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is False
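The util.create_file helper used by most of these tests is also not shown; a minimal sketch under the assumption that it simply preallocates a sparse file of the requested size (the real helper may additionally reserve and fill the guard area checked by util.check_guard):

import io


def create_file(path, size):
    # Create a sparse file of the requested size by truncating an empty file.
    with io.open(path, "wb") as f:
        f.truncate(size)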
Example #4
def test_write_lockspace_invalid_align_sector(tmpdir, sanlock_daemon, align,
                                              sector):
    path = str(tmpdir.join("lockspace"))
    util.create_file(path, LOCKSPACE_SIZE)

    with pytest.raises(ValueError):
        sanlock.write_lockspace(b"ls_name", path, align=align, sector=sector)
Example #5
def test_add_rem_lockspace_async(tmpdir, sanlock_daemon):
    path = str(tmpdir.join("ls_name"))
    util.create_file(path, MiB)

    sanlock.write_lockspace("ls_name", path, iotimeout=1)
    acquired = sanlock.inq_lockspace("ls_name", 1, path, wait=False)
    assert acquired is False

    # This will take 3 seconds.
    sanlock.add_lockspace("ls_name", 1, path, iotimeout=1, **{"async": True})

    # While the lockspace is being acquired, we expect to get None.
    time.sleep(1)
    acquired = sanlock.inq_lockspace("ls_name", 1, path, wait=False)
    assert acquired is None

    # Once the lockspace is acquired, we expect to get True.
    acquired = sanlock.inq_lockspace("ls_name", 1, path, wait=True)
    assert acquired is True

    # This will take about 3 seconds.
    sanlock.rem_lockspace("ls_name", 1, path, **{"async": True})

    # Wait until the lockspace changes state from True to None.
    while sanlock.inq_lockspace("ls_name", 1, path, wait=False):
        time.sleep(1)

    # While the lockspace is being released, we expect to get None.
    acquired = sanlock.inq_lockspace("ls_name", 1, path, wait=False)
    assert acquired is None

    # Once the lockspace was released, we expect to get False.
    acquired = sanlock.inq_lockspace("ls_name", 1, path, wait=True)
    assert acquired is False
Example #6
def test_write_lockspace(
        tmpdir, sanlock_daemon, filename, encoding, size, offset):
    path = util.generate_path(tmpdir, filename, encoding)
    util.create_file(path, size)

    # Test read and write with default alignment and sector size values.
    sanlock.write_lockspace(b"ls_name", path, offset=offset, iotimeout=1)

    ls = sanlock.read_lockspace(path, offset=offset)
    assert ls == {"iotimeout": 1, "lockspace": b"ls_name"}

    # Test read and write with explicit alignment and sector size values.
    sanlock.write_lockspace(
        b"ls_name", path, offset=offset, iotimeout=1, align=ALIGNMENT_1M,
        sector=SECTOR_SIZE_512)

    ls = sanlock.read_lockspace(
        path, offset=offset, align=ALIGNMENT_1M, sector=SECTOR_SIZE_512)
    assert ls == {"iotimeout": 1, "lockspace": b"ls_name"}

    acquired = sanlock.inq_lockspace(
        b"ls_name", 1, path, offset=offset, wait=False)
    assert acquired is False

    magic = util.read_magic(path, offset)
    assert magic == constants.DELTA_DISK_MAGIC
    # TODO: check more stuff here...

    util.check_guard(path, size)
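The util.read_magic helper used here and in Example #7 is not shown on this page; a plausible sketch based on the equivalent inline check in Examples #9, #17, and #19 (the real helper in the sanlock test suite may differ):

import io
import struct


def read_magic(path, offset=0):
    # Read the little-endian 4-byte magic number at the start of the
    # lockspace or resource area.
    with io.open(path, "rb") as f:
        f.seek(offset)
        magic, = struct.unpack("< I", f.read(4))
    return magic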
Example #7
def test_write_lockspace_4k(user_4k_path, sanlock_daemon, align):

    # Poison the lockspace area, ensuring that previous tests will not break
    # this test, and that sanlock does not write beyond the lockspace area.
    with io.open(user_4k_path, "rb+") as f:
        f.write(align * b"x")
    util.write_guard(user_4k_path, align)

    sanlock.write_lockspace(b"ls_name",
                            user_4k_path,
                            iotimeout=1,
                            align=align,
                            sector=SECTOR_SIZE_4K)

    ls = sanlock.read_lockspace(user_4k_path,
                                align=align,
                                sector=SECTOR_SIZE_4K)

    assert ls == {"iotimeout": 1, "lockspace": b"ls_name"}

    acquired = sanlock.inq_lockspace(b"ls_name", 1, user_4k_path, wait=False)
    assert acquired is False

    # Verify that lockspace was written.
    magic = util.read_magic(user_4k_path)
    assert magic == constants.DELTA_DISK_MAGIC

    # Check that sanlock did not write beyond the lockspace area.
    util.check_guard(user_4k_path, align)
Example #8
def test_write_lockspace_4k_invalid_sector_size(sanlock_daemon, user_4k_path):
    with pytest.raises(sanlock.SanlockException) as e:
        sanlock.write_lockspace(b"ls_name",
                                user_4k_path,
                                iotimeout=1,
                                sector=SECTOR_SIZE_512)
    assert e.value.errno == errno.EINVAL
Example #9
def test_write_lockspace(tmpdir, sanlock_daemon, filename, encoding, size, offset):
    path = util.generate_path(tmpdir, filename, encoding)
    util.create_file(path, size)

    # Test read and write with default alignment and sector size values.
    sanlock.write_lockspace("name", path, offset=offset, iotimeout=1)

    ls = sanlock.read_lockspace(path, offset=offset)
    assert ls == {"iotimeout": 1, "lockspace": b"name"}

    # Test read and write with explicit alignment and sector size values.
    sanlock.write_lockspace(
        "name", path, offset=offset, iotimeout=1, align=ALIGNMENT_1M,
        sector=SECTOR_SIZE_512)

    ls = sanlock.read_lockspace(
        path, offset=offset, align=ALIGNMENT_1M, sector=SECTOR_SIZE_512)
    assert ls == {"iotimeout": 1, "lockspace": b"name"}

    acquired = sanlock.inq_lockspace(
        "name", 1, path, offset=offset, wait=False)
    assert acquired is False

    with io.open(path, "rb") as f:
        f.seek(offset)
        magic, = struct.unpack("< I", f.read(4))
        assert magic == constants.DELTA_DISK_MAGIC

        # TODO: check more stuff here...

    util.check_guard(path, size)
Example #10
def test_write_lockspace_invalid_align_sector(
        tmpdir, sanlock_daemon, align, sector):
    path = str(tmpdir.join("lockspace"))
    util.create_file(path, LOCKSPACE_SIZE)

    with pytest.raises(ValueError):
        sanlock.write_lockspace("name", path, align=align, sector=sector)
Example #11
def test_add_rem_lockspace_async(tmpdir, sanlock_daemon):
    path = str(tmpdir.join("ls_name"))
    util.create_file(path, MiB)

    sanlock.write_lockspace(b"ls_name", path, iotimeout=1)
    acquired = sanlock.inq_lockspace(b"ls_name", 1, path, wait=False)
    assert acquired is False

    # This will take 3 seconds.
    sanlock.add_lockspace(b"ls_name", 1, path, iotimeout=1, wait=False)

    # While the lockspace is being acquired, we expect to get None.
    time.sleep(1)
    acquired = sanlock.inq_lockspace(b"ls_name", 1, path, wait=False)
    assert acquired is None

    # Once the lockspace is acquired, we expect to get True.
    acquired = sanlock.inq_lockspace(b"ls_name", 1, path, wait=True)
    assert acquired is True

    # This will take about 3 seconds.
    sanlock.rem_lockspace(b"ls_name", 1, path, wait=False)

    # Wait until the lockspace changes state from True to None.
    while sanlock.inq_lockspace(b"ls_name", 1, path, wait=False):
        time.sleep(1)

    # While the lockspace is being released, we expect to get None.
    acquired = sanlock.inq_lockspace(b"ls_name", 1, path, wait=False)
    assert acquired is None

    # Once the lockspace was released, we expect to get False.
    acquired = sanlock.inq_lockspace(b"ls_name", 1, path, wait=True)
    assert acquired is False
Example #12
    def initLock(self, lease):
        self.log.info(
            "Initializing sanlock for domain %s path=%s alignment=%s "
            "block_size=%s io_timeout=%s",
            self._sdUUID, self._idsPath, self._alignment, self._block_size,
            self._io_timeout)

        resource_name = lease.name.encode("utf-8")
        try:
            sanlock.write_lockspace(
                self._lockspace_name,
                self._idsPath,
                iotimeout=self._io_timeout,
                align=self._alignment,
                sector=self._block_size)

            sanlock.write_resource(
                self._lockspace_name,
                resource_name,
                [(lease.path, lease.offset)],
                align=self._alignment,
                sector=self._block_size)
        except sanlock.SanlockException:
            self.log.exception(
                "Cannot initialize lock for domain %s", self._sdUUID)
            raise se.ClusterLockInitError()
Example #13
    def reset_lockspace(self):
        if os.path.exists(self._lease_file):
            sanlock.write_lockspace(
                lockspace=broker_constants.LOCKSPACE_NAME.encode(),
                path=self._lease_file,
                offset=0,
                sector=self._sector_size,
            )
Example #14
def main():
    signal.signal(signal.SIGTERM, sigTermHandler)

    print("Creating the sanlock disk")
    fd, disk = tempfile.mkstemp()
    os.close(fd)

    os.chown(disk,
             pwd.getpwnam("sanlock").pw_uid,
             grp.getgrnam("sanlock").gr_gid)
    offset = sanlock.get_alignment(disk)

    SNLK_DISKS = [(disk, offset)]

    print("Registering to sanlock")
    fd = sanlock.register()

    print("Initializing '%s'" % (LOCKSPACE_NAME,))
    sanlock.write_lockspace(LOCKSPACE_NAME, disk)

    print("Initializing '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
    sanlock.write_resource(LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS)

    print("Acquiring the id '%i' on '%s'" % (HOST_ID, LOCKSPACE_NAME))
    sanlock.add_lockspace(LOCKSPACE_NAME, HOST_ID, disk)

    try:
        print("Acquiring '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
        sanlock.acquire(LOCKSPACE_NAME,
                        RESOURCE_NAME,
                        SNLK_DISKS,
                        slkfd=fd,
                        version=0)
        while True:
            print("Trying to get lockspace '%s' hosts" % LOCKSPACE_NAME)
            try:
                hosts_list = sanlock.get_hosts(LOCKSPACE_NAME)
            except sanlock.SanlockException as e:
                # Needs "import errno"; os.errno is not available in
                # modern Python.
                if e.errno != errno.EAGAIN:
                    raise
            else:
                print("Lockspace '%s' hosts: " % LOCKSPACE_NAME, hosts_list)
                break
            time.sleep(5)
        print("Resource '%s' owners: " % RESOURCE_NAME,
              sanlock.read_resource_owners(
                  LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS))
        print("Releasing '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
        sanlock.release(LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS, slkfd=fd)
    except Exception as e:
        print("Exception: ", e)
    finally:
        print("Releasing the id '%i' on '%s'" % (HOST_ID, LOCKSPACE_NAME))
        sanlock.rem_lockspace(LOCKSPACE_NAME, HOST_ID, disk)

    print("Removing the sanlock disk")
    os.remove(disk)
Example #15
    def reset_lockspace(self, force=False):
        # Lockspace file
        lockspace_file = None

        # Service names
        lockspace = (constants.SERVICE_TYPE +
                     agent_constants.LOCKSPACE_EXTENSION)
        service = (constants.SERVICE_TYPE +
                   agent_constants.MD_EXTENSION)

        if self._config is None:
            self._config = config.Config()

        host_id = self._config.get(config.ENGINE, config.HOST_ID)
        is_configured = self._config.get(config.ENGINE, config.CONFIGURED)
        if (not host_id or
                (is_configured != "True" and is_configured is not None)):
            self._log.error("Hosted engine is not configured.")
            return

        # Connect to a broker and read all stats
        broker = brokerlink.BrokerLink()

        with broker.connection():
            self._configure_broker_conn(broker)
            stats = broker.get_stats_from_storage(service)
            lockspace_file = broker.get_service_path(lockspace)

        # Process raw stats
        try:
            all_stats = self._parse_stats(stats, self.StatModes.ALL)
            self._check_liveness_for_stats(all_stats, broker)
        except Exception as ex:
            self._log.exception(ex)
            all_stats = {}

        # Check whether it is safe to perform lockfile reset
        for id, stats in all_stats.items():
            if id == 0:
                if (not force and
                        not stats.get(self.GlobalMdFlags.MAINTENANCE, False)):
                    raise Exception("Lockfile reset can be performed in"
                                    " global maintenance mode only.")
            else:
                if not force and not stats.get("stopped", False):
                    raise Exception("Lockfile reset cannot be performed with"
                                    " an active agent.")

        if os.path.exists(lockspace_file):
            sanlock.write_lockspace(lockspace=constants.SERVICE_TYPE,
                                    path=lockspace_file,
                                    offset=0)
Example #16
def main():
    signal.signal(signal.SIGTERM, sigTermHandler)

    print("Creating the sanlock disk")
    fd, disk = tempfile.mkstemp()
    os.close(fd)

    os.chown(disk, pwd.getpwnam("sanlock").pw_uid, grp.getgrnam("sanlock").gr_gid)
    offset = sanlock.get_alignment(disk)

    SNLK_DISKS = [(disk, offset)]

    print("Registering to sanlock")
    fd = sanlock.register()

    print("Initializing '%s'" % (LOCKSPACE_NAME,))
    sanlock.write_lockspace(LOCKSPACE_NAME, disk, max_hosts=0, iotimeout=0, align=1048576, sector=512)

    print("Initializing '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
    sanlock.write_resource(LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS, align=1048576, sector=512)

    print("Acquiring the id '%i' on '%s'" % (HOST_ID, LOCKSPACE_NAME))
    sanlock.add_lockspace(LOCKSPACE_NAME, HOST_ID, disk)

    try:
        print("Acquiring '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
        sanlock.acquire(LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS, slkfd=fd,
                        version=0)
        while True:
            print("Trying to get lockspace '%s' hosts" % LOCKSPACE_NAME)
            try:
                hosts_list = sanlock.get_hosts(LOCKSPACE_NAME)
            except sanlock.SanlockException as e:
                # Needs "import errno"; os.errno is not available in
                # modern Python.
                if e.errno != errno.EAGAIN:
                    raise
            else:
                print("Lockspace '%s' hosts: " % LOCKSPACE_NAME, hosts_list)
                break
            time.sleep(5)
        print("Resource '%s' owners: " % RESOURCE_NAME,
              sanlock.read_resource_owners(
                  LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS, align=1048576, sector=512))
        print("Releasing '%s' on '%s'" % (RESOURCE_NAME, LOCKSPACE_NAME))
        sanlock.release(LOCKSPACE_NAME, RESOURCE_NAME, SNLK_DISKS, slkfd=fd)
    except Exception as e:
        print("Exception: ", e)
    finally:
        print("Releasing the id '%i' on '%s'" % (HOST_ID, LOCKSPACE_NAME))
        sanlock.rem_lockspace(LOCKSPACE_NAME, HOST_ID, disk)

    print("Removing the sanlock disk")
    os.remove(disk)
Example #17
def test_write_lockspace(tmpdir, sanlock_daemon):
    path = tmpdir.join("lockspace")
    size = 1024**2
    util.create_file(str(path), size)

    sanlock.write_lockspace("name", str(path), offset=0)

    with io.open(str(path), "rb") as f:
        magic, = struct.unpack("< I", f.read(4))
        assert magic == constants.DELTA_DISK_MAGIC

        # TODO: check more stuff here...

    util.check_guard(str(path), size)
Example #18
def test_add_rem_lockspace(tmpdir, sanlock_daemon, size, offset):
    path = str(tmpdir.join("ls_name"))
    util.create_file(path, size)

    sanlock.write_lockspace(b"ls_name", path, offset=offset, iotimeout=1)

    # Since the lockspace is not acquired, we expect to get False.
    acquired = sanlock.inq_lockspace(b"ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is False

    sanlock.add_lockspace(b"ls_name", 1, path, offset=offset, iotimeout=1)

    # Once the lockspace is acquired, we expect to get True.
    acquired = sanlock.inq_lockspace(b"ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is True

    lockspaces = sanlock.get_lockspaces()
    assert lockspaces == [{
        'flags': 0,
        'host_id': 1,
        'lockspace': b'ls_name',
        'offset': offset,
        'path': path
    }]

    sanlock.rem_lockspace(b"ls_name", 1, path, offset=offset)

    # Once the lockspace is released, we expect to get False.
    acquired = sanlock.inq_lockspace(b"ls_name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is False

    lockspaces = sanlock.get_lockspaces()
    assert lockspaces == []
Example #19
def test_write_lockspace(tmpdir, sanlock_daemon, size, offset):
    path = str(tmpdir.join("lockspace"))
    util.create_file(path, size)

    # test read and write with default alignment and sector size values
    sanlock.write_lockspace("name", path, offset=offset, iotimeout=1)

    ls = sanlock.read_lockspace(path, offset=offset)
    assert ls == {"iotimeout": 1, "lockspace": "name"}

    # test read and write with explicit alignment and sector size values
    sanlock.write_lockspace("name",
                            path,
                            offset=offset,
                            iotimeout=1,
                            align=ALIGNMENT_1M,
                            sector=SECTOR_SIZE_512)

    ls = sanlock.read_lockspace(path,
                                offset=offset,
                                align=ALIGNMENT_1M,
                                sector=SECTOR_SIZE_512)
    assert ls == {"iotimeout": 1, "lockspace": "name"}

    acquired = sanlock.inq_lockspace("name",
                                     1,
                                     path,
                                     offset=offset,
                                     wait=False)
    assert acquired is False

    with io.open(path, "rb") as f:
        f.seek(offset)
        magic, = struct.unpack("< I", f.read(4))
        assert magic == constants.DELTA_DISK_MAGIC

        # TODO: check more stuff here...

    util.check_guard(path, size)
Example #20
def test_add_rem_lockspace(tmpdir, sanlock_daemon, size, offset):
    path = str(tmpdir.join("ls_name"))
    util.create_file(path, size)

    sanlock.write_lockspace("ls_name", path, offset=offset, iotimeout=1)

    # Since the lockspace is not acquired, we expect to get False.
    acquired = sanlock.inq_lockspace(
        "ls_name", 1, path, offset=offset, wait=False)
    assert acquired is False

    sanlock.add_lockspace("ls_name", 1, path, offset=offset, iotimeout=1)

    # Once the lockspace is acquired, we expect to get True.
    acquired = sanlock.inq_lockspace(
        "ls_name", 1, path, offset=offset, wait=False)
    assert acquired is True

    lockspaces = sanlock.get_lockspaces()
    assert lockspaces == [{
        'flags': 0,
        'host_id': 1,
        'lockspace': b'ls_name',
        'offset': offset,
        'path': path
    }]

    sanlock.rem_lockspace("ls_name", 1, path, offset=offset)

    # Once the lockspace is released, we expect to get False.
    acquired = sanlock.inq_lockspace(
        "ls_name", 1, path, offset=offset, wait=False)
    assert acquired is False

    lockspaces = sanlock.get_lockspaces()
    assert lockspaces == []
Example #21
def test_acquire_release_resource(tmpdir, sanlock_daemon, size, offset):
    ls_path = str(tmpdir.join("ls_name"))
    util.create_file(ls_path, size)

    res_path = str(tmpdir.join("res_name"))
    util.create_file(res_path, size)

    sanlock.write_lockspace(b"ls_name", ls_path, offset=offset, iotimeout=1)
    sanlock.add_lockspace(b"ls_name", 1, ls_path, offset=offset, iotimeout=1)

    # Host status is not available until the first renewal.
    with pytest.raises(sanlock.SanlockException) as e:
        sanlock.get_hosts(b"ls_name", 1)
    assert e.value.errno == errno.EAGAIN

    time.sleep(1)
    host = sanlock.get_hosts(b"ls_name", 1)[0]
    assert host["flags"] == sanlock.HOST_LIVE

    disks = [(res_path, offset)]
    sanlock.write_resource(b"ls_name", b"res_name", disks)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 0
    }

    owners = sanlock.read_resource_owners(b"ls_name", b"res_name", disks)
    assert owners == []

    fd = sanlock.register()
    sanlock.acquire(b"ls_name", b"res_name", disks, slkfd=fd)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 1
    }

    owner = sanlock.read_resource_owners(b"ls_name", b"res_name", disks)[0]

    assert owner["host_id"] == 1
    assert owner["flags"] == 0
    assert owner["generation"] == 1
    assert owner["io_timeout"] == 0  # Why 0?
    # TODO: check timestamp.

    host = sanlock.get_hosts(b"ls_name", 1)[0]
    assert host["flags"] == sanlock.HOST_LIVE
    assert host["generation"] == owner["generation"]

    sanlock.release(b"ls_name", b"res_name", disks, slkfd=fd)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 1
    }

    owners = sanlock.read_resource_owners(b"ls_name", b"res_name", disks)
    assert owners == []
Example #22
def test_acquire_release_resource(tmpdir, sanlock_daemon, size, offset):
    ls_path = str(tmpdir.join("ls_name"))
    util.create_file(ls_path, size)

    res_path = str(tmpdir.join("res_name"))
    util.create_file(res_path, size)

    sanlock.write_lockspace("ls_name", ls_path, offset=offset, iotimeout=1)
    sanlock.add_lockspace("ls_name", 1, ls_path, offset=offset, iotimeout=1)

    # Host status is not available until the first renewal.
    with pytest.raises(sanlock.SanlockException) as e:
        sanlock.get_hosts("ls_name", 1)
    assert e.value.errno == errno.EAGAIN

    time.sleep(1)
    host = sanlock.get_hosts("ls_name", 1)[0]
    assert host["flags"] == sanlock.HOST_LIVE

    disks = [(res_path, offset)]
    sanlock.write_resource("ls_name", "res_name", disks)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 0
    }

    owners = sanlock.read_resource_owners("ls_name", "res_name", disks)
    assert owners == []

    fd = sanlock.register()
    sanlock.acquire("ls_name", "res_name", disks, slkfd=fd)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 1
    }

    owner = sanlock.read_resource_owners("ls_name", "res_name", disks)[0]

    assert owner["host_id"] == 1
    assert owner["flags"] == 0
    assert owner["generation"] == 1
    assert owner["io_timeout"] == 0  # Why 0?
    # TODO: check timestamp.

    host = sanlock.get_hosts("ls_name", 1)[0]
    assert host["flags"] == sanlock.HOST_LIVE
    assert host["generation"] == owner["generation"]

    sanlock.release("ls_name", "res_name", disks, slkfd=fd)

    res = sanlock.read_resource(res_path, offset=offset)
    assert res == {
        "lockspace": b"ls_name",
        "resource": b"res_name",
        "version": 1
    }

    owners = sanlock.read_resource_owners("ls_name", "res_name", disks)
    assert owners == []
Example #23
def test_write_lockspace_parse_args(no_sanlock_daemon, name):
    with raises_sanlock_errno():
        sanlock.write_lockspace(name, "ls_path")
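The raises_sanlock_errno context manager used here and in Example #25 is a helper from the sanlock test suite; a plausible sketch of what it does (the default errno is an assumption, chosen because with no_sanlock_daemon the call should fail while connecting to the daemon socket):

import errno
from contextlib import contextmanager

import pytest
import sanlock


@contextmanager
def raises_sanlock_errno(expected_errno=errno.ECONNREFUSED):
    # Expect the wrapped call to fail with the given errno, e.g. when no
    # sanlock daemon is listening on the control socket.
    with pytest.raises(sanlock.SanlockException) as e:
        yield
    assert e.value.errno == expected_errno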
Example #24
    def reset_lockspace(self):
        if os.path.exists(self._lease_file):
            sanlock.write_lockspace(lockspace=broker_constants.LOCKSPACE_NAME,
                                    path=self._lease_file,
                                    offset=0)
Example #25
def test_write_lockspace_parse_args(no_sanlock_daemon, name, filename,
                                    encoding):
    path = util.generate_path("/tmp/", filename, encoding)
    with raises_sanlock_errno():
        sanlock.write_lockspace(name, path)
Example #26
    def _misc(self):
        """
        Here the storage pool is connected and activated.
        Pass needed configuration to HA VdsmBackend for initializing
        the metadata and lockspace volumes.
        """
        self.logger.info(_('Verifying sanlock lockspace initialization'))
        self.services.state(
            name=self.environment[
                ohostedcons.SanlockEnv.SANLOCK_SERVICE
            ],
            state=True,
        )

        dom_type = self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE]
        lockspace = self.environment[ohostedcons.SanlockEnv.LOCKSPACE_NAME]
        host_id = self.environment[ohostedcons.StorageEnv.HOST_ID]

        # Prepare the Backend interface
        # - this supports nfs, iSCSI and Gluster automatically
        activate_devices = {
            lockspace + '.lockspace': None,  # created by backend
            lockspace + '.metadata': None,   # created by backend
        }
        backend = storage_backends.VdsmBackend(
            sd_uuid=self.environment[ohostedcons.StorageEnv.SD_UUID],
            sp_uuid=self.environment[ohostedcons.StorageEnv.SP_UUID],
            dom_type=dom_type,
            **activate_devices
        )
        backend.set_external_logger(self.logger)

        # Compute the size needed to store metadata for all hosts
        # and for the global cluster state
        md_size = (
            ohostedcons.Const.METADATA_CHUNK_SIZE * (
                ohostedcons.Const.MAX_HOST_ID + 1
            )
        )

        with ohostedutil.VirtUserContext(
            self.environment,
            # umask 007
            umask=stat.S_IRWXO
        ):
            # Create storage for the metadata and sanlock lockspace
            # 1MB is good for 2000 clients when the block size is 512B
            created = backend.create({
                lockspace + '.lockspace': 1024*1024*backend.blocksize/512,
                lockspace + '.metadata': md_size,
            })

            # Get UUIDs of the storage
            metadata_device = backend.get_device(lockspace + '.metadata')
            self.environment[
                ohostedcons.StorageEnv.METADATA_VOLUME_UUID
            ] = metadata_device.volume_uuid
            self.environment[
                ohostedcons.StorageEnv.METADATA_IMAGE_UUID
            ] = metadata_device.image_uuid

            lockspace_device = backend.get_device(lockspace + '.lockspace')
            self.environment[
                ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID
            ] = lockspace_device.volume_uuid
            self.environment[
                ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID
            ] = lockspace_device.image_uuid

            # for lv_based storage (like iscsi) creates symlinks in /rhev/..
            # for nfs does nothing (the real files are already in /rhev/..)
            backend.connect()

            # Get the path to sanlock lockspace area
            lease_file, offset = backend.filename(lockspace + '.lockspace')

            agent_data_dir = os.path.dirname(lease_file)

            stat_info = os.stat(agent_data_dir)
            # only change it when it's not already owned by vdsm,
            # because on NFS we don't need the chown and it won't work
            if stat_info.st_uid != self.environment[
                ohostedcons.VDSMEnv.VDSM_UID
            ]:
                os.chown(
                    agent_data_dir,
                    self.environment[ohostedcons.VDSMEnv.VDSM_UID],
                    self.environment[ohostedcons.VDSMEnv.KVM_GID]
                )
            # Update permissions on the lockspace directory to 0750
            os.chmod(agent_data_dir,
                     stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)

            self.logger.debug(
                (
                    'Ensuring lease for lockspace {lockspace}, '
                    'host id {host_id} '
                    'is acquired (file: {lease_file})'
                ).format(
                    lockspace=lockspace,
                    host_id=host_id,
                    lease_file=lease_file,
                )
            )

        # Reinitialize the sanlock lockspace
        # if it was newly created or updated
        if (lockspace + '.lockspace') in created:
            sanlock.write_lockspace(
                lockspace=lockspace,
                path=lease_file,
                offset=offset
            )
        backend.disconnect()
Example #27
    def _misc(self):
        """
        Here the storage pool is connected and activated.
        Pass needed configuration to HA VdsmBackend for initializing
        the metadata and lockspace volumes.
        """
        self.logger.info(_('Verifying sanlock lockspace initialization'))
        self.services.state(
            name=self.environment[ohostedcons.SanlockEnv.SANLOCK_SERVICE],
            state=True,
        )

        dom_type = self.environment[ohostedcons.StorageEnv.DOMAIN_TYPE]
        lockspace = self.environment[ohostedcons.SanlockEnv.LOCKSPACE_NAME]
        host_id = self.environment[ohostedcons.StorageEnv.HOST_ID]

        sp_uuid = self.environment[ohostedcons.StorageEnv.SP_UUID]
        if self.environment[ohostedcons.Upgrade.UPGRADE_CREATE_LM_VOLUMES]:
            cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            res = cli.getStorageDomainInfo(storagedomainID=self.environment[
                ohostedcons.StorageEnv.SD_UUID])
            self.logger.debug(res)
            if 'status' not in res or res['status']['code'] != 0:
                raise RuntimeError(
                    _('Failed getting storage domain info: {m}').format(
                        m=res['status']['message'], ))
            sp_uuid = res['pool'][0]

        # Prepare the Backend interface
        # - this supports nfs, iSCSI and Gluster automatically
        activate_devices = {
            lockspace + '.lockspace': None,  # created by backend
            lockspace + '.metadata': None,  # created by backend
        }
        backend = storage_backends.VdsmBackend(
            sd_uuid=self.environment[ohostedcons.StorageEnv.SD_UUID],
            sp_uuid=sp_uuid,
            dom_type=dom_type,
            **activate_devices)
        backend.set_external_logger(self.logger)

        # Compute the size needed to store metadata for all hosts
        # and for the global cluster state
        md_size = (ohostedcons.Const.METADATA_CHUNK_SIZE *
                   (ohostedcons.Const.MAX_HOST_ID + 1))

        with ohostedutil.VirtUserContext(
                self.environment,
                # umask 007
                umask=stat.S_IRWXO):
            # Create storage for the metadata and sanlock lockspace
            # 1MB is good for 2000 clients when the block size is 512B
            created = backend.create({
                lockspace + '.lockspace':
                1024 * 1024 * backend.blocksize / 512,
                lockspace + '.metadata':
                md_size,
            })

            # Get UUIDs of the storage
            metadata_device = backend.get_device(lockspace + '.metadata')
            self.environment[
                ohostedcons.StorageEnv.
                METADATA_VOLUME_UUID] = metadata_device.volume_uuid
            self.environment[ohostedcons.StorageEnv.
                             METADATA_IMAGE_UUID] = metadata_device.image_uuid

            lockspace_device = backend.get_device(lockspace + '.lockspace')
            self.environment[
                ohostedcons.StorageEnv.
                LOCKSPACE_VOLUME_UUID] = lockspace_device.volume_uuid
            self.environment[
                ohostedcons.StorageEnv.
                LOCKSPACE_IMAGE_UUID] = lockspace_device.image_uuid

            # for lv_based storage (like iscsi) creates symlinks in /rhev/..
            # for nfs does nothing (the real files are already in /rhev/..)
            backend.connect()

            # Get the path to sanlock lockspace area
            lease_file, offset = backend.filename(lockspace + '.lockspace')

            agent_data_dir = os.path.dirname(lease_file)

            stat_info = os.stat(agent_data_dir)
            # only change it when it's not already owned by vdsm,
            # because on NFS we don't need the chown and it won't work
            if stat_info.st_uid != self.environment[
                    ohostedcons.VDSMEnv.VDSM_UID]:
                os.chown(agent_data_dir,
                         self.environment[ohostedcons.VDSMEnv.VDSM_UID],
                         self.environment[ohostedcons.VDSMEnv.KVM_GID])
            # Update permissions on the lockspace directory to 0750
            os.chmod(agent_data_dir,
                     stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)

            self.logger.debug(('Ensuring lease for lockspace {lockspace}, '
                               'host id {host_id} '
                               'is acquired (file: {lease_file})').format(
                                   lockspace=lockspace,
                                   host_id=host_id,
                                   lease_file=lease_file,
                               ))

        # Reinitialize the sanlock lockspace
        # if it was newly created or updated
        if (lockspace + '.lockspace') in created:
            sanlock.write_lockspace(lockspace=lockspace,
                                    path=lease_file,
                                    offset=offset)
        backend.disconnect()