Example #1
def scanDomains(pattern="*"):
    log = logging.getLogger("storage.scanDomains")

    mntList = _getMountsList(pattern)

    def collectMetaFiles(possibleDomain):
        try:
            metaFiles = oop.getProcessPool(possibleDomain).glob.glob(
                os.path.join(possibleDomain, constants.UUID_GLOB_PATTERN,
                             sd.DOMAIN_META_DATA))

            for metaFile in metaFiles:
                if (os.path.basename(os.path.dirname(metaFile)) !=
                        sd.MASTER_FS_DIR):
                    sdUUID = os.path.basename(os.path.dirname(metaFile))

                    return (sdUUID, os.path.dirname(metaFile))

        except Exception:
            log.warn("Could not collect metadata file for domain path %s",
                     possibleDomain,
                     exc_info=True)

    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate it properly; for now we use the same value as the
    #       max processes per domain.
    for res in misc.itmap(collectMetaFiles, mntList, oop.HELPERS_PER_DOMAIN):
        if res is None:
            continue

        yield res
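
The helper every example here relies on is misc.itmap(func, iterable, maxthreads): a bounded-concurrency map that yields each worker's return value as it completes, not in input order, which is why scanDomains skips the None returned for mount points that hold no domain metadata. The sketch below reproduces only those semantics with the standard library; it is an assumption for illustration, not vdsm's actual misc.itmap, and itmap_sketch is a hypothetical name.

from concurrent.futures import ThreadPoolExecutor, as_completed


def itmap_sketch(func, iterable, maxthreads):
    # Reject a non-positive thread count up front. Because this is a
    # generator, the error only surfaces when the caller first advances it
    # (compare testInvalidITMapParams further down).
    if maxthreads < 1:
        raise ValueError("maxthreads must be a positive integer")
    with ThreadPoolExecutor(max_workers=maxthreads) as pool:
        futures = [pool.submit(func, value) for value in iterable]
        for future in as_completed(futures):
            # Results are yielded in completion order; callers such as
            # scanDomains filter out the values they do not care about.
            yield future.result()
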
Example #2
def scanDomains(pattern="*"):
    log = logging.getLogger("Storage.scanDomains")

    mntList = _getMountsList(pattern)

    def collectMetaFiles(possibleDomain):
        try:
            metaFiles = oop.getProcessPool(possibleDomain).glob.glob(
                os.path.join(possibleDomain,
                             constants.UUID_GLOB_PATTERN,
                             sd.DOMAIN_META_DATA))

            for metaFile in metaFiles:
                if (os.path.basename(os.path.dirname(metaFile)) !=
                        sd.MASTER_FS_DIR):
                    sdUUID = os.path.basename(os.path.dirname(metaFile))

                    return (sdUUID, os.path.dirname(metaFile))

        except Exception:
            log.warn("Could not collect metadata file for domain path %s",
                     possibleDomain, exc_info=True)

    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate it properly; for now we use the same value as the
    #       max processes per domain.
    for res in misc.itmap(collectMetaFiles, mntList, oop.HELPERS_PER_DOMAIN):
        if res is None:
            continue

        yield res
Example #3
 def testMaxAvailableProcesses(self):
     def dummy(arg):
         return arg
     # Here we launch the maximum number of threads that can be started in
     # every outOfProcess operation + 1, which verifies that oop and itmap
     # operate properly within their limits.
     data = frozenset(range(oop.HELPERS_PER_DOMAIN + 1))
     ret = frozenset(misc.itmap(dummy, data, misc.UNLIMITED_THREADS))
     self.assertEqual(ret, data)
Example #4
 def testMaxAvailableProcesses(self):
     def dummy(arg):
         return arg
     # Here we launch the maximum number of threads that can be started in
     # every outOfProcess operation + 1, which verifies that oop and itmap
     # operate properly within their limits.
     data = frozenset(range(oop.HELPERS_PER_DOMAIN + 1))
     ret = frozenset(misc.itmap(dummy, data, misc.UNLIMITED_THREADS))
     self.assertEqual(ret, data)
Example #5
def scanDomains(pattern="*"):
    log = logging.getLogger("storage.scanDomains")

    mntList = _getMountsList(pattern)
    mount_prefix = os.path.join(
        sd.StorageDomain.storage_repository, sd.DOMAIN_MNT_POINT)

    def collectMetaFiles(mountPoint):
        try:
            # removes the path to the data center's mount directory from
            # the mount point.
            if mountPoint.startswith(mount_prefix):
                client_name = mountPoint[len(mount_prefix):]

            # Since glob treats values between brackets as character ranges,
            # and since IPV6 addresses contain brackets, we should escape the
            # mountPoint that we pass to glob.
            # <data-center>/mnt/mountpoint/<uuid>/dom_mdm
            mdPattern = os.path.join(
                glob_escape(mountPoint),
                UUID_GLOB_PATTERN,
                sd.DOMAIN_META_DATA)

            metaFiles = oop.getProcessPool(client_name).glob.glob(mdPattern)

            for metaFile in metaFiles:
                if (os.path.basename(os.path.dirname(metaFile)) !=
                        sd.MASTER_FS_DIR):
                    sdUUID = os.path.basename(os.path.dirname(metaFile))

                    return (sdUUID, os.path.dirname(metaFile))

        except Exception:
            log.warn("Could not collect metadata file for domain path %s",
                     mountPoint, exc_info=True)

    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate it properly; for now we use the same value as the
    #       max processes per domain.
    for res in misc.itmap(collectMetaFiles, mntList, oop.HELPERS_PER_DOMAIN):
        if res is None:
            continue

        yield res
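
The glob_escape call above guards against a real pitfall: glob treats "[...]" as a character class, and IPv6-backed mount points contain literal brackets. Below is a small self-contained illustration using the standard library's glob.escape (Python 3.4+); the mount point is made up, and the UUID pattern and "dom_md" component are stand-ins for constants.UUID_GLOB_PATTERN and sd.DOMAIN_META_DATA.

import glob
import os

# Hypothetical NFS mount point backed by an IPv6 address; the brackets are
# literal path characters, not a glob character class.
mount_point = "/rhev/data-center/mnt/[fd00::1]:_export_data"

# Placeholder for constants.UUID_GLOB_PATTERN.
uuid_pattern = "*-*-*-*-*"

unescaped = os.path.join(mount_point, uuid_pattern, "dom_md")
escaped = os.path.join(glob.escape(mount_point), uuid_pattern, "dom_md")

# The unescaped pattern reads "[fd00::1]" as a character range and never
# matches the bracketed directory name; the escaped one matches it literally.
print(unescaped)
print(escaped)
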
Example #6
File: fileSD.py Project: oVirt/vdsm
def scanDomains(pattern="*"):
    log = logging.getLogger("storage.scanDomains")

    mntList = _getMountsList(pattern)

    def collectMetaFiles(mountPoint):
        try:
            # removes the path to the data center's mount directory from
            # the mount point.
            if mountPoint.startswith(sc.REPO_MOUNT_DIR):
                client_name = mountPoint[len(sc.REPO_MOUNT_DIR):]

            # Since glob treats values between brackets as character ranges,
            # and since IPV6 addresses contain brackets, we should escape the
            # mountPoint that we pass to glob.
            # <data-center>/mnt/mountpoint/<uuid>/dom_mdm
            mdPattern = os.path.join(
                glob_escape(mountPoint),
                UUID_GLOB_PATTERN,
                sd.DOMAIN_META_DATA)

            metaFiles = oop.getProcessPool(client_name).glob.glob(mdPattern)

            for metaFile in metaFiles:
                if (os.path.basename(os.path.dirname(metaFile)) !=
                        sd.MASTER_FS_DIR):
                    sdUUID = os.path.basename(os.path.dirname(metaFile))

                    return (sdUUID, os.path.dirname(metaFile))

        except Exception:
            log.warn("Could not collect metadata file for domain path %s",
                     mountPoint, exc_info=True)

    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate it properly; for now we use the same value as the
    #       max processes per domain.
    for res in misc.itmap(collectMetaFiles, mntList, oop.HELPERS_PER_DOMAIN):
        if res is None:
            continue

        yield res
Example #7
 def testMoreArgsThanThreads(self):
     def dummy(arg):
         time.sleep(0.5)
         return arg
     data = frozenset([1, 2, 3, 4])
     currentTime = time.time()
     # We provide 3 thread slots and the input contains 4 values, which
     # means we need to wait for one thread to finish before all of the
     # input can be processed.
     ret = frozenset(misc.itmap(dummy, data, 3))
     afterTime = time.time()
     # The run should take at least 0.5 sec waiting for one of the first 3
     # to finish and another 0.5 sec for the last operation, but no more
     # than 2 seconds (a generous upper bound).
     self.assertFalse(afterTime - currentTime > 2,
                      msg=("Operation took too long (more than 2 second). "
                           "starts: %s ends: %s") %
                      (currentTime, afterTime))
     # Verify the operation waits at least for 1 thread to finish
     self.assertFalse(afterTime - currentTime < 1,
                      msg="Operation was too fast, not all threads were "
                          "initiated as desired (with 1 thread delay)")
     self.assertEqual(ret, data)
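
The timing assertions above encode a simple scheduling argument: with 3 worker slots and four half-second tasks, the fourth task has to wait for a slot, so the run takes roughly two batches, at least about 1 second and comfortably under the 2-second ceiling. A standalone illustration of that reasoning, using ThreadPoolExecutor as a stand-in for misc.itmap:

import time
from concurrent.futures import ThreadPoolExecutor


def dummy(arg):
    time.sleep(0.5)
    return arg


start = time.time()
with ThreadPoolExecutor(max_workers=3) as pool:
    # Four tasks, three slots: tasks 1-3 run together (~0.5 s), task 4 runs
    # in the freed slot afterwards (~0.5 s more), so wall-clock time is ~1 s.
    results = list(pool.map(dummy, [1, 2, 3, 4]))
elapsed = time.time() - start
print(sorted(results), round(elapsed, 2))
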
Example #8
 def testMoreArgsThanThreads(self):
     def dummy(arg):
         time.sleep(0.5)
         return arg
     data = frozenset([1, 2, 3, 4])
     currentTime = time.time()
     # We provide 3 thread slots and the input contains 4 values, which
     # means we need to wait for one thread to finish before all of the
     # input can be processed.
     ret = frozenset(misc.itmap(dummy, data, 3))
     afterTime = time.time()
     # The run should take at least 0.5 sec waiting for one of the first 3
     # to finish and another 0.5 sec for the last operation, but no more
     # than 2 seconds (a generous upper bound).
     self.assertFalse(afterTime - currentTime > 2,
                      msg=("Operation took too long (more than 2 second). "
                           "starts: %s ends: %s") %
                      (currentTime, afterTime))
     # Verify the operation waits at least for 1 thread to finish
     self.assertFalse(afterTime - currentTime < 1,
                      msg="Operation was too fast, not all threads were "
                          "initiated as desired (with 1 thread delay)")
     self.assertEqual(ret, data)
Example #9
 def testInvalidITMapParams(self):
     data = 1
     self.assertRaises(ValueError, lambda: next(misc.itmap(int, data, 0)))
Example #10
 def testMoreThreadsThanArgs(self):
     data = [1]
     self.assertEqual(list(misc.itmap(int, data, 80)), data)
Example #11
 def testInvalidITMapParams(self):
     data = 1
     self.assertRaises(ValueError, lambda: next(misc.itmap(int, data, 0)))