Example #1
def test_pool_with_memmap_array_view(tmpdir):
    """Check that subprocess can access and update shared memory array"""
    assert_array_equal = np.testing.assert_array_equal

    # Fork the subprocess before allocating the objects to be passed
    pool_temp_folder = tmpdir.mkdir('pool').strpath
    p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
    try:

        filename = tmpdir.join('test.mmap').strpath
        a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
        a.fill(1.0)

        # Create an ndarray view on the memmap instance
        a_view = np.asarray(a)
        assert not isinstance(a_view, np.memmap)
        assert has_shareable_memory(a_view)

        p.map(inplace_double, [(a_view, (i, j), 1.0)
                               for i in range(a.shape[0])
                               for j in range(a.shape[1])])

        # Both a and the a_view have been updated
        assert_array_equal(a, 2 * np.ones(a.shape))
        assert_array_equal(a_view, 2 * np.ones(a.shape))

        # Passing memmap array view to the pool should not trigger the
        # creation of new files on the FS
        assert os.listdir(pool_temp_folder) == []

    finally:
        p.terminate()
        del p
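
Note: these test snippets assume a few names that are not shown in this listing. MemmapingPool and has_shareable_memory come from joblib.pool in the joblib versions these examples target, and inplace_double / check_array are small helpers from the same test module. A minimal sketch of what those helpers plausibly look like, assuming the (array, position, expected_value) argument convention visible in the calls above:

import numpy as np

# Assumed imports for the snippets in this listing (older joblib layout).
from joblib.pool import MemmapingPool, has_shareable_memory


def check_array(args):
    """Worker helper: check that data[position] equals the expected value."""
    data, position, expected = args
    np.testing.assert_array_equal(data[position], expected)


def inplace_double(args):
    """Worker helper: check the expected value, then double it in place.

    The change is visible to the parent process only when the array is
    backed by shared memory (a memmap or a /dev/shm-backed buffer).
    """
    data, position, expected = args
    assert data[position] == expected
    data[position] *= 2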
Example #2
def test_memmaping_on_dev_shm():
    """Check that MemmapingPool uses /dev/shm when possible"""
    p = MemmapingPool(3, max_nbytes=10)
    try:
        # Check that the pool has correctly detected the presence of the
        # shared memory filesystem.
        pool_temp_folder = p._temp_folder
        folder_prefix = '/dev/shm/joblib_memmaping_pool_'
        assert_true(pool_temp_folder.startswith(folder_prefix))
        assert_true(os.path.exists(pool_temp_folder))

        # Try with a file larger than the memmap threshold of 10 bytes
        a = np.ones(100, dtype=np.float64)
        assert_equal(a.nbytes, 800)
        p.map(id, [a] * 10)
        # a should have been memmapped to the pool temp folder: the joblib
        # pickling procedure generates a .pkl and a .npy file:
        assert_equal(len(os.listdir(pool_temp_folder)), 2)

        b = np.ones(100, dtype=np.float64)
        assert_equal(b.nbytes, 800)
        p.map(id, [b] * 10)
        # Copies of both a and b are now stored in the shared memory folder
        assert_equal(len(os.listdir(pool_temp_folder)), 4)

    finally:
        # Cleanup open file descriptors
        p.terminate()
        del p

    # The temp folder is cleaned up upon pool termination
    assert_false(os.path.exists(pool_temp_folder))
Example #3
def test_memmaping_on_dev_shm():
    """Check that MemmapingPool uses /dev/shm when possible"""
    p = MemmapingPool(3, max_nbytes=10)
    try:
        # Check that the pool has correctly detected the presence of the
        # shared memory filesystem.
        pool_temp_folder = p._temp_folder
        folder_prefix = '/dev/shm/joblib_memmaping_pool_'
        assert pool_temp_folder.startswith(folder_prefix)
        assert os.path.exists(pool_temp_folder)

        # Try with a file larger than the memmap threshold of 10 bytes
        a = np.ones(100, dtype=np.float64)
        assert a.nbytes == 800
        p.map(id, [a] * 10)
        # a should have been memmapped to the pool temp folder: the joblib
        # pickling procedure generates one .pkl file:
        assert len(os.listdir(pool_temp_folder)) == 1

        # create a new array with content that is different from 'a' so that
        # it is mapped to a different file in the temporary folder of the
        # pool.
        b = np.ones(100, dtype=np.float64) * 2
        assert b.nbytes == 800
        p.map(id, [b] * 10)
        # Copies of both a and b are now stored in the shared memory folder
        assert len(os.listdir(pool_temp_folder)) == 2

    finally:
        # Cleanup open file descriptors
        p.terminate()
        del p

    # The temp folder is cleaned up upon pool termination
    assert not os.path.exists(pool_temp_folder)
Example #4
def test_pool_memmap_with_big_offset(tmpdir):
    # Test that numpy memmap offset is set correctly if greater than
    # mmap.ALLOCATIONGRANULARITY, see
    # https://github.com/joblib/joblib/issues/451 and
    # https://github.com/numpy/numpy/pull/8443 for more details.
    fname = tmpdir.join('test.mmap').strpath
    size = 5 * mmap.ALLOCATIONGRANULARITY
    offset = mmap.ALLOCATIONGRANULARITY + 1
    obj = make_memmap(fname,
                      mode='w+',
                      shape=size,
                      dtype='uint8',
                      offset=offset)

    p = MemmapingPool(2, temp_folder=tmpdir.strpath)
    result = p.apply_async(identity, args=(obj, )).get()
    assert isinstance(result, np.memmap)
    assert result.offset == offset
    np.testing.assert_array_equal(obj, result)
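
The big-offset test above also relies on two names defined elsewhere: make_memmap, joblib's backport of np.memmap that propagates the offset correctly on old numpy versions (see the linked issues), and a trivial identity worker. A plausible sketch of the latter, plus the assumed import:

# Assumed import: joblib ships this backport to work around numpy/numpy#8443.
from joblib.backports import make_memmap


def identity(arg):
    """Worker helper: return the argument unchanged, so the test can inspect
    what the child process actually received (here, a np.memmap with offset)."""
    return arg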
Example #5
def test_memmaping_pool_for_large_arrays_in_return():
    """Check that large arrays are not copied in memory in return"""
    assert_array_equal = np.testing.assert_array_equal

    # Build array reducers that automatically dump large array content,
    # but check that the returned data structures are regular arrays to avoid
    # passing a memmap array pointing to a pool-controlled temp folder that
    # might be confusing to the user

    # The MemmapingPool user can always return numpy.memmap object explicitly
    # to avoid memory copy
    p = MemmapingPool(3, max_nbytes=10, temp_folder=TEMP_FOLDER)
    try:
        res = p.apply_async(np.ones, args=(1000, ))
        large = res.get()
        assert_false(has_shareable_memory(large))
        assert_array_equal(large, np.ones(1000))
    finally:
        p.terminate()
        del p
Example #6
def test_memmaping_pool_for_large_arrays_in_return(tmpdir):
    """Check that large arrays are not copied in memory in return"""
    assert_array_equal = np.testing.assert_array_equal

    # Build array reducers that automatically dump large array content,
    # but check that the returned data structures are regular arrays to avoid
    # passing a memmap array pointing to a pool-controlled temp folder that
    # might be confusing to the user

    # The MemmapingPool user can always return numpy.memmap object explicitly
    # to avoid memory copy
    p = MemmapingPool(3, max_nbytes=10, temp_folder=tmpdir.strpath)
    try:
        res = p.apply_async(np.ones, args=(1000,))
        large = res.get()
        assert not has_shareable_memory(large)
        assert_array_equal(large, np.ones(1000))
    finally:
        p.terminate()
        del p
Example #7
def test_memmaping_pool_for_large_arrays_disabled(tmpdir):
    """Check that large arrays memmaping can be disabled"""
    # Set max_nbytes to None to disable the auto memmaping feature
    p = MemmapingPool(3, max_nbytes=None, temp_folder=tmpdir.strpath)
    try:

        # Check that the tempfolder is empty
        assert os.listdir(tmpdir.strpath) == []

        # Try with an array larger than the memmap threshold of 40 bytes
        large = np.ones(100, dtype=np.float64)
        assert large.nbytes == 800
        p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])

        # Check that the tempfolder is still empty
        assert os.listdir(tmpdir.strpath) == []

    finally:
        # Cleanup open file descriptors
        p.terminate()
        del p
Example #8
def test_pool_with_memmap_array_view():
    """Check that subprocess can access and update shared memory array"""
    assert_array_equal = np.testing.assert_array_equal

    # Fork the subprocess before allocating the objects to be passed
    pool_temp_folder = os.path.join(TEMP_FOLDER, 'pool')
    os.makedirs(pool_temp_folder)
    p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
    try:

        filename = os.path.join(TEMP_FOLDER, 'test.mmap')
        a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
        a.fill(1.0)

        # Create an ndarray view on the memmap instance
        a_view = np.asarray(a)
        assert_false(isinstance(a_view, np.memmap))
        assert_true(has_shareable_memory(a_view))

        p.map(inplace_double, [(a_view, (i, j), 1.0) for i in range(a.shape[0])
                               for j in range(a.shape[1])])

        # Both a and the a_view have been updated
        assert_array_equal(a, 2 * np.ones(a.shape))
        assert_array_equal(a_view, 2 * np.ones(a.shape))

        # Passing memmap array view to the pool should not trigger the
        # creation of new files on the FS
        assert_equal(os.listdir(pool_temp_folder), [])

    finally:
        p.terminate()
        del p
Example #9
def test_pool_with_memmap():
    """Check that subprocess can access and update shared memory memmap"""
    assert_array_equal = np.testing.assert_array_equal

    # Fork the subprocess before allocating the objects to be passed
    pool_temp_folder = os.path.join(TEMP_FOLDER, 'pool')
    os.makedirs(pool_temp_folder)
    p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
    try:
        filename = os.path.join(TEMP_FOLDER, 'test.mmap')
        a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
        a.fill(1.0)

        p.map(inplace_double, [(a, (i, j), 1.0) for i in range(a.shape[0])
                               for j in range(a.shape[1])])

        assert_array_equal(a, 2 * np.ones(a.shape))

        # Open a copy-on-write view on the previous data
        b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')

        p.map(inplace_double, [(b, (i, j), 2.0) for i in range(b.shape[0])
                               for j in range(b.shape[1])])

        # Passing memmap instances to the pool should not trigger the creation
        # of new files on the FS
        assert_equal(os.listdir(pool_temp_folder), [])

        # the original data is untouched
        assert_array_equal(a, 2 * np.ones(a.shape))
        assert_array_equal(b, 2 * np.ones(b.shape))

        # readonly maps can be read but not updated
        c = np.memmap(filename,
                      dtype=np.float32,
                      shape=(10, ),
                      mode='r',
                      offset=5 * 4)

        assert_raises(AssertionError, p.map, check_array,
                      [(c, i, 3.0) for i in range(c.shape[0])])

        # depending on the version of numpy one can either get a RuntimeError
        # or a ValueError
        assert_raises((RuntimeError, ValueError), p.map, inplace_double,
                      [(c, i, 2.0) for i in range(c.shape[0])])
    finally:
        # Clean all filehandlers held by the pool
        p.terminate()
        del p
Example #10
def get_score(data, labels, fold_pairs, name, model, param):
    """
    Function to get score for a classifier.

    Parameters
    ----------
    data: array_like
        Data from which to derive score.
    labels: array_like or list
        Corresponding labels for each sample.
    fold_pairs: list of pairs of array_like
        A list of train/test indices for each fold
        dhjelm(Why can't we just use the KFold object?)
    name: str
        Name of classifier.
    model: WRITEME
    param: WRITEME
        Parameters for the classifier.

    Returns
    -------
    classifier: WRITEME
    fScore: WRITEME
    """
    assert isinstance(name, str)
    logger.info("Classifying %s" % name)

    ksplit = len(fold_pairs)
    if name not in NAMES:
        raise ValueError("Classifier %s not supported. "
                         "Did you enter it properly?" % name)

    # Redefine the parameters to be used for RBF SVM (dependent on
    # training data)

    if True:  #better identifier here
        logger.info("Attempting to use grid search...")
        fScore = []
        for i, fold_pair in enumerate(fold_pairs):
            print("Classifying a %s the %d-th out of %d folds..." %
                  (name, i + 1, len(fold_pairs)))
            classifier = get_classifier(name, model, param,
                                        data[fold_pair[0], :])
            area = classify(data, labels, fold_pair, classifier)
            fScore.append(area)
    else:
        logger.warning("Multiprocessing splits not tested yet.")
        pool = Pool(processes=min(ksplit, PROCESSORS))
        classify_func = lambda f: classify(
            data,
            labels,
            fold_pairs[f],
            classifier=get_classifier(
                name, model, param, data=data[fold_pairs[f][0], :]))
        fScore = pool.map(classify_func, range(ksplit))
        pool.close()
        pool.join()

    return classifier, fScore
Example #11
def test_memmaping_pool_for_large_arrays():
    """Check that large arrays are not copied in memory"""
    assert_array_equal = np.testing.assert_array_equal

    # Check that the tempfolder is empty
    assert_equal(os.listdir(TEMP_FOLDER), [])

    # Build array reducers that automatically dump large array content
    # to filesystem-backed memmap instances to avoid memory explosion
    p = MemmapingPool(3, max_nbytes=40, temp_folder=TEMP_FOLDER)
    try:
        # The temporary folder for the pool is not provisioned in advance
        assert_equal(os.listdir(TEMP_FOLDER), [])
        assert_false(os.path.exists(p._temp_folder))

        small = np.ones(5, dtype=np.float32)
        assert_equal(small.nbytes, 20)
        p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])])

        # Memory has been copied, the pool filesystem folder is unused
        assert_equal(os.listdir(TEMP_FOLDER), [])

        # Try with a file larger than the memmap threshold of 40 bytes
        large = np.ones(100, dtype=np.float64)
        assert_equal(large.nbytes, 800)
        p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])

        # The data has been dumped in a temp folder for subprocess to share it
        # without per-child memory copies
        assert_true(os.path.isdir(p._temp_folder))
        dumped_filenames = os.listdir(p._temp_folder)
        assert_equal(len(dumped_filenames), 2)

        # Check that memory mapping is not triggered for arrays with
        # dtype='object'
        objects = np.array(['abc'] * 100, dtype='object')
        results = p.map(has_shareable_memory, [objects])
        assert_false(results[0])

    finally:
        # check FS garbage upon pool termination
        p.terminate()
        assert_false(os.path.exists(p._temp_folder))
        del p
Example #12
def test_workaround_against_bad_memmap_with_copied_buffers():
    """Check that memmaps with a bad buffer are returned as regular arrays

    Unary operations and ufuncs on memmap instances return a new memmap
    instance with an in-memory buffer (probably a numpy bug).
    """
    assert_array_equal = np.testing.assert_array_equal

    p = MemmapingPool(3, max_nbytes=10, temp_folder=TEMP_FOLDER)
    try:
        # Send a complex, large-ish view on an array that will be converted to
        # a memmap in the worker process
        a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
                       order='F')[:, :1, :]

        # Call a non-inplace multiply operation on the worker; the resulting
        # memmap is sent back to the parent.
        b = p.apply_async(_worker_multiply, args=(a, 3)).get()
        assert_false(has_shareable_memory(b))
        assert_array_equal(b, 3 * a)
    finally:
        p.terminate()
        del p
Example #13
def test_workaround_against_bad_memmap_with_copied_buffers(tmpdir):
    """Check that memmaps with a bad buffer are returned as regular arrays

    Unary operations and ufuncs on memmap instances return a new memmap
    instance with an in-memory buffer (probably a numpy bug).
    """
    assert_array_equal = np.testing.assert_array_equal

    p = MemmapingPool(3, max_nbytes=10, temp_folder=tmpdir.strpath)
    try:
        # Send a complex, large-ish view on an array that will be converted to
        # a memmap in the worker process
        a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
                       order='F')[:, :1, :]

        # Call a non-inplace multiply operation on the worker; the resulting
        # memmap is sent back to the parent.
        b = p.apply_async(_worker_multiply, args=(a, 3)).get()
        assert not has_shareable_memory(b)
        assert_array_equal(b, 3 * a)
    finally:
        p.terminate()
        del p
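
Examples #12 and #13 dispatch a _worker_multiply helper defined elsewhere in the test module. A minimal sketch consistent with how it is used above (the exact assertion is an assumption): the worker verifies its input arrived as shared memory, then returns the result of a ufunc, which the parent expects to be a plain array.

def _worker_multiply(a, n_times):
    """Worker helper: multiply the (auto-memmapped) input and return the result.

    The ufunc result is pickled back to the parent, which then checks that it
    no longer points at shared memory.
    """
    assert has_shareable_memory(a)  # assumed check: the input was memmapped
    return a * n_times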
Example #14
def get_score(data, labels, fold_pairs,
              name, model, param):
    """
    Function to get score for a classifier.

    Parameters
    ----------
    data: array-like
        Data from which to derive score.
    labels: array-like or list.
        Corresponding labels for each sample.
    fold_pairs: list of pairs of array-like
        A list of train/test indices for each fold
        (Why can't we just use the KFold object?)
    name: string
        Name of classifier.
    model: WRITEME
    param: WRITEME
        Parameters for the classifier.
    """
    assert isinstance(name, str)
    logger.info("Classifying %s" % name)

    ksplit = len(fold_pairs)
    if name not in NAMES:
        raise ValueError("Classifier %s not supported. "
                         "Did you enter it properly?" % name)

    # Redefine the parameters to be used for RBF SVM (dependent on
    # training data)

    if True:  #better identifier here
        logger.info("Attempting to use grid search...")
        fScore = []
        for i, fold_pair in enumerate(fold_pairs):
            print ("Classifying a %s the %d-th out of %d folds..."
                   % (name, i+1, len(fold_pairs)))
            classifier = get_classifier(name, model, param, data[fold_pair[0], :])
            area = classify(data, labels, fold_pair, classifier)
            fScore.append(area)
    else:
        warnings.warn("Multiprocessing splits not tested yet.")
        pool = Pool(processes=min(ksplit, PROCESSORS))
        classify_func = lambda f: classify(
            data,
            labels,
            fold_pairs[f],
            classifier=get_classifier(
                name,
                model,
                param,
                data=data[fold_pairs[f][0], :]))
        fScore = pool.map(classify_func, range(ksplit))
        pool.close()
        pool.join()

    return classifier, fScore
Example #15
def initialize(self, n_parallel):
    self.n_parallel = n_parallel
    if self.pool is not None:
        print("Warning: terminating existing pool")
        self.pool.terminate()
        self.queue.close()
        self.worker_queue.close()
        self.G = SharedGlobal()
    if n_parallel > 1:
        self.queue = mp.Queue()
        self.worker_queue = mp.Queue()
        self.pool = MemmappingPool(
            self.n_parallel,
            temp_folder="/tmp",
        )
Example #16
def initialize(self, n_parallel):
    self.n_parallel = n_parallel
    if self.pool is not None:
        print("Warning: terminating existing pool")
        self.pool.terminate()
        self.queue.close()
        self.worker_queue.close()
        self.G = SharedGlobal()
    if n_parallel > 1:
        self.queue = mp.Queue()
        self.worker_queue = mp.Queue()
        self.pool = MemmapingPool(
            self.n_parallel,
            temp_folder="/tmp",
        )
Example #17
def test_pool_with_memmap(tmpdir_path):
    """Check that subprocess can access and update shared memory memmap"""
    assert_array_equal = np.testing.assert_array_equal

    # Fork the subprocess before allocating the objects to be passed
    pool_temp_folder = os.path.join(tmpdir_path, 'pool')
    os.makedirs(pool_temp_folder)
    p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
    try:
        filename = os.path.join(tmpdir_path, 'test.mmap')
        a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
        a.fill(1.0)

        p.map(inplace_double, [(a, (i, j), 1.0)
                               for i in range(a.shape[0])
                               for j in range(a.shape[1])])

        assert_array_equal(a, 2 * np.ones(a.shape))

        # Open a copy-on-write view on the previous data
        b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')

        p.map(inplace_double, [(b, (i, j), 2.0)
                               for i in range(b.shape[0])
                               for j in range(b.shape[1])])

        # Passing memmap instances to the pool should not trigger the creation
        # of new files on the FS
        assert os.listdir(pool_temp_folder) == []

        # the original data is untouched
        assert_array_equal(a, 2 * np.ones(a.shape))
        assert_array_equal(b, 2 * np.ones(b.shape))

        # readonly maps can be read but not updated
        c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r',
                      offset=5 * 4)

        assert_raises(AssertionError, p.map, check_array,
                      [(c, i, 3.0) for i in range(c.shape[0])])

        # depending on the version of numpy one can either get a RuntimeError
        # or a ValueError
        assert_raises((RuntimeError, ValueError), p.map, inplace_double,
                      [(c, i, 2.0) for i in range(c.shape[0])])
    finally:
        # Clean all filehandlers held by the pool
        p.terminate()
        del p
Example #18
class StatefulPool(object):
    def __init__(self):

        self.n_parallel = 1
        self.pool = None
        self.queue = None
        self.worker_queue = None
        self.G = SharedGlobal()

    def initialize(self, n_parallel):

        self.n_parallel = n_parallel

        if self.pool is not None:
            print("Warning: terminating existing pool")
            self.pool.terminate()
            self.queue.close()
            self.worker_queue.close()
            self.G = SharedGlobal()

        if n_parallel > 1:
            self.queue = mp.Queue()
            self.worker_queue = mp.Queue()
            self.pool = MemmapingPool(self.n_parallel, temp_folder="/tmp")

    def run_each(self, runner, args_list=None):

        if args_list is None:
            args_list = [tuple()] * self.n_parallel
        assert len(args_list) == self.n_parallel

        if self.n_parallel > 1:
            results = self.pool.map_async(worker_run_each,
                                          [(runner, args)
                                           for args in args_list])
            for _ in range(self.n_parallel):
                self.worker_queue.get()
            for _ in range(self.n_parallel):
                self.queue.put(None)
            return results.get()
        else:
            return [runner(self.G, *args_list[0])]
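
The StatefulPool examples (#18, #20 and #21) call module-level helpers such as worker_run_each / _worker_run_each that are not shown in this listing. The sketch below is an assumption reconstructed from the master-side code above: each worker announces itself on worker_queue, blocks on queue until the master has heard from every worker, and only then runs the function. The queue, worker_queue and G globals stand in for the pool state the workers inherit via fork.

import multiprocessing as mp

# Hypothetical module-level state inherited by the workers when the pool is
# forked; mirrors self.queue / self.worker_queue / self.G in StatefulPool.
queue = mp.Queue()
worker_queue = mp.Queue()
G = None  # stands in for the SharedGlobal instance


def _worker_run_each(all_args):
    """Plausible worker-side counterpart of StatefulPool.run_each (a sketch)."""
    runner, args = all_args
    worker_queue.put(None)   # tell the master this worker has started
    queue.get()              # wait until the master releases all workers
    return runner(G, *args)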
Example #19
def test_memmaping_pool_for_large_arrays_disabled():
    """Check that large arrays memmaping can be disabled"""
    # Set max_nbytes to None to disable the auto memmaping feature
    p = MemmapingPool(3, max_nbytes=None, temp_folder=TEMP_FOLDER)
    try:

        # Check that the tempfolder is empty
        assert_equal(os.listdir(TEMP_FOLDER), [])

        # Try with an array larger than the memmap threshold of 40 bytes
        large = np.ones(100, dtype=np.float64)
        assert_equal(large.nbytes, 800)
        p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])

        # Check that the tempfolder is still empty
        assert_equal(os.listdir(TEMP_FOLDER), [])

    finally:
        # Cleanup open file descriptors
        p.terminate()
        del p
Example #20
class StatefulPool:
    def __init__(self):
        self.n_parallel = 1
        self.pool = None
        self.queue = None
        self.worker_queue = None
        self.G = SharedGlobal()

    def initialize(self, n_parallel):
        self.n_parallel = n_parallel
        if self.pool is not None:
            print("Warning: terminating existing pool")
            self.pool.terminate()
            self.queue.close()
            self.worker_queue.close()
            self.G = SharedGlobal()
        if n_parallel > 1:
            self.queue = mp.Queue()
            self.worker_queue = mp.Queue()
            self.pool = MemmapingPool(
                self.n_parallel,
                temp_folder="/tmp",
            )

    def terminate(self):
        if self.pool:
            self.pool.terminate()

    def run_each(self, runner, args_list=None):
        """
        Run the method on each worker process, and collect the result of
        execution.

        The runner method will receive 'g' as its first argument, followed
        by the arguments in the args_list, if any
        :return:
        """
        assert not inspect.ismethod(runner), (
            "run_each() cannot run a class method. Please ensure that runner "
            "is a function with the prototype def foo(g, ...), where g is an "
            "object of type garage.sampler.stateful_pool.SharedGlobal")

        if args_list is None:
            args_list = [tuple()] * self.n_parallel
        assert len(args_list) == self.n_parallel
        if self.n_parallel > 1:
            results = self.pool.map_async(_worker_run_each,
                                          [(runner, args)
                                           for args in args_list])
            for i in range(self.n_parallel):
                self.worker_queue.get()
            for i in range(self.n_parallel):
                self.queue.put(None)
            return results.get()
        return [runner(self.G, *args_list[0])]

    def run_map(self, runner, args_list):
        assert not inspect.ismethod(runner), (
            "run_map() cannot run a class method. Please ensure that runner "
            "is a function with the prototype 'def foo(g, ...)', where g is "
            "an object of type garage.sampler.stateful_pool.SharedGlobal")

        if self.n_parallel > 1:
            return self.pool.map(_worker_run_map,
                                 [(runner, args) for args in args_list])
        else:
            ret = []
            for args in args_list:
                ret.append(runner(self.G, *args))
            return ret

    def run_imap_unordered(self, runner, args_list):
        assert not inspect.ismethod(runner), (
            "run_imap_unordered() cannot run a class method. Please ensure "
            "that runner is a function with the prototype 'def foo(g, ...)', "
            "where g is an object of type "
            "garage.sampler.stateful_pool.SharedGlobal")

        if self.n_parallel > 1:
            for x in self.pool.imap_unordered(_worker_run_map,
                                              [(runner, args)
                                               for args in args_list]):
                yield x
        else:
            for args in args_list:
                yield runner(self.G, *args)

    def run_collect(self,
                    collect_once,
                    threshold,
                    args=None,
                    show_prog_bar=True):
        """
        Run the collector method using the worker pool. The collect_once method
        will receive 'g' as its first argument, followed by the provided args,
        if any. The method should return a pair of values. The first should be
        the object to be collected, and the second is the increment to be
        added.
        This will continue until the total increment reaches or exceeds the
        given threshold.

        Sample script:

        def collect_once(g):
            return 'a', 1

        stateful_pool.run_collect(collect_once, threshold=3)
        # should return ['a', 'a', 'a']

        :param collect_once:
        :param threshold:
        :return:
        """
        assert not inspect.ismethod(collect_once), (
            "run_collect() cannot run a class method. Please ensure that "
            "collect_once is a function with the prototype 'def foo(g, ...)', "
            "where g is an object of type "
            "garage.sampler.stateful_pool.SharedGlobal")

        if args is None:
            args = tuple()
        if self.pool:
            manager = mp.Manager()
            counter = manager.Value('i', 0)
            lock = manager.RLock()
            results = self.pool.map_async(
                _worker_run_collect,
                [(collect_once, counter, lock, threshold, args)] *
                self.n_parallel)
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            last_value = 0
            while True:
                time.sleep(0.1)
                with lock:
                    if counter.value >= threshold:
                        if show_prog_bar:
                            pbar.stop()
                        break
                    if show_prog_bar:
                        pbar.inc(counter.value - last_value)
                    last_value = counter.value
            return sum(results.get(), [])
        else:
            count = 0
            results = []
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            while count < threshold:
                result, inc = collect_once(self.G, *args)
                results.append(result)
                count += inc
                if show_prog_bar:
                    pbar.inc(inc)
            if show_prog_bar:
                pbar.stop()
            return results
        return []
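
run_collect follows a similar pattern: the master polls a managed counter while each worker repeatedly calls collect_once and advances the counter under the shared lock. The following worker-side sketch is an assumption built only from the master-side code above, reusing the assumed module-level G from the previous sketch; each worker returns a list, which the master flattens with sum(results.get(), []).

def _worker_run_collect(all_args):
    """Plausible worker-side counterpart of StatefulPool.run_collect (a sketch)."""
    collect_once, counter, lock, threshold, args = all_args
    collected = []
    while True:
        with lock:
            if counter.value >= threshold:
                return collected          # master concatenates all worker lists
        result, inc = collect_once(G, *args)
        collected.append(result)
        with lock:
            counter.value += inc          # progress tracked by the master's loop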
Example #21
class StatefulPool(object):
    def __init__(self):
        self.n_parallel = 1
        self.pool = None
        self.queue = None
        self.worker_queue = None
        self.G = SharedGlobal()

    def initialize(self, n_parallel):
        self.n_parallel = n_parallel
        if self.pool is not None:
            print("Warning: terminating existing pool")
            self.pool.terminate()
            self.queue.close()
            self.worker_queue.close()
            self.G = SharedGlobal()
        if n_parallel > 1:
            self.queue = mp.Queue()
            self.worker_queue = mp.Queue()
            self.pool = MemmapingPool(
                self.n_parallel,
                temp_folder="/tmp",
            )

    def run_each(self, runner, args_list=None):
        """
        Run the method on each worker process, and collect the result of execution.
        The runner method will receive 'G' as its first argument, followed by the arguments
        in the args_list, if any
        :return:
        """
        if args_list is None:
            args_list = [tuple()] * self.n_parallel
        assert len(args_list) == self.n_parallel
        if self.n_parallel > 1:
            #return [runner(self.G, *args_list[i]) for i in range(self.n_parallel)]
            results = self.pool.map_async(
                _worker_run_each, [(runner, args) for args in args_list]
            )
            for i in range(self.n_parallel):
                self.worker_queue.get()
            for i in range(self.n_parallel):
                self.queue.put(None)
            return results.get()
        return [runner(self.G, *args_list[0])]

    def run_map(self, runner, args_list):
        if self.n_parallel > 1:
            return self.pool.map(_worker_run_map, [(runner, args) for args in args_list])
        else:
            ret = []
            for args in args_list:
                ret.append(runner(self.G, *args))
            return ret

    def run_imap_unordered(self, runner, args_list):
        if self.n_parallel > 1:
            for x in self.pool.imap_unordered(_worker_run_map, [(runner, args) for args in args_list]):
                yield x
        else:
            for args in args_list:
                yield runner(self.G, *args)

    def run_collect(self, collect_once, threshold, args=None, show_prog_bar=True, multi_task=False):
        """
        Run the collector method using the worker pool. The collect_once method will receive 'G' as
        its first argument, followed by the provided args, if any. The method should return a pair of values.
        The first should be the object to be collected, and the second is the increment to be added.
        This will continue until the total increment reaches or exceeds the given threshold.

        Sample script:

        def collect_once(G):
            return 'a', 1

        stateful_pool.run_collect(collect_once, threshold=3) # => ['a', 'a', 'a']

        :param collect_once:
        :param threshold:
        :return:
        """
        if args is None:
            args = tuple()
        if self.pool and multi_task:
            manager = mp.Manager()
            counter = manager.Value('i', 0)
            lock = manager.RLock()

            inputs = [(collect_once, counter, lock, threshold, arg) for arg in args]
            results = self.pool.map_async(
                _worker_run_collect,
                inputs,
            )
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            last_value = 0
            while True:
                time.sleep(0.1)
                with lock:
                    if counter.value >= threshold:
                        if show_prog_bar:
                            pbar.stop()
                        break
                    if show_prog_bar:
                        pbar.inc(counter.value - last_value)
                    last_value = counter.value
            finished_results = results.get()
            # TODO - for some reason this is buggy.
            return {i:finished_results[i] for i in range(len(finished_results))}
        elif multi_task:
            raise NotImplementedError("multi_task requires a worker pool")
        elif self.pool:
            manager = mp.Manager()
            counter = manager.Value('i', 0)
            lock = manager.RLock()
            results = self.pool.map_async(
                _worker_run_collect,
                [(collect_once, counter, lock, threshold, args)] * self.n_parallel
            )
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            last_value = 0
            while True:
                time.sleep(0.1)
                with lock:
                    if counter.value >= threshold:
                        if show_prog_bar:
                            pbar.stop()
                        break
                    if show_prog_bar:
                        pbar.inc(counter.value - last_value)
                    last_value = counter.value
            return sum(results.get(), [])
        else:
            count = 0
            results = []
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            while count < threshold:
                result, inc = collect_once(self.G, *args)
                results.append(result)
                count += inc
                if show_prog_bar:
                    pbar.inc(inc)
            if show_prog_bar:
                pbar.stop()
            return results
Example #22
def get_score(data,
              labels,
              fold_pairs,
              name,
              model,
              param,
              numTopVars,
              rank_per_fold=None,
              parallel=True,
              rand_iter=-1):
    """
    Function to get score for a classifier.

    Parameters
    ----------
    data: array_like
        Data from which to derive score.
    labels: array_like or list
        Corresponding labels for each sample.
    fold_pairs: list of pairs of array_like
        A list of train/test indices for each fold
        dhjelm(Why can't we just use the KFold object?)
    name: str
        Name of classifier.
    model: WRITEME
    param: WRITEME
        Parameters for the classifier.
    parallel: bool
        Whether to run folds in parallel. Default: True

    Returns
    -------
    classifier: WRITEME
    allConfMats: Confusion matrices for all folds and variable sets, for the
                 best performing parameter set ([numFolds, numVarSets])
    """
    assert isinstance(name, str)
    logging.info("Classifying %s" % name)
    ksplit = len(fold_pairs)
    #    if name not in NAMES:
    #        raise ValueError("Classifier %s not supported. "
    #                         "Did you enter it properly?" % name)

    # Redefine the parameters to be used for RBF SVM (dependent on
    # training data)
    if "SGD" in name:
        param["n_iter"] = [25]  # [np.ceil(10**3 / len(fold_pairs[0][0]))]
    classifier = get_classifier(name, model, param, rand_iter=rand_iter)

    if name == "RBF SVM":  #This doesn't use labels, but looks as ALL data
        logging.info("RBF SVM requires some preprocessing."
                     "This may take a while")
        #
        is_data_computed_gamma = True
        #
        if not is_data_computed_gamma:
            # Sahil commented the code below that computes the gamma choices from data.
            # The computed gamma choices seem too low thereby making SVM very slow. Instead, trying out fixed values.
            print(param)
            gamma = param['gamma']
            gamma = np.array(gamma)
            print('gamma', gamma)
        else:
            # Euclidean distances between samples
            # sahil switched from the first call to second one for computing the dist as the first one is giving error.
            # dist = pdist(StandardScaler().fit(data), "euclidean").ravel()
            dist = pdist(RobustScaler().fit_transform(data),
                         "euclidean").ravel()
            print('dist', dist)
            # Estimates for sigma (10th, 50th and 90th percentile)
            sigest = np.asarray(np.percentile(dist, [10, 50, 90]))
            print('sigest', sigest)
            # Estimates for gamma (= -1/(2*sigma^2))
            gamma = 1. / (2 * sigest**2)
            print('gamma', gamma)
        #
        #
        #Set SVM parameters with these values
        # sahil changed the code a bit to remove a bug
        # param = [{"kernel": ["rbf"],
        #           "gamma": gamma.tolist(),
        #           "C": np.logspace(-2,2,5).tolist()}]
        param = {
            "kernel": ["rbf"],
            "gamma": gamma.tolist(),
            "C": np.logspace(-2, 2, 5).tolist()
        }
    # if name not in ["Decision Tree", "Naive Bayes"]:
    if param:
        if hasattr(classifier, 'param_grid'):
            # isinstance(classifier, GridSearchCV):
            print('param', param)
            N_p = np.prod([len(l) for l in param.values()])
        elif isinstance(classifier, RandomizedSearchCV):
            N_p = classifier.n_iter
    else:
        N_p = 1


#    is_cv = isinstance(classifier, GridSearchCV) or \
#            isinstance(classifier, RandomizedSearchCV)
#    print('Name: {}, ksplit: {}, N_p: {}'.format(name, ksplit, N_p))
    if (not parallel) or ksplit <= N_p or \
    (name == "Random Forest") or ("SGD" in name):
        logging.info("Attempting to use grid search...")
        classifier.n_jobs = PROCESSORS
        classifier.pre_dispatch = 1  # np.floor(PROCESSORS/24)
        allConfMats = []
        allTotalErrs = []
        allFittedClassifiers = []
        for i, fold_pair in enumerate(fold_pairs):
            confMats = []
            totalErrs = []
            fitted_classifiers = []
            logging.info("Classifying a %s the %d-th out of %d folds..." %
                         (name, i + 1, len(fold_pairs)))
            if rank_per_fold is not None:
                rankedVars = rank_per_fold[i]
            else:
                rankedVars = np.arange(data.shape[1])
            #
            for numVars in numTopVars:
                logging.info('Classifying for top %i variables' % numVars)
                #
                # print 'rankedVars', rankedVars
                #
                confMat, totalErr, fitted_classifier = classify(
                    data[:, rankedVars[:numVars]], labels, fold_pair,
                    classifier)
                confMats.append(confMat)
                totalErrs.append(totalErr)
                fitted_classifiers.append(fitted_classifier)
            # recheck the structure of area and fScore variables
            allConfMats.append(confMats)
            allTotalErrs.append(totalErrs)
            allFittedClassifiers.append(fitted_classifiers)
    else:
        print('parallel computing going on (debug Sahil ...) ..........................')
        #
        classifier.n_jobs = PROCESSORS
        logging.info("Multiprocessing folds for classifier {}.".format(name))
        pool = Pool(processes=min(ksplit, PROCESSORS))
        out_list = pool.map(
            per_split_classifier(data, labels, classifier, numTopVars),
            zip(rank_per_fold, fold_pairs))
        pool.close()
        pool.join()
        #allConfMats = [el[0] for el in out_list]
        #allTotalErrs = [el[1] for el in out_list]
        #allFittedClassifiers = [el[2] for el in out_list]
        allConfMats, allTotalErrs, allFittedClassifiers = tuple(zip(*out_list))
    return classifier, allConfMats, allTotalErrs, allFittedClassifiers
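
In the parallel branch above, per_split_classifier(...) is passed to Pool.map instead of a lambda or closure because multiprocessing pickles the callable, and lambdas are not picklable; a small class instance carrying the shared arguments is the usual workaround. The class itself is not part of this listing, so the following is a hypothetical sketch that mirrors the serial loop in get_score:

class per_split_classifier(object):
    """Hypothetical picklable callable: classify one fold for each numVars cut."""

    def __init__(self, data, labels, classifier, numTopVars):
        self.data = data
        self.labels = labels
        self.classifier = classifier
        self.numTopVars = numTopVars

    def __call__(self, ranked_and_fold):
        rankedVars, fold_pair = ranked_and_fold
        confMats, totalErrs, fitted = [], [], []
        for numVars in self.numTopVars:
            # Same computation as the serial branch of get_score above.
            confMat, totalErr, clf = classify(
                self.data[:, rankedVars[:numVars]], self.labels,
                fold_pair, self.classifier)
            confMats.append(confMat)
            totalErrs.append(totalErr)
            fitted.append(clf)
        return confMats, totalErrs, fitted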
Example #23
def get_rank_per_fold(data,
                      labels,
                      fold_pairs,
                      ranking_function=ttest_ind,
                      save_path=None,
                      load_file=True,
                      parallel=True):
    '''
    Applies rank_vars to the training set of each fold pair
    Inputs:
        data: array
            features for all samples
        labels: array
            label vector of each sample
        fold_pairs: list
            list of pairs of index arrays containing train and test sets
        ranking_function: function object, default: ttest_ind
            function to apply for ranking features
        save_path: str
            dir to load and save ranking files
        load_file: bool
            Whether to try to load an existing file, default: True
        parallel: bool
            True if multicore processing is desired, default: True
    Outputs:
        rank_per_fold: list
            List of ranked feature indices for each fold pair
    '''
    file_loaded = False
    if load_file:
        if isinstance(save_path, str):
            fname = path.join(
                save_path, "{}_{}_folds.mat".format(ranking_function.__name__,
                                                    len(fold_pairs)))
            try:
                rd = scipy.io.loadmat(fname, mat_dtype=True)
                rank_per_fold = rd['rank_per_fold']
                file_loaded = True
            except Exception:
                pass
        else:
            print('No rank file path: Computing from scratch without saving')
    if not file_loaded:
        if not parallel:
            rank_per_fold = []
            for fold_pair in fold_pairs:
                rankedVars = rank_vars(data[fold_pair[0], :],
                                       labels[fold_pair[0]], ranking_function)
                rank_per_fold.append(rankedVars)
        else:
            pool = Pool(processes=min(len(fold_pairs), PROCESSORS))
            rank_per_fold = pool.map(
                Ranker(data, labels, ranking_function, rank_vars), fold_pairs)
            pool.close()
            pool.join()
        if isinstance(save_path, str):
            fname = path.join(
                save_path, "{}_{}_folds.mat".format(ranking_function.__name__,
                                                    len(fold_pairs)))
            with open(fname, 'wb') as f:
                scipy.io.savemat(f, {'rank_per_fold': rank_per_fold})
    return rank_per_fold
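
The Ranker callable used in the parallel branch of get_rank_per_fold follows the same picklable-callable pattern. A hypothetical sketch consistent with the serial branch above:

class Ranker(object):
    """Hypothetical picklable callable: rank features on a fold's training set."""

    def __init__(self, data, labels, ranking_function, rank_vars):
        self.data = data
        self.labels = labels
        self.ranking_function = ranking_function
        self.rank_vars = rank_vars

    def __call__(self, fold_pair):
        train_idx = fold_pair[0]
        # Same computation as the serial branch of get_rank_per_fold above.
        return self.rank_vars(self.data[train_idx, :],
                              self.labels[train_idx],
                              self.ranking_function)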