コード例 #1
0
def test_validate_kwargs():
    """Keys present in the dict must all appear in the allowed tuple.

    A superset of the dict's keys passes validation; a strict subset
    makes ``validate_kwargs`` raise ``ValueError``.
    """
    sample = {'a': 1, 'b': 2}
    allowed = ('a', 'b', 'c')
    # Both 'a' and 'b' are permitted, so this call succeeds silently.
    V.validate_kwargs(sample, allowed)
    # 'b' is missing from the permitted tuple, so a ValueError is expected.
    assert_raises(ValueError, V.validate_kwargs, sample, ('a', ))
コード例 #2
0
ファイル: parallel.py プロジェクト: pschulam/kernels
    def __init__(self, runners, backend='multiprocessing', **kwargs):
        """Configure a pool of runners for local or cloud execution.

        Parameters
        ----------
        runners : sequence
            Runner objects to execute in parallel.  In the multyvac
            path each runner is expected to expose ``expensive_state``
            and ``expensive_state_digest(h)``.
        backend : str
            'multiprocessing' for local worker processes, or 'multyvac'
            for remote execution via the multyvac service.
        **kwargs
            Backend-specific options:
              multiprocessing -- 'processes' (positive int; defaults to
                ``mp.cpu_count()``).
              multyvac -- 'layer' (required), 'core' (defaults to 'f2'),
                'volume' (optional but strongly recommended).

        Raises
        ------
        ValueError
            For an unknown backend, unrecognized kwargs, a missing
            multyvac installation / layer / API credentials, or a
            volume name that does not exist.
        """
        self._runners = runners
        if backend not in (
                'multiprocessing',
                'multyvac',
        ):
            raise ValueError("invalid backend: {}".format(backend))
        self._backend = backend
        if backend == 'multiprocessing':
            # Local pool: the only recognized option is 'processes'.
            validator.validate_kwargs(kwargs, ('processes', ))
            if 'processes' not in kwargs:
                kwargs['processes'] = mp.cpu_count()
            validator.validate_positive(kwargs['processes'], 'processes')
            self._processes = kwargs['processes']
        elif backend == 'multyvac':
            if not _has_multyvac:
                raise ValueError("multyvac module not installed on machine")
            validator.validate_kwargs(kwargs, (
                'layer',
                'core',
                'volume',
            ))
            if 'layer' not in kwargs:
                msg = ('multyvac support requires setting up a layer.'
                       'see scripts in bin')
                raise ValueError(msg)
            self._volume = kwargs.get('volume', None)
            if self._volume is None:
                # Without a volume we cannot pre-upload state; warn and
                # continue -- the early return below skips the upload path.
                msg = "use of a volume is highly recommended"
                warnings.warn(msg)
            else:
                volume = multyvac.volume.get(self._volume)
                if not volume:
                    raise ValueError("no such volume: {}".format(self._volume))

            self._layer = kwargs['layer']
            if (not multyvac.config.api_key
                    or not multyvac.config.api_secret_key):
                raise ValueError("multyvac is not auth-ed")
            # XXX(stephentu): currently defaults to the good stuff
            self._core = kwargs.get('core', 'f2')
            self._env = {}
            # XXX(stephentu): assumes you used the setup multyvac scripts we
            # provide
            self._env['PATH'] = '{}:{}'.format(
                '/home/multyvac/miniconda/envs/build/bin', _MULTYVAC_PATH)
            self._env['CONDA_DEFAULT_ENV'] = 'build'
            # this is needed for multyvacinit.pybootstrap
            self._env['PYTHONPATH'] = '/usr/local/lib/python2.7/dist-packages'

            # XXX(stephentu): multyvac post requests are limited in size
            # (don't know what the hard limit is). so to avoid the limits,
            # we explicitly serialize the expensive state to a file

            if not self._volume:
                # no volume provided for uploads
                self._digests = [None for _ in xrange(len(self._runners))]
                return

            # XXX(stephentu): we shouldn't reach in there like this
            # Compute a digest per runner, memoized on the identity of the
            # (possibly shared) expensive_state object so shared state is
            # hashed only once.
            self._digests = []
            digest_cache = {}
            for runner in self._runners:
                cache_key = id(runner.expensive_state)
                if cache_key in digest_cache:
                    digest = digest_cache[cache_key]
                else:
                    h = hashlib.sha1()
                    runner.expensive_state_digest(h)
                    digest = h.hexdigest()
                    digest_cache[cache_key] = digest
                self._digests.append(digest)

            # Upload each distinct state blob to the volume, skipping any
            # digest already present remotely.
            uploaded = set(_mvac_list_files_in_dir(volume, ""))
            _logger.info("starting state uploads")
            start = time.time()
            for runner, digest in zip(self._runners, self._digests):
                if digest in uploaded:
                    continue
                _logger.info("uploaded state-%s since not found", digest)
                f = tempfile.NamedTemporaryFile()
                pickle.dump(runner.expensive_state, f)
                f.flush()
                # XXX(stephentu) this seems to fail for large files
                #volume.put_file(f.name, 'state-{}'.format(digest))
                volume.sync_up(f.name, 'state-{}'.format(digest))
                f.close()
                uploaded.add(digest)
            _logger.info("state upload took %f seconds", (time.time() - start))

        else:
            assert False, 'should not be reached'
コード例 #3
0
def test_validate_kwargs():
    """Validation accepts a key superset and rejects a key subset."""
    given = dict(a=1, b=2)
    # All keys of `given` occur in the allowed tuple: no exception.
    V.validate_kwargs(given, ('a', 'b', 'c',))
    # Restricting the allowed keys to just 'a' must raise ValueError.
    assert_raises(ValueError, V.validate_kwargs, given, ('a',))
コード例 #4
0
ファイル: parallel.py プロジェクト: datamicroscopes/kernels
    def __init__(self, runners, backend='multiprocessing', **kwargs):
        """Configure a pool of runners for local or cloud execution.

        Parameters
        ----------
        runners : sequence
            Runner objects to execute in parallel.  In the multyvac
            path each runner is expected to expose ``expensive_state``
            and ``expensive_state_digest(h)``.
        backend : str
            'multiprocessing' for local worker processes, or 'multyvac'
            for remote execution via the multyvac service.
        **kwargs
            Backend-specific options:
              multiprocessing -- 'processes' (positive int; defaults to
                ``mp.cpu_count()``).
              multyvac -- 'layer' (required), 'core' (defaults to 'f2'),
                'volume' (optional but strongly recommended).

        Raises
        ------
        ValueError
            For an unknown backend, unrecognized kwargs, a missing
            multyvac installation / layer / API credentials, or a
            volume name that does not exist.
        """
        self._runners = runners
        if backend not in ('multiprocessing', 'multyvac',):
            raise ValueError("invalid backend: {}".format(backend))
        self._backend = backend
        if backend == 'multiprocessing':
            # Local pool: the only recognized option is 'processes'.
            validator.validate_kwargs(kwargs, ('processes',))
            if 'processes' not in kwargs:
                kwargs['processes'] = mp.cpu_count()
            validator.validate_positive(kwargs['processes'], 'processes')
            self._processes = kwargs['processes']
        elif backend == 'multyvac':
            if not _has_multyvac:
                raise ValueError("multyvac module not installed on machine")
            validator.validate_kwargs(kwargs, ('layer', 'core', 'volume',))
            if 'layer' not in kwargs:
                msg = ('multyvac support requires setting up a layer.'
                       'see scripts in bin')
                raise ValueError(msg)
            self._volume = kwargs.get('volume', None)
            if self._volume is None:
                # Without a volume we cannot pre-upload state; warn and
                # continue -- the early return below skips the upload path.
                msg = "use of a volume is highly recommended"
                warnings.warn(msg)
            else:
                volume = multyvac.volume.get(self._volume)
                if not volume:
                    raise ValueError(
                        "no such volume: {}".format(self._volume))

            self._layer = kwargs['layer']
            if (not multyvac.config.api_key or
                    not multyvac.config.api_secret_key):
                raise ValueError("multyvac is not auth-ed")
            # XXX(stephentu): currently defaults to the good stuff
            self._core = kwargs.get('core', 'f2')
            self._env = {}
            # XXX(stephentu): assumes you used the setup multyvac scripts we
            # provide
            self._env['PATH'] = '{}:{}'.format(
                '/home/multyvac/miniconda/envs/build/bin', _MULTYVAC_PATH)
            self._env['CONDA_DEFAULT_ENV'] = 'build'
            # this is needed for multyvacinit.pybootstrap
            self._env['PYTHONPATH'] = '/usr/local/lib/python2.7/dist-packages'

            # XXX(stephentu): multyvac post requests are limited in size
            # (don't know what the hard limit is). so to avoid the limits,
            # we explicitly serialize the expensive state to a file

            if not self._volume:
                # no volume provided for uploads
                self._digests = [None for _ in xrange(len(self._runners))]
                return

            # XXX(stephentu): we shouldn't reach in there like this
            # Compute a digest per runner, memoized on the identity of the
            # (possibly shared) expensive_state object so shared state is
            # hashed only once.
            self._digests = []
            digest_cache = {}
            for runner in self._runners:
                cache_key = id(runner.expensive_state)
                if cache_key in digest_cache:
                    digest = digest_cache[cache_key]
                else:
                    h = hashlib.sha1()
                    runner.expensive_state_digest(h)
                    digest = h.hexdigest()
                    digest_cache[cache_key] = digest
                self._digests.append(digest)

            # Upload each distinct state blob to the volume, skipping any
            # digest already present remotely.
            uploaded = set(_mvac_list_files_in_dir(volume, ""))
            _logger.info("starting state uploads")
            start = time.time()
            for runner, digest in zip(self._runners, self._digests):
                if digest in uploaded:
                    continue
                _logger.info("uploaded state-%s since not found", digest)
                f = tempfile.NamedTemporaryFile()
                pickle.dump(runner.expensive_state, f)
                f.flush()
                # XXX(stephentu) this seems to fail for large files
                #volume.put_file(f.name, 'state-{}'.format(digest))
                volume.sync_up(f.name, 'state-{}'.format(digest))
                f.close()
                uploaded.add(digest)
            _logger.info("state upload took %f seconds", (time.time() - start))

        else:
            assert False, 'should not be reached'