Code example #1
File: test_parallel.py Project: spiiph/owls-parallel
    def execute(self, is_null=False):
        # Reset the counter
        counter.value = 0

        # Create a parallelization environment with the current backend
        parallel = ParallelizedEnvironment(self._backend, 5)

        # Run the computation a few times in the parallelized environment
        loop_count = 0
        with caching_into(FileSystemPersistentCache(self.working_directory)):
            while parallel.run(False):
                # Run some computations
                x = computation(1, 2)
                y = computation(3, 4)
                z = computation(5, 6)

                # Check that we can monitor if we're capturing
                if loop_count == 0 and not is_null:
                    self.assertTrue(parallel.capturing())
                else:
                    self.assertFalse(parallel.capturing())
                loop_count += 1

        # Make sure the computation was never invoked locally
        self.assertEqual(counter.value, 3 if is_null else 0)

        # Validate the results
        self.assertEqual(x, 3)
        self.assertEqual(y, 7)
        self.assertEqual(z, 11)

        # Run outside of the parallelization environment
        self.assertEqual(computation(7, 8), 15)
        self.assertEqual(counter.value, 4 if is_null else 1)
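The test above assumes two fixtures that the snippet does not show: a shared counter that records local invocations, and a computation function that simply adds its arguments (the assertions x == 3, y == 7, z == 11, and computation(7, 8) == 15 all follow from addition). A minimal sketch under those assumptions is given below; the _Counter holder is hypothetical, and the registration of the function with the owls-parallel backend, which lets calls inside the environment be captured instead of run locally, is omitted because its API does not appear in these snippets.

# Minimal sketch of the fixtures assumed by the test above (not the actual
# owls-parallel test code); the backend registration is intentionally omitted.

class _Counter(object):
    def __init__(self):
        self.value = 0


# Shared counter checked by the assertions on counter.value
counter = _Counter()


def computation(a, b):
    # Count every local (non-captured) invocation and return the sum
    counter.value += 1
    return a + b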
Code example #2
    def test(self):
        # Execute in a cached context
        with caching_into(fs_backend):
            # Run some computations which should be cache misses
            value_1 = self.do_computation(1, 2, 'add')
            value_2 = self.do_computation(1, 2, 'subtract')

            # Check that both missed
            self.assertEqual(self._counter, 2)
Code example #3
def _run(cache, job):
    with caching_into(cache):
        for batcher, calls in iteritems(job):
            for function, args_kwargs in iteritems(calls):
                try:
                    batcher(function, args_kwargs)
                except Exception:
                    print_exc()
                    raise
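The nested loops in _run imply the shape of the job argument: a mapping from batcher callables to mappings from target functions to the call description that batcher understands, while the try/except prints the worker's traceback before re-raising so failures are not silently swallowed. A hedged sketch of such a job is shown below; the (args, kwargs) tuple format and the sequential batcher are illustrative assumptions, not the owls-parallel batcher API.

# Hedged sketch of the job mapping that _run iterates over (illustration
# only; real owls-parallel batchers define their own call-description format).

def add(a, b):
    return a + b


def sequential_batcher(function, args_kwargs):
    # Hypothetical batcher: execute each captured call in sequence
    for args, kwargs in args_kwargs:
        function(*args, **kwargs)


# job maps batcher -> {function -> call descriptions}
job = {
    sequential_batcher: {
        add: [((1, 2), {}), ((3, 4), {})],
    },
}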
Code example #4
    def test(self):
        # Execute in a cached context
        with caching_into(fs_backend):
            # Run a computation which should trigger a cache miss
            value_1 = self.do_computation(1, 2, 'add')

        # Now run again and check that we skip caching
        value_1_uncached = self.do_computation(1, 2, 'add')
        self.assertEqual(self._counter, 2)
        self.assertEqual(value_1, value_1_uncached)
Code example #5
    def test(self):
        # Execute in a cached context
        with caching_into(redis_backend):
            # Run a computation which should trigger a cache miss
            value_1 = self.do_computation(1, 2, 'add')

            # Now run again and check for a cache hit
            value_1_cached = self.do_computation(1, 2, 'add')
            self.assertEqual(self._counter, 1)
            self.assertEqual(value_1, value_1_cached)
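The cache tests above assume a do_computation helper whose counter increments only when the computation actually executes, so a cache hit inside caching_into leaves the counter unchanged while a miss, or a call outside the cached context, increments it. A minimal sketch under those assumptions follows; the class name and setUp are hypothetical, and the owls-cache wrapping that stores and retrieves results through the active cache is omitted because its exact API is not shown in these snippets.

# Minimal sketch of the test fixture assumed above (not the actual owls-cache
# test code); the caching wrapper around the computation is omitted.

class ComputationTestMixin(object):
    def setUp(self):
        # Counts how many times the computation really ran
        self._counter = 0

    def do_computation(self, a, b, operation):
        # Record that the computation actually executed
        self._counter += 1

        # Perform the requested operation
        if operation == 'add':
            return a + b
        elif operation == 'subtract':
            return a - b
        raise ValueError('Unknown operation: {0}'.format(operation))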
Code example #6
File: plot.py Project: spiiph/owls-mutau
# Create the parallelization environment
parallel = ParallelizedEnvironment(backend)

# Create output directories
for region_name in regions:
    # Compute the region's output path
    region_path = join(arguments.output, region_name)

    # Try to create it
    if not exists(region_path):
        makedirs(region_path)


# Run in a cached environment
with caching_into(cache):
    # Run in a parallelized environment
    while parallel.run():
        if parallel.computed():
            print('Creating plots...')

        # Loop over regions and distributions
        for region_name, distribution_name in product(regions, distributions):
            # Grab the region/distribution objects
            region = regions[region_name]
            distribution = distributions[distribution_name]

            # Create the data histogram
            data_histogram = None
            if data is not None:
                data_process = data['process']
Code example #7
File: ipython.py Project: spiiph/owls-parallel
def _run(cache, job):
    with caching_into(cache):
        for batcher, calls in iteritems(job):
            for function, args_kwargs in iteritems(calls):
                batcher(function, args_kwargs)