Example #1
0
    def __init__(self, slvr_cfg):
        """
        CPU Solver Constructor

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        super(CPUSolver, self).__init__(slvr_cfg)

        # Monkey patch these functions onto the object
        # TODO: Remove this when deprecating v2.
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        from montblanc.impl.rime.v4.config import (A, P)

        self.register_default_dimensions()

        # Register the beam cube dimensions from the solver configuration
        beam_dims = (
            ('beam_lw', Options.E_BEAM_WIDTH, 'E cube l width'),
            ('beam_mh', Options.E_BEAM_HEIGHT, 'E cube m height'),
            ('beam_nud', Options.E_BEAM_DEPTH, 'E cube nu depth'))

        for dim_name, cfg_key, dim_desc in beam_dims:
            self.register_dimension(dim_name,
                slvr_cfg[cfg_key],
                description=dim_desc)

        # Register the v4 properties and arrays, then create the arrays
        self.register_properties(P)
        self.register_arrays(A)
        self.create_arrays()
Example #2
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Registers solver dimensions, constructs the RIME pipeline
        kernel objects and sets up the CUDA stream/event used to
        drive them asynchronously.

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        # Call the parent constructor
        super(RimeSolver, self).__init__(slvr_cfg)

        self.register_default_dimensions()

        # Configure the dimensions of the beam cube
        self.register_dimension('beam_lw',
                                slvr_cfg[Options.E_BEAM_WIDTH],
                                description='E cube l width')

        self.register_dimension('beam_mh',
                                slvr_cfg[Options.E_BEAM_HEIGHT],
                                description='E cube m height')

        self.register_dimension('beam_nud',
                                slvr_cfg[Options.E_BEAM_DEPTH],
                                description='E cube nu depth')

        # Construct the kernel objects composing the RIME pipeline
        self.rime_e_beam = RimeEBeam()
        self.rime_b_sqrt = RimeBSqrt()
        self.rime_ekb_sqrt = RimeEKBSqrt()
        self.rime_sum = RimeSumCoherencies()
        self.rime_reduce = RimeReduction()

        # Monkey patch v4 antenna pair functions onto this object
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        # Create
        # (1) A stream that this solver will asynchronously
        #     operate on
        # (2) An event indicating when an iteration of
        #     the kernels above have finished executing
        with self.context:
            self.stream = cuda.Stream()
            self.kernels_done = cuda.Event()

        # Create constant data for transfer to GPU
        self._const_data = mbu.create_rime_const_data(self)

        # Indicate these variables have not been set
        # (memory pools are created lazily elsewhere — not visible here)
        self._dev_mem_pool = None
        self._pinned_mem_pool = None
        self._pool_lock = None
Example #3
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        # Initialise the base solver
        super(RimeSolver, self).__init__(slvr_cfg)

        self.register_default_dimensions()

        # Register the E beam cube dimensions
        for dim_name, cfg_key, dim_desc in (
                ('beam_lw', Options.E_BEAM_WIDTH, 'E cube l width'),
                ('beam_mh', Options.E_BEAM_HEIGHT, 'E cube m height'),
                ('beam_nud', Options.E_BEAM_DEPTH, 'E cube nu depth')):
            self.register_dimension(dim_name,
                slvr_cfg[cfg_key],
                description=dim_desc)

        # Construct the kernel objects composing the RIME pipeline
        self.rime_e_beam = RimeEBeam()
        self.rime_b_sqrt = RimeBSqrt()
        self.rime_ekb_sqrt = RimeEKBSqrt()
        self.rime_sum = RimeSumCoherencies()
        self.rime_reduce = RimeReduction()

        # Monkey patch v4 antenna pair functions onto this object
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        # Within the solver's CUDA context, create
        # (1) a stream this solver operates on asynchronously, and
        # (2) an event signalling that an iteration of the
        #     kernels above has finished executing
        with self.context:
            self.stream = cuda.Stream()
            self.kernels_done = cuda.Event()

        # Create constant data for transfer to GPU
        self._const_data = mbu.create_rime_const_data(self)

        # These are not set yet
        self._dev_mem_pool = None
        self._pinned_mem_pool = None
        self._pool_lock = None
Example #4
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        # Fall back to a default pipeline when none is configured
        slvr_cfg.setdefault('pipeline', get_pipeline(slvr_cfg))

        super(RimeSolver, self).__init__(slvr_cfg)

        self.register_default_dimensions()

        # Register the E Beam cube dimensions
        for dim_name, cfg_key, dim_desc in (
                ('beam_lw', Options.E_BEAM_WIDTH, 'E Beam cube l width'),
                ('beam_mh', Options.E_BEAM_HEIGHT, 'E Beam cube m height'),
                ('beam_nud', Options.E_BEAM_DEPTH, 'E Beam cube nu depth')):
            self.register_dimension(dim_name,
                slvr_cfg[cfg_key],
                description=dim_desc)

        # Monkey patch these functions onto the object
        # TODO: Remove this when deprecating v2.
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        from montblanc.impl.rime.v4.config import (A, P)

        # Register the v4 properties and arrays, then create the arrays
        self.register_properties(P)
        self.register_arrays(A)
        self.create_arrays()

        # Constant data for transfer to compute devices
        self._const_data = mbu.create_rime_const_data(self)
Example #5
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Registers solver dimensions, v4 properties and arrays,
        and creates the solver's arrays.

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """

        # Set up a default pipeline if None is supplied
        slvr_cfg.setdefault("pipeline", get_pipeline(slvr_cfg))

        super(RimeSolver, self).__init__(slvr_cfg)

        self.register_default_dimensions()

        # Configure the dimensions of the beam cube
        self.register_dimension("beam_lw", slvr_cfg[Options.E_BEAM_WIDTH], description="E Beam cube l width")

        self.register_dimension("beam_mh", slvr_cfg[Options.E_BEAM_HEIGHT], description="E Beam cube m height")

        self.register_dimension("beam_nud", slvr_cfg[Options.E_BEAM_DEPTH], description="E Beam cube nu depth")

        # Monkey patch these functions onto the object
        # TODO: Remove this when deprecating v2.
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs

        monkey_patch_antenna_pairs(self)

        from montblanc.impl.rime.v4.config import A, P

        # Register the v4 properties and arrays, then create the arrays
        self.register_properties(P)
        self.register_arrays(A)
        self.create_arrays()

        # Constant data for transfer to compute devices
        self._const_data = mbu.create_rime_const_data(self)
Example #6
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Configures the composite solver: registers dimensions, arrays
        and properties, spins up one enqueue and one sync executor
        thread per device context, budgets memory across devices and
        creates sub-solvers on each device thread.

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        super(CompositeRimeSolver, self).__init__(slvr_cfg=slvr_cfg)

        # Create thread local storage
        self.thread_local = threading.local()

        self.register_default_dimensions()

        # Configure the dimensions of the beam cube
        self.register_dimension('beam_lw',
                                slvr_cfg[Options.E_BEAM_WIDTH],
                                description='E cube l width')

        self.register_dimension('beam_mh',
                                slvr_cfg[Options.E_BEAM_HEIGHT],
                                description='E cube m height')

        self.register_dimension('beam_nud',
                                slvr_cfg[Options.E_BEAM_DEPTH],
                                description='E cube nu depth')

        # Monkey patch v4 antenna pair functions into the object
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        # Copy the v4 arrays and properties and
        # modify them for use on this Composite Solver
        A_main, P_main = self._cfg_comp_slvr_arys_and_props(v4Arrays, v4Props)

        self.register_properties(P_main)
        self.register_arrays(A_main)

        # Look for ignored and supplied arrays in the solver configuration
        array_cfg = slvr_cfg.get('array_cfg', {})
        ignore = array_cfg.get('ignore', None)
        supplied = array_cfg.get('supplied', None)

        # Create arrays on the solver, ignoring
        # and using supplied arrays as necessary
        self.create_arrays(ignore, supplied)

        # PyCUDA contexts for each GPU device
        self.dev_ctxs = slvr_cfg.get(Options.CONTEXT)
        # Number of GPU Solvers created for each device
        nsolvers = slvr_cfg.get(Options.NSOLVERS)
        # Maximum number of enqueued visibility chunks
        # before throttling is applied
        self.throttle_factor = slvr_cfg.get(Options.VISIBILITY_THROTTLE_FACTOR)

        # Massage the contexts for each device into a list
        if not isinstance(self.dev_ctxs, list):
            self.dev_ctxs = [self.dev_ctxs]

        montblanc.log.info(
            'Using {d} solver(s) per device.'.format(d=nsolvers))

        # Shorten the type name
        C = CompositeRimeSolver

        # Create a one thread executor for each device context,
        # i.e. a thread per device
        enqueue_executors = [cf.ThreadPoolExecutor(1) for _ in self.dev_ctxs]
        sync_executors = [cf.ThreadPoolExecutor(1) for _ in self.dev_ctxs]

        self.enqueue_executors = enqueue_executors
        self.sync_executors = sync_executors
        self.initialised = False
        self._vis_write_mode = slvr_cfg.get(Options.VISIBILITY_WRITE_MODE)

        montblanc.log.info(
            'Created {d} executor(s).'.format(d=len(enqueue_executors)))

        # Initialise executor threads, binding each
        # thread to its device context
        for ex, ctx in zip(enqueue_executors, self.dev_ctxs):
            ex.submit(C._thread_init, self, ctx).result()

        for ex, ctx in zip(sync_executors, self.dev_ctxs):
            ex.submit(C._thread_init, self, ctx).result()

        montblanc.log.info(
            'Initialised {d} thread(s).'.format(d=len(enqueue_executors)))

        # Get a template dictionary
        T = self.template_dict()

        A_sub, P_sub = self._cfg_sub_slvr_arys_and_props(v4Arrays, v4Props)
        self._validate_arrays(A_sub)

        # Find the budget with the lowest memory usage
        # Work with the device with the lowest memory
        # (each budget is the (P, M, mem) tuple unpacked below;
        # lambda parameter renamed so it no longer shadows the
        # template dict T)
        budgets = sorted([
            ex.submit(C._thread_budget, self, slvr_cfg, A_sub, T).result()
            for ex in enqueue_executors
        ],
                         key=lambda budget: budget[1])

        P, M, mem = budgets[0]

        # Log some information about the memory budget
        # and dimension reduction
        montblanc.log.info(('Selected a solver memory budget of {b} '
                            'for {d} solvers.').format(b=mbu.fmt_bytes(mem),
                                                       d=nsolvers))

        montblanc.log.info(('The following dimension reductions '
                            'have been applied:'))

        # items() rather than Python 2's iteritems() so this
        # also runs under Python 3
        for k, v in M.items():
            montblanc.log.info('{p}{d}: {id} => {rd}'.format(p=' ' * 4,
                                                             d=k,
                                                             id=T[k],
                                                             rd=v))

        # Create the sub solver configuration. Sub-solvers start with
        # empty data and the last device context (previously taken
        # from the leaked loop variable 'ctx' above)
        subslvr_cfg = slvr_cfg.copy()
        subslvr_cfg[Options.DATA_SOURCE] = Options.DATA_SOURCE_EMPTY
        subslvr_cfg[Options.CONTEXT] = self.dev_ctxs[-1]

        subslvr_cfg = self._cfg_subslvr_dims(subslvr_cfg, P)

        # Extract the dimension differences
        self.src_diff = P[Options.NSRC]
        self.time_diff = P[Options.NTIME]
        self.ant_diff = P[Options.NA]
        self.bl_diff = P[Options.NBL]
        self.chan_diff = P[Options.NCHAN]

        montblanc.log.info('Creating {s} solver(s) on {d} device(s).'.format(
            s=nsolvers, d=len(enqueue_executors)))

        # Now create the solvers on each thread
        for ex in enqueue_executors:
            ex.submit(C._thread_create_solvers, self, subslvr_cfg, P,
                      nsolvers).result()

        montblanc.log.info('Solvers Created')

        # Register arrays and properties on each thread's solvers
        for ex in enqueue_executors:
            ex.submit(C._thread_reg_sub_arys_and_props, self, A_sub,
                      P_sub).result()

        montblanc.log.info('Priming Memory Pools')

        # Prime the memory pools on each sub-solver
        for ex in enqueue_executors:
            ex.submit(C._thread_prime_memory_pools, self).result()
Example #7
0
    def __init__(self, slvr_cfg):
        """
        RimeSolver Constructor

        Configures the composite solver: registers dimensions, arrays
        and properties, spins up one enqueue and one sync executor
        thread per device context, budgets memory across devices and
        creates sub-solvers on each device thread.

        Parameters:
            slvr_cfg : SolverConfiguration
                Solver Configuration variables
        """
        super(CompositeRimeSolver, self).__init__(slvr_cfg=slvr_cfg)

        # Create thread local storage
        self.thread_local = threading.local()

        self.register_default_dimensions()

        # Configure the dimensions of the beam cube
        self.register_dimension('beam_lw',
            slvr_cfg[Options.E_BEAM_WIDTH],
            description='E cube l width')

        self.register_dimension('beam_mh',
            slvr_cfg[Options.E_BEAM_HEIGHT],
            description='E cube m height')

        self.register_dimension('beam_nud',
            slvr_cfg[Options.E_BEAM_DEPTH],
            description='E cube nu depth')

        # Monkey patch v4 antenna pair functions into the object
        from montblanc.impl.rime.v4.ant_pairs import monkey_patch_antenna_pairs
        monkey_patch_antenna_pairs(self)

        # Copy the v4 arrays and properties and
        # modify them for use on this Composite Solver
        A_main, P_main = self._cfg_comp_slvr_arys_and_props(v4Arrays, v4Props)

        self.register_properties(P_main)
        self.register_arrays(A_main)

        # Look for ignored and supplied arrays in the solver configuration
        array_cfg = slvr_cfg.get('array_cfg', {})
        ignore = array_cfg.get('ignore', None)
        supplied = array_cfg.get('supplied', None)

        # Create arrays on the solver, ignoring
        # and using supplied arrays as necessary
        self.create_arrays(ignore, supplied)

        # PyCUDA contexts for each GPU device
        self.dev_ctxs = slvr_cfg.get(Options.CONTEXT)
        # Number of GPU Solvers created for each device
        nsolvers = slvr_cfg.get(Options.NSOLVERS)
        # Maximum number of enqueued visibility chunks
        # before throttling is applied
        self.throttle_factor = slvr_cfg.get(
            Options.VISIBILITY_THROTTLE_FACTOR)

        # Massage the contexts for each device into a list
        if not isinstance(self.dev_ctxs, list):
            self.dev_ctxs = [self.dev_ctxs]

        montblanc.log.info('Using {d} solver(s) per device.'.format(
            d=nsolvers))

        # Shorten the type name
        C = CompositeRimeSolver

        # Create a one thread executor for each device context,
        # i.e. a thread per device
        enqueue_executors = [cf.ThreadPoolExecutor(1) for ctx in self.dev_ctxs]
        sync_executors = [cf.ThreadPoolExecutor(1) for ctx in self.dev_ctxs]

        self.enqueue_executors = enqueue_executors
        self.sync_executors = sync_executors
        self.initialised = False
        self._vis_write_mode = slvr_cfg.get(Options.VISIBILITY_WRITE_MODE)

        montblanc.log.info('Created {d} executor(s).'.format(d=len(enqueue_executors)))

        # Initialise executor threads, binding each
        # thread to its device context
        for ex, ctx in zip(enqueue_executors, self.dev_ctxs):
            ex.submit(C._thread_init, self, ctx).result()

        for ex, ctx in zip(sync_executors, self.dev_ctxs):
            ex.submit(C._thread_init, self, ctx).result()

        montblanc.log.info('Initialised {d} thread(s).'.format(d=len(enqueue_executors)))

        # Get a template dictionary
        T = self.template_dict()

        A_sub, P_sub = self._cfg_sub_slvr_arys_and_props(v4Arrays, v4Props)
        self._validate_arrays(A_sub)

        # Find the budget with the lowest memory usage
        # Work with the device with the lowest memory
        # NOTE(review): the lambda parameter T shadows the template
        # dict T above; inside the lambda it is the (P, M, mem) tuple
        # unpacked below
        budgets = sorted([ex.submit(C._thread_budget, self,
                            slvr_cfg, A_sub, T).result()
                        for ex in enqueue_executors],
                    key=lambda T: T[1])

        P, M, mem = budgets[0]

        # Log some information about the memory budget
        # and dimension reduction
        montblanc.log.info(('Selected a solver memory budget of {b} '
            'for {d} solvers.').format(b=mbu.fmt_bytes(mem), d=nsolvers))

        montblanc.log.info(('The following dimension reductions '
            'have been applied:'))

        # NOTE(review): iteritems() is Python 2 only — this line fails
        # on Python 3, where M.items() would be needed
        for k, v in M.iteritems():
            montblanc.log.info('{p}{d}: {id} => {rd}'.format
                (p=' '*4, d=k, id=T[k], rd=v))

        # Create the sub solver configuration
        subslvr_cfg = slvr_cfg.copy()
        subslvr_cfg[Options.DATA_SOURCE] = Options.DATA_SOURCE_EMPTY
        # NOTE(review): 'ctx' is the leaked loop variable from the
        # thread-initialisation loops above, i.e. the last device
        # context; a NameError would occur if dev_ctxs were empty
        subslvr_cfg[Options.CONTEXT] = ctx

        subslvr_cfg = self._cfg_subslvr_dims(subslvr_cfg, P)

        # Extract the dimension differences
        self.src_diff = P[Options.NSRC]
        self.time_diff = P[Options.NTIME]
        self.ant_diff = P[Options.NA]
        self.bl_diff = P[Options.NBL]
        self.chan_diff = P[Options.NCHAN]

        montblanc.log.info('Creating {s} solver(s) on {d} device(s).'
            .format(s=nsolvers, d=len(enqueue_executors)))

        # Now create the solvers on each thread
        for ex in enqueue_executors:
            ex.submit(C._thread_create_solvers,
                self, subslvr_cfg, P, nsolvers).result()

        montblanc.log.info('Solvers Created')

        # Register arrays and properties on each thread's solvers
        for ex in enqueue_executors:
            ex.submit(C._thread_reg_sub_arys_and_props,
                self, A_sub, P_sub).result()

        montblanc.log.info('Priming Memory Pools')

        # Prime the memory pools on each sub-solver
        for ex in enqueue_executors:
            ex.submit(C._thread_prime_memory_pools, self).result()