Example #1
    def get_projection_operator(self, verbose=True):
        """
        Return the projection operator for the peak sampling.
        Convert units from W to W/sr.

        Parameters
        ----------
        verbose : bool, optional
            If True, display information about the memory allocation.

        """
        f = self.instrument.get_projection_operator
        if len(self.block) == 1:
            return BlockColumnOperator(
                [f(self.sampling[b], self.scene, verbose=verbose)
                 for b in self.block], axisout=1)

        # XXX HACK
        def callback(i):
            p = f(self.sampling[self.block[i]], self.scene, verbose=False)
            return p

        shapeouts = [
            (len(self.instrument), s.stop - s.start) + self.scene.shape[1:]
            for s in self.block
        ]
        proxies = proxy_group(len(self.block), callback, shapeouts=shapeouts)
        return BlockColumnOperator(proxies, axisout=1)
Example #2
def test_block_column2():
    p = np.matrix([[1, 0], [0, 2], [1, 0]])
    o = asoperator(np.matrix(p))
    e = BlockColumnOperator([o, 2 * o], axisout=0)
    assert_eq(e.todense(), np.vstack([p, 2 * p]))
    assert_eq(e.T.todense(), e.todense().T)
    e = BlockColumnOperator([o, 2 * o], new_axisout=0)
    assert_eq(e.todense(), np.vstack([p, 2 * p]))
    assert_eq(e.T.todense(), e.todense().T)
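
The two constructions above differ only in how the block outputs are laid out: with
axisout=0 the block outputs are concatenated along an existing axis, with
new_axisout=0 they are stacked along a new leading axis. A minimal sketch of the
resulting shapes (assuming only numpy and pyoperators are available):

import numpy as np
from pyoperators import BlockColumnOperator, asoperator

o = asoperator(np.matrix([[1, 0], [0, 2], [1, 0]]))   # dense 3x2 block
x = np.array([1., 1.])
concatenated = BlockColumnOperator([o, 2 * o], axisout=0)
stacked = BlockColumnOperator([o, 2 * o], new_axisout=0)
print(concatenated(x).shape)   # (6,): the two (3,) outputs are concatenated
print(stacked(x).shape)        # (2, 3): the two (3,) outputs form a new axis 0
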
Example #3
    def tod2map(self, tod, d, cov=None):
        tol = d['tol']
        maxiter = d['maxiter']
        verbose = d['verbose']
        p = self.planck
        H = []
        for q, w in zip(self.qubic, self.weights):
            H.append(QubicPlanckAcquisition(q, p).get_operator() * w)
        H = np.array(H)
        H = [H[(self.qubic.nus > mi) * (self.qubic.nus < ma)].sum() *
             self.weights[(self.qubic.nus > mi) * (self.qubic.nus < ma)].sum()
             for (mi, ma) in self.qubic.bands]
        invntt = self.get_invntt_operator()

        A_columns = []
        for h1 in H:
            c = []
            for h2 in H:
                c.append(h2.T * invntt * h1)
            A_columns.append(BlockColumnOperator(c, axisout=0))
        A = BlockRowOperator(A_columns, axisin=0)

        H = [h.T for h in H]
        b = BlockColumnOperator(H, new_axisout=0) * (invntt * tod)
        sh = b.shape
        # If the number of subbands is more than one
        if len(self.qubic.nus) - 1 > 1:
            if len(sh) == 3:
                b = b.reshape((sh[0] * sh[1], sh[2]))
            else:
                b = b.reshape((sh[0], sh[1]))

        preconditioner = self.get_preconditioner(cov)
        solution = pcg(A,
                       b,
                       M=preconditioner,
                       disp=verbose,
                       tol=tol,
                       maxiter=maxiter)
        #        solution = pcg(A, b, disp=verbose, tol=tol, maxiter=maxiter)
        if len(sh) == 3:
            maps_recon = solution['x'].reshape(sh[0], sh[1], sh[2])
        else:
            maps_recon = solution['x'].reshape(sh[0], sh[1])
        return maps_recon
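
In tod2map above, the multi-band map-making system A x = b is assembled block by
block: A has blocks H_j.T * invntt * H_i, and b stacks the back-projections
H_k.T * invntt * tod before pcg solves the system. A schematic, self-contained
sketch of that assembly with dense stand-in operators (all shapes and values are
made up):

import numpy as np
from pyoperators import (BlockColumnOperator, BlockRowOperator, DiagonalOperator,
                         asoperator)

nsamples, npix, nbands = 8, 3, 2
H = [asoperator(np.matrix(np.random.standard_normal((nsamples, npix))))
     for _ in range(nbands)]                     # stand-ins for the band operators
invntt = DiagonalOperator(np.ones(nsamples))     # stand-in for the inverse noise covariance
tod = np.random.standard_normal(nsamples)        # stand-in for the time-ordered data

A_columns = [BlockColumnOperator([h2.T * invntt * h1 for h2 in H], axisout=0)
             for h1 in H]
A = BlockRowOperator(A_columns, axisin=0)        # acts on the concatenated band maps
b = BlockColumnOperator([h.T for h in H], new_axisout=0) * (invntt * tod)
print(A.shapein, A.shapeout, b.shape)            # (6,) (6,) (2, 3)
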
Example #4
 def get_projection_operator(self):
     header = self.get_map_header()
     junk, pmatrix = self._read_tod_pmatrix()
     return BlockColumnOperator(
         [ProjectionInMemoryOperator(p, attrin={'header': header},
                                     classin=Map, classout=Tod)
          for p in pmatrix], axisout=-1)
Example #5
    def get_operator(self):
        """
        Return the fused observation as an operator.

        """
        H_qubic = self.qubic.get_operator()
        R_qubic = ReshapeOperator(H_qubic.shapeout, H_qubic.shape[0])
        H_planck = self.planck.get_operator()
        R_planck = ReshapeOperator(H_planck.shapeout, H_planck.shape[0])
        return BlockColumnOperator(
            [R_qubic(H_qubic), R_planck(H_planck)], axisout=0)
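
A minimal sketch of the same fusion pattern with dense stand-ins (shapes are
hypothetical): each acquisition operator is flattened by a ReshapeOperator and the
two data streams are then concatenated along axis 0.

import numpy as np
from pyoperators import BlockColumnOperator, ReshapeOperator, asoperator

H_a = asoperator(np.matrix(np.ones((4, 3))))   # stand-in for the QUBIC operator
H_b = asoperator(np.matrix(np.ones((6, 3))))   # stand-in for the Planck operator
R_a = ReshapeOperator(H_a.shapeout, 4)         # flatten each output to a vector
R_b = ReshapeOperator(H_b.shapeout, 6)
fused = BlockColumnOperator([R_a(H_a), R_b(H_b)], axisout=0)
print(fused(np.ones(3)).shape)                 # (10,): both data streams stacked
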
Example #6
    def get_operator(self):
        """
        Return the acquisition model H as an operator.

        """
        if self._operator is None:
            self._operator = CompositionOperator([
                BlockColumnOperator(
                    [self.instrument.get_operator(self.sampling[b], self.scene)
                     for b in self.block], axisout=1),
                self.scene.get_distribution_operator(self.comm)])
        return self._operator
Example #7
def test_diagonal_numexpr2():
    d1 = DiagonalNumexprOperator([1, 2, 3],
                                 '(data+1)*3',
                                 broadcast='rightward')
    d2 = DiagonalNumexprOperator([3, 2, 1], '(data+2)*2')
    d = d1 * d2
    assert_is_instance(d, DiagonalOperator)
    assert_eq(d.broadcast, 'disabled')
    assert_eq(d.data, [60, 72, 72])
    c = BlockColumnOperator(3 * [IdentityOutplaceOperator()], new_axisout=0)
    v = [1, 2]
    assert_inplace_outplace(d1 * c, v, d1(c(v)))
Example #8
def _get_projection_restricted(acq, P, mask):
    #XXX HACK
    if len(acq.block) == 1:
        return P.restrict(mask, inplace=True)

    def callback(i):
        f = acq.instrument.get_projection_operator
        p = f(acq.sampling[acq.block[i]], acq.scene, verbose=False)
        return p.restrict(mask, inplace=True)

    shapeouts = [(len(acq.instrument), s.stop - s.start) + acq.scene.shape[1:]
                 for s in acq.block]
    proxies = proxy_group(len(acq.block), callback, shapeouts=shapeouts)
    return BlockColumnOperator(proxies, axisout=1)
Example #10
    def tod2map(self, tod, cov=None, tol=1e-5, maxiter=1000, verbose=True):
        p = self.planck
        H = []
        for q, w in zip(self.qubic, self.weights):
            H.append(QubicPlanckAcquisition(q, p).get_operator() * w)
        H = np.array(H)
        H = [H[(self.qubic.nus > mi) * (self.qubic.nus < ma)].sum() *
             self.weights[(self.qubic.nus > mi) * (self.qubic.nus < ma)].sum()
             for (mi, ma) in self.qubic.bands]
        invntt = self.get_invntt_operator()

        A_columns = []
        for h1 in H:
            c = []
            for h2 in H:
                c.append(h2.T * invntt * h1)
            A_columns.append(BlockColumnOperator(c, axisout=0))
        A = BlockRowOperator(A_columns, axisin=0)

        H = [h.T for h in H]
        b = BlockColumnOperator(H, new_axisout=0) * (invntt * tod)
        sh = b.shape
        if len(self.qubic.nus) - 1 > 1: # If number of subbands is more than one
            if len(sh) == 3:
                b = b.reshape((sh[0] * sh[1], sh[2]))
            else:
                b = b.reshape((sh[0] * sh[1]))

        preconditioner = self.get_preconditioner(cov)
        solution = pcg(A, b, M=preconditioner, disp=verbose, tol=tol, maxiter=maxiter)
#        solution = pcg(A, b, disp=verbose, tol=tol, maxiter=maxiter)
        if len(sh) == 3:
            maps_recon = solution['x'].reshape(sh[0], sh[1], sh[2])
        else:
            maps_recon = solution['x'].reshape(sh[0], sh[1])
        return maps_recon
Example #11
def test_integration_trapeze():
    @pyoperators.flags.square
    class Op(Operator):
        """ output[i] = value ** (i + input[i]) """
        def __init__(self, x):
            Operator.__init__(self, dtype=float)
            self.x = x

        def direct(self, input, output):
            output[...] = self.x**(np.arange(input.size) + input)

    value = list(range(3))
    x = [0.5, 1, 2, 4]
    func_op = BlockColumnOperator([Op(_) for _ in x], new_axisout=0)
    eval_ = func_op(value)
    expected = np.trapz(eval_, x=x, axis=0)
    integ = IntegrationTrapezeOperator(x)(func_op)
    assert_same(integ(value), expected)
Example #12
 def get_unit_conversion_operator(self, nus):
     shape_freq = () if len(nus) == 1 else (len(nus),)
     shape_kind = () if self.kind == 'I' else (len(self.kind),)
     return BlockColumnOperator(
         [self._get_unit_conversion_operator(nu) for nu in nus],
         new_axisout=0, shapeout=shape_freq + (len(self),) + shape_kind)
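
The new_axisout=0 above prepends a frequency axis, so the converted maps for all
frequencies are stacked along axis 0. A minimal sketch of that stacking with
made-up scalar conversion factors:

import numpy as np
from pyoperators import BlockColumnOperator, HomothetyOperator

factors = [1.0, 2.5, 4.0]   # hypothetical per-frequency conversion factors
convert = BlockColumnOperator([HomothetyOperator(f) for f in factors],
                              new_axisout=0)
x = np.ones(5)
print(convert(x).shape)     # (3, 5): one converted copy of the input per frequency
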
Example #13
data_dir = os.path.dirname(__file__) + '/data/'
obs = PacsObservation(filename=data_dir + 'frames_blue.fits')
obs.pointing.chop[:] = 0
tod = obs.get_tod()

projection = obs.get_projection_operator(resolution=3.2,
                                         downsampling=True,
                                         npixels_per_sample=6)
masking_tod = MaskOperator(tod.mask)
model = masking_tod * projection

naive = mapper_naive(tod, model)
naive[np.isnan(naive)] = 0

prior = BlockColumnOperator(
    [DiscreteDifferenceOperator(axis, shapein=(103, 97)) for axis in (0, 1)],
    new_axisout=0)

stop_condition = StopCondition(maxiter=2)
dli = DoubleLoopAlgorithm(model,
                          tod,
                          prior,
                          stop_condition=stop_condition,
                          lanczos={'maxiter': 5},
                          fmin_args={'maxiter': 2})

map_dli = dli.run()


def test():
    pass
Example #14
def ProjectionOperator(input, method=None, header=None, resolution=None,
                       npixels_per_sample=0, units=None, derived_units=None,
                       downsampling=False, packed=False, commin=MPI.COMM_WORLD,
                       commout=MPI.COMM_WORLD, onfly_func=None, onfly_ids=None,
                       onfly_shapeouts=None, **keywords):
    """
    Projection operator factory, to handle operations by one or more pointing
    matrices.

    It is assumed that each pointing matrix row has at most 'npixels_per_sample'
    non-null elements, or in other words, an output element can only intersect
    a fixed number of input elements.

    Given an input vector x and an output vector y, y = P(x) translates into:
        y[i] = sum(P.matrix[i,:].value * x[P.matrix[i,:].index])

    If the input is not MPI-distributed but the output is, the projection
    operator is automatically multiplied by an MPIDistributionIdentityOperator,
    to enable MPI reductions.

    If the input is MPI-distributed, this operator is automatically packed (see
    below) and multiplied by the operator MPIDistributionLocalOperator, which
    takes the local input as argument.

    Arguments
    ---------
    input : pointing matrix (or sequence of) or observation (deprecated)
    method : deprecated
    header : deprecated
    resolution : deprecated
    npixels_per_sample : deprecated
    downsampling : deprecated
    packed : deprecated

    """
    # check if there is only one pointing matrix
    isonfly = input is None
    isobservation = hasattr(input, 'get_pointing_matrix')
    if isobservation:
        if hasattr(input, 'slice'):
            nmatrices = len(input.slice)
        else:
            nmatrices = 1
        commout = input.instrument.comm
    elif isonfly:
        nmatrices = len(onfly_ids)
    else:
        if isinstance(input, PointingMatrix):
            input = (input,)
        if any(not isinstance(i, PointingMatrix) for i in input):
            raise TypeError('The input is not a PointingMatrix (nor a sequence'
                            ' of).')
        nmatrices = len(input)

    ismapdistributed = commin.size > 1
    istoddistributed = commout.size > 1

    # get the pointing matrix from the input observation
    if isobservation:
        if header is None:
            if not hasattr(input, 'get_map_header'):
                raise AttributeError("No map header has been specified and "
                    "the observation has no 'get_map_header' method.")
            header_global = input.get_map_header(resolution=resolution,
                                                 downsampling=downsampling)
        else:
            if isinstance(header, str):
                header = str2fitsheader(header)
            header_global = gather_fitsheader_if_needed(header, comm=commin)
        #XXX we should hand over the local header
        input = input.get_pointing_matrix(header_global, npixels_per_sample,
                    method=method, downsampling=downsampling, comm=commin)
        if isinstance(input, PointingMatrix):
            input = (input,)
    elif isonfly:
        header_global = header
    else:
        header_global = input[0].info['header']

    # check shapein
    if not isonfly:
        shapeins = [i.shape_input for i in input]
        if any(s != shapeins[0] for s in shapeins):
            raise ValueError('The pointing matrices do not have the same input '
                             'shape: {0}.'.format(
                                 ', '.join(str(s) for s in shapeins)))
        shapein = shapeins[0]
    else:
        shapein = fitsheader2shape(header_global)

    # the output is simply a ProjectionOperator instance
    if nmatrices == 1 and not ismapdistributed and not istoddistributed \
       and not packed:
        return ProjectionInMemoryOperator(input[0], units=units, derived_units=
            derived_units, commin=commin, commout=commout, **keywords)

    if packed or ismapdistributed:
        if isonfly:
            raise NotImplementedError()
        # compute the map mask before this information is lost while packing
        mask_global = Map.ones(shapein, dtype=np.bool8, header=header_global)
        for i in input:
            i.get_mask(out=mask_global)
        for i in input:
            i.pack(mask_global)
        if ismapdistributed:
            shapes = distribute_shapes(mask_global.shape, comm=commin)
            shape = shapes[commin.rank]
            mask = np.empty(shape, bool)
            commin.Reduce_scatter([mask_global, MPI.BYTE], [mask, MPI.BYTE],
                                  [product(s) for s in shapes], op=MPI.BAND)
        else:
            mask = mask_global

    if isonfly:
        place_holder = {}
        operands = [ProjectionOnFlyOperator(place_holder, id, onfly_func,
                                            shapein=shapein, shapeout=shapeout,
                                            units=units,
                                            derived_units=derived_units)
                    for id, shapeout in zip(onfly_ids, onfly_shapeouts)]
    else:
        operands = [ProjectionInMemoryOperator(i, units=units, derived_units=
                    derived_units, commout=commout, **keywords) for i in input]
    result = BlockColumnOperator(operands, axisout=-1)

    if nmatrices > 1:
        def apply_mask(self, mask):
            mask = np.asarray(mask, np.bool8)
            dest = 0
            if mask.shape != self.shapeout:
                raise ValueError("The mask shape '{0}' is incompatible with tha"
                    "t of the projection operator '{1}'.".format(mask.shape,
                    self.shapeout))
            if any(isinstance(p, ProjectionOnFlyOperator)
                   for p in self.operands):
                blocks = self.copy()
                self.__class__ = CompositionOperator
                self.__init__([MaskOperator(mask), blocks])
                return
            for p in self.operands:
                n = p.matrix.shape[1]
                p.apply_mask(mask[...,dest:dest+n])
                dest += n
        def get_mask(self, out=None):
            for p in self.operands:
                out = p.get_mask(out=out)
            return out
        def get_pTp(self, out=None):
            for p in self.operands:
                out = p.get_pTp(out=out)
            return out
        def get_pTx_pT1(self, x, out=None, mask=None):
            dest = 0
            for p in self.operands:
                n = p.matrix.shape[1]
                out = p.get_pTx_pT1(x[...,dest:dest+n], out=out, mask=mask)
                dest += n
            return out
        def intersects(self, out=None):
            raise NotImplementedError('email-me')

        result.apply_mask = apply_mask.__get__(result)
        result.get_mask = get_mask.__get__(result)
        result.get_pTp = get_pTp.__get__(result)
        result.get_pTx_pT1 = get_pTx_pT1.__get__(result)
        result.intersects = intersects.__get__(result)

    if not istoddistributed and not ismapdistributed and not packed:
        return result

    if packed or ismapdistributed:
        def get_mask(self, out=None):
            if out is not None:
                out &= mask
            else:
                out = mask
            return out
        if ismapdistributed:
            header = scatter_fitsheader(header_global, comm=commin)
            result *= MPIDistributionLocalOperator(mask_global, commin=commin,
                                                   attrin={'header':header})
        elif packed:
            result *= PackOperator(mask)
        result.get_mask = get_mask.__get__(result)

    if istoddistributed and not ismapdistributed:
        def get_mask(self, out=None):
            out = self.operands[0].get_mask(out=out)
            commout.Allreduce(MPI.IN_PLACE, [out, MPI.BYTE], op=MPI.BAND)
            return out        
        result *= MPIDistributionIdentityOperator(commout=commout)
        result.get_mask = get_mask.__get__(result)

    def not_implemented(out=None):
        raise NotImplementedError('email-me')

    def apply_mask(self, mask):
        self.operands[0].apply_mask(mask)
    result.apply_mask = apply_mask.__get__(result)
    result.get_pTp = not_implemented
    result.get_pTx_pT1 = not_implemented
    result.intersects = not_implemented

    return result
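
The factory above attaches per-instance methods such as apply_mask and get_mask to
the returned operator by binding plain functions with function.__get__. A tiny,
generic illustration of that binding pattern (names are made up):

class Widget(object):
    pass

def describe(self):
    return 'bound to {0!r}'.format(self)

w = Widget()
w.describe = describe.__get__(w)   # bind the function to this particular instance
print(w.describe())                # behaves like a regular method of w
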