Example #1
    def __call__(self, context, var1, var2, var_out):
        """
        Multiply two columns together, put the answer in a new column.

        :param context: The context instance, used to move data around.
        :param var1: Variable name of data.  Usually intensity.
            Assets is the 0 dimension, and can have other dimensions.
        :param var2: The values in this column are multiplied.
            Assets is the 0 dimension and has only this dimension.
        :param var_out: The new variable name, with the values of
            var1 * var2.
        """

        rolled = context.exposure_att[var1]
        context.exposure_att[var1] = scipy.rollaxis(rolled, 0, rolled.ndim)

        context.exposure_att[var_out] = (context.exposure_att[var1] *
                                         context.exposure_att[var2])
        rolled = context.exposure_att[var1]

        # Roll var1 back
        context.exposure_att[var1] = scipy.rollaxis(rolled, rolled.ndim - 1, 0)

        # Roll the output so the asset dimension is 0.
        result = context.exposure_att[var_out]
        context.exposure_att[var_out] = scipy.rollaxis(result, rolled.ndim - 1,
                                                       0)
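A minimal sketch of the two rolls used above, written with numpy (whose rollaxis the scipy namespace re-exported when these examples were written); the shape is an illustrative assumption:

import numpy as np

a = np.zeros((3, 4, 5))            # axis 0 is the asset dimension
b = np.rollaxis(a, 0, a.ndim)      # move axis 0 to the end: shape (4, 5, 3)
c = np.rollaxis(b, b.ndim - 1, 0)  # roll it back to the front
assert b.shape == (4, 5, 3) and c.shape == (3, 4, 5)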
Example #2
    def calc_C(self, n_low=-1, n_high=-1):
        """Generates the C matrices used to calculate the K's and ultimately the B's

        These are to be used on one side of the super-operator when applying the
        nearest-neighbour Hamiltonian, similarly to C in eqn. (44) of
        arXiv:1103.0936v2 [cond-mat.str-el], except being for the non-norm-preserving case.

        Makes use only of the nearest-neighbour Hamiltonian, and of the A's.

        C[n] depends on A[n] and A[n + 1].
        
        This calculation can be significantly faster if a matrix form for h_nn
        is available. See gen_h_matrix().

        """
        if self.h_nn is None:
            return 0

        if n_low < 1:
            n_low = 0
        if n_high < 1:
            n_high = self.N + 1
        
        if self.h_nn_mat is None:
            for n in xrange(n_low, n_high):
                self.C[n].fill(0)
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA = mm.mmul(self.A[n][u], self.A[n + 1][v]) # compute AA only once for each (u, v) pair
                        for s in xrange(self.q[n]):
                            for t in xrange(self.q[n + 1]):
                                h_nn_stuv = self.h_nn(n, s, t, u, v)
                                if h_nn_stuv != 0:
                                    self.C[n][s, t] += h_nn_stuv * AA
        else:
            dot = sp.dot
            for n in xrange(n_low, n_high):
                An = self.A[n]
                Anp1 = self.A[n + 1]
                
                AA = sp.empty_like(self.C[n])
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA[u, v] = dot(An[u], Anp1[v])
                        
                if n == 0: #FIXME: Temp. hack
                    self.AA0 = AA
                elif n == 1:
                    self.AA1 = AA
                
                res = sp.tensordot(AA, self.h_nn_mat[n], ((0, 1), (2, 3)))
                res = sp.rollaxis(res, 3)
                res = sp.rollaxis(res, 3)
                
                self.C[n][:] = res
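A minimal numpy sketch (shapes are illustrative assumptions) showing that the tensordot plus the two rollaxis calls reproduce the explicit quadruple loop of the first branch:

import numpy as np

q, D = 2, 3
rng = np.random.default_rng(0)
A_n = rng.standard_normal((q, D, D))    # A[n][u]
A_np1 = rng.standard_normal((q, D, D))  # A[n + 1][v]
h = rng.standard_normal((q, q, q, q))   # h[s, t, u, v]

# explicit quadruple loop, as in the h_nn branch above
C_loop = np.zeros((q, q, D, D))
for s in range(q):
    for t in range(q):
        for u in range(q):
            for v in range(q):
                C_loop[s, t] += h[s, t, u, v] * A_n[u].dot(A_np1[v])

# vectorised form, as in the h_nn_mat branch above
AA = np.einsum('uij,vjk->uvik', A_n, A_np1)  # AA[u, v] = A_n[u] @ A_np1[v]
res = np.tensordot(AA, h, ((0, 1), (2, 3)))  # shape (D, D, q, q)
res = np.rollaxis(np.rollaxis(res, 3), 3)    # back to (q, q, D, D)
assert np.allclose(C_loop, res)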
Example #3
def decode_bayes(arr, axis=None, density=None, scale=None):
    """ The density is the probabilistic equivalent of the tuning curve, with
    two input arguments: the first is the value and the second the location
    parameter of the density. Assuming fdomain is (0., 1.)
    """

    nunits = arr.shape[axis]
    values = sp.linspace(0.0, 1.0, nunits, dtype=float)

    # matrix of discrete tuning curves; row: preference (loc), column: stimulus value
    tuning = sp.array([[density(theta, loc=phi, scale=scale) \
        for theta in values] for phi in values])
    tuning /= tuning.sum(1, keepdims=True)

    # normalized spike count
    spksum = arr.sum(axis, keepdims=True)
    spksum[spksum == 0] = 1.0
    spikes = arr / spksum

    # now decode
    probas = sp.tensordot(spikes, tuning, ((axis, ), (0, )))
    probas = sp.rollaxis(probas, axis=-1, start=axis)
    mapest = values[probas.argmax(axis)]
    #mapest = sp.tensordot(probas, values, ((axis,), (0,)))
    spread = 1.0 / (probas.max(axis) - probas.min(axis))

    return probas, mapest, spread
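A hypothetical usage sketch for decode_bayes, assuming sp is a NumPy-compatible scipy namespace and using scipy.stats.norm.pdf as the density (it takes a value plus loc and scale, matching the signature the docstring describes):

import numpy as np
from scipy.stats import norm

# 50 Poisson units with Gaussian tuning curves tiling (0, 1); true stimulus at 0.3
rng = np.random.default_rng(0)
prefs = np.linspace(0.0, 1.0, 50)
rates = norm.pdf(0.3, loc=prefs, scale=0.1)
counts = rng.poisson(20 * rates / rates.max())

probas, mapest, spread = decode_bayes(counts, axis=0, density=norm.pdf, scale=0.1)
print(mapest)  # maximum a posteriori estimate, close to 0.3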
Example #4
File: Capture.py  Project: jlettvin/RPN
    def process(self, sbmp):
        """Core function"""
        if sbmp is None:
            # Oversize required
            return self.size
        # reverse height and width under advice
        (ws, hs, ps)    = shape = (sbmp.GetHeight(), sbmp.GetWidth(), 3)
        simg            = wx.ImageFromBitmap(sbmp)

        sarray          = scipy.array(scipy.fromstring(simg.GetData(), 'uint8'), self.dtype) / 255.0
        sarray          = scipy.rollaxis(scipy.reshape(sarray, shape), 2)
        self.shape      = sarray.shape

        tarray          = (self.withGPU if self.gpgpu else self.withCPU)(sarray)
        mm              = (sarray.min(), sarray.max(), tarray.min(), tarray.max())
        #print '\t', sarray.shape, tarray.shape,
        print type(sarray[0,0,0]), type(tarray[0,0,0]), mm

        tarray          = numpy.nan_to_num(tarray)
        tarray         /= max(tarray.max(), self.coefficient)
        tarray          = scipy.array((tarray * 255.0).tolist(), 'uint8')

        tarray          = scipy.dstack(tarray)
        timg            = wx.EmptyImage(ws, hs)
        timg              .SetData(tarray.tostring())
        self.tbmp       = timg.ConvertToBitmap()
        return self.tbmp
Example #5
 def test_partial_dot_mat_mat_block(self):
     mat1 = sp.arange(2 * 3 * 5 * 7 * 11)
     mat1.shape = (2, 3, 5, 7, 11)
     mat1 = algebra.make_mat(mat1,
                             axis_names=('time', 'x', 'y', 'ra', 'z'),
                             row_axes=(0, 1, 3),
                             col_axes=(0, 2, 3, 4))
     mat2 = sp.arange(2 * 13 * 5 * 7 * 17)
     mat2.shape = (2, 13, 7, 5, 17)
     mat2 = algebra.make_mat(mat2,
                             axis_names=('time', 'w', 'ra', 'y', 'freq'),
                             row_axes=(0, 1, 2, 3),
                             col_axes=(1, 2, 4))
     tmp_arr = sp.tensordot(mat1, mat2, ((2, ), (3, )))
     right_ans = sp.empty((7, 13, 2, 3, 11, 17))
     for ii in range(2):
         for jj in range(7):
             this_tmp = tmp_arr[ii, :, jj, :, ii, :, jj, :]
             this_tmp = sp.rollaxis(this_tmp, 2, 0)
             right_ans[jj, :, ii, ...] = this_tmp
     result = algebra.partial_dot(mat1, mat2)
     self.assertEqual(result.axes, ('ra', 'w', 'time', 'x', 'z', 'freq'))
     self.assertEqual(result.rows, (0, 1, 2, 3))
     self.assertEqual(result.cols, (0, 1, 4, 5))
     self.assertTrue(sp.allclose(right_ans, result))
Example #6
    def test_partial_dot_mat_mat_block(self):
        mat1 = sp.arange(2 * 3 * 5 * 7 * 11)
        mat1.shape = (2, 3, 5, 7, 11)

        mat1 = matrix.make_mat(mat1, axis_names=('time', 'x', 'y', 'ra', 'z'),
                                row_axes=(0, 1, 3), col_axes=(0, 2, 3, 4))

        mat2 = sp.arange(2 * 13 * 5 * 7 * 17)
        mat2.shape = (2, 13, 7, 5, 17)

        mat2 = matrix.make_mat(mat2,
                                axis_names=('time', 'w', 'ra', 'y', 'freq'),
                                row_axes=(0, 1, 2, 3), col_axes=(1, 2, 4))

        tmp_arr = sp.tensordot(mat1, mat2, ((2, ), (3, )))
        right_ans = sp.empty((7, 13, 2, 3, 11, 17))

        for ii in range(2):
            for jj in range(7):
                this_tmp = tmp_arr[ii, :, jj, :, ii, :, jj, :]
                this_tmp = sp.rollaxis(this_tmp, 2, 0)
                right_ans[jj, :, ii, ...] = this_tmp

        result = dot_products.partial_dot(mat1, mat2)
        self.assertEqual(result.axes, ('ra', 'w', 'time', 'x', 'z', 'freq'))
        self.assertEqual(result.rows, (0, 1, 2, 3))
        self.assertEqual(result.cols, (0, 1, 4, 5))
        self.assertTrue(sp.allclose(right_ans, result))
Example #7
def __interpolateBetweenBinaryObjects(obj1, obj2, slices):
    """
    Takes two binary objects and puts ``slices`` slices in-between them, each of which
    contains a smooth binary transition between the objects.
    @note private inner function
    """
    if not obj1.shape == obj2.shape:
        raise AttributeError(
            "The two supplied objects have to be of the same shape, not {} and {}.".format(obj1.shape, obj2.shape)
        )

    # constant
    offset = 0.5  # must be a value smaller than the minimal distance possible
    temporal_dimension = 3

    # get all voxel positions
    obj1_voxel = scipy.nonzero(obj1)
    obj2_voxel = scipy.nonzero(obj2)

    # get smallest pairwise distances between all object voxels
    distances = cdist(scipy.transpose(obj1_voxel), scipy.transpose(obj2_voxel))

    # keep for each True voxel of obj1 only the smallest distance to a True voxel in obj2
    min_distances = distances.min(1)

    # test if all seems to work
    if len(min_distances) != len(obj1_voxel[0]):
        raise Exception("Invalid number of minimal distances received.")

    # replace True voxels in obj1 with their respective distances to the True voxels in obj2
    thr_obj = obj1.copy()
    thr_obj = thr_obj.astype(scipy.float_)
    thr_obj[obj1_voxel] = min_distances
    thr_obj[obj1_voxel] += offset  # previous steps distances include zeros, therefore this is required

    # compute the step size for each slice that is added
    maximum = min_distances.max()
    step = maximum / float(slices + 1)
    threshold = maximum

    # control step: see if thr_obj really corresponds to obj1
    if not scipy.all(thr_obj.astype(scipy.bool_) == obj1.astype(scipy.bool_)):
        raise Exception("First created object does not correspond to obj1.")

    # assemble return volume
    return_volume = [thr_obj.astype(scipy.bool_)]  # corresponds to obj1
    for _ in range(slices):
        threshold -= step
        # remove all values higher than the threshold
        thr_obj[thr_obj > threshold] = 0
        # add binary volume to list (makes a copy)
        return_volume.append(thr_obj.astype(scipy.bool_))

    # add the last slice (corresponds to obj2)
    thr_obj[thr_obj > offset] = 0
    return_volume.append(thr_obj.astype(scipy.bool_))

    # return binary scipy array
    return scipy.rollaxis(scipy.asarray(return_volume, dtype=scipy.bool_), 0, temporal_dimension + 1)
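A hypothetical call, assuming the legacy scipy namespace re-exports (scipy.nonzero, scipy.float_, ...) that the function itself relies on, plus cdist from scipy.spatial.distance:

import scipy
from scipy.spatial.distance import cdist  # used inside the function

obj1 = scipy.zeros((5, 5, 5), dtype=bool)
obj2 = scipy.zeros((5, 5, 5), dtype=bool)
obj1[2, 2, 1] = True
obj2[2, 2, 3] = True

vol = __interpolateBetweenBinaryObjects(obj1, obj2, slices=2)
print(vol.shape)  # (5, 5, 5, 4): obj1, two transition slices and obj2 on a new last axis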
Example #8
File: __main__.py  Project: jlettvin/rpna
 def collect(self):
     self.iteration += 1
     # Fetch and normalize the camera image.
     self.camera = rollaxis(
             asarray(GetMat(QueryFrame(self.capture))).astype(self.dtype) /
             self.scale,
             2,
             0)
Example #9
def make_datum(im, center, label, width):
    """Creates a Caffe datum object from a window on ``im`` with the given label"""
    window = get_window(im, center, width)
    window = randomly_flip(window)
    window = randomly_rotate(window)
    window = sp.rollaxis(window, 2, 0)
    datum = caffe.io.array_to_datum(window.astype(float), label)
    return datum
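The rollaxis(window, 2, 0) step converts a height x width x channels window into the channels-first layout Caffe's datum expects; a minimal numpy sketch with an assumed 32x32 RGB window:

import numpy as np

window = np.zeros((32, 32, 3))   # height x width x channels
chw = np.rollaxis(window, 2, 0)  # channels first for Caffe
assert chw.shape == (3, 32, 32)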
Example #10
def create_axes_array(axes):
    """
    Given a list of N axes of length {a,b,c,...}, returns an N+1 dimension
        array of shape {N,a,b,c,...} describing the coordinates at each point
        in the grid.
    """
    import scipy
    ndim = len(axes)
    shape = [ndim]
    for i in axes:
        shape.append(i.size)

    coords = scipy.ones(shape)
    for i in range(ndim):
        coords[i] = scipy.rollaxis(scipy.rollaxis(coords[i],i,ndim)*axes[i],ndim-1,i)

    return coords
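For reference, the result matches a stacked 'ij' meshgrid; a minimal sketch, assuming the legacy scipy namespace used inside the function is available:

import numpy as np

axes = [np.linspace(0.0, 1.0, 2), np.linspace(0.0, 1.0, 3)]
coords = create_axes_array(axes)                   # shape (2, 2, 3)
ref = np.array(np.meshgrid(*axes, indexing='ij'))  # same coordinates
assert np.allclose(coords, ref)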
Example #11
def make_datum(im, center, label, width):
    """Creates a Caffe datum object from a window on ``im`` with the given label"""
    window = get_window(im, center, width)
    window = randomly_flip(window)
    window = randomly_rotate(window)
    window = sp.rollaxis(window, 2, 0)
    datum = caffe.io.array_to_datum(window.astype(float), label)
    return datum
Example #12
def create_axes_array(axes):
    """
    Given a list of N axes of length {a,b,c,...}, returns an N+1 dimension
        array of shape {N,a,b,c,...} describing the coordinates at each point
        in the grid.
    """
    import scipy
    ndim = len(axes)
    shape = [ndim]
    for i in axes:
        shape.append(i.size)

    coords = scipy.ones(shape)
    for i in range(ndim):
        coords[i] = scipy.rollaxis(scipy.rollaxis(coords[i],i,ndim)*axes[i],ndim-1,i)

    return coords
Example #13
def __interpolateBetweenBinaryObjects(obj1, obj2, slices):
    """
    Takes two binary objects and puts ``slices`` slices in-between them, each of which
    contains a smooth binary transition between the objects.
    @note private inner function
    """
    if not obj1.shape == obj2.shape:
        raise AttributeError('The two supplied objects have to be of the same shape, not {} and {}.'.format(obj1.shape, obj2.shape))
    
    # constant
    offset = 0.5 # must be a value smaller than the minimal distance possible
    temporal_dimension = 3
    
    # get all voxel positions
    obj1_voxel = scipy.nonzero(obj1)
    obj2_voxel = scipy.nonzero(obj2)
    
    # get smallest pairwise distances between all object voxels
    distances = cdist(scipy.transpose(obj1_voxel),
                      scipy.transpose(obj2_voxel))
    
    # keep for each True voxel of obj1 only the smallest distance to a True voxel in obj2 
    min_distances = distances.min(1)
    
    # test if all seems to work
    if len(min_distances) != len(obj1_voxel[0]):
        raise Exception('Invalid number of minimal distances received.')
    
    # replace True voxels in obj1 with their respective distances to the True voxels in obj2
    thr_obj = obj1.copy()
    thr_obj = thr_obj.astype(scipy.float_)
    thr_obj[obj1_voxel] = min_distances
    thr_obj[obj1_voxel] += offset # previous steps distances include zeros, therefore this is required
    
    # compute the step size for each slice that is added
    maximum = min_distances.max()
    step = maximum / float(slices + 1)
    threshold = maximum
    
    # control step: see if thr_obj really corresponds to obj1
    if not scipy.all(thr_obj.astype(scipy.bool_) == obj1.astype(scipy.bool_)):
        raise Exception('First created object does not correspond to obj1.')
    
    # assemble return volume
    return_volume = [thr_obj.astype(scipy.bool_)] # corresponds to obj1
    for _ in range(slices):
        threshold -= step
        # remove all values higher than the threshold
        thr_obj[thr_obj > threshold] = 0
        # add binary volume to list (makes a copy)
        return_volume.append(thr_obj.astype(scipy.bool_)) 
    
    # add the last slice (corresponds to obj2)
    thr_obj[thr_obj > offset] = 0
    return_volume.append(thr_obj.astype(scipy.bool_)) 
    
    # return binary scipy array
    return scipy.rollaxis(scipy.asarray(return_volume, dtype=scipy.bool_), 0, temporal_dimension + 1)
Example #14
def get_window_centers(im, width):
    """Returns a list of all indices more than ``width`` distant from the boundaries of ``im``"""
    boundary = int(sp.ceil(width/2))
    yslice = slice(boundary, im.shape[0]-boundary)
    xslice = slice(boundary, im.shape[1]-boundary)
    indices = sp.mgrid[yslice, xslice]
    indices = sp.rollaxis(indices, 0, 3).reshape((-1, 2))
    
    return indices
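A small sketch of what the mgrid/rollaxis combination yields, with assumed image and window sizes:

import numpy as np

im = np.zeros((10, 12))
centers = get_window_centers(im, width=4)
# boundary = 2, so rows 2..7 and columns 2..9 remain: 6 * 8 = 48 (row, col) pairs
assert centers.shape == (48, 2)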
Example #15
def GET_SO_CS(im_lms):
    """"""
    assert im_lms.ndim == 3
    assert im_lms.shape[-1] == 3
    im_lms = sp.array(im_lms, dtype=floatX)
    im_lms = sp.rollaxis(im_lms, -1, 0)
    sel = _CS_SO_PARAMETERS['selected_channels']
    so = mod.ventral_so(p=_CS_SO_PARAMETERS, image=im_lms, verbose=False)
    return so.squeeze()[sel, -1, :, :]
Example #16
def get_window_centers(im, width):
    """Returns a list of all indices more than ``width`` distant from the boundaries of ``im``"""
    boundary = int(sp.ceil(width / 2))
    yslice = slice(boundary, im.shape[0] - boundary)
    xslice = slice(boundary, im.shape[1] - boundary)
    indices = sp.mgrid[yslice, xslice]
    indices = sp.rollaxis(indices, 0, 3).reshape((-1, 2))

    return indices
Example #17
def GET_SO_STANDARD(im_lms):
    """"""
    assert im_lms.ndim == 3
    assert im_lms.shape[-1] == 3
    im_lms = sp.array(im_lms, dtype=floatX)
    im_lms = sp.rollaxis(im_lms, -1, 0)
    # im_lms = sp.array(im_lms.swapaxes(0, -1), dtype=floatX)
    sel = _DEFAULT_SO_PARAMETERS['selected_channels']
    so = mod.ventral_so(p=_DEFAULT_SO_PARAMETERS, image=im_lms, verbose=False)
    return so.squeeze().max(-3)[sel, :, :]
Example #18
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load 3d image
    data_3d, header_3d = load(args.input)

    # check if supplied dimension parameter is inside the image's dimensions
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError(
            'The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'
            .format(args.dimension, data_3d.ndim))

    # check if the supplied offset parameter is a divider of the cut-dimensions slice number
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError(
            'The offset is not a divider of the number of slices in cut dimension ({} / {}).'
            .format(data_3d.shape[args.dimension], args.offset))

    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)

    logger.debug(
        'Separating {} slices into {} 3D volumes of thickness {}.'.format(
            data_3d.shape[args.dimension], volumes_3d, args.offset))

    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset,
                                             idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl + 1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]

    # flip dimensions such that the newly created one is the last
    data_4d = scipy.swapaxes(data_4d, 0, args.dimension + 1)
    data_4d = scipy.rollaxis(data_4d, 0, 4)

    # save resulting 4D volume
    save(data_4d, args.output, header_3d, args.force)

    logger.info("Successfully terminated.")
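A 1-D sketch of the slice-collection arithmetic above: sub-volume idx gathers every offset-th slice starting at idx, so sub[idx][sl] comes from data[idx + sl * offset]:

import numpy as np

data = np.arange(6)   # six slices along the cut dimension
offset = 2            # -> two volumes of three slices each, in the real code
sub = np.array([data[idx::offset] for idx in range(offset)])
assert (sub == np.array([[0, 2, 4], [1, 3, 5]])).all()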
Example #19
 def _spawn(self, log_mean, log_sigma):
     """
     Spawning will add a spawning dimension, as the first dimension.
     Each cut into the spawning dimension represents the SA at
     a different centroid.
     """
     new_shape = list(log_sigma.shape) + [1]
     log_sigma = log_sigma.reshape(new_shape)
     spawned_log_sigma = log_sigma * self.spawn_centroids
     # roll the spawn dimension to the front
     spawned_log_sigma = rollaxis(spawned_log_sigma,
                                  spawned_log_sigma.ndim - 1, 0)
     sample_values = exp(log_mean + spawned_log_sigma)
     return sample_values
Example #20
 def _spawn(self, log_mean, log_sigma):
     """
     Spawning will add a spawning dimension, as the first dimension.
     Each cut into the spawning dimension represents the SA at
     a different centroid.
     """
     new_shape = list(log_sigma.shape) + [1]
     log_sigma = log_sigma.reshape(new_shape)
     spawned_log_sigma = log_sigma * self.spawn_centroids
     # roll the spawn dimension to the front
     spawned_log_sigma = rollaxis(spawned_log_sigma,
                                  spawned_log_sigma.ndim-1, 0)
     sample_values = exp(log_mean + spawned_log_sigma)
     return sample_values
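A numpy sketch of the broadcasting in _spawn, with assumed shapes: (events, sites) log sigmas and three spawn centroids:

import numpy as np

log_sigma = np.ones((4, 6))                   # e.g. (events, sites)
spawn_centroids = np.array([-1.0, 0.0, 1.0])  # three centroids

x = log_sigma.reshape(list(log_sigma.shape) + [1])  # (4, 6, 1)
x = x * spawn_centroids                             # broadcast to (4, 6, 3)
x = np.rollaxis(x, x.ndim - 1, 0)                   # (3, 4, 6): spawn axis first
assert x.shape == (3, 4, 6)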
Example #21
 def test_transpose_partial_dot(self):
     self.mat.shape = (5, 4, 6)
     self.mat.cols = (1, 2)
     self.mat.rows = (0, )
     self.mat.axes = ('freq', 'x', 'y')
     matT = self.mat.mat_transpose()
     new_vect = algebra.partial_dot(matT, self.vect)
     self.assertEqual(new_vect.shape, (4, 6, 2, 3))
     self.assertEqual(new_vect.axes, ('x', 'y', 'a', 'b'))
     # Reform original matrix to get same numerical result.
     mat = sp.reshape(self.mat, (5, 4 * 6))
     mat = sp.rollaxis(mat, 1, 0)
     numerical_result = sp.dot(mat, sp.reshape(self.vect, (5, 2 * 3)))
     self.assertTrue(
         sp.allclose(numerical_result.flatten(), new_vect.flatten()))
示例#22
0
 def test_transpose_partial_dot(self):
     self.mat.shape = (5, 4, 6)
     self.mat.cols = (1, 2)
     self.mat.rows = (0,)
     self.mat.axes = ('freq', 'x', 'y')
     matT = self.mat.mat_transpose()
     new_vect = algebra.partial_dot(matT, self.vect)
     self.assertEqual(new_vect.shape, (4, 6, 2, 3))
     self.assertEqual(new_vect.axes, ('x', 'y', 'a', 'b'))
     # Reform original matrix to get same numerical result.
     mat = sp.reshape(self.mat, (5, 4*6))
     mat = sp.rollaxis(mat, 1, 0)
     numerical_result = sp.dot(mat, sp.reshape(self.vect, (5, 2*3)))
     self.assertTrue(sp.allclose(numerical_result.flatten(),
                                 new_vect.flatten()))
Example #23
def two_channel_to_color(im):
    """Converts a two-channel microarray image to a color image, as described in the paper associated with this 
    codebase"""
    lower = sp.percentile(im, 5)
    upper = sp.percentile(im, 98)   
    
    channel_0 = sp.clip((im[:, :, 0] - lower)/(upper - lower), 0, 1)
    channel_2 = sp.clip((im[:, :, 1] - lower)/(upper - lower), 0, 1)
    channel_1 = ((channel_0 + channel_2)/2.)
    
    im = sp.array((channel_0, channel_1, channel_2))
    im = sp.rollaxis(im, 0, 3)
    
    im = (255*im).astype(sp.uint8)    
    
    return im
Example #24
def two_channel_to_color(im):
    """Converts a two-channel microarray image to a color image, as described in the paper associated with this 
    codebase"""
    lower = sp.percentile(im, 5)
    upper = sp.percentile(im, 98)

    channel_0 = sp.clip((im[:, :, 0] - lower) / (upper - lower), 0, 1)
    channel_2 = sp.clip((im[:, :, 1] - lower) / (upper - lower), 0, 1)
    channel_1 = ((channel_0 + channel_2) / 2.)

    im = sp.array((channel_0, channel_1, channel_2))
    im = sp.rollaxis(im, 0, 3)

    im = (255 * im).astype(sp.uint8)

    return im
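A hypothetical call, assuming sp is a NumPy-compatible scipy namespace and a synthetic two-channel image:

import numpy as np

rng = np.random.default_rng(0)
im = rng.gamma(shape=2.0, scale=100.0, size=(64, 64, 2))

rgb = two_channel_to_color(im)
assert rgb.shape == (64, 64, 3) and rgb.dtype == np.uint8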
Example #25
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load 3d image
    data_3d, header_3d = load(args.input)
    
    # check if supplied dimension parameter is inside the image's dimensions
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError('The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension, data_3d.ndim))
    
    # check if the supplied offset parameter is a divider of the cut-dimensions slice number
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError('The offset is not a divider of the number of slices in cut dimension ({} / {}).'.format(data_3d.shape[args.dimension], args.offset))
        
    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)
    
    logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset))
        
    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl+1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]
        
    # flip dimensions such that the newly created one is the last
    data_4d = scipy.swapaxes(data_4d, 0, args.dimension + 1)
    data_4d = scipy.rollaxis(data_4d, 0, 4)
        
    # save resulting 4D volume
    save(data_4d, args.output, header_3d, args.force)
    
    logger.info("Successfully terminated.")
Example #26
def convert(map_file, history_file=None):
    """Main function."""

    map = algebra.load(map_file)
    map = algebra.make_vect(map)

    if map.axes != ('freq', 'ra', 'dec'):
        raise NotImplementedError("Exepected input map to be organized "
                                  "('freq', 'ra', 'dec').")

    new_shape = map.shape[1:] + (map.shape[0], )

    # Make the out file name assuming the input file ends in .npy.  This is a
    # hack and someone should fix it sometime.
    out_fname = map_file.split('/')[-1][:-4] + '.fits'

    Map_fits = data_map.DataMap(sp.rollaxis(map, 0, 3))
    # Set axis names
    Map_fits.set_field('CTYPE3', 'FREQ--HZ', (), '32A')
    Map_fits.set_field('CTYPE1', 'RA---DEG', (), '32A')
    Map_fits.set_field('CTYPE2', 'DEC--DEG', (), '32A')
    # Copy frequency axis (now the third axis not the first).
    Map_fits.set_field('CRVAL3', map.info['freq_centre'], (), 'D')
    Map_fits.set_field('CRPIX3', new_shape[2] // 2 + 1, (), 'D')
    Map_fits.set_field('CDELT3', map.info['freq_delta'], (), 'D')
    # Set the other two axes.
    Map_fits.set_field('CRVAL1', map.info['ra_centre'], (), 'D')
    Map_fits.set_field('CRPIX1', new_shape[0] // 2 + 1, (), 'D')
    Map_fits.set_field('CDELT1', map.info['ra_delta'], (), 'D')
    Map_fits.set_field('CRVAL2', map.info['dec_centre'], (), 'D')
    Map_fits.set_field('CRPIX2', new_shape[1] // 2 + 1, (), 'D')
    Map_fits.set_field('CDELT2', map.info['dec_delta'], (), 'D')

    # Copy the file history if provided.
    if history_file is not None:
        history = hist.read(history_file)
        history.add("Converted map to fits.", ("File name: " + out_fname, ))
        Map_fits.history = history

    # Verify contents and write out.
    Map_fits.verify()
    fits_map.write(Map_fits, out_fname)
Example #27
def convert(map_file, history_file=None):
    """Main function."""

    map = algebra.load(map_file)
    map = algebra.make_vect(map)

    if map.axes != ("freq", "ra", "dec"):
        raise NotImplementedError("Exepected input map to be organized " "('freq', 'ra', 'dec').")

    new_shape = map.shape[1:] + (map.shape[0],)

    # Make the out file name assuming the input file ends in .npy.  This is a
    # hack and someone should fix it sometime.
    out_fname = map_file.split("/")[-1][:-4] + ".fits"

    Map_fits = data_map.DataMap(sp.rollaxis(map, 0, 3))
    # Set axis names
    Map_fits.set_field("CTYPE3", "FREQ--HZ", (), "32A")
    Map_fits.set_field("CTYPE1", "RA---DEG", (), "32A")
    Map_fits.set_field("CTYPE2", "DEC--DEG", (), "32A")
    # Copy frequency axis (now the third axis not the first).
    Map_fits.set_field("CRVAL3", map.info["freq_centre"], (), "D")
    Map_fits.set_field("CRPIX3", new_shape[2] // 2 + 1, (), "D")
    Map_fits.set_field("CDELT3", map.info["freq_delta"], (), "D")
    # Set the other two axes.
    Map_fits.set_field("CRVAL1", map.info["ra_centre"], (), "D")
    Map_fits.set_field("CRPIX1", new_shape[0] // 2 + 1, (), "D")
    Map_fits.set_field("CDELT1", map.info["ra_delta"], (), "D")
    Map_fits.set_field("CRVAL2", map.info["dec_centre"], (), "D")
    Map_fits.set_field("CRPIX2", new_shape[1] // 2 + 1, (), "D")
    Map_fits.set_field("CDELT2", map.info["dec_delta"], (), "D")

    # Copy the file history if provided.
    if history_file is not None:
        history = hist.read(history_file)
        history.add("Converted map to fits.", ("File name: " + out_fname,))
        Map_fits.history = history

    # Verify contents and write out.
    Map_fits.verify()
    fits_map.write(Map_fits, out_fname)
Example #28
    def execute(self, nprocesses=1):
        """Function that acctually does the work.

        The nprocesses parameter does not do anything yet.  It is just there
        for compatibility with the pipeline manager.
        """
        params = self.params
        kiyopy.utils.mkparents(params["output_root"])
        parse_ini.write_params(params, params["output_root"] + "params.ini", prefix="mm_")
        # Rename some commonly used parameters.
        map_shape = params["map_shape"]
        spacing = params["pixel_spacing"]
        algorithm = params["noise_model"]
        noise_root = params["noise_parameters_input_root"]
        ra_spacing = -spacing / sp.cos(params["field_centre"][1] * sp.pi / 180.0)
        if algorithm not in ("grid", "diag_file", "disjoint_scans"):
            raise ValueError("Invalid noise model: " + algorithm)
        if len(params["IFs"]) != 1:
            raise ce.FileParameterTypeError("Can only process a single IF.")

        # Set up to iterate over the pol states.
        npol = 2  # This will be reset when we read the first data block.
        pol_ind = 0

        all_file_names = []

        while pol_ind < npol:
            # Flag for the first block processed (will allocate memory on the
            # first iteration).
            first_block = True
            # Loop over the files to process.
            try:
                for file_middle in params["file_middles"]:
                    input_fname = params["input_root"] + file_middle + params["input_end"]
                    # Read in the data, and loop over data blocks.
                    Reader = fitsGBT.Reader(input_fname, feedback=self.feedback)
                    Blocks = Reader.read(params["scans"], params["IFs"])

                    # Calculate the time variance at each frequency.  This will
                    # be used as weights in most algorithms.
                    if not algorithm == "grid":
                        if not noise_root == "None":
                            # We have measured variance.
                            noise_pars = sp.load(noise_root + file_middle + ".npy")
                            var = noise_pars[params["IFs"][0], pol_ind, 0, :]
                        else:
                            # We need to measure the variance.
                            var = tools.calc_time_var_file(Blocks, pol_ind, 0)
                            # Convert from masked array to array.
                            var = var.filled(9999.0)
                    else:
                        var = 1.0
                    weight = 1 / var

                    for Data in Blocks:
                        dims = Data.dims
                        # On first pass set up the map parameters.
                        if first_block:
                            shape = map_shape + (dims[-1],)
                            Data.calc_freq()
                            centre_freq = Data.freq[dims[-1] // 2]
                            delta_freq = Data.field["CDELT1"]
                            if pol_ind == 0:
                                # Figure out the length of the polarization
                                # loop.
                                npol = dims[1]
                                # Accumulate the data history.
                                history = hist.History(Data.history)
                            # Get the current polarization integer.
                            this_pol = Data.field["CRVAL4"][pol_ind]
                            # Check that we even want to make a dirty map for
                            # this polarization.
                            if (not utils.polint2str(this_pol) in params["polarizations"]) and params["polarizations"]:
                                # Break to the end of the polarization loop.
                                raise ce.NextIteration()
                            # Allocate memory for the map.
                            map_data = sp.zeros(shape, dtype=float)
                            map_data = algebra.make_vect(map_data, axis_names=("ra", "dec", "freq"))
                            # Allocate memory for the inverse map noise.
                            if algorithm in ("grid", "diag_file"):
                                noise_inv = sp.zeros(shape, dtype=float)
                                noise_inv = algebra.make_mat(
                                    noise_inv, axis_names=("ra", "dec", "freq"), row_axes=(0, 1, 2), col_axes=(0, 1, 2)
                                )
                            elif algorithm in ("disjoint_scans", "ds_grad"):
                                # At each frequency use full N^2 noise matrix,
                                # but assume each frequency has uncorrelated
                                # noise. This is a big matrix so make sure it
                                # is reasonable.
                                size = shape[0] ** 2 * shape[1] ** 2 * shape[2]
                                if size > 4e9:  # 16 GB
                                    raise RuntimeError("Map size too big. " "Asked for a lot " "of memory.")
                                noise_inv = sp.zeros(shape[0:2] + shape, dtype=sp.float32)
                                noise_inv = algebra.make_mat(
                                    noise_inv,
                                    axis_names=("ra", "dec", "ra", "dec", "freq"),
                                    row_axes=(0, 1, 4),
                                    col_axes=(2, 3, 4),
                                )
                                # Allocate memory for temporary data. Hold the
                                # number of times each pixel in this scan is
                                # hit. Factor of 2 longer in time in case some
                                # scans are longer than first block (guppi).
                                pixel_hits = sp.empty((2 * dims[0], dims[-1]))
                            first_block = False
                        else:
                            if pol_ind == 0:
                                history.merge(Data)
                        # Figure out the pointing pixel index and the frequency
                        # indices.
                        Data.calc_pointing()
                        ra_inds = tools.calc_inds(Data.ra, params["field_centre"][0], shape[0], ra_spacing)
                        dec_inds = tools.calc_inds(
                            Data.dec, params["field_centre"][1], shape[1], params["pixel_spacing"]
                        )
                        data = Data.data[:, pol_ind, 0, :]
                        if algorithm in ("grid", "diag_file"):
                            add_data_2_map(data, ra_inds, dec_inds, map_data, noise_inv, weight)
                        elif algorithm in ("disjoint_scans",):
                            add_data_2_map(data - ma.mean(data, 0), ra_inds, dec_inds, map_data, None, weight)
                            pixel_hits[:] = 0
                            pixel_list = pixel_counts(data, ra_inds, dec_inds, pixel_hits, map_shape=shape[0:2])
                            add_scan_noise(pixel_list, pixel_hits, var, noise_inv)
                        # End Blocks for loop.
                    # End file name for loop.
                # Now write the dirty maps out for this polarization.
                # Use memmaps for this since we want to reorganize data
                # and write at the same time.
                # New maps will have the frequency axis as slowly varying, for
                # future efficiency.
                map_file_name = params["output_root"] + "dirty_map_" + utils.polint2str(this_pol) + ".npy"
                mfile = algebra.open_memmap(map_file_name, mode="w+", shape=(shape[2],) + shape[:2])
                map_mem = algebra.make_vect(mfile, axis_names=("freq", "ra", "dec"))
                # And the noise matrix.
                noise_file_name = params["output_root"] + "noise_inv_" + utils.polint2str(this_pol) + ".npy"
                if algorithm in ("disjoint_scans", "ds_grad"):
                    mfile = algebra.open_memmap(noise_file_name, mode="w+", shape=(shape[2],) + shape[:2] * 2)
                    noise_mem = algebra.make_mat(
                        mfile, axis_names=("freq", "ra", "dec", "ra", "dec"), row_axes=(0, 1, 2), col_axes=(0, 3, 4)
                    )
                else:
                    mfile = algebra.open_memmap(noise_file_name, mode="w+", shape=(shape[2],) + shape[:2])
                    noise_mem = algebra.make_mat(
                        mfile, axis_names=("freq", "ra", "dec"), row_axes=(0, 1, 2), col_axes=(0, 1, 2)
                    )
                # Give the data arrays axis information.
                map_mem.set_axis_info("freq", centre_freq, delta_freq)
                map_mem.set_axis_info("ra", params["field_centre"][0], ra_spacing)
                map_mem.set_axis_info("dec", params["field_centre"][1], params["pixel_spacing"])
                noise_mem.set_axis_info("freq", centre_freq, delta_freq)
                noise_mem.set_axis_info("ra", params["field_centre"][0], ra_spacing)
                noise_mem.set_axis_info("dec", params["field_centre"][1], params["pixel_spacing"])
                # Copy the data to the memory maps after rearranging.
                # The rollaxis should return a view, so this should
                # be memory efficient.
                map_mem[...] = sp.rollaxis(map_data, -1)
                noise_mem[...] = sp.rollaxis(noise_inv, -1)

                # Free up all that memory and flush memory maps to file.
                del mfile, map_mem, noise_mem, map_data, noise_inv

                # Save the file names for the history.
                all_file_names.append(kiyopy.utils.abbreviate_file_path(map_file_name))
                all_file_names.append(kiyopy.utils.abbreviate_file_path(noise_file_name))
            except ce.NextIteration:
                pass
            pol_ind += 1
            # End polarization for loop.
        history.add("Made dirty map.", all_file_names)
        h_file_name = params["output_root"] + "history.hist"
        history.write(h_file_name)
Example #29
 def process_file(self, middle) :
     """Split off to fix pyfits memory leak."""
     params = self.params
     # Construct the file name and read in all scans.
     file_name = params["input_root"] + middle + ".fits"
     Reader = fitsGBT.Reader(file_name)
     Blocks = Reader.read((), (), force_tuple=True)
     # Plotting limits need to be adjusted for on-off scans.
     if file_name.find("onoff") != -1 :
         onoff=True
     else :
         onoff=False
     # Initialize a few variables.
     counts = 0
     cal_sum_unscaled = 0
     cal_sum = 0
     cal_time = ma.zeros((0, 4))
     sys_time = ma.zeros((0, 4))
     cal_noise_spec = 0
     # Get the number of times in the first block and shorten to a
     # number that should be smaller than all blocks.
     nt = int(Blocks[0].dims[0]*.9)
     # Get the frequency axis.  Must be before loop because the data is
      # rebinned in the loop.
     Blocks[0].calc_freq()
     f = Blocks[0].freq
     for Data in Blocks :
         # Rotate to XX, YY etc.
         rotate_pol.rotate(Data, (-5, -7, -8, -6))
         this_count = ma.count(Data.data[:,:,0,:] 
                               + Data.data[:,:,1,:], 0)
         cal_sum_unscaled += ma.sum(Data.data[:,:,0,:] +
                 Data.data[:,:,1,:], 0)
          # Time series of the cal temperature.
         cal_time = sp.concatenate((cal_time, ma.mean(Data.data[:,:,0,:]
             - Data.data[:,:,1,:], -1).filled(-1)), 0)
         # Everything else done in cal units.
         cal_scale.scale_by_cal(Data)
          # Time series of the system temperature.
         sys_time = sp.concatenate((sys_time, ma.mean(Data.data[:,:,0,:]
             + Data.data[:,:,1,:], -1).filled(-5)), 0)
          # Accumulate various sums.
         counts += this_count
         cal_sum += ma.sum(Data.data[:,:,0,:] + Data.data[:,:,1,:], 0)
         # Take power spectrum of on-off/on+off.
         rebin_freq.rebin(Data, 512, mean=True, by_nbins=True)
         cal_diff = ((Data.data[:,[0,-1],0,:] 
                      - Data.data[:,[0,-1],1,:])
                     / (Data.data[:,[0,-1],0,:] 
                        + Data.data[:,[0,-1],1,:]))
         cal_diff -= ma.mean(cal_diff, 0)
         cal_diff = cal_diff.filled(0)[0:nt,...]
         power = abs(fft.fft(cal_diff, axis=0)[range(nt//2+1)])
         power = power**2/nt
         cal_noise_spec += power
     # Normalize.
     cal_sum_unscaled /= 2*counts
     cal_sum /= 2*counts
      # Get time steps and frequency width for noise power normalization.
     Data = Blocks[0]
     Data.calc_time()
     dt = abs(sp.mean(sp.diff(Data.time)))
      # Note that Data was rebinned in the loop.
     dnu = abs(Data.field["CDELT1"])
     cal_noise_spec *= dt*dnu/len(Blocks)
      # Power spectrum independent axis.
     ps_freqs = sp.arange(nt//2 + 1, dtype=float)
     ps_freqs /= (nt//2 + 1)*dt*2
     # Long time axis.
     t_total = sp.arange(cal_time.shape[0])*dt
     # Make plots.
     h = plt.figure(figsize=(10,10))
     # Unscaled temperature spectrum.
     plt.subplot(3, 2, 1)
     plt.plot(f/1e6, sp.rollaxis(cal_sum_unscaled, -1))
     plt.xlim((7e2, 9e2))
     plt.xlabel("frequency (MHz)")
     plt.title("System temperature - mean over time")
      # Temperature spectrum in terms of noise cal. 4 Polarizations.
     plt.subplot(3, 2, 2)
     plt.plot(f/1e6, sp.rollaxis(cal_sum, -1))
     if onoff :
         plt.ylim((-1, 60))
     else :
         plt.ylim((-10, 40))
     plt.xlim((7e2, 9e2))
     plt.xlabel("frequency (MHz)")
     plt.title("System temperature in cal units")
      # Time series of cal T.
     plt.subplot(3, 2, 3)
     plt.plot(t_total, cal_time)
     if onoff :
         plt.xlim((0,dt*900))
     else :
         plt.xlim((0,dt*3500))
     plt.xlabel("time (s)")
     plt.title("Noise cal temperature - mean over frequency")
     # Time series of system T.
     plt.subplot(3, 2, 4)
     plt.plot(t_total, sys_time)
     plt.xlabel("time (s)")
     if onoff :
         plt.ylim((-4, 90))
         plt.xlim((0,dt*900))
     else :
         plt.ylim((-4, 35))
         plt.xlim((0,dt*3500))
     plt.title("System temperature in cal units")
     # XX cal PS.
     plt.subplot(3, 2, 5)
     plt.loglog(ps_freqs, cal_noise_spec[:,0,:])
     plt.xlim((1.0/60, 1/(2*dt)))
     plt.ylim((1e-1, 1e3))
     plt.xlabel("frequency (Hz)")
     plt.title("XX cal power spectrum")
     # YY cal PS.
     plt.subplot(3, 2, 6)
     plt.loglog(ps_freqs, cal_noise_spec[:,1,:])
     plt.xlim((1.0/60, 1/(2*dt)))
     plt.ylim((1e-1, 1e3))
     plt.xlabel("frequency (Hz)")
     plt.title("YY cal power spectrum")
     # Adjust spacing.
     plt.subplots_adjust(hspace=.4)
     # Save the figure.
     plt.savefig(params['output_root'] + middle
             + params['output_end'])
Example #30
def sub_map(Data, Maps, correlate=False, pols=(), make_plots=False,
            interpolation='nearest') :
    """Subtracts a Map out of Data."""
    
    # Import locally since many machines don't have matplotlib.
    if make_plots :
        import matplotlib.pyplot as plt
    
    # Convert pols to an iterable.
    if pols is None :
        pols = range(Data.dims[1])
    elif not hasattr(pols, '__iter__') :
        pols = (pols, )
    elif len(pols) == 0 :
        pols = range(Data.dims[1])
    # If solving for gains, need a place to store them.
    if correlate :
        out_gains = sp.empty((len(pols),) + Data.dims[2:4])
    for pol_ind in pols :
        # Check whether one map or multiple maps were passed.
        if isinstance(Maps, list) or isinstance(Maps, tuple) :
            if len(Maps) != len(pols) :
                raise ValueError("Must provide one map, or one map per "
                                 "polarization.")
            Map = Maps[pol_ind]
        else :
            Map = Maps
        if not Map.axes == ('freq', 'ra', 'dec') :
            raise ValueError("Expected map axes to be ('freq', 'ra', 'dec').")
        Data.calc_pointing()
        Data.calc_freq()
        # Map Parameters.
        centre = (Map.info['freq_centre'], Map.info['ra_centre'],
                  Map.info['dec_centre'])
        shape = Map.shape
        spacing = (Map.info['freq_delta'], Map.info['ra_delta'], 
                   Map.info['dec_delta'])
        # Nearest code is deprecated.  We could just use the general code.
        if interpolation == 'nearest' :
            # These indices are the length of the time axis. Integer indices.
            ra_ind = map.tools.calc_inds(Data.ra, centre[1], shape[1],
                                         spacing[1])
            dec_ind = map.tools.calc_inds(Data.dec, centre[2], shape[2],
                                          spacing[2])
            # Exclude indices that are off map or out of band. Boolean indices.
            on_map_inds = sp.logical_and(
                                 sp.logical_and(ra_ind>=0, ra_ind<shape[1]),
                                 sp.logical_and(dec_ind>=0, dec_ind<shape[2]))
            # Make an array of map data the size of the time stream data.
            submap = Map[:, ra_ind[on_map_inds], dec_ind[on_map_inds]]
        else :
            map_ra = Map.get_axis('ra')
            map_dec = Map.get_axis('dec')
            on_map_inds = sp.logical_and(
                sp.logical_and(Data.ra > min(map_ra), Data.ra < max(map_ra)),
                sp.logical_and(Data.dec > min(map_dec), Data.dec<max(map_dec)))
            submap = sp.empty((Map.shape[0], sp.sum(on_map_inds)), dtype=float)
            jj = 0
            for ii in range(len(on_map_inds)) :
                if on_map_inds[ii] :
                    submap[:, jj] = Map.slice_interpolate([1, 2], 
                            [Data.ra[ii], Data.dec[ii]], kind=interpolation)
                    jj += 1
        # Length of the data frequency axis.
        freq_ind = map.tools.calc_inds(Data.freq, centre[0], shape[0], 
                                       spacing[0])
        in_band_inds = sp.logical_and(freq_ind >= 0, freq_ind < shape[0])
        submap = submap[freq_ind[in_band_inds], ...]
        # Broadcast to the same shape and combine.
        covered_inds = sp.logical_and(on_map_inds[:, sp.newaxis], 
                                      in_band_inds[sp.newaxis, :])
        # submap is the size of the data that is on the map.  Expand to full 
        # size of data.
        subdata = sp.zeros(sp.shape(covered_inds))
        subdata[covered_inds] = sp.rollaxis(submap, 1, 0).flatten()
        subdata[sp.logical_not(covered_inds)] = 0.0
        # Now start using the actual data.  Loop over cal and pol indices.
        for cal_ind in range(Data.dims[2]) :
            data = Data.data[:,pol_ind, cal_ind, :]
            data[sp.logical_not(covered_inds)] = ma.masked
            # Find the common good indices.
            un_mask = sp.logical_not(data.mask)
            # Find the number of good indicies at each frequency.
            counts = sp.sum(un_mask, 0)
            counts[counts == 0] = -1
            # Subtract out the mean from the map.
            tmp_subdata = (subdata - sp.sum(un_mask*subdata, 0)/counts)
            # Correlate to solve for an unknown gain.
            if correlate :
                tmp_data = data.filled(0.0)
                tmp_data = (tmp_data - sp.sum(un_mask*data, 0)
                            / counts)
                gain = (sp.sum(un_mask*tmp_subdata*tmp_data, 0) / 
                        sp.sum(un_mask*tmp_subdata*tmp_subdata, 0))
                gain[counts == -1] = 0.0
                out_gains[pol_ind,cal_ind,:] = gain
            else :
                gain = 1.0
            # Now do the subtraction and mask the off map data.  We use the
            # mean subtracted map, to preserve data mean.
            if make_plots :
                plt.figure()
                plt.plot(ma.mean((gain*tmp_subdata), -1), '.b')
                plt.plot(ma.mean((tmp_subdata), -1), '.r')
                plt.plot(ma.mean((data - ma.mean(data, 0)), -1), '.g')
                #plt.plot(ma.mean((data), -1), '.g')
                #plt.plot((gain*tmp_subdata)[:, 45], '.b')
                #plt.plot((data - ma.mean(data, 0))[:, 45], '.g')
            data[...] -= gain*tmp_subdata
    if correlate :
        return out_gains
Example #31
    def test_load_rasters(self):
        # Write a file to test
        f = tempfile.NamedTemporaryFile(suffix='.txt',
                                        prefix='HAZIMPtest_jobs',
                                        delete=False)
        f.write('exposure_latitude, exposure_longitude, ID, haz_0, haz_1\n')
        f.write('8.1, 0.1, 1, 4, 40\n')
        f.write('7.9, 1.5, 2, -9999, -9999\n')
        f.write('8.9, 2.9, 3, 6, 60\n')
        f.write('8.9, 3.1, 4, -9999, -9999\n')
        f.write('9.9, 2.9, 5, -9999, -9999\n')
        f.close()

        inst = JOBS[LOADCSVEXPOSURE]
        con_in = context.Context()
        con_in.exposure_lat = con_in.exposure_long = None
        con_in.exposure_att = {}
        test_kwargs = {'file_name': f.name, 'use_parallel': False}
        inst(con_in, **test_kwargs)
        os.remove(f.name)

        # Write a hazard file
        f = tempfile.NamedTemporaryFile(suffix='.aai',
                                        prefix='HAZIMPtest_jobs',
                                        delete=False)
        f.write('ncols 3 \r\n')
        f.write('nrows 2 \r\n')
        f.write('xllcorner +0. \r\n')
        f.write('yllcorner +8. \r\n')
        f.write('cellsize 1 \r\n')
        f.write('NODATA_value -9999 \r\n')
        f.write('1 2 -9999 \r\n')
        f.write('4 5 6 ')
        f.close()
        files = [f.name]

        # Write another hazard file
        f = tempfile.NamedTemporaryFile(suffix='.aai',
                                        prefix='HAZIMPtest_jobs',
                                        delete=False)
        f.write('ncols 3 \r\n')
        f.write('nrows 2 \r\n')
        f.write('xllcorner +0. \r\n')
        f.write('yllcorner +8. \r\n')
        f.write('cellsize 1 \r\n')
        f.write('NODATA_value -9999 \r\n')
        f.write('10 20 -9999 \r\n')
        f.write('40 50 60 ')
        f.close()
        files.append(f.name)

        haz_v = 'haz_v'
        inst = JOBS[LOADRASTER]
        test_kwargs = {'file_list': files, 'attribute_label': haz_v}
        inst(con_in, **test_kwargs)
        the_nans = isnan(con_in.exposure_att[haz_v])
        con_in.exposure_att[haz_v][the_nans] = -9999
        actual = asarray(
            [con_in.exposure_att['haz_0'], con_in.exposure_att['haz_1']])
        actual = rollaxis(actual, 1)
        msg = "con_in.exposure_att[haz_av] " \
            + str(con_in.exposure_att[haz_v])
        msg += "\n actual " + str(actual)
        self.assertTrue(allclose(con_in.exposure_att[haz_v], actual), msg)

        for a_file in files:
            os.remove(a_file)
Example #32
    def test_load_rasters(self):
        # Write a file to test
        f = tempfile.NamedTemporaryFile(
            suffix='.txt', prefix='HAZIMPtest_jobs',
            delete=False)
        f.write('exposure_latitude, exposure_longitude, ID, haz_0, haz_1\n')
        f.write('8.1, 0.1, 1, 4, 40\n')
        f.write('7.9, 1.5, 2, -9999, -9999\n')
        f.write('8.9, 2.9, 3, 6, 60\n')
        f.write('8.9, 3.1, 4, -9999, -9999\n')
        f.write('9.9, 2.9, 5, -9999, -9999\n')
        f.close()

        inst = JOBS[LOADCSVEXPOSURE]
        con_in = context.Context()
        con_in.exposure_lat = con_in.exposure_long = None
        con_in.exposure_att = {}
        test_kwargs = {'file_name': f.name, 'use_parallel': False}
        inst(con_in, **test_kwargs)
        os.remove(f.name)

        # Write a hazard file
        f = tempfile.NamedTemporaryFile(
            suffix='.aai', prefix='HAZIMPtest_jobs',
            delete=False)
        f.write('ncols 3 \r\n')
        f.write('nrows 2 \r\n')
        f.write('xllcorner +0. \r\n')
        f.write('yllcorner +8. \r\n')
        f.write('cellsize 1 \r\n')
        f.write('NODATA_value -9999 \r\n')
        f.write('1 2 -9999 \r\n')
        f.write('4 5 6 ')
        f.close()
        files = [f.name]

        # Write another hazard file
        f = tempfile.NamedTemporaryFile(
            suffix='.aai', prefix='HAZIMPtest_jobs',
            delete=False)
        f.write('ncols 3 \r\n')
        f.write('nrows 2 \r\n')
        f.write('xllcorner +0. \r\n')
        f.write('yllcorner +8. \r\n')
        f.write('cellsize 1 \r\n')
        f.write('NODATA_value -9999 \r\n')
        f.write('10 20 -9999 \r\n')
        f.write('40 50 60 ')
        f.close()
        files.append(f.name)

        haz_v = 'haz_v'
        inst = JOBS[LOADRASTER]
        test_kwargs = {'file_list': files, 'attribute_label': haz_v}
        inst(con_in, **test_kwargs)
        the_nans = isnan(con_in.exposure_att[haz_v])
        con_in.exposure_att[haz_v][the_nans] = -9999
        actual = asarray([con_in.exposure_att['haz_0'],
                          con_in.exposure_att['haz_1']])
        actual = rollaxis(actual, 1)
        msg = "con_in.exposure_att[haz_av] " \
            + str(con_in.exposure_att[haz_v])
        msg += "\n actual " + str(actual)
        self.assertTrue(allclose(con_in.exposure_att[haz_v], actual), msg)

        for a_file in files:
            os.remove(a_file)
Example #33
def get_benchmark_im(file_id):
    """Gets the experimental image associated with ``file_id``"""
    filepath = os.path.join(IMAGE_FOLDER, file_id + '.tif')
    return sp.rollaxis(sp.log(tifffile.imread(filepath)), 0, 3)
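The roll moves tifffile's channels-first stack to channels-last; a numpy sketch with an assumed two-channel image:

import numpy as np

stack = np.ones((2, 100, 100))         # e.g. a two-channel TIFF, channels first
im = np.rollaxis(np.log(stack), 0, 3)  # channels last: (100, 100, 2)
assert im.shape == (100, 100, 2)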
Example #34
 def test_build_noise(self):
     map = self.map
     time_stream, ra, dec, az, el, time, mask_inds = \
                                            self.DM.get_all_trimmed()
     nt = len(time)
     Noise = dirty_map.Noise(time_stream, time)
     thermal_noise_levels = sp.zeros((nf_d)) + 0.04  # Kelvin**2
     Noise.add_thermal(thermal_noise_levels)
     Noise.add_mask(mask_inds)
     self.assertTrue(sp.alltrue(Noise.diagonal[mask_inds] > 10))
     Noise.deweight_time_mean()
     Noise.deweight_time_slope()
     Noise.add_correlated_over_f(0.01, -1.2, 0.1)
     Noise.finalize()
     #### Test the full inverse.
      # First get a full representation of the noise matrix
     #tmp_mat = sp.zeros((nf_d, nt, nf_d, nt))
     #tmp_mat.flat[::nt*nf_d + 1] += Noise.diagonal.flat
     #for jj in xrange(Noise.time_modes.shape[0]):
     #    tmp_mat += (Noise.time_mode_noise[jj,:,None,:,None]
     #                * Noise.time_modes[jj,None,:,None,None]
     #                * Noise.time_modes[jj,None,None,None,:])
     #for jj in xrange(Noise.freq_modes.shape[0]):
     #    tmp_mat +=  (Noise.freq_mode_noise[jj,None,:,None,:]
     #                 * Noise.freq_modes[jj,:,None,None,None]
     #                 * Noise.freq_modes[jj,None,None,:,None])
     tmp_mat = Noise.get_mat()
     tmp_mat.shape = (nt * nf_d, nt * nf_d)
      # Check that the matrix I built for testing is indeed symmetric.
     self.assertTrue(sp.allclose(tmp_mat, tmp_mat.transpose()))
     noise_inv = Noise.get_inverse()
     noise_inv.shape = (nt * nf_d, nt * nf_d)
      # Check that the production matrix is symmetric.
     self.assertTrue(sp.allclose(noise_inv, noise_inv.transpose()))
     tmp_eye = sp.dot(tmp_mat, noise_inv)
     #print tmp_eye
     noise_inv.shape = (nf_d, nt, nf_d, nt)
     self.assertTrue(sp.allclose(tmp_eye, sp.identity(nt * nf_d)))
     # Check that the calculation of the diagonal is correct.
     noise_inv_diag = Noise.get_inverse_diagonal()
     self.assertTrue(
         sp.allclose(noise_inv_diag.flat, noise_inv.flat[::nf_d * nt + 1]))
     #### Test the noise weighting of the data.
     noise_weighted_data = Noise.weight_time_stream(time_stream)
     self.assertTrue(
         sp.allclose(noise_weighted_data, al.dot(noise_inv, time_stream)))
     #### Test making noise in map space.
     # First make the noise matrix by brute force.
     P = dirty_map.Pointing(("ra", "dec"), (ra, dec), map, 'nearest')
     P_mat = P.get_matrix()
     tmp_map_noise_inv = al.partial_dot(noise_inv, P_mat)
     tmp_map_noise_inv = al.partial_dot(P_mat.mat_transpose(),
                                        tmp_map_noise_inv)
     # I mess up the meta data by doing this, but rotate the axes so they
     # are in the desired order.
     tmp_map_noise_inv = sp.rollaxis(tmp_map_noise_inv, 2, 0)
     # Now use fast methods.
     map_noise_inv = sp.zeros((nf_d, nra_d, ndec_d, nf_d, nra_d, ndec_d),
                              dtype=float)
     map_noise_inv = al.make_mat(map_noise_inv,
                                 axis_names=('freq', 'ra', 'dec', 'freq',
                                             'ra', 'dec'),
                                 row_axes=(0, 1, 2),
                                 col_axes=(3, 4, 5))
     start = time_module.clock()
     for ii in xrange(nf_d):
         for jj in xrange(nra_d):
             P.noise_to_map_domain(Noise, ii, jj,
                                   map_noise_inv[ii, jj, :, :, :, :])
     stop = time_module.clock()
     #print "Constructing map noise took %5.2f seconds." % (stop - start)
     self.assertTrue(sp.allclose(map_noise_inv, tmp_map_noise_inv))
Example #35
 def test_build_noise(self):
     map = self.map
     time_stream, ra, dec, az, el, time, mask_inds = \
                                            self.DM.get_all_trimmed()
     nt = len(time)
     Noise = dirty_map.Noise(time_stream, time)
     thermal_noise_levels = sp.zeros((nf_d)) + 0.04  # Kelvin**2
     Noise.add_thermal(thermal_noise_levels)
     Noise.add_mask(mask_inds)
     self.assertTrue(sp.alltrue(Noise.diagonal[mask_inds] > 10))
     Noise.deweight_time_mean()
     Noise.deweight_time_slope()
     Noise.add_correlated_over_f(0.01, -1.2, 0.1)
     Noise.finalize()
     #### Test the full inverse.
      # First get a full representation of the noise matrix
     #tmp_mat = sp.zeros((nf_d, nt, nf_d, nt))
     #tmp_mat.flat[::nt*nf_d + 1] += Noise.diagonal.flat
     #for jj in xrange(Noise.time_modes.shape[0]):
     #    tmp_mat += (Noise.time_mode_noise[jj,:,None,:,None]
     #                * Noise.time_modes[jj,None,:,None,None]
     #                * Noise.time_modes[jj,None,None,None,:])
     #for jj in xrange(Noise.freq_modes.shape[0]):
     #    tmp_mat +=  (Noise.freq_mode_noise[jj,None,:,None,:]
     #                 * Noise.freq_modes[jj,:,None,None,None]
     #                 * Noise.freq_modes[jj,None,None,:,None])
     tmp_mat = Noise.get_mat()
     tmp_mat.shape = (nt*nf_d, nt*nf_d)
      # Check that the matrix I built for testing is indeed symmetric.
     self.assertTrue(sp.allclose(tmp_mat, tmp_mat.transpose()))
     noise_inv = Noise.get_inverse()
     noise_inv.shape = (nt*nf_d, nt*nf_d)
      # Check that the production matrix is symmetric.
     self.assertTrue(sp.allclose(noise_inv, noise_inv.transpose()))
     tmp_eye = sp.dot(tmp_mat, noise_inv)
     #print tmp_eye
     noise_inv.shape = (nf_d, nt, nf_d, nt)
     self.assertTrue(sp.allclose(tmp_eye, sp.identity(nt*nf_d)))
     # Check that the calculation of the diagonal is correct.
     noise_inv_diag = Noise.get_inverse_diagonal()
     self.assertTrue(sp.allclose(noise_inv_diag.flat, 
                                 noise_inv.flat[::nf_d*nt + 1]))
     #### Test the noise weighting of the data.
     noise_weighted_data = Noise.weight_time_stream(time_stream)
     self.assertTrue(sp.allclose(noise_weighted_data, al.dot(noise_inv,
                                                             time_stream)))
     #### Test making noise in map space.
     # First make the noise matrix by brute force.
     P = dirty_map.Pointing(("ra", "dec"), (ra, dec), map, 'nearest')
     P_mat = P.get_matrix()
     tmp_map_noise_inv = al.partial_dot(noise_inv,
                                        P_mat)
     tmp_map_noise_inv = al.partial_dot(P_mat.mat_transpose(), 
                                        tmp_map_noise_inv)
     # I mess up the meta data by doing this, but rotate the axes so they
     # are in the desired order.
     tmp_map_noise_inv = sp.rollaxis(tmp_map_noise_inv, 2, 0)
     # Now use fast methods.
     map_noise_inv = sp.zeros((nf_d, nra_d, ndec_d, nf_d, nra_d, ndec_d),
                              dtype=float)
     map_noise_inv = al.make_mat(map_noise_inv, axis_names=('freq', 'ra', 
         'dec', 'freq', 'ra', 'dec'), row_axes=(0, 1, 2), 
         col_axes=(3, 4, 5))
     start = time_module.clock()
     for ii in xrange(nf_d):
         for jj in xrange(nra_d):
             P.noise_to_map_domain(Noise, ii, jj,
                                   map_noise_inv[ii,jj,:,:,:,:])
     stop = time_module.clock()
     #print "Constructing map noise took %5.2f seconds." % (stop - start)
     self.assertTrue(sp.allclose(map_noise_inv, tmp_map_noise_inv))
Example #36
def get_benchmark_im(file_id):
    """Gets the experimental image associated with ``file_id``"""
    filepath = os.path.join(IMAGE_FOLDER, file_id + '.tif')
    return sp.rollaxis(sp.log(tifffile.imread(filepath)), 0, 3)