Example No. 1
 def __init__(self, genprior=nullprobs):
     self.genprior = genprior
     self.scoreprobs = {}
     for genotype, peakpriors in self.scorepseudos.items():
         self.scoreprobs[genotype] = cprobs = {}
         for peaks, prior in peakpriors.items():
             # At some point, may want to actually sample here.
             # There are tonnes of data in the calls I'm making,
             # after all.
             probs = prior.mean()
             dim = 1 if len(probs) == nscores else 2
             cprobs[peaks] = scipy.reshape(
                 probs, (nscores,)*dim)
     self.peakprobs = dict(
         (g, p.mean()) for g, p in self.peakpseudos.items())
     self.posteriors = {}
     for peaks in ppeaks:
         peakidx = peakidxs[peaks]
         dim = len(peaks)
         self.posteriors[peaks] = scipy.zeros(
             dim*(nscores,) + (len(pcalls),), 'f')
         indxs = zip(*[i.flatten() for i in scipy.indices((6,)*dim)])
         for binidx in indxs:
             total = 0
             for gi, g in enumerate(pcalls):
                 prob = (self.genprior[g]*
                         self.peakprobs[g][peakidx]*
                         self.scoreprobs[g][peaks][binidx])
                 self.posteriors[peaks][binidx][gi] = prob
                 total += prob
             assert total != 0
             self.posteriors[peaks][binidx] /= total
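The loop above is just Bayes' rule applied per score bin: multiply the genotype prior by the peak and score likelihoods, then rescale so the genotype axis sums to one. A minimal sketch with made-up numbers (the names below are illustrative, not from the original):

import numpy as np

prior = np.array([0.25, 0.5, 0.25])      # P(genotype), like self.genprior
likelihood = np.array([0.1, 0.6, 0.3])   # peak prob * score prob for one bin
unnorm = prior * likelihood
posterior = unnorm / unnorm.sum()        # mirrors the `/= total` step above
assert abs(posterior.sum() - 1.0) < 1e-12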
Example No. 2
 def errorSquare(p):
     cd = self.clipData
     # data2D = data[0:shape[2], self.Plane, 0:shape[0]];
     # depth, height, width = z, y, x here.
     y, x = scipy.indices(cd.shape)
     f = ferf(*p)
     return (f(x, y) - cd).ravel()
Example No. 3
def get_centers_single_image(truth, im_no, border=20):
    """Returns a dict of arrays, one for each pixel type. The arrays are compatible with caffe_tools.fill_database.

    The last row of each array is equal to ``im_no``, indicating which image those centers were created from."""
    indices = sp.indices(truth.shape)
    im_nos = im_no * sp.ones((1, truth.shape[0], truth.shape[1]), dtype=int)
    indices = sp.concatenate((indices, im_nos))

    away_from_border = sp.zeros(truth.shape, dtype=bool)
    away_from_border[border:-border, border:-border] = True

    results = {
        'inside':
        indices[:, (0.75 < truth) & away_from_border],
        'outside':
        indices[:, (truth < 0.25) & away_from_border],
        'inside_damaged':
        indices[:, make_damaged_spot_mask(truth) & away_from_border],
        'outside_damaged':
        indices[:,
                make_outside_near_damaged_spot_mask(truth) & away_from_border],
        'block_border':
        indices[:, make_block_border_mask(truth) & away_from_border],
        'between':
        indices[:, make_between_spot_mask(truth) & away_from_border]
    }

    return results
Example No. 4
 def acquire(self, experiment):
     X, Y = indices((100, 100))
     Z = exp(-((X - experiment.x)**2 + (Y - experiment.y)**2)/experiment.width**2)
     Z += 1 - 2*rand(100, 100)
     Z *= self.exposure
     Z[Z > 2] = 2
     Z = Z**self.gain
     return Z
Example No. 5
def process(image, results_obj):
    """ Function called to do the processing """
    X, Y = indices(image.shape)
    x = sum(X*image)/sum(image)   # intensity-weighted centroid
    y = sum(Y*image)/sum(image)
    width = sqrt(abs(sum(((X - x)**2 + (Y - y)**2)*image)/sum(image)))  # RMS spot size
    results_obj.x = x
    results_obj.y = y
    results_obj.width = width
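A rough usage sketch for `process` (assuming `sum`, `sqrt` and `indices` resolve to numpy's, as in the snippet's original star-import context; the `_Result` holder is hypothetical): a clean Gaussian spot at (40, 60) with width 8 should be recovered almost exactly.

from numpy import indices, exp

class _Result(object):
    pass

X, Y = indices((100, 100))
image = exp(-((X - 40.0)**2 + (Y - 60.0)**2) / 8.0**2)

res = _Result()
process(image, res)
print(res.x, res.y, res.width)   # roughly 40, 60 and 8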
Example No. 6
def eval_psf_match(p, data1, data2):
   # p packs the amplitude of data1 and its (x, y) shift relative to data2
   amp, x, y = p
   coords = scipy.indices(data1.shape).astype(scipy.float64)
   coords[0] += y
   coords[1] += x
   data = amp * data1

   shift = ndimage.map_coordinates(data,coords,output=scipy.float64)

   return (shift - data2).flatten()
Example No. 7
    def __init__(self, *args, **kwargs):
        """
		A linear transformation. If initialized with the DCT basis, the first
		feature corresponds to the DC component.

		@type  A: array_like
		@param A: linear transform matrix

		@type  basis: string
		@param basis: currently only 'DCT' is an available basis

		@type  dim: integer
		@param dim: dimensionality of basis
		"""

        if 'A' in kwargs:
            self.A = asarray(kwargs['A'])
            self.dim = self.A.shape[0]

        if len(args) > 0:
            if 'basis' in kwargs:
                raise ValueError('Did you forget to use the `dim` keyword?')
            self.A = asarray(args[0])
            self.dim = self.A.shape[0]

        elif 'basis' in kwargs:
            if 'dim' not in kwargs:
                raise ValueError('Please specify a dimensionality, `dim`.')

            self.dim = kwargs['dim']

            if kwargs['basis'].upper() == 'DCT':
                I, J = indices([kwargs['dim'], kwargs['dim']])

                A = []

                for p in range(kwargs['dim']):
                    for q in range(kwargs['dim']):
                        F = 2. * multiply(
                            cos(pi * (2. * I + 1.) * p / (2. * kwargs['dim'])),
                            cos(pi * (2. * J + 1.) * q /
                                (2. * kwargs['dim']))) / kwargs['dim']

                        if p == 0:
                            F /= sqrt(2.)

                        if q == 0:
                            F /= sqrt(2.)

                        A.append(F.reshape(1, -1))

                self.A = vstack(A)
        else:
            raise ValueError('Please specify a linear transform.')
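A quick property check for the DCT branch above (a sketch: the owning class name `LinearTransform` is a guess for whatever class holds this `__init__`, and numpy is assumed). The rows built this way form an orthonormal basis, so A A^T should be the identity.

import numpy as np

t = LinearTransform(basis='DCT', dim=4)             # hypothetical class name
assert np.allclose(np.dot(t.A, t.A.T), np.eye(16))  # 16 = dim * dim rows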
Example No. 8
def _radial_profile(autocorr, r_max, nbins=100):
    r"""
    Helper functions to calculate the radial profile of the autocorrelation
    Masks the image in radial segments from the center and averages the values
    The distance values are normalized and 100 bins are used as default.

    Parameters
    ----------
    autocorr : ND-array
        The image of autocorrelation produced by FFT
    r_max : int or float
        The maximum radius in pixels to sum the image over

    Returns
    -------
    result : named_tuple
        A named tupling containing an array of ``bins`` of radial position
        and an array of ``counts`` in each bin.
    """
    if len(autocorr.shape) == 2:
        adj = sp.reshape(autocorr.shape, [2, 1, 1])
        inds = sp.indices(autocorr.shape) - adj / 2
        dt = sp.sqrt(inds[0]**2 + inds[1]**2)
    elif len(autocorr.shape) == 3:
        adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
        inds = sp.indices(autocorr.shape) - adj / 2
        dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
    else:
        raise Exception('Image dimensions must be 2 or 3')
    bin_size = int(np.ceil(r_max / nbins))  # plain int; np.int is deprecated
    bins = np.arange(bin_size, r_max, step=bin_size)
    radial_sum = np.zeros_like(bins, dtype=float)  # float, so the averages below aren't truncated
    for i, r in enumerate(bins):
        # Generate Radial Mask from dt using bins
        mask = (dt <= r) * (dt > (r - bin_size))
        radial_sum[i] = np.sum(autocorr[mask]) / np.sum(mask)
    # Return normalized bin and radially summed autoc
    norm_autoc_radial = radial_sum / np.max(autocorr)
    tpcf = namedtuple('two_point_correlation_function',
                      ('distance', 'probability'))
    return tpcf(bins, norm_autoc_radial)
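Usage sketch (assuming `np` is numpy and `_radial_profile` is in scope): feed in a synthetic, isotropically decaying "autocorrelation" and the returned profile should fall off with distance.

import numpy as np

side = 64
inds = np.indices((side, side)) - side // 2
r = np.sqrt((inds**2).sum(axis=0))
autocorr = np.exp(-r / 10.0)                 # isotropic decay from the center

profile = _radial_profile(autocorr, r_max=side // 2, nbins=16)
print(profile.distance)       # bin edges in pixels
print(profile.probability)    # decreasing, roughly exp(-r / 10)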
Example No. 9
	def __init__(self, *args, **kwargs):
		"""
		A linear transformation. If initialized with the DCT basis, the first
		feature corresponds to the DC component.

		@type  A: array_like
		@param A: linear transform matrix

		@type  basis: string
		@param basis: currently only 'DCT' is an available basis

		@type  dim: integer
		@param dim: dimensionality of basis
		"""

		if 'A' in kwargs:
			self.A = asarray(kwargs['A'])
			self.dim = self.A.shape[0]

		if len(args) > 0:
			if 'basis' in kwargs:
				raise ValueError('Did you forget to use the `dim` keyword?')
			self.A = asarray(args[0])
			self.dim = self.A.shape[0]

		elif 'basis' in kwargs:
			if 'dim' not in kwargs:
				raise ValueError('Please specify a dimensionality, `dim`.')

			self.dim = kwargs['dim']

			if kwargs['basis'].upper() == 'DCT':
				I, J = indices([kwargs['dim'], kwargs['dim']])

				A = []

				for p in range(kwargs['dim']):
					for q in range(kwargs['dim']):
						F = 2. * multiply(
							cos(pi * (2. * I + 1.) * p / (2. * kwargs['dim'])),
							cos(pi * (2. * J + 1.) * q / (2. * kwargs['dim']))) / kwargs['dim']

						if p == 0:
							F /= sqrt(2.)

						if q == 0:
							F /= sqrt(2.)

						A.append(F.reshape(1, -1))

				self.A = vstack(A)
		else:
			raise ValueError('Please specify a linear transform.')
Example No. 10
def array_coords(shape):
    """
    Faster version of scipy.indices()
    """
    # The early return delegates to scipy.indices; the manual tiling
    # construction below it is kept for reference but never executed.
    return scipy.indices(shape).astype(scipy.float32)
    y = shape[0]
    x = shape[1]
    out = scipy.empty((2, y, x))
    t = scipy.arange(y, dtype='f8')
    out[0] = scipy.tile(t, (x, 1)).T
    t = scipy.arange(x, dtype='f8')
    out[1] = scipy.tile(t, (y, 1))
    return out
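A sketch (numpy assumed) checking that the unreachable tiling code would build exactly the grid the early return gets from scipy.indices:

import numpy as np

shape = (3, 4)
y, x = shape
manual = np.empty((2, y, x))
manual[0] = np.tile(np.arange(y, dtype='f8'), (x, 1)).T   # row-index grid
manual[1] = np.tile(np.arange(x, dtype='f8'), (y, 1))     # column-index grid
assert np.array_equal(manual, np.indices(shape).astype('f8'))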
Example No. 11
    def _estimate_fit_param(self):
        B = self._orig_image.min()
        w = self._orig_image - B
        A = w.max() 

        X, Y = scipy.indices(self._shape)

        x0 = scipy.average(X, None, w)
        y0 = scipy.average(Y, None, w)

        # Slice through the centroid along each axis; deviations are taken
        # from x0 along the column (axis 0) and y0 along the row (axis 1).
        col = w[:, int(y0)]
        var_x = scipy.average((scipy.arange(col.size) - x0)**2, None, col)

        row = w[int(x0), :]
        var_y = scipy.average((scipy.arange(row.size) - y0)**2, None, row)

        return A, B, x0, y0, var_x**0.5, var_y**0.5, 0
Example No. 12
def find_hough_intersections(array):
    """Estimates a set of horizontal and vertical Hough lines from ``array`` and returns two arrays; one containing the
    physical locations of each intersection, and one containing the logical coordinates of each intersection."""
    grid_angle = find_grid_angle(array > 0.5)
    x_thetas, x_rs = find_lines_at_angle(array > 0.5, grid_angle)
    y_thetas, y_rs = find_lines_at_angle(array > 0.5, grid_angle + sp.pi/2)

    enumerated_horizontals = draw_enumerated_lines(array.shape, x_thetas, x_rs)
    enumerated_verticals = draw_enumerated_lines(array.shape, y_thetas, y_rs)
    
    intersections = (enumerated_horizontals != -1) & (enumerated_verticals != -1)
    physical_indices = sp.indices(array.shape)[:, intersections]
    vertical_logical_indices = enumerated_verticals[physical_indices[0], physical_indices[1]]
    horizontal_logical_indices = enumerated_horizontals[physical_indices[0], physical_indices[1]]
    logical_indices = sp.array([horizontal_logical_indices, vertical_logical_indices])
    
    return physical_indices, logical_indices
Example No. 13
def grid_from_arrays(array_dict):
    grid_dict = dict.fromkeys(array_dict.keys())
    arrays = array_dict.values()
    arrays = [sp.asarray(a) for a in arrays]
    # sp.indices needs a real sequence for the shape, not a generator
    shape = [len(x) for x in arrays]
    ix = sp.indices(shape, dtype=int)
    ix = ix.reshape(len(arrays), -1).T
    
    for n, arr in enumerate(arrays):
        ix[:, n] = arrays[n][ix[:, n]]
    
    
    keys = grid_dict.keys()
    for i in range(len(keys)):
        grid_dict[keys[i]] = ix[:, i]
    
    return grid_dict
Example No. 14
def find_centers(spots, areas, border_width, im_num=0):
    """Returns a dict of arrays, one for each pixel type. The arrays are compatible with caffe_tools.fill_database.

    The last row of each array is equal to ``im_num``, indicating which image those centers were created from.
    """
    indices = sp.indices(spots['good'].shape)
    indices = sp.concatenate([indices, im_num*sp.ones((1, indices.shape[1], indices.shape[2]), dtype=int)], 0)

    inside_border = sp.zeros(spots['good'].shape, dtype=bool)
    inside_border[border_width:-border_width, border_width:-border_width] = True
    
    centers = {}
    centers['inside'] = indices[:, make_inside_mask(spots) & inside_border]
    centers['outside'] = indices[:, make_outside_mask(spots, areas) & inside_border]
    centers['inside_damaged'] = indices[:, make_inside_damaged_mask(spots) & inside_border]
    centers['outside_damaged'] = indices[:, make_outside_damaged_mask(spots, areas) & inside_border]
    centers['block_border'] = indices[:, make_block_border_mask(spots, areas) & inside_border]
    centers['between'] = indices[:, make_between_mask(spots, areas) & inside_border]
    
    return centers
Example No. 15
def get_intersection_indices(condition, axis=-1):
    """
    In case you want to modify the indicies that get returned
    (which is not too likely), see caveat (in the code).
    """
    n = condition.shape[axis]
    id1 = findfirst(condition, axis=axis, no_intersection=n)
    # get the first point where SA > SAcap
    if id1.shape == (1, ):  # if it has collapsed
        if id1 == 0:
            id1[:] = 1
        id0 = id1 - 1
        # if id1==0: id0[:]=0  # no -ves
        if id1 == n:
            id1[:] = n - 1
        return id0, id1
    else:
        id1[where(id1 == 0)] = 1

        id0 = id1 - 1  # make id0 the point below, id1 is the point above
        id1[where(id1 == n)] = n - 1
        # id0[where(id1==0)]=0 # no -ves

        # Make the indices for the other axis:
        indices_0 = indices(id0.shape)  # make indices for the boring axes
        indices_0 = list(indices_0)
        indices_1 = indices_0[:]  # copy the list
        # CAVEAT:
        # In case you want to modify the indices that get returned
        # (which is not too likely), note that other than on axis,
        # the indices are the same objects.

        # insert the interesting axis into the correct axis:
        if axis < 0:
            axis = len(condition.shape) + axis
        indices_0.insert(axis, id0)
        indices_1.insert(axis, id1)

        indices_0 = tuple(indices_0)
        indices_1 = tuple(indices_1)
        return indices_0, indices_1
Example No. 16
def get_intersection_indices(condition, axis=-1):
    """
    In case you want to modify the indicies that get returned
    (which is not too likely), see caveat (in the code).
    """
    n = condition.shape[axis]
    id1 = findfirst(condition, axis=axis, no_intersection=n)
    # get the first point where SA > SAcap
    if id1.shape == (1,):  # if it has collapsed
        if id1 == 0:
            id1[:] = 1
        id0 = id1 - 1
        # if id1==0: id0[:]=0  # no -ves
        if id1 == n:
            id1[:] = n - 1
        return id0, id1
    else:
        id1[where(id1 == 0)] = 1

        id0 = id1 - 1  # make id0 the point below, id1 is the point above
        id1[where(id1 == n)] = n - 1
        # id0[where(id1==0)]=0 # no -ves

        # Make the indices for the other axis:
        indices_0 = indices(id0.shape)  # make indices for the boring axes
        indices_0 = list(indices_0)
        indices_1 = indices_0[:]  # copy the list
        # CAVEAT:
        # In case you want to modify the indices that get returned
        # (which is not too likely), note that other than on axis,
        # the indices are the same objects.

        # insert the interesting axis into the correct axis:
        if axis < 0:
            axis = len(condition.shape) + axis
        indices_0.insert(axis, id0)
        indices_1.insert(axis, id1)

        indices_0 = tuple(indices_0)
        indices_1 = tuple(indices_1)
        return indices_0, indices_1
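A sketch of what the bracketing indices are for, with a minimal stand-in for the external `findfirst` (numpy assumed; the function also expects `where` and `indices` to be star-imported in its module): `id0`/`id1` bracket the first crossing of a condition along an axis, ready for linear interpolation.

from numpy import argmax, array, where

def findfirst(cond, axis=-1, no_intersection=None):
    # index of the first True along `axis`; `no_intersection` where none found
    first = argmax(cond, axis=axis)
    return where(cond.any(axis=axis), first, no_intersection)

SA = array([[0., 1., 2., 3.],
            [0., 2., 4., 6.]])
below, above = get_intersection_indices(SA > 2.5)
print(SA[below], SA[above])   # [2. 2.] [3. 4.] -- values bracketing 2.5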
Example No. 17
def find_hough_intersections(array):
    """Estimates a set of horizontal and vertical Hough lines from ``array`` and returns two arrays; one containing the
    physical locations of each intersection, and one containing the logical coordinates of each intersection."""
    grid_angle = find_grid_angle(array > 0.5)
    x_thetas, x_rs = find_lines_at_angle(array > 0.5, grid_angle)
    y_thetas, y_rs = find_lines_at_angle(array > 0.5, grid_angle + sp.pi / 2)

    enumerated_horizontals = draw_enumerated_lines(array.shape, x_thetas, x_rs)
    enumerated_verticals = draw_enumerated_lines(array.shape, y_thetas, y_rs)

    intersections = (enumerated_horizontals != -1) & (enumerated_verticals !=
                                                      -1)
    physical_indices = sp.indices(array.shape)[:, intersections]
    vertical_logical_indices = enumerated_verticals[physical_indices[0],
                                                    physical_indices[1]]
    horizontal_logical_indices = enumerated_horizontals[physical_indices[0],
                                                        physical_indices[1]]
    logical_indices = sp.array(
        [horizontal_logical_indices, vertical_logical_indices])

    return physical_indices, logical_indices
Example No. 18
def get_centers_single_image(truth, im_no, border=20):
    """Returns a dict of arrays, one for each pixel type. The arrays are compatible with caffe_tools.fill_database.

    The last row of each array is equal to ``im_no``, indicating which image those centers were created from."""
    indices = sp.indices(truth.shape)
    im_nos = im_no*sp.ones((1, truth.shape[0], truth.shape[1]), dtype=int)
    indices = sp.concatenate((indices, im_nos))
    
    away_from_border = sp.zeros(truth.shape, dtype=bool)
    away_from_border[border:-border, border:-border] = True
    
    results = {
        'inside': indices[:, (0.75 < truth) & away_from_border],
        'outside': indices[:, (truth < 0.25) & away_from_border],
        'inside_damaged': indices[:, make_damaged_spot_mask(truth) & away_from_border],
        'outside_damaged': indices[:, make_outside_near_damaged_spot_mask(truth) & away_from_border],
        'block_border': indices[:, make_block_border_mask(truth) & away_from_border],
        'between': indices[:, make_between_spot_mask(truth) & away_from_border]
    }
    
    return results
Example No. 19
def shift_template(template, tempcore, target, targcore):

   # Get coordinates of each pixel in the full template array

   coords = scipy.indices(template.shape).astype(scipy.float64)

   # Find the shift between template and target, using just the central
   #  core region of each PSF

   pshift = find_shift(tempcore,targcore)
   
   #
   # Shift the template star to match the centering of the target and return
   #  the result
   #
   
   coords[1] += pshift[1]
   coords[0] += pshift[2]
   shiftstar = ndimage.map_coordinates(template,coords,output=scipy.float64)

   return pshift[0] * shiftstar
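A minimal illustration of the coordinate-shift trick above (written with numpy, since modern scipy no longer re-exports the array constructors these snippets use): adding to the sampling coordinates pulls the image the opposite way.

import numpy as np
from scipy import ndimage

img = np.zeros((5, 5))
img[2, 2] = 1.0
coords = np.indices(img.shape).astype(np.float64)
coords[0] += 1.0                       # sample one row below each output pixel
shifted = ndimage.map_coordinates(img, coords, order=1)
assert shifted[1, 2] == 1.0            # the peak has moved up by one row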
Example No. 20
def find_centroids(array):
    """Finds the centroid of each connected area in ``array``."""
    labelled = sp.ndimage.label(array)[0] - 1
    foreground_indices = sp.indices(labelled.shape)[:, labelled > -1]
    labels_of_indices = labelled[labelled > -1]

    # What's happening here is that there are thousands of connected areas in ``array``, and finding the centroid of
    # each one by manually going ``indices[:, labelled == label]`` is really slow. Instead, we (implicitly) build a
    # tree. Each node ``n`` in the tree has a label. That node finds the indices corresponding to its own label, then
    # delegates finding the indices corresponding to all smaller labels to its left child, and the indices corresponding
    # to all larger labels to its right child.
    #
    # Update: I've since learnt about scipy.ndimage.labeled_comprehension. That's a much better way to do this! Leaving
    # this where it is though since, well, it works and its inefficiency is not currently a bottleneck.
    def get_centroids(indices, labels):
        if len(labels) > 0:
            pivot = labels[len(labels) // 2]  # floor division, so it also works on Python 3

            left_selector = labels < pivot
            left_indices = indices[:, left_selector]
            left_labels = labels[left_selector]
            left_centroids = get_centroids(left_indices, left_labels)

            equal_selector = labels == pivot
            equal_indices = indices[:, equal_selector]
            equal_centroid = [equal_indices.mean(1)]

            right_selector = labels > pivot
            right_indices = indices[:, right_selector]
            right_labels = labels[right_selector]
            right_centroids = get_centroids(right_indices, right_labels)

            return left_centroids + equal_centroid + right_centroids
        else:
            return []

    centroids = sp.array(get_centroids(foreground_indices, labels_of_indices))
    centroids = centroids.astype(int).T

    return labelled, centroids
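The update comment above mentions a simpler route; a sketch of it follows (the scipy spelling is `labeled_comprehension`, and `scipy.ndimage.center_of_mass` is more direct still):

import numpy as np
from scipy import ndimage

array = np.zeros((8, 8), dtype=bool)
array[1:3, 1:3] = True
array[5:7, 4:8] = True

labelled, n = ndimage.label(array)
centroids = ndimage.center_of_mass(array, labelled, index=range(1, n + 1))
print(centroids)   # [(1.5, 1.5), (5.5, 5.5)]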
Example No. 21
def find_centroids(array):
    """Finds the centroid of each connected area in ``array``."""
    labelled = sp.ndimage.label(array)[0] - 1
    foreground_indices = sp.indices(labelled.shape)[:, labelled > -1]
    labels_of_indices = labelled[labelled > -1]
    
    # What's happening here is that there are thousands of connected areas in ``array``, and finding the centroid of
    # each one by manually going ``indices[:, labelled == label]`` is really slow. Instead, we (implicitly) build a
    # tree. Each node ``n`` in the tree has a label. That node finds the indices corresponding to its own label, then
    # delegates finding the indices corresponding to all smaller labels to its left child, and the indices corresponding
    # to all larger labels to its right child.
    #
    # Update: I've since learnt about scipy.ndimage.labeled_comprehension. That's a much better way to do this! Leaving
    # this where it is though since, well, it works and its inefficiency is not currently a bottleneck.
    def get_centroids(indices, labels):
        if len(labels) > 0:
            pivot = labels[len(labels) // 2]  # floor division, so it also works on Python 3
    
            left_selector = labels < pivot
            left_indices = indices[:, left_selector]
            left_labels = labels[left_selector]
            left_centroids = get_centroids(left_indices, left_labels)
    
            equal_selector = labels == pivot
            equal_indices = indices[:, equal_selector]
            equal_centroid = [equal_indices.mean(1)]        
    
            right_selector = labels > pivot
            right_indices = indices[:, right_selector]
            right_labels = labels[right_selector]
            right_centroids = get_centroids(right_indices, right_labels)
    
            return left_centroids + equal_centroid + right_centroids
        else:
            return []
        
    centroids = sp.array(get_centroids(foreground_indices, labels_of_indices))
    centroids = centroids.astype(int).T
    
    return labelled, centroids
Example No. 22
def get_subscripts(network, shape, **kwargs):
    r'''
    Return the 3D subscripts (i,j,k) into the cubic network

    Parameters
    ----------
    shape : list
        The (i,j,k) shape of the network in number of pores in each direction

    '''
    if network.num_pores('internal') != _sp.prod(shape):
        print('Supplied shape does not match Network size, cannot proceed')
    else:
        template = _sp.atleast_3d(_sp.empty(shape))
        a = _sp.indices(_sp.shape(template))
        i = a[0].flatten()
        j = a[1].flatten()
        k = a[2].flatten()
        ind = _sp.vstack((i, j, k)).T
        vals = _sp.ones((network.Np, 3)) * _sp.nan
        vals[network.pores('internal')] = ind
        return vals
Example No. 23
def get_subscripts(network, shape, **kwargs):
    r"""
    Return the 3D subscripts (i,j,k) into the cubic network

    Parameters
    ----------
    shape : list
        The (i,j,k) shape of the network in number of pores in each direction

    """
    if network.num_pores('internal') != _sp.prod(shape):
        print('Supplied shape does not match Network size, cannot proceed')
    else:
        template = _sp.atleast_3d(_sp.empty(shape))
        a = _sp.indices(_sp.shape(template))
        i = a[0].flatten()
        j = a[1].flatten()
        k = a[2].flatten()
        ind = _sp.vstack((i, j, k)).T
        vals = _sp.ones((network.Np, 3))*_sp.nan
        vals[network.pores('internal')] = ind
        return vals
Example No. 24
def grid_from_arrays(array_dict):
    # fix so numbers appear correctly
    for key in array_dict.keys():
        if isinstance(array_dict[key], (int, long, float, complex)):
            array_dict[key] = sp.array([array_dict[key]])

    arrays = array_dict.values()
    arrays = [sp.asarray(a) for a in arrays]

    grid_dict = dict.fromkeys(array_dict.keys())

    shape = [len(x) for x in arrays]
    ix = sp.indices(shape, dtype=int)
    ix = ix.reshape(len(arrays), -1).T

    for n, arr in enumerate(arrays):
        ix[:, n] = arrays[n][ix[:, n]]

    keys = grid_dict.keys()
    for i in range(len(keys)):
        grid_dict[keys[i]] = ix[:, i]

    return grid_dict
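A usage sketch for grid_from_arrays (assuming `sp` is scipy; the snippet itself is Python 2 era code, note the `long` check and key indexing): the output columns enumerate the Cartesian product of the inputs, paired to keys in the dict's iteration order. The row order shown assumes 'a' iterates first.

grid = grid_from_arrays({'a': sp.array([1, 2]),
                         'b': sp.array([10, 20, 30])})
# grid['a'] -> array([ 1,  1,  1,  2,  2,  2])
# grid['b'] -> array([10, 20, 30, 10, 20, 30])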
Example No. 25
def fit(f, data, indices=None):
    """
    vrne parametre fita.. Uporaba:
    #>>> f=Function('a*x+b',a=1,b=1)
    #>>> data=array([0,1,1.9,2.9])
    #>>> fit(f,data)
    #{'a': 0.96, 'b': 0.00999999999784}
    """
    pval = f.GetValues()
    pkey = f.GetKeys()
    if indices is None:
        # The `indices` argument shadows the array-building indices()
        # function, so the call must be qualified (scipy assumed imported).
        x = scipy.indices(data.shape)
    else:
        x = indices
    fn= lambda p: ravel(f(*x,**dict(list(zip(pkey,p))))-data)
    par, cov, info, mesg, success = optimize.leastsq(fn, pval, full_output = True )

    if success in (1, 2, 3, 4):  # leastsq flags 1-4 as success
        print("Converged")
    else:
        print("Not converged")
        print(mesg)
        return None
        
    # calculate final chi square
    chisq=sum(info["fvec"]*info["fvec"])
    dof=len(x)-len(pval)
    # chisq, sqrt(chisq/dof) agrees with gnuplot
    print("Converged with chi squared ",chisq)
    print("degrees of freedom, dof ", dof)
    print("RMS of residuals (i.e. sqrt(chisq/dof)) ", sqrt(chisq/dof))
    print("Reduced chisq (i.e. variance of residuals) ", chisq/dof)
    print()

    # uncertainties are calculated as per gnuplot, "fixing" the result
    # for non unit values of the reduced chisq.
    # values at min match gnuplot
    print("Fitted parameters at minimum, with 68% C.I.:")
    try:
        parOut = list(zip(pkey,par))
    except TypeError:
        parOut = list(zip(pkey,(par,)))
    for i,pmin in enumerate(parOut):
        try:
            print("%2i %-10s %12f +/- %10f"%(i,pmin[0],pmin[1],sqrt(cov[i,i])*sqrt(chisq/dof)))
        except:
            pass
    print()
    print("Correlation matrix")

    ## # correlation matrix close to gnuplot
    print("               ", end=' ')
    print(pkey)
    for i, key in enumerate(pkey):

        for j in range(i+1):
            try:
                print("%10f"%(cov[i,j]/sqrt(cov[i,i]*cov[j,j]),), end=' ')
            except:
                pass
        print() 

    return dict(parOut)
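A sketch of the doctest hinted at in the docstring, assuming the same `Function` helper and star imports the module relies on:

f = Function('a*x+b', a=1, b=1)
data = array([0, 1, 1.9, 2.9])
print(fit(f, data))   # roughly {'a': 0.96, 'b': 0.01}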
Example No. 26
def doskysub(straight, ylen, xlen, sci, yback, sky2x, sky2y, ccd2wave, disp,
             mswave, offsets, cutoff, airmass):
    sci = sci.copy()

    # If cutoff is not a float, we are using the blueside
    if type(cutoff) == type([]):
        locutoff, hicutoff = cutoff
    else:
        locutoff = cutoff
        hicutoff = 10400.

    nsci = sci.shape[0]
    width = sci.shape[2]

    # Perform telluric correction
    coords = spectools.array_coords(sci[0].shape)
    x = coords[1].flatten()
    y = coords[0].flatten()

    # Telluric correction is currently disabled: the `continue` short-circuits
    # the loop body, so the code below it never runs.
    for k in range(nsci):
        continue
        w = genfunc(x, y, ccd2wave[k])
        telluric = correct_telluric.correct(w, airmass[k], disp)
#       sci[k] *= telluric.reshape(sci[k].shape)
#    del coords,x,y,telluric

# Create arrays for output images
#    outcoords = spectools.array_coords((ylen,xlen))
    outcoords = scipy.indices((ylen, xlen)).astype(scipy.float64)
    outcoords[1] *= disp
    outcoords[1] += mswave - disp * xlen / 2.
    xout = outcoords[1].flatten()
    yout = outcoords[0].flatten()

    out = scipy.zeros((nsci, ylen, xlen))

    fudge = int(scipy.ceil(abs(offsets).max()))  # integer, so it can extend the shape below
    bgimage = scipy.zeros((nsci, ylen + fudge, xlen))
    varimage = bgimage.copy()

    bgcoords = spectools.array_coords((ylen + fudge, xlen))
    bgcoords[1] *= disp
    bgcoords[1] += mswave - disp * xlen / 2.

    #
    # Cosmic Ray Rejection and Background Subtraction
    #
    yfit = yback.flatten()
    ycond = (yfit > straight - 0.4) & (yfit < straight + ylen - 0.6)

    coords = spectools.array_coords(yback.shape)
    xvals = coords[1].flatten()
    yvals = coords[0].flatten()

    ap_y = scipy.zeros(0)
    aper = scipy.zeros(0)
    for k in range(nsci):
        xfit = genfunc(xvals, yfit - straight, ccd2wave[k])
        zfit = sci[k].flatten()

        x = xfit[ycond]
        y = yfit[ycond]
        z = zfit[ycond]

        # The plus/minus 20 provides a better solution for the edges
        wavecond = (x > locutoff - 20.) & (x < hicutoff + 20.)
        x = x[wavecond]
        y = y[wavecond]
        z = z[wavecond]

        # If only resampling...
        if RESAMPLE == 1:
            coords = outcoords.copy()
            samp_x = genfunc(xout, yout, sky2x[k])
            samp_y = genfunc(xout, yout, sky2y[k])
            coords[0] = samp_y.reshape(coords[0].shape)
            coords[1] = samp_x.reshape(coords[1].shape)
            out[k] = scipy.ndimage.map_coordinates(sci[k],
                                                   coords,
                                                   output=scipy.float64,
                                                   order=5,
                                                   cval=-32768)

            out[k][xout.reshape(coords[1].shape) < locutoff] = scipy.nan
            out[k][xout.reshape(coords[1].shape) > hicutoff] = scipy.nan
            out[k][out[k] == -32768] = scipy.nan
            continue

        print "Determining sky for image %d" % (k + 1)
        bgfit = skysub.skysub(x, y, z, disp)
        print "Subtracting sky"

        background = zfit.copy()
        a = time.time()
        for indx in range(background.size):
            x0 = xfit[indx]
            y0 = yfit[indx]
            if x0 < locutoff - 10 or x0 > hicutoff + 10:
                background[indx] = scipy.nan
            else:
                background[indx] = interpolate.bisplev(x0, y0, bgfit)
        sub = zfit - background
        sub[scipy.isnan(sub)] = 0.
        sky = sub * 0.
        sky[ycond] = sub[ycond]
        sky = sky.reshape(sci[k].shape)
        sub = sky.copy()

        background[scipy.isnan(background)] = 0.

        # Note that 2d filtering may flag very sharp source traces!
        """
        sub = sub.reshape(sci[k].shape)
        sky = ndimage.median_filter(sky,5)
        diff = sub-sky
        model = scipy.sqrt(background.reshape(sci[k].shape)+sky)
        crmask = scipy.where(diff>4.*model,diff,0.)
        sub -= crmask
        sci[k] -= crmask
        """

        a = time.time()
        crmask = cr_reject(sub, background.reshape(sci[k].shape))  # renamed from `map` to avoid shadowing the builtin

        inmask = (1. - 10000. * crmask) * sub
        med5 = ndimage.median_filter(inmask, 5)
        med5 *= crmask
        sub1 = (1. - crmask) * sub + med5
        crs = sub - sub1
        sub = sub1

        # Create straightened slit
        coords = outcoords.copy()
        samp_x = genfunc(xout, yout, sky2x[k])
        samp_y = genfunc(xout, yout, sky2y[k])
        coords[0] = samp_y.reshape(coords[0].shape)
        coords[1] = samp_x.reshape(coords[1].shape)
        out[k] = scipy.ndimage.map_coordinates(sci[k] - crs,
                                               coords,
                                               output=scipy.float64,
                                               order=5,
                                               cval=magicnum)
        vartmp = out[k].copy()
        out[k][xout.reshape(coords[1].shape) < locutoff] = scipy.nan
        out[k][xout.reshape(coords[1].shape) > hicutoff] = scipy.nan
        out[k][out[k] == magicnum] = scipy.nan

        # Output bgsub image
        coords = bgcoords.copy()
        bgy = bgcoords[0].flatten() + offsets[k]
        bgx = bgcoords[1].flatten()
        samp_x = genfunc(bgx, bgy, sky2x[k])
        samp_y = genfunc(bgx, bgy, sky2y[k])
        coords[0] = samp_y.reshape(coords[0].shape)
        coords[1] = samp_x.reshape(coords[1].shape)

        #        varimage[k] = scipy.ndimage.map_coordinates(sci[k],coords,output=scipy.float64,order=5,cval=magicnum)
        crs = scipy.ndimage.map_coordinates(crs,
                                            coords,
                                            output=scipy.float64,
                                            order=1,
                                            cval=magicnum)
        crs[crs > 0.3] = scipy.nan
        varimage[k] = crs + vartmp

        # Only include good data (ie positive variance, wavelength
        #   greater than dichroic cutoff)
        cond = (bgcoords[0] + offsets[k] < 0.) | (bgcoords[0] + offsets[k] >
                                                  ylen)
        cond = (varimage[k] < 0) | cond
        cond = (bgcoords[1] < locutoff) | (bgcoords[1] > hicutoff) | cond
        varimage[k][cond] = scipy.nan

        bgimage[k] = scipy.ndimage.map_coordinates(sub,
                                                   coords,
                                                   output=scipy.float64,
                                                   order=5,
                                                   cval=magicnum)
        bgimage[k][cond] = scipy.nan
        bgimage[k][bgimage[k] == magicnum] = scipy.nan  # Shouldn't be
        #   necessary...

    if RESAMPLE == 1:
        return out, bgimage, varimage, outcoords[1, 0]
    if bgimage.shape[0] > 1:
        bgimage = fastmed(bgimage)
        varimage = fastmed(varimage) / nsci
    elif bgimage.ndim == 3:
        bgimage = bgimage[0].copy()
        varimage = varimage[0].copy()

    return out, bgimage, varimage, outcoords[1, 0]
Example No. 27
        x = sp.arange(0, 2.1)
        y = sp.arange(0, 3.1)
        (X, Y) = sp.meshgrid(x, y)

        assert array_equal(
            X,
            sp.array([
            [ 0.,  1.,  2.],
            [ 0.,  1.,  2.],
            [ 0.,  1.,  2.],
            [ 0.,  1.,  2.]])
        )

        assert array_equal(
            Y,
            sp.array([
            [ 0.,  0.,  0.],
            [ 1.,  1.,  1.],
            [ 2.,  2.,  2.],
            [ 3.,  3.,  3.]])
        )

    if '## indices':

        assert array_equal(
            sp.indices((2, 3)),
            sp.array([
                [
                    [0, 0, 0],
                    [1, 1, 1]
                ],
                [
                    [0, 1, 2],
                    [0, 1, 2]
                ]
            ])
        )

if '## size':

    # Get total number of elements:
Example No. 28
        def error(p):
            X, Y = scipy.indices(self._shape)
            g = gaussian(*p)

            return (g(X, Y) - self._orig_image).ravel()
Example No. 29
def postage_stamp(input, output, locations, xsize, ysize, scale, angle):
	pylab.close()

#	f = open(locations).readlines()[0].split()
#	ra = wcs.ra2deg(f[3]+":"+f[4]+":"+f[5])
#	dec = wcs.dec2deg(f[6]+":"+f[7]+":"+f[8])
#

#	if wcs.is_degree(ra)==False:
#		ra = wcs.ra2deg(ra)
#	if wcs.is_degree(dec)==False:
#		dec = wcs.dec2deg(dec)
	ra = 317.72512
	dec = 21.516883
	outheader = wcs.make_header(ra,dec,xsize,ysize,scale)
	outheader = wcs.rotate_header(outheader,angle)
	coords = scipy.indices((ysize,xsize)).astype(scipy.float32)
	skycoords = wcs.pix2sky(outheader,coords[1],coords[0])

	file = pyfits.open(input)
	inheader = file[0].header.copy()
	data = file[0].data.copy()
	ccdcoords = wcs.sky2pix(inheader,skycoords[0],skycoords[1])
	coords[1] = ccdcoords[0]
	coords[0] = ccdcoords[1]

	image = ndimage.map_coordinates(data,coords,output=scipy.float64)
	bounds = scipy.sort(image.flatten())
	vmin = bounds[int(bounds.size*0.65)]
	vmax = bounds[int(bounds.size*0.995)]
	pylab.imshow(image[::-1],cmap=pylab.cm.gist_yarg,vmin=vmin,vmax=vmax)

	pylab.axis('off')
	title = r"B2108 Finding Chart"
	
	pylab.text(420,320,r"Star TO Target offsets:")
	pylab.text(440,370,r"11.91$^{\prime\prime}$ S, 7.20$^{\prime\prime}$ W")
	pylab.text(440,420,r"Slit PA 60 degrees E from N")
	pylab.title(title)

	length = 10./scale
	dx = length
	dy = length
	pylab.arrow(300,300,-1.*dx,0)
	pylab.arrow(300,300,0,-1*dx)

	pylab.rc('text',usetex=True)
	pylab.text(310,290-dy,'N')
	pylab.text(310.,290-dy/2.,r'10$^{\prime\prime}$')

	ax = pylab.gca()

	ax.figure.set_size_inches((7.5,7.5))
	import matplotlib as mpl
	a = 0
	for i in locations:
		ra = i[0]
		dec = i[1]
		x,y = wcs.sky2pix(outheader,ra,dec)
		y -= ysize/2.
		y *= -1
		y += ysize/2.
		if a==0:
			a = 1
			pylab.text(x+30,y-30,"Target")
		else:
			pylab.text(x+30,y-30,"Offset Star")
		ax.patches.append(pylab.Circle((x,y),25,transform=ax.transData,fill=False,ec='r',lw=1.5))
		
	pylab.savefig(output+".eps")
	import os
	os.system('/usr/bin/convert %s.eps %s.png' % (output,output))
Example No. 30
def _victor_purpura_multiunit_dist_for_trial_pair(
        a, b, reassignment_cost, kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation only the submatrix for one
    # specific i is stored, as in each step only i-1 and i will be accessed.
    # That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1,))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0, sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size,) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(
        a_merged[0]).T - b_train_mat.flatten()).simplified.reshape(
            (a_merged[0].size,) + b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache giving a considerable speed
        # improvement.
        # Basically this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(
                        cost.flat[s] + seq) - seq

    return cost.flat[-1]
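One line above deserves unpacking: `sp.sum(sp.indices(b_dims), axis=0)` puts j_1 + ... + j_L at entry (j_1, ..., j_L), i.e. the cost of deleting every b-spike counted so far when no spikes of `a` have been used yet. A sketch with numpy:

import numpy as np

b_dims = (3, 4)
cost = np.sum(np.indices(b_dims), axis=0)
assert cost[0, 0] == 0 and cost[2, 3] == 5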
Example No. 31
def _victor_purpura_multiunit_dist_for_trial_pair(a, b, reassignment_cost,
                                                  kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation only the submatrix for one
    # specific i is stored, as in each step only i-1 and i will be accessed.
    # That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1, ))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0,
        sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size, ) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(a_merged[0]).T -
                       b_train_mat.flatten()).simplified.reshape(
                           (a_merged[0].size, ) +
                           b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache giving a considerable speed
        # improvement.
        # Basically this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(cost.flat[s] +
                                                         seq) - seq

    return cost.flat[-1]
Example No. 32
        x = sp.arange(0, 2.1)
        y = sp.arange(0, 3.1)
        (X, Y) = sp.meshgrid(x, y)

        assert array_equal(
            X,
            sp.array([[0., 1., 2.], [0., 1., 2.], [0., 1., 2.], [0., 1., 2.]]))

        assert array_equal(
            Y,
            sp.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]))

    if '## indices':

        assert array_equal(
            sp.indices((2, 3)),
            sp.array([[[0, 0, 0], [1, 1, 1]], [[0, 1, 2], [0, 1, 2]]]))

if '## size':

    # Get total number of elements:

    assert sp.zeros((2, 3, 4)).size == 24
    assert sp.size(sp.zeros((2, 3, 4))) == 24
    assert sp.size(1) == 1

    # 2 vs 1x2 vs 1x2:

    assert not sp.array_equal(
        [1, 2],  # number of dimensions: 1. size of dimension 1: 2
        [
Example No. 33
if len(sys.argv) > 1:
    coordfile = sys.argv[1]
else:
    coordfile = 'mask.coords'
if len(sys.argv) > 2:
    scifile = sys.argv[2]
else:
    scifile = 'image.fits'
if len(sys.argv) > 3:
    maskfile = sys.argv[3]
else:
    maskfile = 'mask.fits'

d = pyfits.open(scifile)[0].data.shape
npts = d[0] * d[1]
points = scipy.indices(d)[::-1].reshape((2, npts)).T + 1

mask = scipy.zeros(d)
f = open(coordfile).readlines()[1:]
for poly in f:
    verts = scipy.asarray(poly.split('{')[1].split('}')[0].split())
    verts = verts.reshape((-1, 2)).astype(scipy.float32)  # one (x, y) vertex per row
    # IF NORMAL MODE
    mask += path.Path(verts).contains_points(points).T.reshape(d)
    # IF HOLLOW MODE
    #if poly==f[0]:
    #    mask -= path.Path(verts).contains_points(points).T.reshape(d)
    #else:
    #    mask += path.Path(verts).contains_points(points).T.reshape(d)

mask[mask > 1] = 1