Example #1
 def _count_bases(self, sequence):
     """PRIVATE: Return an array of base counts by column across all sequences
     aggregated.
     
     The array returned looks something like the following, when several 
     (n=2) sequences are aggregated:
     
         array([[ 0.,  1.,  0.,  1.,  0.],
                [ 0.,  0.,  0.,  2.,  0.],
                [ 0.,  0.,  0.,  2.,  0.],
                ...
                ])
     
     where each row represents a base position - thus row[0] is the count
     for sequence position 0.  The values within the vector give the counts
     of each possible base where the index is given in self.nucleotide_positions
     (in __init__):
     
         self.nucleotide_positions = {'A':0, 'C':1, 'G':2, 'T':3, 'N':4}
     
     Using numpy arrays lets us quickly sum across columns or rows, making
     summary stats easier.
     
     """
     for k, base in enumerate(sequence):
         if numpy.ndim(self.bases) == 1 and k == 0:
             self.bases[self.nucleotide_positions[base]] += 1
         elif (numpy.ndim(self.bases) == 1) or (numpy.ndim(self.bases) > 1 and k == len(self.bases)):
             self.bases = numpy.vstack((self.bases, numpy.zeros(5)))
             self.bases[k][self.nucleotide_positions[base]] += 1
         else:
             self.bases[k][self.nucleotide_positions[base]] += 1
Example #2
def sum_to_shape(X, s):
    """
    Sum axes of the array such that the resulting shape is as given.

    Thus, the shape of the result will be s or an error is raised.
    """
    # First, sum and remove axes that are not in s
    if np.ndim(X) > len(s):
        axes = tuple(range(-np.ndim(X), -len(s)))
    else:
        axes = ()
    Y = np.sum(X, axis=axes)

    # Second, sum axes that are 1 in s but keep the axes
    axes = ()
    for i in range(-np.ndim(Y), 0):
        if s[i] == 1:
            if np.shape(Y)[i] > 1:
                axes = axes + (i,)
        else:
            if np.shape(Y)[i] != s[i]:
                raise ValueError("Shape %s can't be summed to shape %s" %
                                 (np.shape(X), s))
    Y = np.sum(Y, axis=axes, keepdims=True)
    
    return Y
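A minimal usage sketch (shapes are illustrative; assumes numpy imported as np and sum_to_shape defined as above):

    import numpy as np

    X = np.ones((3, 4, 5))
    Y = sum_to_shape(X, (4, 1))    # sums away the leading axis, collapses the last axis to length 1
    assert Y.shape == (4, 1) and Y[0, 0] == 15.0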
Example #3
 def __init__(self, x, y):
             
     assert np.ndim(x)==2 and np.ndim(y)==2 and np.shape(x)==np.shape(y), \
         'x and y must be 2D arrays of the same size.'
     
     if np.any(np.isnan(x)) or np.any(np.isnan(y)):
         nan_mask = np.isnan(x) | np.isnan(y)
         x = np.ma.masked_where(nan_mask, x)
         y = np.ma.masked_where(nan_mask, y)
         
     self.x_vert = x
     self.y_vert = y
     
     mask_shape = tuple([n-1 for n in self.x_vert.shape])
     self.mask_rho = np.ones(mask_shape, dtype='d')
     
     # If a masked array is given for the vertices, modify the mask such that
     # non-existent grid points are masked.  A cell requires all four
     # vertices to be defined as a water point.
     if isinstance(self.x_vert, np.ma.MaskedArray):
         mask = (self.x_vert.mask[:-1,:-1] | self.x_vert.mask[1:,:-1] | \
                 self.x_vert.mask[:-1,1:] | self.x_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     if isinstance(self.y_vert, np.ma.MaskedArray):
         mask = (self.y_vert.mask[:-1,:-1] | self.y_vert.mask[1:,:-1] | \
                 self.y_vert.mask[:-1,1:] | self.y_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     self._calculate_subgrids()
     self._calculate_metrics()        
Example #4
def prob_of_label(vol, labelvol):
    """
    compute the probability of the labels in labelvol in each of the volumes in vol

    Parameters:
        vol (float numpy array of dim nd + 1): volume with a prob dist at each voxel in an nd volume
        labelvol (int numpy array of dim nd): nd volume of labels

    Returns:
        nd volume of probabilities
    """

    # check dimensions
    ndims = np.ndim(labelvol)
    assert np.ndim(vol) == ndims + 1, "vol dimensions do not match [%d] vs [%d]" % (np.ndim(vol)-1, ndims)
    shp = vol.shape
    nb_voxels = np.prod(shp[0:ndims])
    nb_labels = shp[-1]

    # reshape volume to be [nb_voxels, nb_labels]
    flat_vol = np.reshape(vol, (nb_voxels, nb_labels))

    # normalize across second dimension
    rows_sums = flat_vol.sum(axis=1)
    flat_vol_norm = flat_vol / rows_sums[:, np.newaxis]

    # index into the flattened volume
    idx = list(range(nb_voxels))
    v = flat_vol_norm[idx, labelvol.flat]
    return np.reshape(v, labelvol.shape)
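A small sketch of the intended call pattern (values are illustrative):

    import numpy as np

    vol = np.random.rand(2, 2, 3)             # 2x2 grid with a 3-label distribution per voxel
    labelvol = np.array([[0, 2], [1, 0]])     # chosen label at each voxel
    p = prob_of_label(vol, labelvol)
    assert p.shape == (2, 2)                  # normalized probability of the chosen label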
Example #5
def _test():
    imdata = np.array([[1,2,3],[4,5,6]])
    immask = imdata>2
    print(imdata)
    print(immask)
    print(imdata[immask])
    print(np.ndim(immask))
Example #6
def reshape_skew(m, s=-1):
    """reshape n(n-1)/2 vector into nxn skew symm matrix or vice versa
    indices in m are in row-major order (it's Python, babe)
    when s == -1, returns skew-symmetric matrix 
        M = -M.T
    when s == 1, returns symmetric matrix
        M = M.T
    """
    s = sign(s)
    if ndim(m) == 1: # m is a vector, we need a matrix
        n = 0.5*(1 + np.sqrt(1 + 8*len(m)))
        if not (n==round(n)):
            print "length of m doesn't lead to a square-sized matrix of size n(n-1)/2"
            return
        n = int(n)
        out = zeros((n,n))
        ind_start = 0
        for i in range(n):
            out[i,i+1:] = m[ind_start:ind_start+n-i-1]
            ind_start += n-i-1
        out += s*out.T
    elif ndim(m) == 2: # m is a matrix, we need a vector
        if not np.equal(*m.shape):
            print "matrix m is not square"
            return
        if (norm(m - s*m.T)) > _small_number:
            print "matrix m is not skew-symmetric or symmetric"
            return
        n = m.shape[0]
        out = np.zeros(n*(n-1)//2)
        ind_start = 0
        for i in range(n):
            out[ind_start:ind_start+n-i-1] = m[i,i+1:]
            ind_start += n-i-1
    return out
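A round-trip sketch, assuming the snippet's bare names (sign, ndim, zeros, norm) come from a "from numpy import *"-style header and _small_number is a small module-level tolerance:

    import numpy as np

    m = np.array([1.0, 2.0, 3.0])             # length n*(n-1)/2 = 3, so n = 3
    M = reshape_skew(m)                       # 3x3 skew-symmetric matrix
    assert np.allclose(M, -M.T)
    assert np.allclose(reshape_skew(M), m)    # and back to the vector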
Example #7
File: vq.py Project: b-t-g/Sim
def py_vq(obs, code_book):
    """ Python version of vq algorithm.

    The algorithm computes the Euclidean distance between each
    observation and every frame in the code_book.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format as obs. Should have the same number
        of features (e.g. columns) as obs.

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; that is, its code
        is code_book[code[i]].
    min_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    This function is slower than the C version but works for
    all input types.  If the inputs have the wrong types for the
    C versions of the function, this one is called as a last resort.

    It is about 20 times slower than the C version.

    """
    # n = number of observations
    # d = number of features
    if np.ndim(obs) == 1:
        if not np.ndim(obs) == np.ndim(code_book):
            raise ValueError(
                    "Observation and code_book should have the same rank")
        else:
            return _py_vq_1d(obs, code_book)
    else:
        (n, d) = shape(obs)

    # code books and observations should have same number of features and same
    # shape
    if not np.ndim(obs) == np.ndim(code_book):
        raise ValueError("Observation and code_book should have the same rank")
    elif not d == code_book.shape[1]:
        raise ValueError("Code book(%d) and obs(%d) should have the same " \
                         "number of features (eg columns)""" %
                         (code_book.shape[1], d))

    code = zeros(n, dtype=int)
    min_dist = zeros(n)
    for i in range(n):
        dist = np.sum((obs[i] - code_book) ** 2, 1)
        code[i] = argmin(dist)
        min_dist[i] = dist[code[i]]

    return code, sqrt(min_dist)
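A tiny sketch of the expected call, assuming the bare names (shape, zeros, argmin, sqrt) come from the module's "from numpy import *" header:

    import numpy as np

    obs = np.array([[0.0, 0.0], [1.0, 1.1], [5.0, 5.2]])
    code_book = np.array([[0.0, 0.0], [5.0, 5.0]])
    code, min_dist = py_vq(obs, code_book)    # code -> [0, 0, 1]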
Example #8
def SeparateStreets(lines):
    # Separate Street Lines - Remove lines associated with horizon
    # INPUT - CANNY IMAGE
    # OUTPUT - LINES

    if lines is None:
        return None

    # Handle 2 dimensions, or 3
    new_lines = None
    if np.ndim(lines) == 2:
        new_lines = np.copy(lines)
    elif np.ndim(lines) == 3:
        new_lines = np.copy(lines[0])
    else:
        print('Strange number of dimensions? in SeparateStreets()')
        return new_lines

    indices_for_removal = np.array([])
    for i in range(np.shape(new_lines)[0]):
        rho_theta = new_lines[i]
        theta_degrees = rho_theta[1] * 180 / np.pi

        # if isCloseToHorizontal(theta_degrees) or isCloseToVertical(theta_degrees):
        if isMarkingsOfCurrentLane(theta_degrees):  # and not isCloseToVertical(theta_degrees)
            indices_for_removal = np.append(indices_for_removal, [i])
            # print 'Removed:' + str(theta_degrees)

    new_lines = np.delete(new_lines, indices_for_removal, axis=0)
    return new_lines
Example #9
def as2d(a):
  if np.ndim(a) == 0:
    return a[np.newaxis,np.newaxis]
  elif np.ndim(a) == 1:
    return a[:,np.newaxis]
  else:
    return a
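A quick check of all three branches:

    import numpy as np

    assert as2d(np.array(3.0)).shape == (1, 1)      # 0-d -> (1, 1)
    assert as2d(np.arange(4)).shape == (4, 1)       # 1-d -> column
    assert as2d(np.ones((2, 3))).shape == (2, 3)    # 2-d returned unchanged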
Example #10
 def Zo(s):
     if np.ndim(h) == 1:
         s = s[:, np.newaxis]
     if np.ndim(h) == 2:
         s = s[:, np.newaxis, np.newaxis]
                 
     return (hc * s + C(s) * h) / (hc + h)
Example #11
def GetLaneFromStdDeviation(cluster):

    if cluster is None:
        print "GetLaneFromMedian: cluster is empty"
        return None

    cluster_removed_outliers = RemoveOutliers(cluster)

    if cluster_removed_outliers is None:
        # IN THIS CASE NO VALUES ARE WITHIN ONE STD DEVIATION!
        if np.ndim(cluster) == 1:
            distances = cluster[0]
            angles = cluster[1]
        else:
            distances = cluster[:, 0]
            angles = cluster[:, 1]
        # return None
        # print 'Aww'
    elif np.ndim(cluster_removed_outliers) == 1:
        distances = cluster_removed_outliers[0]
        angles = cluster_removed_outliers[1]
    else:
        distances = cluster_removed_outliers[:, 0]
        angles = cluster_removed_outliers[:, 1]

    avg_angle = np.mean(angles)
    avg_distance = np.mean(distances)

    return np.array([avg_distance, avg_angle])
Example #12
def outer(A, B, ndim=1):
    """
    Computes outer product over the last axes of A and B.

    The other axes are broadcasted. Thus, if A has shape (..., N) and B has
    shape (..., M), then the result has shape (..., N, M).

    Using the argument `ndim` it is possible to change how many trailing
    axes are used for the outer product. For instance, if ndim=3, A and
    B have shapes (...,N1,N2,N3) and (...,M1,M2,M3), the result has shape
    (...,N1,M1,N2,M2,N3,M3).
    """
    if not utils.is_integer(ndim) or ndim < 0:
        raise ValueError('ndim must be non-negative integer')
    if ndim > 0:
        if ndim > np.ndim(A):
            raise ValueError('Argument ndim larger than ndim of the first '
                             'parameter')
        if ndim > np.ndim(B):
            raise ValueError('Argument ndim larger than ndim of the second '
                             'parameter')
        shape_A = np.shape(A) + (1,)*ndim
        shape_B = np.shape(B)[:-ndim] + (1,)*ndim + np.shape(B)[-ndim:]
        A = np.reshape(A, shape_A)
        B = np.reshape(B, shape_B)
    return A * B
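A shape-level sketch (assumes utils.is_integer behaves like an integer check, as in the source package):

    import numpy as np

    A = np.random.rand(7, 3)
    B = np.random.rand(7, 4)
    C = outer(A, B)               # default ndim=1: outer product over the last axes
    assert C.shape == (7, 3, 4)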
Example #13
def nan_detrend(x, y, deg=1):
    """Subtract a polynomial fit from the data, ignoring NaNs.

    Parameters
    ----------
    x : array_like
        x data.
    y : array_like
        Data to detrend.
    deg : int
        Degree of polynomial to subtract. (Can be zero i.e. constant)


    Returns
    -------
    y_out : numpy.array
        Detrended data.

    """
    y_out = np.nan*np.zeros_like(y)

    if np.ndim(x) == 1:
        nans = np.isnan(x) | np.isnan(y)
        p = nan_polyfit(x, y, deg)
        y_out[~nans] = y[~nans] - np.polyval(p, x[~nans])
    elif np.ndim(x) == 2:
        for i in range(x.shape[1]):
            nans = np.isnan(x[:, i]) | np.isnan(y[:, i])
            p = nan_polyfit(x[:, i], y[:, i], deg)
            y_out[~nans, i] = y[~nans, i] - np.polyval(p, x[~nans, i])
    else:
        raise RuntimeError('Arguments must be 1 or 2 dimensional arrays.')

    return y_out
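A sketch with a minimal stand-in for the nan_polyfit helper the function assumes (the stand-in is hypothetical, not the source's implementation):

    import numpy as np

    def nan_polyfit(x, y, deg):    # hypothetical stand-in: polyfit ignoring NaNs
        good = ~(np.isnan(x) | np.isnan(y))
        return np.polyfit(x[good], y[good], deg)

    x = np.arange(10.0)
    y = 2.0 * x + 1.0
    y[3] = np.nan
    y_out = nan_detrend(x, y)      # ~0 where data is finite, NaN at index 3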
Example #14
def xcorr(x,y,**kwargs):
	"""cross correlation by rfft"""
	x = np.asarray(x)
	y = np.asarray(y)
	if np.ndim(x) == np.ndim(y):
		shape=kwargs.get('shape',np.max((x.shape, y.shape), axis = 0))
		return np.fft.irfftn(np.conjugate(np.fft.rfftn(x,s=shape))*np.fft.rfftn(y,s=shape))
	elif np.ndim(y) == 1:
		axis = kwargs.get('axis', 0)
		shape=kwargs.get('shape', max(x.shape[axis], len(y)))
		shape+=shape%2
		outshape = np.array(x.shape[:])
		outshape[axis] = shape
		out = np.zeros(outshape)
		y = np.fft.ifftshift(np.pad(y, pad_width = (int((shape-len(y)+1)/2), int((shape-len(y))/2)), mode = 'constant'))
		y_fft = np.fft.rfft(y, n=shape)
		x_fft = np.fft.rfft(x, n=shape, axis=axis)
		if axis == 0:
			for ii in range(len(x_fft[0])):
				out[:,ii] = np.fft.irfft(x_fft[:,ii]*np.conjugate(y_fft))
		else:
			for ii in range(len(x_fft)):
				out[ii] = np.fft.irfft(x_fft[ii]*np.conjugate(y_fft))
		return out
	else:
		raise ValueError('Only inputs with dimensions of 1 or 2 can be processed.')
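A sketch of the equal-rank branch (a circular cross-correlation via the real FFT):

    import numpy as np

    x = np.random.rand(64)
    y = np.random.rand(64)
    c = xcorr(x, y)                # same shape as the (padded) inputs
    assert c.shape == (64,)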
Example #15
def _populate_vars(dz, zdr, kdp, rho, ldr, T, verbose):

    """
    Check for presence of each var, and update dicts as needed.
    Flattens multi-dimensional arrays to optimize processing.
    The output array from csu_fhc_summer() will be re-dimensionalized later.
    """
    varlist = [dz, zdr, kdp, rho, ldr, T]
    keylist = ['DZ', 'DR', 'KD', 'RH', 'LD', 'T']
    fhc_vars = {}
    radar_data = {}
    for i, key in enumerate(keylist):
        var = varlist[i]
        if var is not None:
            if key == 'DZ':
                shp = np.shape(var)
                sz = np.size(var)
            if np.ndim(var) > 1:
                radar_data[key] = np.array(var).ravel().astype('float32')
            elif np.ndim(var) == 1:
                radar_data[key] = np.array(var).astype('float32')
            else:
                radar_data[key] = np.array([var]).astype('float32')
            fhc_vars[key] = 1
        else:
            fhc_vars[key] = 0
    if verbose:
        print('USING VARIABLES: ', fhc_vars)
    return radar_data, fhc_vars, shp, sz
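A minimal sketch of the call, with only reflectivity and temperature supplied (the rest are flagged absent in fhc_vars):

    import numpy as np

    dz = np.random.rand(4, 5) * 50.0
    radar_data, fhc_vars, shp, sz = _populate_vars(dz, None, None, None, None, 25.0, True)
    assert radar_data['DZ'].shape == (20,)    # 2D input is flattened
    assert fhc_vars == {'DZ': 1, 'DR': 0, 'KD': 0, 'RH': 0, 'LD': 0, 'T': 1}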
Example #16
 def components(model,X):
     """
     Given a Gaussian Mixture Model (GMM), returns the additive components of the model
     Parameters
     ----------
     model - GMM object fitted to the data; e.g.,
         from sklearn.mixture import GaussianMixture as GMM
         model = GMM(n_components = 3).fit(X), where X is the input data
     X - Array of size (n_samples,n_features) to which the GMM was fit.
     
     Returns
     -------
     components - Array of size (n_samples,n_components); the Gaussian components weighted,
         and added to fit the data
     """
     import numpy as np
     if np.ndim(X)==1:
         X = X.reshape((-1,1))
     #X = np.sort(X,axis = 0)
     N = np.shape(X)[0]
     X = np.array([np.linspace(np.min(feature),np.max(feature),N) for feature in X.T]).T        
     responsibilities = model.predict_proba(X)
     pdf = np.exp(model.score_samples(X))
     if np.ndim(pdf)==1:
         pdf = pdf.reshape((-1,1))
     comp = responsibilities*pdf
     return comp
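A usage sketch with scikit-learn (assumes sklearn is available, as the docstring suggests):

    import numpy as np
    from sklearn.mixture import GaussianMixture as GMM

    X = np.concatenate([np.random.normal(0, 1, 200), np.random.normal(5, 1, 200)])
    model = GMM(n_components=2).fit(X.reshape(-1, 1))
    comp = components(model, X)
    assert comp.shape == (400, 2)    # one weighted pdf per mixture component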
Example #17
def nodes2elems(nodes, tri):
    """
    Calculate a element centre value based on the average value for the
    nodes from which it is formed. This necessarily involves an average,
    so the conversion from nodes2elems and elems2nodes is not
    necessarily reversible.

    Parameters
    ----------
    nodes : ndarray
        Array of unstructured grid node values to move to the element
        centres.
    tri : ndarray
        Array of shape (nelem, 3) comprising the list of connectivity
        for each element.

    Returns
    -------
    elems : ndarray
        Array of values at the element centres.

    """

    if np.ndim(nodes) == 1:
        elems = nodes[tri].mean(axis=-1)
    elif np.ndim(nodes) == 2:
        elems = nodes[..., tri].mean(axis=-1)
    else:
        raise Exception('Too many dimensions (maximum of two)')

    return elems
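A tiny worked example for the 1D branch:

    import numpy as np

    tri = np.array([[0, 1, 2], [1, 2, 3]])    # two triangles over four nodes
    nodes = np.array([0.0, 1.0, 2.0, 3.0])
    print(nodes2elems(nodes, tri))             # -> [1. 2.]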
Example #18
    def _maskremove(self, img, mask, removeAction):
        diff = mask.sum(axis=0)
        diff = (diff > 0).sum().astype("int")
        if (diff == 0):
            return None
        h, w = (0, 0)
        if (np.ndim(img) == 3):
            h, w, p = img.shape
        elif np.ndim(img) == 2:
            h, w = img.shape
        else:
            return None  # Picture cannot be edited
        efunc = self.getEnergyFunction(img)
        efunc[mask > 0] = -abs(efunc.max()) * h * w # Make the masked energy as small as possible so the optimal seam is forced through it.

        res = img
        for i in range(diff):
            self.progress.emit(i * 100 / diff)
            QtCore.QCoreApplication.processEvents()
            seam = ML.findOptimalSeam(efunc,stopFunc=self._haveToQuit)
            if (self._haveToQuit()):
                return []
            res = removeAction(res, seam)
            efunc = ML.removeSeams(efunc, seam)
        return res
Example #19
    def _applyImage(self, data):
        img = data['img']
        itera = self.itbox.value()
        overl = self.olbox.value()

        seams = []
        if 'mask' in data:
            mask = data['mask']
            seams = self._findSeams(img, mask)
        if self._haveToQuit():
            return None
        if (len(seams) == 0): # No Masking
            diff = self.sbox.value()
            h, w = (0, 0)
            if (np.ndim(img) == 3):
                h, w, p = img.shape
            elif np.ndim(img) == 2:
                h, w = img.shape
            else:
                return None # Picture cannot be edited
            res = img
            s = self.getEnergyFunction(img)
            seams = ML.findTopDisjointSeams(s, diff, lambda i: self.progress.emit(i),stopFunc=self._haveToQuit)
            if (self._haveToQuit()):
                return None
            res = ML.removeSeamsInGradient(res, seams, itera, overl,progressFunc=lambda i: self.progress.emit(i),stopFunc=self._haveToQuit)
            return res
        else:
            return ML.removeSeamsInGradient(img, seams, itera, overl,progressFunc=lambda i: self.progress.emit(i),stopFunc=self._haveToQuit)
Example #20
    def convert_to_firing_rate(self, calciumtraces):

        firingrate = []
        denoisedcalicumtrace = []
        for ii in range(0, np.size(calciumtraces, 1)):
            fluor = calciumtraces[:, ii]
            print('Deconvolving..', ii)

            deconvolvedresult = constrained_foopsi(fluor)
            print(np.ndim(deconvolvedresult[5]))

            if np.ndim(deconvolvedresult[5]) > 1:
                print('Skipping..Cell ', ii)
                continue
            firingrate.append(deconvolvedresult[5])
            denoisedcalicumtrace.append(deconvolvedresult[0])

        firingrate = np.asarray(np.vstack(firingrate)).T
        denoisedcalicumtrace = np.asarray(np.vstack(denoisedcalicumtrace)).T

        np.savetxt(os.path.join(self.WorkingDirectory, key + "_denoiseddata.csv"), denoisedcalicumtrace, delimiter=",")
        np.savetxt(os.path.join(self.WorkingDirectory, key + "_firingrate.csv"), firingrate, delimiter=",")

        plt.plot(np.clip(firingrate, 0, np.max(firingrate)), '.')
        plt.savefig('/Users/seetha/Desktop/test1.png')
Example #21
 def _compute_moments(self, u_X):
     """
     Tile the plates of the parent's moments.
     """
     # Utilize broadcasting: If a tiled axis is unit length in u_X, there
     # is no need to tile it.
     u = list()
     for ind in range(len(u_X)):
         ui = u_X[ind]
         shape_u = np.shape(ui)
         if np.ndim(ui) > 0:
             # Add variable dimensions
             tiles_ind = tiles + (1,)*len(self.dims[ind])
             # Utilize broadcasting: Do not tile leading empty axes
             nd = min(len(tiles_ind), np.ndim(ui))
             tiles_ind = tiles_ind[(-nd):]
             # For simplicity, make tiles and shape equal length
             (tiles_ind, shape_u) = misc.make_equal_length(tiles_ind,
                                                           shape_u)
             # Utilize broadcasting: Use tiling only if the parent's
             # moment has non-unit axis length.
             tiles_ind = [tile if sh > 1 else 1
                          for (tile, sh) in zip(tiles_ind, shape_u)]
                 
             # Tile
             ui = np.tile(ui, tiles_ind)
         u.append(ui)
     return u
Example #22
    def getMapping(self, mol):
        (sel1, sel2) = self._getSelections(mol)

        if np.ndim(sel1) == 2:
            protatoms = np.ones(len(sel1)) * -1
            for i in range(np.size(sel1, 1)):
                protatoms[i] = np.where(sel1[i] == True)[0][0]
        else:
            protatoms = np.where(sel1)[0]
        if np.ndim(sel2) == 2:
            ligatoms = np.ones(len(sel2)) * -1
            for i in range(np.size(sel2, 1)):
                ligatoms[i] = np.where(sel2[i] == True)[0][0]
        else:
            ligatoms = np.where(sel2)[0]

        numatoms1 = len(protatoms)
        numatoms2 = len(ligatoms)

        if np.array_equal(protatoms, ligatoms):
            map = np.zeros((numatoms1 * (numatoms1-1) // 2, 2), dtype=int)
            start = 0
            for i in range(numatoms1):
                finish = start + numatoms1 - i - 1
                map[start:finish, 0] = protatoms[i]
                map[start:finish, 1] = protatoms[i+1:]
                start = finish
        else:
            map = np.zeros((numatoms1 * numatoms2, 2), dtype=int)
            for i in range(numatoms2):
                start = i * numatoms1
                finish = (i+1) * numatoms1
                map[start:finish, 0] = protatoms
                map[start:finish, 1] = ligatoms[i]
        return map
Example #23
def read_h5_stack(fn, group='stack', crop=[None]*6, **kwargs):
    """Read a volume in HDF5 format into numpy.ndarray.

    Parameters
    ----------
    fn : string
        The filename of the input HDF5 file.
    group : string, optional (default 'stack')
        The group within the HDF5 file containing the dataset.
    crop : list of int, optional (default '[None]*6', no crop)
        A crop to get of the volume of interest. Only available for 2D and 3D
        volumes.

    Returns
    -------
    stack : numpy ndarray
        The stack contained in fn, possibly cropped.
    """
    fn = os.path.expanduser(fn)
    dset = h5py.File(fn, 'r')
    if group not in dset:
        raise ValueError("HDF5 file (%s) doesn't have group (%s)!" % 
                            (fn, group))
    a = dset[group]
    if ndim(a) == 2:
        xmin, xmax, ymin, ymax = crop[:4]
        a = a[xmin:xmax, ymin:ymax]
    elif ndim(a) == 3:
        xmin, xmax, ymin, ymax, zmin, zmax = crop
        a = a[xmin:xmax, ymin:ymax, zmin:zmax]
    stack = array(a)
    dset.close()
    return stack
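A self-contained sketch (assumes the module's own imports: os, h5py and "from numpy import *"-style ndim/array):

    import h5py
    import numpy as np

    with h5py.File('tiny.h5', 'w') as f:
        f.create_dataset('stack', data=np.arange(24).reshape(2, 3, 4))
    sub = read_h5_stack('tiny.h5', crop=[None, None, 0, 2, 1, 3])
    assert sub.shape == (2, 2, 2)              # x uncropped, y in [0:2], z in [1:3]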
Example #24
def sample_vMF(theta, kappa,size=1):
    """
      Sampling from vMF
      This is based on the implementation I found online here:
          http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
          (**** NOTE THE FIX BY KEVIN *****)
      which is based on : 
            Directional Statistics (Mardia and Jupp, 1999) and on the Ulrich-Wood's algorithm for sampling. 


    """

    warn('Not sure about sampling vMF, use with caution!!!! ')
    #print "kappa : ", kappa
    #print "norm direction :" , np.linalg.norm(theta)
    np.testing.assert_array_almost_equal( np.linalg.norm(theta) , 1 )
    #print "kappa : ", kappa
    assert kappa > 0 , "kappa must be positive !"

    if np.ndim(theta)==2:
       theta = np.squeeze(theta)
    assert np.ndim(theta)==1, "theta should be one-dimensional!"
    
    res_sampling = _rvMF(size, kappa * theta)

    return np.vstack(res_sampling)    
Example #25
 def helper():
     flist = glob.iglob(os.path.join(directory, "*.img"))
     flist = list(filter(lambda f: os.path.getmtime(f) > time0[0],
                         flist))
     
     if not flist:
         return
     flist.sort(key=os.path.getmtime)
     
     fpath = flist[-1]
     time0[0] = os.path.getmtime(fpath)
     data = open_img(fpath).data
     data = np.rot90(data, -1)
     if np.ndim(iw.image)==2 and iw.image.shape==data.shape:
         newdata = np.concatenate([data[np.newaxis], iw.image[np.newaxis]], 
                                  axis=0)
     elif np.ndim(iw.image)==3 and iw.image.shape[1:3]==data.shape:
         newdata = np.concatenate([data[np.newaxis], iw.image], axis=0)
     elif np.ndim(data)==2:
         newdata = data
     else:
         return
     print(fpath)
     iw.setImage(newdata, autoLevels=False)
     iw.labels.insert(0, fpath)
     iw.win.setWindowTitle(fpath)
Example #26
def sparse_jac_repeat(tape_tag, x, nnz, rind, cind, values):
    """
    computes sparse Jacobian J for a function F:R^N -> R^M with
    the sparsity pattern that has been computed previously (e.g. by calling sparse_jac_no_repeat)

    I guess it also reuses the options that have been set previously, so it would not be necessary to set the options again.

    
    [nnz, rind, cind, values] = sparse_jac_repeat(tape_tag, x, rind, cind, values)

    INPUT:
    The base point x at which the Jacobian should be computed, i.e. J = dF(x)/dx
    
    OUTPUT:
    nnz is the guessed number of nonzeros in the Jacobian. This can be larger than the true number of nonzeros.

    sparse matrix representation in standard format:
    rind is an nnz-array of row indices
    cind is an nnz-array of column indices
    values are the corresponding Jacobian entries
    """
    assert type(tape_tag) == int
    assert type(nnz) == int

    assert numpy.ndim(x) == 1
    assert numpy.ndim(rind) == 1
    assert numpy.ndim(cind) == 1
    assert numpy.ndim(values) == 1

    x = numpy.asarray(x, dtype=float)
    rind = numpy.asarray(rind, dtype=numpy.uint32)
    cind = numpy.asarray(cind, dtype=numpy.uint32)
    values = numpy.asarray(values, dtype=float)

    return _colpack.sparse_jac_repeat(tape_tag, x, nnz, rind, cind, values)
Example #27
def fastdtw(x, y, dist):
    """
    Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
    Instead of iterating through each element and calculating each distance,
    this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
    :param array x: N1*M array
    :param array y: N2*M array
    :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
    If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
    """
    assert len(x)
    assert len(y)
    if ndim(x)==1:
        x = x.reshape(-1,1)
    if ndim(y)==1:
        y = y.reshape(-1,1)
    r, c = len(x), len(y)
    D0 = zeros((r + 1, c + 1))
    D0[0, 1:] = inf
    D0[1:, 0] = inf
    D1 = D0[1:, 1:]
    D0[1:,1:] = cdist(x,y,dist)
    C = D1.copy()
    for i in range(r):
        for j in range(c):
            D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
    if len(x)==1:
        path = zeros(len(y)), range(len(y))
    elif len(y) == 1:
        path = range(len(x)), zeros(len(x))
    else:
        path = _traceback(D0)
    return D1[-1, -1] / sum(D1.shape), C, D1, path
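A tiny sketch, assuming the bare names come from "from numpy import *" and "from scipy.spatial.distance import cdist"; _traceback is only reached when both inputs have more than one sample, so this single-sample query avoids it:

    import numpy as np

    x = np.array([2.0])
    y = np.array([1.0, 2.0, 4.0])
    dmin, C, D, path = fastdtw(x, y, 'euclidean')
    # dmin == D[-1, -1] / sum(D.shape) == 3.0 / 4.0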
Example #28
    def project(self, *args, **kwargs):
        """ Project molecule.

        Parameters
        ----------
        mol : :class:`Molecule <htmd.molecule.molecule.Molecule>`
            A :class:`Molecule <htmd.molecule.molecule.Molecule>` object to project.

        Returns
        -------
        data : np.ndarray
            An array containing the projected data.
        """
        # -------------- DEPRECATION PATCH --------------
        if isinstance(self, np.ndarray) or isinstance(self, Molecule):
            from warnings import warn
            warn('Static use of the project method will be deprecated in the next version of HTMD. '
                 'Please change your projection code according to the tutorials on www.htmd.org')
            data = _MetricDistanceOld.project(self, *args, **kwargs)
            logger.warning('Static use of the project method will be deprecated in the next version of HTMD. '
                            'Please change your projection code according to the tutorials on www.htmd.org')
            return data
        # ------------------ CUT HERE -------------------
        mol = args[0]
        (sel1, sel2) = self._getSelections(mol)

        if np.ndim(sel1) == 1 and np.ndim(sel2) == 1:  # normal distances
            metric = pp_calcDistances(mol, sel1, sel2, self.metric, self.threshold, self.pbc, truncate=self.truncate)
        else:  # minimum distances by groups
            metric = pp_calcMinDistances(mol, sel1, sel2)

        return metric
Example #29
def interpolate_1x1(lon, lat, data, lon0=0.):
    """ interpolate on a standard grid
    """
    # already standard
    if is_standard_grid(lon, lat, lon0=lon0):
        return lon, lat, data

    # 
    lon, data = rectify_longitude_data(lon, data, lon0)

    res = 1
    nx = int(360/res)
    ny = int(180/res)

    lon360 = np.linspace(lon0+0.5,lon0+359.5,nx)
    lat180 = np.linspace(-89.5,89.5,ny)

    print "Interpolate onto a standard 1deg grid...",
    if np.ndim(data) == 2:
        if isinstance(data, np.ma.MaskedArray):
            data = data.filled(np.nan)
        data = interp2(lon, lat, data, lon360,lat180, order=0)
        lon, lat = lon360, lat180

    elif np.ndim(data) == 3:
        nt = np.size(data,0)
        data180x360 = np.zeros((nt,ny,nx))
        for i in range(nt):
            data180x360[i,:,:] = interp2(lon, lat, np.squeeze(data[i,:,:]), lon360,lat180)
        lon, lat, data = lon360, lat180, data180x360
    print "Done"

    return lon, lat, data
Example #30
    def transformImage(self, containerImage):
            if type(containerImage) is not type(self.sourceImage):
                raise TypeError
            if np.ndim(containerImage) != np.ndim(self.sourceImage):
                raise ValueError

            self.homography = Homography(sourcePoints=self.sourcePoints, targetPoints=self.targetPoints, effect=self.effect)
            x = [f[0] for f in self.targetPoints]
            y = [s[1] for s in self.targetPoints]

            rowmin, rowmax = min(y), max(y) + 1
            colmin, colmax = min(x), max(x) + 1



            if self.effect is not None:
                self.sourcePoints = [(0,0), (self.sbound[1], 0), (0, self.sbound[0]), (self.sbound[1], self.sbound[0])]
                self.homography = Homography(sourcePoints=self.sourcePoints, targetPoints=self.targetPoints, effect=self.effect)
            else:
                self.homography = Homography(sourcePoints=self.sourcePoints, targetPoints=self.targetPoints, effect=self.effect)

            rbs =  interpolate.RectBivariateSpline(np.arange(0.0, self.sbound[0]), np.arange(0.0, self.sbound[1]), self.sourceImage, kx=1, ky=1)
            # Identify the target bounding box.
            # For every point within the box, perform an inverse projection of the coordinates.
            for i in np.arange(rowmin, rowmax):
                for j in np.arange(colmin, colmax):
                    a = j, i
                    px, py = self.homography.inverseProject(a)
                    if 0 <= px and px <= self.sbound[1] - 1 and 0 <= py and py <= self.sbound[0] - 1:
                        # read value of source and use 2D interpolation
                        containerImage[i][j] = rbs(py, px)

            return containerImage
Example #31
def skyann(map,
           xcent,
           ycent,
           rinn,
           rout,
           mask=None,
           clip_low=None,
           clip_high=3.0):

    ny, nx = map.shape

    nobj = xcent.size
    skylev = numpy.empty_like(xcent)
    skyrms = numpy.empty_like(xcent)

    for iobj in range(nobj):
        # Extract coords for this object and convert to zero-based.
        if numpy.ndim(xcent) == 0:
            thisxcent = xcent - 1
        else:
            thisxcent = xcent[iobj] - 1
        if numpy.ndim(ycent) == 0:
            thisycent = ycent - 1
        else:
            thisycent = ycent[iobj] - 1

        # Bounds.
        rb = rout + 0.5

        xmin = int(math.floor(thisxcent - rb))
        if xmin < 0:
            xmin = 0

        xmax = int(math.ceil(thisxcent + rb))
        if xmax >= nx:
            xmax = nx - 1

        ymin = int(math.floor(thisycent - rb))
        if ymin < 0:
            ymin = 0

        ymax = int(math.ceil(thisycent + rb))
        if ymax >= ny:
            ymax = ny - 1

        # Extract what we need.
        thismap = map[ymin:ymax + 1, xmin:xmax + 1]
        if mask is not None:
            thismask = mask[ymin:ymax + 1, xmin:xmax + 1]
        else:
            thismask = None

        xtmp = numpy.arange(xmin, xmax + 1)
        ytmp = numpy.arange(ymin, ymax + 1)
        xx = numpy.tile(xtmp, (ymax - ymin + 1, 1))
        yy = numpy.transpose(numpy.tile(ytmp, (xmax - xmin + 1, 1)))

        # Make mask for desired annulus.
        rr = numpy.hypot(xx - thisxcent, yy - thisycent)
        ww = numpy.logical_and(rr >= rinn, rr <= rout)

        annmap = thismap[ww]
        if thismask is not None:
            annmask = thismask[ww]
        else:
            annmask = None

        # Compute sky level.
        thisskylev, thisskyrms = skylevel(annmap,
                                          mask=annmask,
                                          clip_low=clip_low,
                                          clip_high=clip_high)

        if numpy.ndim(xcent) == 0:
            skylev = thisskylev
            skyrms = thisskyrms
        else:
            skylev[iobj] = thisskylev
            skyrms[iobj] = thisskyrms

    return skylev, skyrms
Example #32
    def gradient(f, *varargs, axis=None, edge_order=1):
        f = np.asanyarray(f)
        N = f.ndim  # number of dimensions

        axes = axis
        del axis

        if axes is None:
            axes = tuple(range(N))
        else:
            axes = normalize_axis_tuple(axes, N)

        len_axes = len(axes)
        n = len(varargs)
        if n == 0:
            # no spacing argument - use 1 in all axes
            dx = [1.0] * len_axes
        elif n == 1 and np.ndim(varargs[0]) == 0:
            # single scalar for all axes
            dx = varargs * len_axes
        elif n == len_axes:
            # scalar or 1d array for each axis
            dx = list(varargs)
            for i, distances in enumerate(dx):
                if np.ndim(distances) == 0:
                    continue
                elif np.ndim(distances) != 1:
                    raise ValueError("distances must be either scalars or 1d")
                if len(distances) != f.shape[axes[i]]:
                    raise ValueError(
                        "when 1d, distances must match the "
                        "length of the corresponding dimension"
                    )
                diffx = np.diff(distances)
                # if distances are constant reduce to the scalar case
                # since it brings a consistent speedup
                if (diffx == diffx[0]).all():
                    diffx = diffx[0]
                dx[i] = diffx
        else:
            raise TypeError("invalid number of arguments")

        if edge_order > 2:
            raise ValueError("'edge_order' greater than 2 not supported")

        # use central differences on interior and one-sided differences on the
        # endpoints. This preserves second order-accuracy over the full domain.

        outvals = []

        # create slice objects --- initially all are [:, :, ..., :]
        slice1 = [slice(None)] * N
        slice2 = [slice(None)] * N
        slice3 = [slice(None)] * N
        slice4 = [slice(None)] * N

        otype = f.dtype.char
        if otype not in ["f", "d", "F", "D", "m", "M"]:
            otype = "d"

        # Difference of datetime64 elements results in timedelta64
        if otype == "M":
            # Need to use the full dtype name because it contains unit
            # information
            otype = f.dtype.name.replace("datetime", "timedelta")
        elif otype == "m":
            # Needs to keep the specific units, can't be a general unit
            otype = f.dtype

        # Convert datetime64 data into ints. Make dummy variable `y`
        # that is a view of ints if the data is datetime64, otherwise
        # just set y equal to the array `f`.
        if f.dtype.char in ["M", "m"]:
            y = f.view("int64")
        else:
            y = f

        for i, axis in enumerate(axes):
            if y.shape[axis] < edge_order + 1:
                raise ValueError(
                    "Shape of array too small to calculate a numerical "
                    "gradient, at least (edge_order + 1) elements are "
                    "required."
                )
            # result allocation
            out = np.empty_like(y, dtype=otype)

            uniform_spacing = np.ndim(dx[i]) == 0

            # Numerical differentiation: 2nd order interior
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(None, -2)
            slice3[axis] = slice(1, -1)
            slice4[axis] = slice(2, None)

            if uniform_spacing:
                out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * dx[i])
            else:
                dx1 = dx[i][0:-1]
                dx2 = dx[i][1:]
                a = -(dx2) / (dx1 * (dx1 + dx2))
                b = (dx2 - dx1) / (dx1 * dx2)
                c = dx1 / (dx2 * (dx1 + dx2))
                # fix the shape for broadcasting
                shape = np.ones(N, dtype=int)
                shape[axis] = -1
                a.shape = b.shape = c.shape = shape
                # 1D equivalent --
                # out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
                out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

            # Numerical differentiation: 1st order edges
            if edge_order == 1:
                slice1[axis] = 0
                slice2[axis] = 1
                slice3[axis] = 0
                dx_0 = dx[i] if uniform_spacing else dx[i][0]
                # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
                out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)]) / dx_0

                slice1[axis] = -1
                slice2[axis] = -1
                slice3[axis] = -2
                dx_n = dx[i] if uniform_spacing else dx[i][-1]
                # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
                out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)]) / dx_n

            # Numerical differentiation: 2nd order edges
            else:
                slice1[axis] = 0
                slice2[axis] = 0
                slice3[axis] = 1
                slice4[axis] = 2
                if uniform_spacing:
                    a = -1.5 / dx[i]
                    b = 2.0 / dx[i]
                    c = -0.5 / dx[i]
                else:
                    dx1 = dx[i][0]
                    dx2 = dx[i][1]
                    a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
                    b = (dx1 + dx2) / (dx1 * dx2)
                    c = -dx1 / (dx2 * (dx1 + dx2))
                # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
                out[tuple(slice1)] = a * y[tuple(slice2)] + b * y[tuple(slice3)] + c * y[tuple(slice4)]

                slice1[axis] = -1
                slice2[axis] = -3
                slice3[axis] = -2
                slice4[axis] = -1
                if uniform_spacing:
                    a = 0.5 / dx[i]
                    b = -2.0 / dx[i]
                    c = 1.5 / dx[i]
                else:
                    dx1 = dx[i][-2]
                    dx2 = dx[i][-1]
                    a = (dx2) / (dx1 * (dx1 + dx2))
                    b = -(dx2 + dx1) / (dx1 * dx2)
                    c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
                # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
                out[tuple(slice1)] = a * y[tuple(slice2)] + b * y[tuple(slice3)] + c * y[tuple(slice4)]

            outvals.append(out)

            # reset the slice object in this dimension to ":"
            slice1[axis] = slice(None)
            slice2[axis] = slice(None)
            slice3[axis] = slice(None)
            slice4[axis] = slice(None)

        if len_axes == 1:
            return outvals[0]
        else:
            return outvals
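A sanity-check sketch against numpy's own implementation (assumes the shim above is in scope and normalize_axis_tuple is importable, e.g. from numpy.core.numeric, as this vendored copy expects):

    import numpy as np

    f = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
    x = np.array([0.0, 1.0, 1.5, 3.5, 4.0])
    assert np.allclose(gradient(f, x), np.gradient(f, x))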
Example #33
def apply_along_axis(obj, func, axis=None, skipna=False, args=(), **kwargs):
    """ Apply along-axis numpy method to DimArray

    apply_along_axis(obj, ...)
    Where ... are the parameters below:

    parameters:
    ----------
    - func: numpy function name (`str`)
    - {axis}
    - {skipna}
    - args    : variable list of arguments before "axis"
    - kwargs  : variable dict of keyword arguments after "axis"
    
    returns:
    --------
    - DimArray, or scalar 

    Examples:
    >>> a = DimArray([[0,1],[2,3.]])
    >>> b = a.copy()
    >>> b[0,0] = np.nan
    >>> c = DimArray.from_arrays([a,b],keys=['a','b'],axis='items')
    >>> c
    dimarray: 7 non-null elements (1 null)
    dimensions: 'items', 'x0', 'x1'
    0 / items (2): a to b
    1 / x0 (2): 0 to 1
    2 / x1 (2): 0 to 1
    array([[[  0.,   1.],
            [  2.,   3.]],
    <BLANKLINE>
           [[ nan,   1.],
            [  2.,   3.]]])
    >>> c.sum(axis=0)
    dimarray: 3 non-null elements (1 null)
    dimensions: 'x0', 'x1'
    0 / x0 (2): 0 to 1
    1 / x1 (2): 0 to 1
    array([[ nan,   2.],
           [  4.,   6.]])
    >>> c.sum(0, skipna=True)
    dimarray: 4 non-null elements (0 null)
    dimensions: 'x0', 'x1'
    0 / x0 (2): 0 to 1
    1 / x1 (2): 0 to 1
    array([[ 0.,  2.],
           [ 4.,  6.]])
    >>> c.median(0)
    dimarray: 3 non-null elements (1 null)
    dimensions: 'x0', 'x1'
    0 / x0 (2): 0 to 1
    1 / x1 (2): 0 to 1
    array([[ nan,   1.],
           [  2.,   3.]])
    """

    # Deal with `axis` parameter, whether `int`, `str` or `tuple`
    obj, idx, name = _deal_with_axis(obj, axis)

    # Apply numpy function, dealing with NaNs
    result = _apply_along_axis(obj.values,
                               func,
                               axis=idx,
                               skipna=skipna,
                               args=args,
                               **kwargs)

    if type(func) is str:
        funcname = func
        func = getattr(np, func)
    else:
        funcname = func.__name__

    # If `axis` was None (operations on the flattened array), just returns the numpy array
    if axis is None or not isinstance(result, np.ndarray):
        return result

    #
    # New axes
    #
    # standard case: collapsed axis
    if np.ndim(result) == obj.ndim - 1:
        newaxes = [ax for ax in obj.axes if ax.name != name]

    # cumulative functions: axes remain unchanged
    elif funcname in ('cumsum', 'cumprod', 'gradient'):
        newaxes = obj.axes.copy()

    # diff: reduce axis size by one
    elif funcname == "diff":
        oldaxis = obj.axes[idx]
        newaxis = oldaxis[1:]  # assume backward differencing
        newaxes = obj.axes.copy()
        newaxes[idx] = newaxis

    # axes do not fit for some reason
    else:
        raise Exception("cannot find new axes for this transformation: " +
                        repr(funcname))

    newobj = obj._constructor(result, newaxes, **obj._metadata)

    # add stamp
    #stamp = "{transform}({axis})".format(transform=funcname, axis=str(obj.axes[idx]))
    #newobj._metadata_stamp(stamp)

    return newobj
Example #34
    def __reverse_indexing(slices, m_child, plates, dims):
        """
        A helpful function for performing reverse indexing/slicing
        """

        j = -1  # plate index for parent
        i = -1  # plate index for child
        child_slices = ()
        parent_slices = ()
        msg_plates = ()

        # Compute plate axes in the message from children
        ndim = len(dims)
        if ndim > 0:
            m_plates = np.shape(m_child)[:-ndim]
        else:
            m_plates = np.shape(m_child)

        for s in reversed(slices):

            if misc.is_scalar_integer(s):
                # Case: integer
                parent_slices = (s, ) + parent_slices
                msg_plates = (plates[j], ) + msg_plates
                j -= 1
            elif s is None:
                # Case: newaxis
                if -i <= len(m_plates):
                    child_slices = (0, ) + child_slices
                i -= 1
            elif isinstance(s, slice):
                # Case: slice
                if -i <= len(m_plates):
                    child_slices = (slice(None), ) + child_slices
                parent_slices = (s, ) + parent_slices
                if ((-i > len(m_plates) or m_plates[i] == 1)
                        and slicelen(s) == plates[j]):
                    # Broadcasting can be applied. The message does not need
                    # to be explicitly shaped to the full size
                    msg_plates = (1, ) + msg_plates
                else:
                    # No broadcasting. Must explicitly form the full size
                    # axis
                    msg_plates = (plates[j], ) + msg_plates
                j -= 1
                i -= 1
            else:
                raise RuntimeError(
                    "BUG: Unknown index type. Should capture earlier.")

        # Set the elements of the message
        m_parent = np.zeros(msg_plates + dims)
        if np.ndim(m_parent) == 0 and np.ndim(m_child) == 0:
            m_parent = m_child
        elif np.ndim(m_parent) == 0:
            m_parent = m_child[child_slices]
        elif np.ndim(m_child) == 0:
            m_parent[parent_slices] = m_child
        else:
            m_parent[parent_slices] = m_child[child_slices]

        return m_parent
Example #35
    def unpack_array(self, M):
        """ OrderedDict.unpack_array(array)
            unpacks an input 1d vector or 2d column array into the data dictionary
                following the same order that it was unpacked
            important that the structure of the data dictionary, and the shapes
                of the contained values are the same as the data from which the 
                array was packed
        
            Inputs:
                 array - either a 1D vector or 2D column array
                 
            Outputs:
                 a reference to self, updates self in place
                 
        """

        # dont require dict to have numpy
        import numpy as np
        from VyPy.tools.arrays import atleast_2d_col, array_type, matrix_type

        # check input type
        vector = np.ndim(M) == 1

        # valid types for output
        valid_types = (int, float, array_type, matrix_type)

        # counter for unpacking
        _index = [0]

        # the unpacking function
        def do_unpack(D):
            for k, v in D.items():

                # type checking
                if isinstance(v, OrderedDict):
                    do_unpack(v)  # recursion!
                    continue
                elif not isinstance(v, valid_types):
                    continue

                # get this value's rank
                rank = np.ndim(v)

                # get unpack index
                index = _index[0]

                # skip if too big
                if rank > 2:
                    continue

                # scalars
                elif rank == 0:
                    if vector:
                        D[k] = M[index]
                        index += 1
                    else:  #array
                        continue
                        #raise RuntimeError , 'array size mismatch, all values in data must have same number of rows for array unpacking'

                # 1d vectors
                elif rank == 1:
                    n = len(v)
                    if vector:
                        D[k][:] = M[index:(index + n)]
                        index += n
                    else:  #array
                        D[k][:] = M[:, index]
                        index += 1

                # 2d arrays
                elif rank == 2:
                    n, m = v.shape
                    if vector:
                        D[k][:, :] = np.reshape(M[index:(index + (n * m))],
                                                [n, m],
                                                order='F')
                        index += n * m
                    else:  #array
                        D[k][:, :] = M[:, index:(index + m)]
                        index += m

                #: switch rank

                _index[0] = index

            #: for each item

        #: def do_unpack()

        # do the unpack
        do_unpack(self)

        # check
        if not M.shape[-1] == _index[0]:
            raise IndexError('did not unpack all values')

        # done!
        return self
Example #36
    def to_fluxd(self, wfb, aper, eph, unit=None, **kwargs):
        """Express as spectral flux density in an observation.

        Assumes the small angle approximation.


        Parameters
        ----------
        wfb : `~astropy.units.Quantity`, `~synphot.SpectralElement`, list
            Wavelengths, frequencies, bandpass, or list of
            bandpasses of the observation.  Bandpasses require
            `~synphot`.  Ignored if ``S`` is provided.

        aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius (length
            or angular units), or as an sbpy `~sbpy.activity.Aperture`.

        eph : dictionary-like, `~sbpy.data.Ephem`
            Ephemerides of the comet.  Required fields: 'rh', 'delta'.
            Optional: 'phase'.

        unit : `~astropy.units.Unit`, string, optional
            The flux density unit for the output.

        """

        # This method handles the geometric quantities.  Sub-classes
        # will handle the photometric quantities in `_source_fluxd`.

        # rho = effective circular aperture radius at the distance of
        # the comet.  Keep track of array dimensionality as Ephem
        # objects can needlessly increase the number of dimensions.
        if isinstance(aper, Aperture):
            rho = aper.coma_equivalent_radius()
            ndim = np.ndim(rho)
        else:
            rho = aper
            ndim = np.ndim(rho)
        rho = rho.to('km', sbu.projected_size(eph))

        ndim = max(ndim, np.ndim(self))

        # validate unit
        if unit is not None:
            unit = u.Unit(unit)

        # get source spectral flux density
        # * sunlight for Afrho,
        # * blackbody emission for Efrho
        # quantity = (delta**2 * F / rho) / source
        # must have spectral flux density units
        source = self._source_fluxd(wfb, eph, unit=unit, **kwargs)

        if isinstance(source, u.Magnitude):
            _source = source.physical
        else:
            _source = source
        fluxd = self * rho / eph['delta']**2 * _source

        # using Ephem can unnecessarily promote fluxd to an array
        if np.ndim(fluxd) > ndim:
            fluxd = np.squeeze(fluxd)

        # and back to magnitudes, as needed
        return fluxd.to(source.unit)
Example #37
    def __call__(self, sample):
        #--- the transformations must follow this order:
        #--- translate -> rotate -> shear -> scale
        ch, H, W = sample['image'].shape
        #--- centering mat
        C, Cm = np.eye(3), np.eye(3)
        C[0, 2] = W / 2
        C[1, 2] = H / 2
        Cm[0, 2] = -W / 2
        Cm[1, 2] = -H / 2
        T = np.eye(3, 3)

        #--- Translate:
        if np.random.rand() < self.prob:
            #--- a normal distribution is used to translate the data; a small
            #--- stdv is recommended in order to keep the data inside the
            #--- image [0.001 < stdv < 0.02 is recommended]
            T[0:2, 2] = np.random.rand(2) * [W, H] * self.t_stdv
        #--- rotate
        if torch.rand(1)[0] < self.prob:
            #--- the r_kappa value controls the von Mises "concentration"; to
            #--- keep the rotation under control, r_kappa=30 keeps theta
            #--- around +-pi/8 (if mu=0.0)
            D = np.eye(3)
            theta = np.random.vonmises(0.0, self.r_kappa)
            D[0:2, 0:2] = [[math.cos(theta), math.sin(theta)],
                           [-math.sin(theta),
                            math.cos(theta)]]
            T = np.dot(np.dot(np.dot(T, C), D), Cm)
        #--- Shear (vert and Horz)
        if np.random.rand() < self.prob:
            #--- for -pi/8 < theta < pi/8, tan(theta) ~ theta, so computing
            #--- tan(theta) is skipped. kappa should be selected to comply
            #--- with this restriction [kappa ~> 20 is a good value]
            theta = np.random.vonmises(0.0, self.sh_kappa)
            D = np.eye(3)
            D[0, 1] = theta
            T = np.dot(np.dot(np.dot(T, C), D), Cm)
        if np.random.rand() < self.prob:
            theta = np.random.vonmises(0.0, self.sh_kappa)
            D = np.eye(3)
            D[1, 0] = theta
            T = np.dot(np.dot(np.dot(T, C), D), Cm)
        #--- scale
        if np.random.rand() < self.prob:
            #--- use a log-normal distribution with mu=0.0 to perform scaling;
            #--- since the scale factor must be > 0, stdv is used to control
            #--- the deviation from 1 [0.1 < stdv < 0.5 is recommended]
            D = np.eye(3)
            D[0, 0], D[1, 1] = np.exp(np.random.rand(2) * self.sc_stdv)
            T = np.dot(np.dot(np.dot(T, C), D), Cm)

        if (T == np.eye(3)).all():
            return sample
        else:
            if np.ndim(sample['image']) > 2:
                for t in sample['image']:
                    t[:] = affine_transform(t, T)
            else:
                sample['image'] = affine_transform(sample['image'], T)
            if np.ndim(sample['label']) > 2:
                #--- affine transform over label must keep values in the matrix
                #--- then, no interpolation or adding is performed
                for t in sample['label']:
                    t[:] = affine_transform(t, T, mode='nearest', order=0)
            else:
                sample['label'] = affine_transform(sample['label'],
                                                   T,
                                                   mode='nearest',
                                                   order=0)

            return sample
Example #38
def handle_scalar_broadcasting(nd, x, bdim):
  assert isinstance(get_aval(x), ShapedArray)
  if bdim is None or nd == onp.ndim(x):
    return x
  else:
    return x.reshape(x.shape + (1,) * (nd - x.ndim))
Example #39
 def __init__(self, cov):
     if numpy.ndim(cov) == 1:
         self.cov = numpy.diag(numpy.array(cov, dtype=numpy.float64))
     else:
         self.cov = numpy.array(cov, dtype=numpy.float64)
Example #40
def convert_julian(JD, ASTYPE=None, FORMAT='dict'):
    """
    Converts from Julian day to calendar date and time

    Translated from caldat in "Numerical Recipes in C", by William H. Press,
        Brian P. Flannery, Saul A. Teukolsky, and William T. Vetterling.
        Cambridge University Press, 1988 (second printing).
    Hatcher, D. A., "Simple Formulae for Julian Day Numbers and Calendar Dates",
        Quarterly Journal of the Royal Astronomical Society, 25(1), 1984.


    Arguments
    ---------
    JD: Julian Day (days since 01-01-4713 BCE at 12:00:00)

    Keyword arguments
    -----------------
    ASTYPE: convert output to variable type
    FORMAT: format of output variables
        'dict': dictionary with variable keys
        'tuple': tuple with variable order YEAR,MONTH,DAY,HOUR,MINUTE,SECOND
        'zip': aggregated variable sets

    Returns
    -------
    year: calendar year
    month: calendar month
    day: day of the month
    hour: hour of the day
    minute: minute of the hour
    second: second of the minute
    """

    #-- convert to array if only a single value was imported
    if (np.ndim(JD) == 0):
        JD = np.atleast_1d(JD)
        SINGLE_VALUE = True
    else:
        SINGLE_VALUE = False

    JDO = np.floor(JD + 0.5)
    C = np.zeros_like(JD)
    #-- calculate C for dates before and after the switch to Gregorian
    IGREG = 2299161.0
    ind1, = np.nonzero(JDO < IGREG)
    C[ind1] = JDO[ind1] + 1524.0
    ind2, = np.nonzero(JDO >= IGREG)
    B = np.floor((JDO[ind2] - 1867216.25) / 36524.25)
    C[ind2] = JDO[ind2] + B - np.floor(B / 4.0) + 1525.0
    #-- calculate coefficients for date conversion
    D = np.floor((C - 122.1) / 365.25)
    E = np.floor((365.0 * D) + np.floor(D / 4.0))
    F = np.floor((C - E) / 30.6001)
    #-- calculate day, month, year and hour
    DAY = np.floor(C - E + 0.5) - np.floor(30.6001 * F)
    MONTH = F - 1.0 - 12.0 * np.floor(F / 14.0)
    YEAR = D - 4715.0 - np.floor((7.0 + MONTH) / 10.0)
    HOUR = np.floor(24.0 * (JD + 0.5 - JDO))
    #-- calculate minute and second
    G = (JD + 0.5 - JDO) - HOUR / 24.0
    MINUTE = np.floor(G * 1440.0)
    SECOND = (G - MINUTE / 1440.0) * 86400.0

    #-- convert all variables to output type (from float)
    if ASTYPE is not None:
        YEAR = YEAR.astype(ASTYPE)
        MONTH = MONTH.astype(ASTYPE)
        DAY = DAY.astype(ASTYPE)
        HOUR = HOUR.astype(ASTYPE)
        MINUTE = MINUTE.astype(ASTYPE)
        SECOND = SECOND.astype(ASTYPE)

    #-- if only a single value was imported initially: remove singleton dims
    if SINGLE_VALUE:
        YEAR = YEAR.item(0)
        MONTH = MONTH.item(0)
        DAY = DAY.item(0)
        HOUR = HOUR.item(0)
        MINUTE = MINUTE.item(0)
        SECOND = SECOND.item(0)

    #-- return date variables in output format (default python dictionary)
    if (FORMAT == 'dict'):
        return dict(year=YEAR,
                    month=MONTH,
                    day=DAY,
                    hour=HOUR,
                    minute=MINUTE,
                    second=SECOND)
    elif (FORMAT == 'tuple'):
        return (YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)
    elif (FORMAT == 'zip'):
        return zip(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)
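A quick spot check: JD 2451545.0 is the J2000.0 epoch, i.e. 2000-01-01 12:00:00.

date = convert_julian(2451545.0, ASTYPE=int)
# -> {'year': 2000, 'month': 1, 'day': 1, 'hour': 12, 'minute': 0, 'second': 0}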
Example #41
    def tt_approximation(self, fun, shape):
        '''
        Approximation of a tensor of order d in tensor train format based on a
        Principal Component Analysis.

        For a prescribed precision, set TPCA.max_rank = np.inf and TPCA.tol to
        the desired precision (possibly an array of length d-1).

        For a prescribed rank, set TPCA.tol = np.inf and TPCA.max_rank to the
        desired rank (possibly an array of length d-1).

        See also the documentation of the class
        TensorPrincipalComponentAnalysis.

        Parameters
        ----------
        fun : fun or tensap.Function
            Function of d variables i_1, ..., i_d which returns the entries of
            the tensor.
        shape : list or numpy.ndarray
            The shape of the tensor.

        Raises
        ------
        ValueError
            If the provided tolerance and max ranks are not correct.

        Returns
        -------
        tensap.TreeBasedTensor
            A tensor in tree based format with a linear tree.
        dict
            Dictionary containing the outputs of the method.

        '''
        solver = deepcopy(self)
        d = len(shape)
        tree = tensap.DimensionTree.linear(d)
        is_active_node = np.full(tree.nb_nodes, True)
        is_active_node[tree.dim2ind[1:] - 1] = False
        rep_tt = np.nonzero(is_active_node)[0]
        rep_tt = np.flip(rep_tt[1:])

        if np.ndim(self.tol) == 1 and len(self.tol) == d - 1:
            tol = solver.tol
            solver.tol = np.zeros(tree.nb_nodes)
            solver.tol[rep_tt] = tol
        elif np.ndim(self.tol) == 1 and len(self.tol) > 1:
            raise ValueError('tol should be a scalar or an array of length ' +
                             'd-1.')

        if np.ndim(self.max_rank) == 1 and len(self.max_rank) == d - 1:
            rank = solver.max_rank
            solver.max_rank = np.zeros(tree.nb_nodes)
            solver.max_rank[rep_tt] = rank
        elif np.ndim(self.max_rank) == 1 and len(self.max_rank) > 1:
            raise ValueError('max_rank should be a scalar or an array of ' +
                             'length d-1.')

        return solver.tree_based_approximation(fun, shape, tree,
                                               is_active_node)
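A usage sketch, assuming (as the docstring indicates) that this method belongs to tensap's TensorPrincipalComponentAnalysis; the entry function and shape are illustrative:

import numpy as np
import tensap

solver = tensap.TensorPrincipalComponentAnalysis()
solver.tol = 1e-8          # prescribed precision...
solver.max_rank = np.inf   # ...with unconstrained ranks
fun = lambda i: np.cos(i[:, 0]) + np.sin(i[:, 1]) * np.exp(-i[:, 2])
f, output = solver.tt_approximation(fun, [10, 10, 10])
print(output['number_of_evaluations'])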
Example #42
def is_python_scalar(x):
    try:
        return x.aval.weak_type and np.ndim(x) == 0
    except AttributeError:
        return type(x) in python_scalar_dtypes
Example #43
def gpu_strs(gpus):
  if gpus is not None and np.ndim(gpus) == 0:
    gpus = [gpus]
  return ['/cpu:0'] if gpus is None else ['/gpu:%d' % x for x in gpus]
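Behaviour, read directly off the branches above:

gpu_strs(None)    # -> ['/cpu:0']
gpu_strs(1)       # scalar gets wrapped first -> ['/gpu:1']
gpu_strs([0, 1])  # -> ['/gpu:0', '/gpu:1']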
Example #44
    def tree_based_approximation(self, fun, shape, tree, is_active_node=None):
        '''
        Approximation of a tensor of order d in tree based tensor format based
        on a Principal Component Analysis.

        For a prescribed precision, set TPCA.max_rank = np.inf and TPCA.tol to
        the desired precision (possibly an array of length d-1).

        For a prescribed rank, set TPCA.tol = np.inf and TPCA.max_rank to the
        desired rank (possibly an array of length d-1).

        See also the documentation of the class
        TensorPrincipalComponentAnalysis.

        Parameters
        ----------
        fun : fun or tensap.Function
            Function of d variables i_1, ..., i_d which returns the entries of
            the tensor.
        shape : list or numpy.ndarray
            The shape of the tensor.
        tree : tensap.DimensionTree
            The required dimension tree.
        is_active_node : list or numpy.ndarray, optional
            An array of booleans indicating which nodes of the tree are active.
            The default is None, setting all the nodes active.

        Raises
        ------
        ValueError
            If the provided tolerance and max ranks are not correct.

        Returns
        -------
        tensap.TreeBasedTensor
            A tensor in tree based format.
        dict
            Dictionary containing the outputs of the method.

        '''
        solver = deepcopy(self)
        d = len(shape)

        if is_active_node is None:
            is_active_node = np.full(tree.nb_nodes, True)

        if (np.ndim(self.tol) == 0 or len(self.tol) == 1) and self.tol < 1:
            solver.tol /= np.sqrt(np.count_nonzero(is_active_node) - 1)

        if np.ndim(self.tol) == 0 or len(self.tol) == 1:
            solver.tol = np.full(tree.nb_nodes, solver.tol)
        elif len(self.tol) != tree.nb_nodes:
            raise ValueError('tol should be a scalar or an array of length ' +
                             'tree.nb_nodes.')

        if np.ndim(self.max_rank) == 0 or len(self.max_rank) == 1:
            solver.max_rank = np.full(tree.nb_nodes, self.max_rank)
        elif len(self.max_rank) != tree.nb_nodes:
            raise ValueError('max_rank should be a scalar or an array of ' +
                             'length tree.nb_nodes.')

        grids = [np.reshape(np.arange(x), (-1, 1)) for x in shape]
        alpha_basis = np.empty(tree.nb_nodes, dtype=object)
        alpha_grids = np.empty(tree.nb_nodes, dtype=object)
        outputs = np.empty(tree.nb_nodes, dtype=object)
        samples = np.empty(tree.nb_nodes, dtype=object)
        tensors = [[]] * tree.nb_nodes
        number_of_evaluations = 0
        for nu in range(d):
            alpha = tree.dim2ind[nu]
            B_alpha = np.eye(shape[nu])
            if is_active_node[alpha - 1]:
                tol_alpha = np.min(
                    (solver.tol[alpha - 1], solver.max_rank[alpha - 1]))
                pc_alpha, outputs[alpha-1] = \
                    solver.alpha_principal_components(fun, shape, nu,
                                                      tol_alpha, B_alpha,
                                                      grids[nu])
                samples[alpha - 1] = outputs[alpha - 1]['samples']
                shape_alpha = [shape[nu], pc_alpha.shape[1]]
                tensors[alpha - 1] = tensap.FullTensor(pc_alpha, 2,
                                                       shape_alpha)

                B_alpha = np.matmul(B_alpha, pc_alpha)
                I_alpha = tensap.magic_indices(B_alpha)[0]
                alpha_grids[alpha - 1] = grids[nu][I_alpha, :]
                alpha_basis[alpha - 1] = B_alpha[I_alpha, :]

                number_of_evaluations += outputs[alpha -
                                                 1]['number_of_evaluations']
                if solver.display:
                    print('alpha = %i : rank = %i, nb_eval = %i' %
                          (alpha, shape_alpha[-1],
                           outputs[alpha - 1]['number_of_evaluations']))
            else:
                alpha_grids[alpha - 1] = grids[nu]
                alpha_basis[alpha - 1] = B_alpha

        for level in np.arange(np.max(tree.level), 0, -1):
            for alpha in np.intersect1d(tree.nodes_with_level(level),
                                        tree.internal_nodes):
                S_alpha = tree.children(alpha)
                B_alpha = TensorPrincipalComponentAnalysis.\
                    _tensor_product_b_alpha(alpha_basis[S_alpha-1])
                alpha_grids[alpha-1] = \
                    tensap.FullTensorGrid(alpha_grids[S_alpha-1]).array()

                tol_alpha = np.min(
                    (solver.tol[alpha - 1], solver.max_rank[alpha - 1]))
                pc_alpha, outputs[alpha-1] = \
                    solver.alpha_principal_components(fun, shape,
                                                      tree.dims[alpha-1],
                                                      tol_alpha,
                                                      B_alpha,
                                                      alpha_grids[alpha-1])
                samples[alpha - 1] = outputs[alpha - 1]['samples']
                shape_alpha = np.concatenate(
                    ([x.shape[1]
                      for x in alpha_basis[S_alpha - 1]], [pc_alpha.shape[1]]))
                tensors[alpha - 1] = tensap.FullTensor(pc_alpha,
                                                       len(S_alpha) + 1,
                                                       shape_alpha)

                B_alpha = np.matmul(B_alpha, pc_alpha)
                I_alpha = tensap.magic_indices(B_alpha)[0]
                alpha_grids[alpha - 1] = alpha_grids[alpha - 1][I_alpha, :]
                alpha_basis[alpha - 1] = B_alpha[I_alpha, :]
                number_of_evaluations += outputs[alpha -
                                                 1]['number_of_evaluations']
                if solver.display:
                    print('alpha = %i : rank = %i, nb_eval = %i' %
                          (alpha, shape_alpha[-1],
                           outputs[alpha - 1]['number_of_evaluations']))

        alpha = tree.root
        S_alpha = tree.children(alpha)
        B_alpha = TensorPrincipalComponentAnalysis.\
            _tensor_product_b_alpha(alpha_basis[S_alpha-1])
        I_alpha = tensap.FullTensorGrid(alpha_grids[S_alpha - 1]).array()
        shape_alpha = [x.shape[1] for x in alpha_basis[S_alpha - 1]]
        ind = [np.nonzero(tree.dims[alpha - 1] == x)[0][0] for x in range(d)]
        tensors[alpha - 1] = tensap.FullTensor(
            np.linalg.solve(B_alpha, fun(I_alpha[:, ind])), len(S_alpha),
            shape_alpha)
        alpha_grids[alpha - 1] = I_alpha
        number_of_evaluations += I_alpha.shape[0]
        samples[alpha - 1] = I_alpha
        if solver.display:
            print('Interpolation - nb_eval = %i' % I_alpha.shape[0])

        f = tensap.TreeBasedTensor(tensors, tree)

        output = {
            'number_of_evaluations': number_of_evaluations,
            'samples': samples,
            'alpha_basis': alpha_basis,
            'alpha_grids': alpha_grids,
            'outputs': outputs
        }

        return f, output
Example #45
def propagate(
    fn: _tp.Callable,
    x: _tp.Union[float, _Indexable[float]],
    cov: _tp.Union[float, _Indexable[float], _Indexable[_Indexable[float]]],
    **kwargs,
) -> _tp.Tuple[np.ndarray, np.ndarray]:
    """
    Numerically propagates the covariance of function inputs to function outputs.

    The function computes C' = J C J^T, where C is the covariance matrix of the input,
    C' the matrix of the output, and J is the Jacobi matrix of first derivatives of the
    mapping function fn. The Jacobi matrix is computed numerically.

    Parameters
    ----------
    fn: callable
        Function that computes y = fn(x). x and y are each allowed to be scalars or
        one-dimensional arrays.
    x: float or array-like with shape (N,)
        Input vector.
    cov: float or array-like with shape (N,) or shape(N, N)
        Covariance matrix of input vector. If the array is one-dimensional, it is
        interpreted as the diagonal of a covariance matrix with zero off-diagonal
        elements.
    **kwargs:
        Extra arguments are passed to :func:`jacobi`.

    Returns
    -------
    y, ycov
        y is the result of fn(x).
        ycov is the propagated covariance matrix.
        ycov is a matrix, unless y is a number; in that case, ycov is also
        reduced to a number.
    """
    x = np.array(x)
    y = fn(x)
    jac = jacobi(fn, x, **kwargs)[0]

    x_nd = np.ndim(x)
    y_nd = np.ndim(y)
    x_len = len(x) if x_nd == 1 else 1
    y_len = len(y) if y_nd == 1 else 1

    jac_nd = np.ndim(jac)
    if jac_nd == 0:
        jac = np.atleast_2d(jac)
    elif jac_nd == 1:
        jac = jac.reshape((y_len, x_len))

    xcov = np.atleast_1d(cov)
    xcov_nd = np.ndim(xcov)

    if xcov.shape[0] != x_len:
        raise ValueError("x and cov have incompatible shapes")

    ycov = np.einsum("il,kl,l" if xcov_nd == 1 else "ij,kl,jl", jac, jac, xcov)

    if np.ndim(y) == 0:
        ycov = ycov[0, 0]

    return y, ycov
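Usage sketch with a scalar mapping: for y = x**2 at x = 1 with input variance 0.1, J = 2 and the propagated variance is J C J^T = 2 * 0.1 * 2 = 0.4.

y, ycov = propagate(lambda x: x ** 2, 1.0, 0.1)
# y -> 1.0, ycov -> 0.4 (up to the numerical accuracy of the Jacobian)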
Example #46
    def hopca(self, fun, shape):
        '''
        Return the set of alpha-principal components of an algebraic tensor,
        for all alpha in {0,1,...,d-1}.

        For prescribed precision, set TPCA.max_rank = np.inf and TPCA.tol to
        the desired precision (possibly an array of length d).

        For prescribed rank, set TPCA.tol = np.inf and TPCA.max_rank to the
        desired rank (possibly an array of length d).

        See also the documentation of the class
        TensorPrincipalComponentAnalysis.

        Parameters
        ----------
        fun : fun or tensap.Function
            Function of d variables i_1, ..., i_d which returns the entries of
            the tensor.
        shape : list or numpy.ndarray
            The shape of the tensor.

        Raises
        ------
        ValueError
            If the provided tolerance and max ranks are not correct.

        Returns
        -------
        f_pc : list
            List of the alpha-principal components of the tensor.
        output : list
            List containing the outputs of the method
            alpha_principal_components.

        '''
        solver = deepcopy(self)
        d = len(shape)

        if np.ndim(self.tol) == 0 or len(self.tol) == 1:
            solver.tol = np.full(d, self.tol)
        elif len(self.tol) != d:
            raise ValueError('tol should be a scalar or an array of length d.')

        if np.ndim(self.max_rank) == 0 or len(self.max_rank) == 1:
            solver.max_rank = np.full(d, self.max_rank)
        elif len(self.max_rank) != d:
            raise ValueError('max_rank should be a scalar or an array of ' +
                             'length d.')

        f_pc = []
        output = []
        for alpha in range(d):
            I_alpha = np.reshape(np.arange(shape[alpha]), (-1, 1))
            B_alpha = np.eye(shape[alpha])
            tol_alpha = np.min((solver.tol[alpha], solver.max_rank[alpha]))
            f_pc_alpha, output_alpha = \
                solver.alpha_principal_components(fun, shape, alpha, tol_alpha,
                                                  B_alpha, I_alpha)
            f_pc.append(f_pc_alpha)
            output.append(output_alpha)
        return f_pc, output
Example #47
    def _batch_reducible_process(
            self, element: types.Extracts) -> Sequence[types.Extracts]:
        """Invokes the tfjs model on the provided inputs and stores the result."""
        result = copy.copy(element)
        result[constants.PREDICTIONS_KEY] = []

        batched_features = collections.defaultdict(list)
        feature_rows = element[constants.FEATURES_KEY]
        for r in feature_rows:
            for key, value in r.items():
                if value.dtype == np.int64:
                    value = value.astype(np.int32)
                batched_features[key].append(value)

        for spec in self._eval_config.model_specs:
            model_name = spec.name if len(
                self._eval_config.model_specs) > 1 else ''
            if model_name not in self._loaded_models:
                raise ValueError(
                    'model for "{}" not found: eval_config={}'.format(
                        spec.name, self._eval_config))

            model_features = {}
            for k in self._model_properties[model_name]['inputs']:
                k_name = k.split(':')[0]
                if k_name not in batched_features:
                    raise ValueError(
                        'model requires feature "{}" not available in '
                        'input.'.format(k_name))
                dim = self._model_properties[model_name]['inputs'][k]
                elems = []
                for i in batched_features[k_name]:
                    if np.ndim(i) > len(dim):
                        raise ValueError(
                            'ranks for input "{}" are not compatible '
                            'with the model.'.format(k_name))
                    # TODO(dzats): See if we can support case where multiple dimensions
                    # are not defined.
                    elems.append(np.reshape(i, dim))
                model_features[k] = elems

            model_features = {
                k: np.concatenate(v)
                for k, v in model_features.items()
            }

            batched_entries = collections.defaultdict(list)
            for feature, value in model_features.items():
                batched_entries[_DATA_JSON].append(value.tolist())
                batched_entries[_DTYPE_JSON].append(str(value.dtype))
                batched_entries[_SHAPE_JSON].append(value.shape)
                batched_entries[_TF_INPUT_NAME_JSON].append(feature)

            cur_subdir = str(uuid.uuid4())
            cur_input_path = os.path.join(
                self._model_properties[model_name]['path'], _EXAMPLES_SUBDIR,
                cur_subdir)
            tf.io.gfile.makedirs(cur_input_path)
            for entry, value in batched_entries.items():
                with tf.io.gfile.GFile(os.path.join(cur_input_path, entry),
                                       'w') as f:
                    f.write(json.dumps(value))

            cur_output_path = os.path.join(
                self._model_properties[model_name]['path'], _OUTPUTS_SUBDIR,
                cur_subdir)
            tf.io.gfile.makedirs(cur_output_path)
            inference_command = [
                self._binary_path, '--model_path=' + os.path.join(
                    self._model_properties[model_name]['path'], _MODEL_JSON),
                '--inputs_dir=' + cur_input_path,
                '--outputs_dir=' + cur_output_path
            ]

            popen = subprocess.Popen(inference_command,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
            stdout, stderr = popen.communicate()
            if popen.returncode != 0:
                raise ValueError(
                    'Inference failed with status {}\nstdout:\n{}\nstderr:\n{}'
                    .format(popen.returncode, stdout, stderr))

            try:
                with tf.io.gfile.GFile(
                        os.path.join(cur_output_path, _DATA_JSON)) as f:
                    data = json.load(f)
                with tf.io.gfile.GFile(
                        os.path.join(cur_output_path, _DTYPE_JSON)) as f:
                    dtype = json.load(f)
                with tf.io.gfile.GFile(
                        os.path.join(cur_output_path, _SHAPE_JSON)) as f:
                    shape = json.load(f)
            except FileNotFoundError as e:
                raise FileNotFoundError(
                    'Unable to find files containing inference result. This likely '
                    'means that inference did not succeed. Error {}'.format(e))

            name = [
                n.split(':')[0]
                for n in self._model_properties[model_name]['outputs'].keys()
            ]

            tf.io.gfile.rmtree(cur_input_path)
            tf.io.gfile.rmtree(cur_output_path)

            outputs = {}
            for n, s, t, d in zip(name, shape, dtype, data):
                d_val = [d[str(i)] for i in range(len(d))]
                outputs[n] = np.reshape(np.array(d_val, t), s)

            for v in outputs.values():
                if len(v) != len(feature_rows):
                    raise ValueError(
                        'Did not get the expected number of results.')

            for i in range(len(feature_rows)):
                output = {k: v[i] for k, v in outputs.items()}

                if len(output) == 1:
                    output = list(output.values())[0]

                if len(self._eval_config.model_specs) == 1:
                    result[constants.PREDICTIONS_KEY].append(output)
                else:
                    if i >= len(result[constants.PREDICTIONS_KEY]):
                        result[constants.PREDICTIONS_KEY].append({})
                    result[constants.PREDICTIONS_KEY][i].update(
                        {spec.name: output})
        return [result]
Example #48
def jacobi(
    fn: _tp.Callable,
    x: _tp.Union[int, float, _tp.Sequence],
    *args,
    method: _tp.Optional[int] = None,
    mask: _tp.Optional[np.ndarray] = None,
    rtol: float = 0,
    maxiter: int = 10,
    maxgrad: int = 3,
    step: _tp.Optional[_tp.Tuple[float, float]] = None,
    diagnostic: _tp.Optional[dict] = None,
):
    """
    Return first derivative and its error estimate.

    Parameters
    ----------
    fn : Callable
        Function with the signature `fn(x, *args)`, where `x` is a number or an
        array of numbers and `*args` are optional auxiliary arguments.
    x : Number or array of numbers
        The derivative is computed with respect to `x`. If `x` is an array, the Jacobi
        matrix is computed with respect to each element of `x`.
    *args : tuple
        Additional arguments passed to the function.
    method : {-1, 0, 1} or None, optional
        Whether to compute central (0), forward (1) or backward derivatives (-1).
        The default (None) uses auto-detection.
    mask : array or None, optional
        If `x` is an array and `mask` is not None, compute the Jacobi matrix only for the
        part of the array selected by the mask.
    rtol : float, optional
        Relative tolerance for the derivative. The algorithm stops when this relative
        tolerance is reached. If 0 (the default), the algorithm iterates until the
        error estimate of the derivative does not improve further.
    maxiter : int, optional
        Maximum number of iterations of the algorithm.
    maxgrad : int, optional
        Maximum degree of the extrapolation polynomial.
    step : tuple of float or None, optional
        Factors that reduce the step size in each iteration relative to the previous
        step.
    diagnostic : dict or None, optional
        If an empty dict is passed to this keyword, it is filled with diagnostic
        information produced by the algorithm. This reduces performance and is only
        intended for debugging.

    Returns
    -------
    array, array
        Derivative and its error estimate.
    """
    if maxiter <= 0:
        raise ValueError("invalid value for keyword maxiter")
    if maxgrad < 0:
        raise ValueError("invalid value for keyword maxgrad")
    if step is not None:
        if not (0 < step[0] < 0.5):
            raise ValueError("invalid value for step[0]")
        if not (0 < step[1] < 1):
            raise ValueError("invalid value for step[1]")

    squeeze = np.ndim(x) == 0
    x = np.atleast_1d(x).astype(float)
    assert x.ndim == 1

    x_indices = np.arange(len(x))
    if mask is not None:
        x_indices = x_indices[mask]
    nx = len(x_indices)

    if isinstance(diagnostic, dict):
        diagnostic["method"] = np.zeros(nx, dtype=np.int8)
        diagnostic["iteration"] = np.zeros(len(x_indices), dtype=np.uint8)

    if method is not None and method not in (-1, 0, 1):
        raise ValueError("invalid value for keyword method")

    f0 = None
    jac = None
    err = None
    for ik, k in enumerate(x_indices):
        # if step is None, use optimal step sizes for central derivatives
        h = _steps(x[k], step or (0.25, 0.5), maxiter)
        # if method is None, auto-detect for each x[k]
        md, f0, r = _first(method, f0, fn, x, k, h[0], args)

        if diagnostic:
            diagnostic["method"][ik] = md

        if md != 0 and step is None:
            # optimal step sizes for forward derivatives
            h = _steps(x[k], (0.125, 0.125), maxiter)

        r_shape = np.shape(r)
        r = np.reshape(r, -1)
        nr = len(r)
        re = np.full(nr, np.inf)
        todo = np.ones(nr, dtype=bool)
        fd = [r]

        if jac is None:
            jac = np.empty(r_shape + (nx, ), dtype=r.dtype)
            err = np.empty(r_shape + (nx, ), dtype=r.dtype)
            if diagnostic:
                diagnostic["call"] = np.zeros((nr, nx), dtype=np.uint8)

        if diagnostic:
            diagnostic["call"][:, ik] = 2 if md == 0 else 3

        for i in range(1, len(h)):
            fdi = _derive(md, f0, fn, x, k, h[i], args)
            fdi = np.reshape(fdi, -1)
            fd.append(fdi if i == 1 else fdi[todo])
            if diagnostic:
                diagnostic["call"][todo, ik] += 2
                diagnostic["iteration"][ik] += 1

            # polynomial fit with one extra degree of freedom
            grad = min(i - 1, maxgrad)
            start = i - (grad + 1)
            stop = i + 1
            q, c = np.polyfit(h[start:stop]**2,
                              fd[start:],
                              grad,
                              rcond=None,
                              cov=True)
            ri = q[-1]
            # pulls have roughly unit variance, however,
            # the pull distribution is not gaussian and looks
            # more like student's t
            rei = c[-1, -1]**0.5

            # update estimates that have significantly smaller error
            sub_todo = rei < re[todo]
            todo1 = todo.copy()
            todo[todo1] = sub_todo
            r[todo] = ri[sub_todo]
            re[todo] = rei[sub_todo]

            # do not improve estimates further which meet the tolerance
            if rtol > 0:
                sub_todo &= rei > rtol * np.abs(ri)
                todo[todo1] = sub_todo

            if np.sum(todo) == 0:
                break

            # shrink previous vectors of estimates
            fd = [fdi[sub_todo] for fdi in fd]

        jac[..., ik] = r.reshape(r_shape)
        err[..., ik] = re.reshape(r_shape)

    if diagnostic:
        diagnostic["call"].shape = r_shape + (nx, )

    if squeeze:
        if diagnostic:
            diagnostic["call"] = np.squeeze(diagnostic["call"])
        jac = np.squeeze(jac)
        err = np.squeeze(err)

    return jac, err
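Usage sketch: for an element-wise f(x) = x**2 the Jacobi matrix is diagonal with entries 2x.

import numpy as np

jac, err = jacobi(lambda x: x ** 2, np.array([1.0, 2.0, 3.0]))
# jac is approximately diag(2, 4, 6); err holds the error estimates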
Example #49
def xcor(list_of_wls,
         list_of_orders,
         wlm,
         fxm,
         drv,
         RVrange,
         plot=False,
         list_of_errors=None):
    """
    This routine takes a combined dataset (in the form of lists of wl spaces,
    spectral orders and possibly a matching list of errors on those spectral orders),
    as well as a template (wlm,fxm) to cross-correlate with, and the cross-correlation
    parameters (drv,RVrange). The code takes on the order of ~10 minutes for an entire
    HARPS dataset, which appears to be superior to my old IDL pipe.

    The CCF used is the Geneva-style weighted average; not the Pearson CCF. Therefore
    it measures true 'average' planet lines, with flux on the y-axis of the CCF.
    The template must therefore be (something close to) a binary mask, with values
    inside spectral lines (the CCF is scale-invariant so their overall scaling
    doesn't matter).

    It returns the RV axis and the resulting CCF in a tuple.

    Thanks to Brett Morris (bmorris3), this code now implements a clever numpy broadcasting trick to
    instantly apply and interpolate the wavelength shifts of the model template onto
    the data grid in 2 dimensions. The matrix multiplication operator (originally
    recommended to me by Matteo Brogi) allowed this 2D template matrix to be multiplied
    with a 2D spectral order. np.hstack() is used to concatenate all orders end to end,
    effectively making a giant single spectral order (with NaNs in between due to masking).

    All these steps have eliminated ALL the for-loops from the equation, and effectuated a
    speed gain of a factor between 2,000 and 3,000. The time to do cross correlations is now
    typically measured in 100s of milliseconds rather than minutes.

    This way of calculation does impose some strict rules on NaNs, though. To keep things fast,
    NaNs are now used to set the interpolated template matrix to zero wherever there are NaNs in the data.
    These NaNs are found by looking at the first spectrum in the stack, with the assumption that
    every NaN is in an all-NaN column. In the standard cross-correlation work-flow, isolated
    NaNs are interpolated over (healed), after all.

    The places where there are NaN columns in the data are therefore set to 0 in the template matrix.
    The NaN values themselves are then set to an arbitrary value, since they will never
    weigh into the average by construction.


    Parameters
    ----------
    list_of_wls : list
        List of wavelength axes of the data.

    list_of_orders : list
        List of corresponding 2D orders.

    list_of_errors : list
        Optional, list of corresponding 2D error matrices.

    wlm : np.ndarray
        Wavelength axis of the template.

    fxm : np.ndarray
        Weight-axis of the template.

    drv : int,float
        The velocity step onto which the CCF is computed. Typically ~1 km/s.

    RVrange : int,float
        The velocity range in the positive and negative direction over which to
        evaluate the CCF. Typically >100 km/s.

    plot : bool
        Set to True for diagnostic plotting.

    Returns
    -------
    RV : np.ndarray
        The radial velocity grid over which the CCF is evaluated.

    CCF : np.ndarray
        The weighted average flux in the spectrum as a function of radial velocity.

    CCF_E : np.ndarray
        Optional. The error on each CCF point propagated from the error on the spectral values.

    Tsums : np.ndarray
        The sum of the template for each velocity step. Used for normalising the CCFs.
    """

    import tayph.functions as fun
    import astropy.constants as const
    import tayph.util as ut
    from tayph.vartests import typetest, dimtest, postest, nantest
    import numpy as np
    import scipy.interpolate
    import astropy.io.fits as fits
    import matplotlib.pyplot as plt
    import sys
    import pdb

    #===FIRST ALL SORTS OF TESTS ON THE INPUT===
    if len(list_of_wls) != len(list_of_orders):
        raise ValueError(
            f'In xcor(): List of wls and list of orders have different length ({len(list_of_wls)} & {len(list_of_orders)}).'
        )

    dimtest(wlm, [len(fxm)], 'wlm in ccf.xcor()')
    typetest(wlm, np.ndarray, 'wlm in ccf.xcor')
    typetest(fxm, np.ndarray, 'fxm in ccf.xcor')
    typetest(drv, [int, float], 'drv in ccf.xcor')
    typetest(
        RVrange,
        float,
        'RVrange in ccf.xcor()',
    )
    postest(RVrange, 'RVrange in ccf.xcor()')
    postest(drv, 'drv in ccf.xcor()')
    nantest(wlm, 'fxm in ccf.xcor()')
    nantest(fxm, 'fxm in ccf.xcor()')
    nantest(drv, 'drv in ccf.xcor()')
    nantest(RVrange, 'RVrange in ccf.xcor()')

    drv = float(drv)
    N = len(list_of_wls)  #Number of orders.

    if np.ndim(list_of_orders[0]) == 1:
        n_exp = 1
    else:
        n_exp = len(list_of_orders[0][:, 0])  #Number of exposures.

        #===Then check that all orders indeed have n_exp exposures===
        for i in range(N):
            if len(list_of_orders[i][:, 0]) != n_exp:
                raise ValueError(
                    f'In xcor(): Not all orders have {n_exp} exposures.')

#===END OF TESTS. NOW DEFINE CONSTANTS===
    c = const.c.to('km/s').value  #In km/s
    RV = fun.findgen(2.0 * RVrange / drv + 1) * drv - RVrange  #Define the velocity grid.
    beta = 1.0 - RV / c  #The doppler factor with which each wavelength is to be shifted.
    n_rv = len(RV)

    #===STACK THE ORDERS IN MASSIVE CONCATENATIONS===
    stack_of_orders = np.hstack(list_of_orders)
    stack_of_wls = np.concatenate(list_of_wls)
    if list_of_errors is not None:
        stack_of_errors = np.hstack(list_of_errors)  #Stack them horizontally

        #Check that the number of NaNs is the same in the orders as in the errors on the orders;
        #and that they are in the same place; meaning that if I add the errors to the orders, the number of
        #NaNs does not increase (NaN+value=NaN).
        if (np.sum(np.isnan(stack_of_orders)) != np.sum(
                np.isnan(stack_of_errors + stack_of_orders))) and (np.sum(
                    np.isnan(stack_of_orders)) != np.sum(
                        np.isnan(stack_of_errors))):
            raise ValueError(
                f"in CCF: The number of NaNs in list_of_orders and list_of_errors is not equal ({np.sum(np.isnan(list_of_orders))},{np.sum(np.isnan(list_of_errors))})"
            )

#===HERE IS THE JUICY BIT===
    #2D broadcast of wl_data, each row shifted by beta[i].
    shifted_wavelengths = stack_of_wls * beta[:, np.newaxis]
    T = scipy.interpolate.interp1d(wlm, fxm, bounds_error=False, fill_value=0)(
        shifted_wavelengths)  #...making this a 2D thing.
    #All NaNs are assumed to be in all-NaN columns. If that is not true, the nantest below will fail.
    T[:, np.isnan(stack_of_orders[0])] = 0.0
    T_sums = np.sum(T, axis=1)

    #We check whether there are isolated NaNs:
    n_nans = np.sum(np.isnan(stack_of_orders), axis=0)  #The total number of NaNs in each column.
    #Whenever the number of NaNs equals the length of a column, set the flag to zero.
    n_nans[n_nans == len(stack_of_orders)] = 0
    #If any columns still have NaNs in them, we need to crash.
    if np.max(n_nans) > 0:
        raise ValueError(
            "in CCF: Not all NaN values are purely in columns. There are still isolated NaNs. Remove those."
        )

    stack_of_orders[np.isnan(stack_of_orders)] = 47e20  #Set NaNs to arbitrarily high values.
    #Here is the entire cross-correlation, over all orders and velocity steps. No for-loops.
    CCF = stack_of_orders @ T.T / T_sums
    CCF_E = CCF * 0.0

    #If the errors were provided, we do the same to those:
    if list_of_errors is not None:
        #We have already tested that these NaNs are in the same place.
        stack_of_errors[np.isnan(stack_of_errors)] = 42e20
        CCF_E = stack_of_errors**2 @ (T.T / T_sums)**2  #This has been mathematically proven.


#===THAT'S ALL. TEST INTEGRITY AND RETURN THE RESULT===
    nantest(
        CCF, 'CCF in ccf.xcor()'
    )  #If anything went wrong with NaNs in the data, these tests will fail because the matrix operation @ is non NaN-friendly.
    nantest(CCF_E, 'CCF_E in ccf.xcor()')

    if list_of_errors is not None:
        return (RV, CCF, np.sqrt(CCF_E), T_sums)
    return (RV, CCF, T_sums)
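The broadcasting trick the docstring credits to Brett Morris, in isolation (illustrative arrays): every row of the shifted matrix is the data wavelength axis multiplied by one Doppler factor, and a single interp1d call evaluates the template on all rows at once.

import numpy as np
import scipy.interpolate

wl_data = np.linspace(500.0, 501.0, 1000)         # data wavelength axis (nm)
RV = np.arange(-100.0, 101.0, 1.0)                # velocity grid (km/s)
beta = 1.0 - RV / 299792.458                      # Doppler factors
shifted = wl_data * beta[:, np.newaxis]           # (n_rv, n_wl) in one broadcast
wlm = np.linspace(499.0, 502.0, 5000)             # template wavelength axis
fxm = np.exp(-0.5 * ((wlm - 500.5) / 0.01) ** 2)  # a single template line
T = scipy.interpolate.interp1d(wlm, fxm, bounds_error=False, fill_value=0)(shifted)
print(T.shape)                                    # (201, 1000)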
Example #50
def check_dims(array):
    if np.ndim(array) == 1:
        array = np.expand_dims(array, 1)
    return array
Example #51
    def get_samples(self, sampleIndices, featVect_orig, numSamples=10):
        '''
        Input   featVect        the complete feature vector
                sampleIndices   the raveled(!) indices which we want to sample
                numSamples      how many samples to draw
                
        '''

        featVect = np.copy(featVect_orig)

        # to avoid mistakes, remove the feature values of the part that we want to sample
        featVect.ravel()[sampleIndices.ravel()] = 0

        # reshape inputs if necessary
        if np.ndim(sampleIndices) == 1:
            sampleIndices = sampleIndices.reshape(3, self.win_size,
                                                  self.win_size)
        if np.ndim(featVect) == 1:
            featVect = featVect.reshape(
                [3, self.image_dims[0], self.image_dims[1]])

        # get a patch surrounding the sample indices and the indices relative to that
        patch, patchIndices = self._get_surr_patch(featVect, sampleIndices)

        # For each color channel, we will conditionally sample pixel
        # values from a multivariate distribution

        samples = np.zeros((numSamples, 3, self.win_size * self.win_size))

        for c in [0, 1, 2]:

            patch_c = patch[c].ravel()
            patchIndices_c = patchIndices[c].ravel()

            # get the conditional mean and covariance
            if self.padding_size == 0:
                cond_mean = self.meanVects[c]
                cond_cov = self.covMat[c]
            else:
                cond_mean, cond_cov = self._get_cond_params(
                    patch_c, patchIndices_c, c)

            # sample from the conditional distribution

    #        samples = np.random.multivariate_normal(cond_mean, cond_cov, numSamples)
    # -- FASTER:
            dimGauss = self.win_size * self.win_size
            # --- (1) find real matrix A such that AA^T=Sigma ---
            A = np.linalg.cholesky(cond_cov)
            # --- (2) get (numSamples) samples from a standard normal ---
            z = np.random.normal(size=numSamples * dimGauss).reshape(
                dimGauss, numSamples)
            # --- (3) x=mu+Az ---
            samples[:, c] = cond_mean[np.newaxis, :] + np.dot(A, z).T

        samples = samples.reshape((numSamples, -1))

        # get the min/max values for this particular sample
        # (since the data is preprocessed these can be different for each pixel!)
        minVals_sample = self.minMaxVals[0].ravel()[sampleIndices.ravel()]
        maxVals_sample = self.minMaxVals[1].ravel()[sampleIndices.ravel()]
        # clip each sample to the valid per-pixel range
        samples = np.clip(samples, minVals_sample, maxVals_sample)

        return samples
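The "(1)-(2)-(3)" Cholesky shortcut used above, verified in isolation against the covariance it should reproduce (illustrative numbers):

import numpy as np

rng = np.random.default_rng(0)
mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.3], [0.3, 0.5]])

A = np.linalg.cholesky(cov)                # (1) A @ A.T == cov
z = rng.standard_normal((2, 100000))       # (2) standard-normal draws
samples = mean[np.newaxis, :] + (A @ z).T  # (3) x = mu + A z

print(np.cov(samples.T))                   # ~cov, matching multivariate_normal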
Example #52
def _handle_scalar_broadcasting(nd, x, d):
    if d is not_mapped or nd == np.ndim(x):
        return x
    else:
        return x.reshape(x.shape + (1, ) * (nd - np.ndim(x)))
Example #53
    # sort image file names according to how they were stacked (when labeled in Fiji)
    files = [
        fn for fn in os.listdir(os.curdir)
        if (".jpg" in fn and "_labelled" not in fn)
    ]
    files.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
    #print(files)

    comparisonbodyparts = list(set(DataCombined.columns.get_level_values(1)))

    for index, imagename in enumerate(files):
        
        image = io.imread(imagename)
        plt.axis('off')

        if np.ndim(image)==2:
            h, w = np.shape(image)
        else:
            h, w, nc = np.shape(image)
            
        plt.figure(
            figsize=(w * 1. / 100 * scale, h * 1. / 100 * scale))
        plt.subplots_adjust(
            left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)

        # This is important when using data combined / which runs consecutively!
        imindex = np.where(
            np.array(DataCombined.index.values) == folder + '/' + imagename)[0]

        plt.imshow(image, 'bone')
        for cc, scorer in enumerate(Scorers):
Example #54
    def initialize(self,
                   inputs,
                   input_lengths,
                   mel_targets=None,
                   linear_targets=None,
                   pml_targets=None,
                   gta=False,
                   locked_alignments=None,
                   logs_enabled=True):
        '''Initializes the model for inference.

        Sets "pml_outputs", and "alignments" fields.

        Args:
          inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of
            steps in the input time series, and values are character IDs
          input_lengths: int32 Tensor with shape [N] where N is batch size and values are the lengths
            of each sequence in inputs.
          mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size, T_out is number
            of steps in the output time series, M is num_mels, and values are entries in the mel
            spectrogram. Only needed for training.
          linear_targets: float32 Tensor with shape [N, T_out, F] where N is batch_size, T_out is number
            of steps in the output time series, F is num_freq, and values are entries in the linear
            spectrogram. Only needed for training.
          pml_targets: float32 Tensor with shape [N, T_out, P] where N is batch_size, T_out is number of
            steps in the PML vocoder features trajectories, P is pml_dimension, and values are PML vocoder
            features. Only needed for training.
          gta: boolean flag that is set to True when ground truth alignment is required
          locked_alignments: when explicit attention alignment is required, the locked alignments are passed in this
            parameter and the attention alignments are locked to these values
          logs_enabled: boolean flag that defaults to True, if False no construction logs output
        '''
        # fix the alignments shape to (batch_size, encoder_steps, decoder_steps) if not already including
        # batch dimension
        locked_alignments_ = locked_alignments

        if locked_alignments_ is not None:
            if np.ndim(locked_alignments_) < 3:
                locked_alignments_ = np.expand_dims(locked_alignments_, 0)

        with tf.variable_scope('inference') as scope:
            is_training = pml_targets is not None
            batch_size = tf.shape(inputs)[0]
            hp = self._hparams

            # Embeddings
            embedding_table = tf.get_variable(
                'embedding', [len(symbols), hp.embed_depth],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=0.5))
            embedded_inputs = tf.nn.embedding_lookup(
                embedding_table, inputs)  # [N, T_in, embed_depth=256]

            # Encoder
            prenet_outputs = prenet(
                embedded_inputs, is_training,
                hp.prenet_depths)  # [N, T_in, prenet_depths[-1]=128]
            encoder_outputs = encoder_cbhg(
                prenet_outputs,
                input_lengths,
                is_training,  # [N, T_in, encoder_depth=256]
                hp.encoder_depth)

            # Attention
            attention_mechanism = BahdanauAttention(hp.attention_depth,
                                                    encoder_outputs)

            attention_cell = LockableAttentionWrapper(
                GRUCell(hp.attention_depth),
                attention_mechanism,
                alignment_history=True,
                locked_alignments=locked_alignments_,
                output_attention=False,
                name='attention_wrapper')  # [N, T_in, attention_depth=256]

            # Apply prenet before concatenation in AttentionWrapper.
            prenet_cell = DecoderPrenetWrapper(attention_cell, is_training,
                                               hp.prenet_depths)

            # Concatenate attention context vector and RNN cell output into a 2*attention_depth=512D vector.
            concat_cell = ConcatOutputAndAttentionWrapper(
                prenet_cell)  # [N, T_in, 2*attention_depth=512]

            # Decoder (layers specified bottom to top):
            decoder_cell = MultiRNNCell(
                [
                    OutputProjectionWrapper(concat_cell, hp.decoder_depth),
                    ResidualWrapper(GRUCell(hp.decoder_depth)),
                    ResidualWrapper(GRUCell(hp.decoder_depth))
                ],
                state_is_tuple=True)  # [N, T_in, decoder_depth=256]

            # Project onto r PML feature vectors (predict r outputs at each RNN step):
            output_cell = OutputProjectionWrapper(
                decoder_cell, hp.pml_dimension * hp.outputs_per_step)
            decoder_init_state = output_cell.zero_state(batch_size=batch_size,
                                                        dtype=tf.float32)

            if is_training or gta:
                helper = TacoTrainingHelper(inputs, pml_targets,
                                            hp.pml_dimension,
                                            hp.outputs_per_step)
            else:
                helper = TacoTestHelper(batch_size, hp.pml_dimension,
                                        hp.outputs_per_step)

            (decoder_outputs,
             _), final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(
                 BasicDecoder(output_cell, helper, decoder_init_state),
                 maximum_iterations=hp.max_iters)  # [N, T_out/r, P*r]

            # Reshape outputs to be one output per entry
            pml_outputs = tf.reshape(
                decoder_outputs,
                [batch_size, -1, hp.pml_dimension])  # [N, T_out, P]

            # Grab alignments from the final decoder state:
            alignments = tf.transpose(
                final_decoder_state[0].alignment_history.stack(), [1, 2, 0])

            self.inputs = inputs
            self.input_lengths = input_lengths
            self.pml_outputs = pml_outputs
            self.alignments = alignments
            self.pml_targets = pml_targets
            self.attention_cell = attention_cell

            if logs_enabled:
                log('Initialized Tacotron model. Dimensions: ')
                log('  embedding:               %d' %
                    embedded_inputs.shape[-1])
                log('  prenet out:              %d' % prenet_outputs.shape[-1])
                log('  encoder out:             %d' %
                    encoder_outputs.shape[-1])
                log('  attention out:           %d' %
                    attention_cell.output_size)
                log('  concat attn & out:       %d' % concat_cell.output_size)
                log('  decoder cell out:        %d' % decoder_cell.output_size)
                log('  decoder out (%d frames):  %d' %
                    (hp.outputs_per_step, decoder_outputs.shape[-1]))
                log('  decoder out (1 frame):   %d' % pml_outputs.shape[-1])
Example #55
def mnorm(m, axis=None):
    """norm of a matrix of vectors stacked along the *axis* dimension.
    """
    if axis is None:
        axis = np.ndim(m) - 1
    return np.sqrt((m**2).sum(axis))
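Behaviour sketch: by default the norm is taken along the last axis.

import numpy as np

m = np.array([[3.0, 4.0], [0.0, 5.0]])
mnorm(m)          # -> array([5., 5.])
mnorm(m, axis=0)  # -> array([3., 6.403...])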
Example #56
    def _get_cond_params(self, surrPatch, inPatchIdx, channel):
        '''
        Input:
                surrpatch   the variables over which we have a distribution
                inPatchIdx  the index/indices from what we want to sample
        Output: 
                cond_mean    the conditional mean of the inner patch,
                            conditioned on the surrounding pixels
                cond_cov     the conditional covariance
        '''

        # get the part of the surrPacth vector which we use to condition the values on
        x2 = np.delete(surrPatch, inPatchIdx)
        # split the mean vector into mu1 and mu2 (matching what we want to sample/condition on)
        mu1 = np.take(self.meanVects[channel], inPatchIdx)
        mu2 = np.delete(self.meanVects[channel], inPatchIdx)

        path_dotProdForMean = self.path_folder + '{}_cov{}_win{}_dotProdForMean_{}_{}'.format(
            self.netname, self.patchSize, self.win_size, inPatchIdx[0],
            inPatchIdx[-1])

        # get the dot product for the mean (check if precomputed, otherwise do this first)
        if not os.path.exists(path_dotProdForMean + '.npy'):
            cov11 = self.covMats[channel][inPatchIdx][:, inPatchIdx]
            # misplaced parenthesis fixed: test the index array itself,
            # not the result of a comparison
            cov12 = np.delete(
                self.covMats[channel][inPatchIdx, :], inPatchIdx,
                axis=1) if np.ndim(inPatchIdx) > 0 else np.delete(
                    self.covMats[channel][inPatchIdx, :], inPatchIdx)
            cov21 = np.delete(self.covMats[channel][:, inPatchIdx],
                              inPatchIdx,
                              axis=0)
            cov22 = np.delete(np.delete(self.covMats[channel],
                                        inPatchIdx,
                                        axis=0),
                              inPatchIdx,
                              axis=1)
            # compute the conditional mean and covariance
            dotProdForMean = np.dot(cov12, scipy.linalg.inv(cov22))
            np.save(path_dotProdForMean, dotProdForMean)
        else:
            dotProdForMean = np.load(path_dotProdForMean + '.npy')

        # with the dotproduct, we can now evaluate the conditional mean
        cond_mean = mu1 + np.dot(dotProdForMean, x2 - mu2)

        path_condCov = self.path_folder + '{}_cov{}_win{}_cond_cov_{}_{}_indep'.format(
            self.netname, self.patchSize, self.win_size, inPatchIdx[0],
            inPatchIdx[-1])

        # get the conditional covariance
        if not os.path.exists(path_condCov + '.npy'):
            cov11 = self.covMats[channel][inPatchIdx][:, inPatchIdx]
            # misplaced parenthesis and covMat/covMats inconsistency fixed
            cov12 = np.delete(
                self.covMats[channel][inPatchIdx, :], inPatchIdx,
                axis=1) if np.ndim(inPatchIdx) > 0 else np.delete(
                    self.covMats[channel][inPatchIdx, :], inPatchIdx)
            cov21 = np.delete(self.covMats[channel][:, inPatchIdx],
                              inPatchIdx,
                              axis=0)
            cov22 = np.delete(np.delete(self.covMats[channel],
                                        inPatchIdx,
                                        axis=0),
                              inPatchIdx,
                              axis=1)
            cond_cov = cov11 - np.dot(np.dot(cov12, scipy.linalg.inv(cov22)),
                                      cov21)
            np.save(path_condCov, cond_cov)
        else:
            cond_cov = np.load(path_condCov + '.npy')

        return cond_mean, cond_cov
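These are the standard Gaussian conditioning identities: cond_mean = mu1 + S12 S22^-1 (x2 - mu2) and cond_cov = S11 - S12 S22^-1 S21. A minimal standalone check with an illustrative 3-dimensional Gaussian, conditioning the first component on the other two:

import numpy as np
import scipy.linalg

mu = np.array([0.0, 1.0, -1.0])
cov = np.array([[2.0, 0.5, 0.2],
                [0.5, 1.0, 0.1],
                [0.2, 0.1, 1.5]])
idx, rest = np.array([0]), np.array([1, 2])
x2 = np.array([1.5, -0.5])  # observed values of the conditioning components

dot_for_mean = np.dot(cov[np.ix_(idx, rest)], scipy.linalg.inv(cov[np.ix_(rest, rest)]))
cond_mean = mu[idx] + np.dot(dot_for_mean, x2 - mu[rest])
cond_cov = cov[np.ix_(idx, idx)] - np.dot(dot_for_mean, cov[np.ix_(rest, idx)])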
Example #57
def extract_svd(input_stack, L):
    C = numpy.copy(input_stack)  # temporary array
    print('size of input_stack', numpy.shape(input_stack))
    C = C / numpy.max(numpy.abs(C))

    reps_acs = 16
    mysize = 4
    K = 3  # initial rank; refined below from the singular value spectrum
    half_mysize = mysize // 2  # integer division for use in slicing
    dimension = numpy.ndim(C) - 1  # collapse coil dimension
    if dimension == 1:
        tmp_stack = numpy.empty((mysize, ), dtype=dtype)
        svd_size = mysize
        C_size = numpy.shape(C)[0]
        data = numpy.empty((svd_size, L * reps_acs), dtype=dtype)
#             for jj in xrange(0,L):
#                 C[:,jj]=tailor_fftn(C[:,jj])
#                 for kk in xrange(0,reps_acs):
#                     tmp_stack = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
#                     data[:,jj] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
    elif dimension == 2:
        tmp_stack = numpy.empty((
            mysize,
            mysize,
        ), dtype=dtype)
        svd_size = mysize**2
        data = numpy.empty((svd_size, L * reps_acs), dtype=dtype)
        C_size = numpy.shape(C)[0:2]
        for jj in range(0, L):
            #                 matplotlib.pyplot.imshow(C[...,jj].real)
            #                 matplotlib.pyplot.show()
            #                 tmp_pt=(C_size[0]-reps_acs)/2
            C[:, :, jj] = tailor_fftn(C[:, :, jj])
            acs_side = int(reps_acs**0.5)  # ACS block is acs_side x acs_side
            for kk in range(0, reps_acs):
                a = kk % acs_side
                b = kk // acs_side
                tmp_stack = C[C_size[0] // 2 - half_mysize -
                              acs_side // 2 + a:C_size[0] // 2 +
                              half_mysize - acs_side // 2 + a,
                              C_size[1] // 2 - half_mysize -
                              acs_side // 2 + b:C_size[1] // 2 +
                              half_mysize - acs_side // 2 + b, jj]
                data[:, jj * reps_acs + kk] = numpy.reshape(tmp_stack,
                                                            (svd_size, ),
                                                            order='F')

    elif dimension == 3:
        tmp_stack = numpy.empty((mysize, mysize, mysize), dtype=dtype)
        svd_size = mysize**3
        data = numpy.empty((svd_size, L), dtype=dtype)
        C_size = numpy.shape(C)[0:3]
        for jj in range(0, L):
            C[:, :, :, jj] = tailor_fftn(C[:, :, :, jj])
            tmp_stack = C[C_size[0] // 2 - half_mysize:C_size[0] // 2 +
                          half_mysize, C_size[1] // 2 -
                          half_mysize:C_size[1] // 2 + half_mysize,
                          C_size[2] // 2 - half_mysize:C_size[2] // 2 +
                          half_mysize, jj]
            data[:, jj] = numpy.reshape(tmp_stack, (svd_size, ), order='F')

#         OK, data is the matrix of size (mysize*n, L) for SVD
#         import scipy.linalg
#         import scipy.sparse.linalg
    (s_blah, vh_blah) = scipy.linalg.svd(data)[1:3]

    for jj in range(0, numpy.size(s_blah)):
        # keep singular values above 10% of the maximum to decide the rank
        if s_blah[jj] > 0.1 * s_blah[0]:
            K = jj + 1
        else:
            break

    v_blah = vh_blah.conj().T

    C = C * 0.0  # now C will be used as the output stack
    V_para = v_blah[:, 0:K]
    print('shape of V_para', numpy.shape(V_para))
    acs_side = int(reps_acs**0.5)
    V_para = numpy.reshape(V_para, (acs_side, acs_side, L, K), order='F')

    C2 = numpy.zeros((C.shape[0], C.shape[1], L, K), dtype=dtype)
    for jj in range(0, L):  # coils
        for kk in range(0, K):  # rank
            C2[C.shape[0] // 2 - acs_side // 2:C.shape[0] // 2 +
               acs_side // 2, C.shape[1] // 2 -
               acs_side // 2:C.shape[1] // 2 + acs_side // 2, jj,
               kk] = V_para[:, :, jj, kk]
            C2[:, :, jj, kk] = tailor_fftn(C2[:, :, jj, kk])


#         C_value = numpy.empty_like(C)

    for mm in range(0, C.shape[0]):  # dim 0
        for nn in range(0, C.shape[1]):  # dim 1
            tmp_g = C2[mm, nn, :, :]

            # construct the g matrix for eigen-decomposition
            g = numpy.dot(tmp_g.conj(), tmp_g.T)
            #                 w,v = scipy.linalg.eig(g.astype(dtype), overwrite_a=True,
            #                                        check_finite=False) # eigen value:w, eigen vector: v

            #                 print('L=',L,numpy.shape(g))
            #                 w,v = scipy.sparse.linalg.eigs(g , 3)
            w, v = myeig(g.astype(dtype))

            ind = numpy.argmax(numpy.abs(w))  # find the maximum
            #                 print('ind=',ind)
            #                 the_eig = numpy.abs(w[ind]) # find the abs of maximal eigen value
            tmp_v = v[:, ind]  #*the_eig
            #                 ref_angle=(numpy.sum(v[:,ind])/(numpy.abs(numpy.sum(v[:,ind]))))
            #                 v[:,ind] = v[:,ind]/ref_angle # correct phase by summed value

            ref_angle = numpy.sum(tmp_v)
            ref_angle = ref_angle / numpy.abs(ref_angle)

            C[mm, nn, :] = tmp_v / ref_angle  # correct phase by summed value
    C = C / numpy.max(numpy.abs(C))

    return C
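
A minimal standalone sketch of the rank-selection rule used above (synthetic
data, not an actual ACS calibration matrix): run an SVD on the calibration
matrix and keep the right singular vectors whose singular values exceed 10%
of the largest one.

import numpy
import scipy.linalg

# synthetic low-rank calibration matrix (stand-in for `data` above)
rng = numpy.random.default_rng(0)
data = rng.normal(size=(64, 8)) @ rng.normal(size=(8, 16))

u, s, vh = scipy.linalg.svd(data)
K = int(numpy.sum(s > 0.1 * s[0]))  # rank estimate: 10% of the maximum singular value
V_para = vh.conj().T[:, 0:K]        # dominant right singular vectors
print(K, V_para.shape)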
Example #58
    def float2frmt(self, y):
        """
        Called, among others, by `itemDelegate.displayText()` for on-the-fly
        number conversion. Returns the fixpoint representation of `y` (scalar
        or array-like) with numeric format `self.frmt` and `self.W` bits. The
        result has the same shape as `y`.

        The float is multiplied by `self.scale` and quantized / saturated
        using fixp() for all formats before it is converted to the target
        number format.

        Parameters
        ----------
        y: scalar or array-like
            y has to be an integer or float, passed either as a number or as a
            string.

        Returns
        -------
        A string, a float or an ndarray of float or string is returned in the
        numeric format set in `self.frmt`. It has the same shape as `y`. For all
        formats except `float` a fixpoint representation with `self.W` binary
        digits is returned.
        """

        """
        Define vectorized functions using numpys automatic type casting:
        Vectorized functions for inserting binary point in string `bin_str`
        after position `pos`.

        Usage:  insert_binary_point(bin_str, pos)

        Parameters: bin_str : string
                    pos     : integer
        """
        insert_binary_point = np.vectorize(lambda bin_str, pos:(
                                    bin_str[:pos+1] + "." + bin_str[pos+1:]))

        binary_repr_vec = np.frompyfunc(np.binary_repr, 2, 1)
        #======================================================================

        if self.frmt == 'float': # return float input value unchanged (no string)
            return y
        elif self.frmt == 'float32':
            return np.float32(y)
        elif self.frmt == 'float16':
            return np.float16(y)

        elif self.frmt in {'hex', 'bin', 'dec', 'csd'}:
            # return a quantized & saturated / wrapped fixpoint (type float) for y
            y_fix = self.fixp(y, scaling='mult')

            if self.frmt == 'dec':
                if self.WF == 0:
                    # integer case: drop the trailing '.0'
                    y_str = np.int64(y_fix)
                else:
                    y_str = y_fix
            elif self.frmt == 'csd':
                y_str = dec2csd_vec(y_fix, self.WF) # convert with WF fractional bits

            else: # bin or hex
                # represent the fixpoint number as an integer in the range
                # -2**(W-1) ... 2**(W-1) - 1
                y_fix_int = np.int64(np.round(y_fix / self.LSB))
                # convert to (array of) string with 2's complement binary
                y_bin_str = binary_repr_vec(y_fix_int, self.W)

                if self.frmt == 'hex':
                    y_str = bin2hex_vec(y_bin_str, self.WI)

                else: # self.frmt == 'bin':
                    # insert radix point if required
                    if self.WF > 0:
                        y_str = insert_binary_point(y_bin_str, self.WI)
                    else:
                        y_str = y_bin_str

            if isinstance(y_str, np.ndarray) and np.ndim(y_str) < 1:
                y_str = y_str.item() # convert singleton array to scalar

            return y_str
        else:
            raise Exception('Unknown output format "%s"!' % self.frmt)
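
A quick standalone illustration of the two vectorized helpers defined in
float2frmt() above (the word lengths W and WI are assumed values here, not
taken from the class):

import numpy as np

binary_repr_vec = np.frompyfunc(np.binary_repr, 2, 1)
insert_binary_point = np.vectorize(
    lambda bin_str, pos: bin_str[:pos + 1] + "." + bin_str[pos + 1:])

W, WI = 8, 3                      # assumed: 8-bit word with 3 integer bits
y_fix_int = np.array([-3, 0, 5])  # already quantized two's-complement integers
y_bin = binary_repr_vec(y_fix_int, W)    # e.g. '11111101' for -3
print(insert_binary_point(y_bin, WI))    # ['1111.1101' '0000.0000' '0000.0101']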
Example #59
def smooth_volume(data_img,
                  fwhm,
                  mask_img=None,
                  noise_img=None,
                  inplace=False):
    """Filter volume data with an isotropic gaussian kernel.

    Smoothing can be constrained to occur within the voxels defined within
    ``mask_img`` and optionally can ignore/interpolate out the voxels
    identified within ``noise_img``.

    Parameters
    ----------
    data_img : nibabel image
        3D or 4D image data.
    fwhm : positive float
        Size of isotropic smoothing kernel in mm.
    mask_img : nibabel image
        3D binary image defining smoothing range.
    noise_img : nibabel image
        3D binary image defining voxels to be interpolated out.
    inplace : bool
        If True, overwrite data in data_img. Otherwise perform a copy.

    Returns
    -------
    smooth_data : nibabel image
        Image like ``data_img`` but after smoothing.

    """
    data = data_img.get_data().astype(float, copy=not inplace)

    if np.ndim(data) == 3:
        need_squeeze = True
        data = np.expand_dims(data, -1)
    else:
        need_squeeze = False

    if mask_img is None:
        mask = np.ones(data.shape[:3], dtype=bool)
    else:
        mask = mask_img.get_data().astype(bool)
    smooth_from = mask.copy()

    if noise_img is not None:
        smooth_from &= ~noise_img.get_data().astype(bool)

    sigma = voxel_sigmas(fwhm, data_img)
    norm = gaussian_filter(smooth_from.astype(float), sigma)
    valid_norm = norm > 0

    for f in range(data.shape[-1]):
        with np.errstate(all="ignore"):
            data_f = gaussian_filter(data[..., f] * smooth_from, sigma) / norm
        data_f[~valid_norm] = 0
        data[..., f] = data_f

    data[~mask] = 0

    if need_squeeze:
        data = data.squeeze()

    return nib.Nifti1Image(data, data_img.affine, data_img.header)
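
The division by `norm` above is a normalized convolution: smoothing
`data * mask` and dividing by the smoothed mask keeps the zeros outside the
mask from dragging down values near its edge. A minimal sketch of the same
trick on a plain array (synthetic data, no nibabel objects):

import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
data = rng.normal(size=(32, 32, 32))
mask = np.zeros((32, 32, 32), dtype=bool)
mask[8:24, 8:24, 8:24] = True

sigma = 2.0
norm = gaussian_filter(mask.astype(float), sigma)
with np.errstate(all="ignore"):
    smoothed = gaussian_filter(data * mask, sigma) / norm
smoothed[norm == 0] = 0  # voxels with no mask support are set to zero
smoothed[~mask] = 0      # restrict the output to the mask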
Example #60
def btens_to_params(btens, ztol=1e-10):
    r"""Compute trace, anisotropy and assymetry parameters from b-tensors

    Parameters
    ----------
    btens : (3, 3) OR (N, 3, 3) numpy.ndarray
        input b-tensor, or b-tensors, where N = number of b-tensors
    ztol : float
        Any parameters smaller than this value are considered to be 0

    Returns
    -------
    bval: numpy.ndarray
        b-value(s) (trace(s))
    bdelta: numpy.ndarray
        normalized tensor anisotropy value(s)
    b_eta: numpy.ndarray
        tensor asymmetry value(s)

    Notes
    -----
    This function can be used to get b-tensor parameters directly from the
    GradientTable `btens` attribute.

    Examples
    --------
    >>> lte = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
    >>> bval, bdelta, b_eta = btens_to_params(lte)
    >>> print("bval={}; bdelta={}; b_eta={}".format(bdelta, bval, b_eta))
    bval=[ 1.]; bdelta=[ 1.]; b_eta=[ 0.]

    """
    # Bad input checks
    value_error_msg = "`btens` must be a 2D or 3D numpy array, respectively" \
                      " with (3, 3) or (N, 3, 3) shape, where N corresponds" \
                      " to the number of b-tensors"
    if not isinstance(btens, np.ndarray):
        raise ValueError(value_error_msg)

    nd = np.ndim(btens)
    if nd == 2:
        btens_shape = btens.shape
    elif nd == 3:
        btens_shape = btens.shape[1:]
    else:
        raise ValueError(value_error_msg)

    if btens_shape != (3, 3):
        raise ValueError(value_error_msg)

    # Reshape so that loop below works when only one input b-tensor is provided
    if nd == 2:
        btens = btens.reshape((1, 3, 3))

    # Pre-allocate
    n_btens = btens.shape[0]
    bval = np.empty(n_btens)
    bdelta = np.empty(n_btens)
    b_eta = np.empty(n_btens)

    # Loop over b-tensor(s)
    for i in range(btens.shape[0]):
        i_btens = btens[i, :, :]
        i_bval, i_bdelta, i_b_eta = _btens_to_params_2d(i_btens, ztol)
        bval[i] = i_bval
        bdelta[i] = i_bdelta
        b_eta[i] = i_b_eta

    return bval, bdelta, b_eta
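
A short usage sketch, assuming the btens_to_params defined above is in scope:
a linear b-tensor should yield bdelta = 1 and a spherical (isotropic) one
bdelta = 0.

import numpy as np

lte = np.array([[1., 0., 0.], [0., 0., 0.], [0., 0., 0.]])  # linear encoding
ste = np.eye(3) / 3.0                                       # spherical encoding
bval, bdelta, b_eta = btens_to_params(np.stack((lte, ste)))
print(bval)    # traces      -> [1. 1.]
print(bdelta)  # anisotropy  -> [1. 0.]
print(b_eta)   # asymmetry   -> [0. 0.]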