Example #1
def one_module(p1, p2, flat=False):
    """
    :param p1: actual coordinate of the point close to the origin
    :param p2: actual coordinate of the point close to the end of the first line
    :param flat: if True, project both points onto the z = 0 plane
    :param dx: number of pixels in a line (module-level global)
    :param dy: number of pixels in a column (module-level global)
    :param px: pixel size in x (the code below uses the single global ``pix``)
    :param py: pixel size in y
    :return: 3 arrays of shape (dy+1, dx+1) with the y, x and z corner positions
    """
    xyz1 = p1[1:4] / 1000.0  # in meter
    xyz2 = p2[1:4] / 1000.0  # in meter
    if flat:
        xyz1[2] = 0
        xyz2[2] = 0
    x = xyz2 - xyz1
    x /= numpy.linalg.norm(x)
    z = numpy.array([0., 0., 1.])
    y = numpy.cross(z, x)
    z = numpy.cross(x, y)
    m = pix * numpy.vstack((x, y, z))  # pix, dx and dy are module-level globals
    vol_xyz = numpy.zeros((dy + 1, dx + 1, 3))
    vol_xyz[:, :, 1] = numpy.outer(numpy.arange(0, dy + 1), numpy.ones(dx + 1))
    vol_xyz[:, :, 0] = numpy.outer(numpy.ones(dy + 1), numpy.arange(dx + 1))
    n = numpy.dot(vol_xyz, m) + xyz1
    return numpy.ascontiguousarray(n[:, :, 1]), numpy.ascontiguousarray(n[:, :, 0]), numpy.ascontiguousarray(n[:, :, 2])
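A minimal usage sketch for one_module. The function reads the globals dx, dy and pix from its own module, so this would have to run there; the values below are invented stand-ins.

import numpy

dx, dy = 4, 3        # assumed: pixels per line / per column
pix = 75e-6          # assumed: pixel pitch in meters

p1 = numpy.array([0.0, 0.0, 0.0, 1000.0])    # (id, x, y, z) in millimeters
p2 = numpy.array([0.0, 300.0, 0.0, 1000.0])
ys, xs, zs = one_module(p1, p2, flat=True)
print(xs.shape)      # (dy + 1, dx + 1) == (4, 5)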
Example #2
def filter2d_cv(src, kernel, anchor=(-1, -1)):
    # cv2.filter2D requires C-contiguous inputs; copy only when necessary
    if not kernel.flags['C_CONTIGUOUS']:
        kernel = numpy.ascontiguousarray(kernel, 'f')
    if not src.flags['C_CONTIGUOUS']:
        src = numpy.ascontiguousarray(src)
    # pass anchor through (the original accepted it but never used it)
    dst = cv2.filter2D(src, -1, kernel, anchor=anchor, borderType=cv2.BORDER_CONSTANT)
    return dst
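A quick smoke test for filter2d_cv with a 3x3 box kernel; the array names here are just for illustration.

import numpy
import cv2

img = numpy.random.rand(8, 8).astype(numpy.float32)
box = numpy.ones((3, 3), numpy.float32) / 9.0
smoothed = filter2d_cv(img, box)    # same shape and dtype as img
print(smoothed.shape)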
Example #3
def convert_leaves_all_probs_pred_old(image, leaves, all_probs, num_leaves, classifiers_fn=None):
    global CLASSIFIERS, CLASSIFIER_FEATURE
    if classifiers_fn is None:
        classifiers_fn = os.environ['CLASSIFIERS_FN']
    get_classifier_confidence = lambda x: x[0][0] * x[0][1]
    if CLASSIFIERS is None:
        all_classifiers = sorted(file_parse.load(classifiers_fn))
        name_classifiers = []
        for x in range(len(all_classifiers)):
            if x < len(all_classifiers):  # TODO(brandyn): Fix memory issue so that we can use the last classifier too
                name_classifiers.append((all_classifiers[x][0],
                                         classifiers.loads(all_classifiers[x][1])))
            else:
                name_classifiers.append((all_classifiers[x][0],
                                         name_classifiers[-1][1]))
            all_classifiers[x] = None  # NOTE(Brandyn): This is done to save memory
        print('ILP Classifiers %r' % ([x for x, _ in name_classifiers],))
        CLASSIFIERS = [x for _, x in name_classifiers]
    if CLASSIFIER_FEATURE is None:
        CLASSIFIER_FEATURE = features.select_feature('bovw_hog')
    feature = CLASSIFIER_FEATURE(np.ascontiguousarray(image[:, :, :3]))
    preds = np.ascontiguousarray([get_classifier_confidence(classifier.predict(feature))
                                  for classifier in CLASSIFIERS], dtype=np.float64)
    out0 = imseg.convert_labels_to_integrals(leaves, num_leaves)
    out1 = imseg.convert_all_probs_to_integrals(all_probs)
    return preds, np.ascontiguousarray(np.dstack([out0, out1]))
Example #4
    def getPeakProperty(self, p_name):
        """
        Return a numpy array containing the requested property.
        """
        if p_name not in self.peak_properties:
            raise MultiFitterException("No such property '" + p_name + "'")

        # Properties that are calculated from other properties.
        if(self.peak_properties[p_name] == "compound"):

            # Return 0 length array if there are no localizations.
            if(self.getNFit() == 0):
                return numpy.zeros(0, dtype = numpy.float64)
                
            # Peak significance calculation.
            if(p_name == "significance"):
                bg_sum = self.getPeakProperty("bg_sum")
                fg_sum = self.getPeakProperty("fg_sum")
                return fg_sum/numpy.sqrt(bg_sum)
            
        # Floating point properties.
        elif(self.peak_properties[p_name] == "float"):
            values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.float64))
            self.clib.mFitGetPeakPropertyDouble(self.mfit,
                                                values,
                                                ctypes.c_char_p(p_name.encode()))
            return values

        # Integer properties.
        elif(self.peak_properties[p_name] == "int"):
            values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.int32))
            self.clib.mFitGetPeakPropertyInt(self.mfit,
                                             values,
                                             ctypes.c_char_p(p_name.encode()))
            return values
Example #5
    def __init__(self, tracks, colors=None, line_width=2., affine=None):
        if affine is None:
            self.affine = np.eye(4)
        else:
            self.affine = affine
        self.tracks_no = len(tracks)
        self.tracks_len = [len(t) for t in tracks]
        self.tracks = tracks
        self.vertices = np.ascontiguousarray(np.concatenate(self.tracks).astype('f4'))
        if colors is None:
            self.colors = np.ascontiguousarray(np.ones((len(self.vertices), 4)).astype('f4'))
        else:
            if isinstance(colors, (list, tuple)):
                # tile one RGBA per vertex before converting below
                colors = np.tile(colors, (np.sum(self.tracks_len), 1))
            self.colors = np.ascontiguousarray(colors.astype('f4'))
        self.vptr = self.vertices.ctypes.data
        self.cptr = self.colors.ctypes.data
        self.count = np.array(self.tracks_len, dtype=np.int32)
        self.first = np.r_[0, np.cumsum(self.count)[:-1]].astype(np.int32)
        self.firstptr = self.first.ctypes.data
        self.countptr = self.count.ctypes.data
        self.line_width = line_width
        self.items = self.tracks_no
        self.show_aabb = False
        mn = self.vertices.min()
        mx = self.vertices.max()
        self.make_aabb((np.array([mn, mn, mn]), np.array([mx, mx, mx])), margin=0)
Example #6
def test_cmyk():
    ref = imread(os.path.join(data_dir, 'color.png'))

    img = Image.open(os.path.join(data_dir, 'color.png'))
    img = img.convert('CMYK')

    f = NamedTemporaryFile(suffix='.jpg')
    fname = f.name
    f.close()
    img.save(fname)
    try:
        img.close()
    except AttributeError:  # `close` not available on PIL
        pass

    new = imread(fname)

    ref_lab = rgb2lab(ref)
    new_lab = rgb2lab(new)

    for i in range(3):
        newi = np.ascontiguousarray(new_lab[:, :, i])
        refi = np.ascontiguousarray(ref_lab[:, :, i])
        sim = ssim(refi, newi, dynamic_range=refi.max() - refi.min())
        assert sim > 0.99
Example #7
def test_mem_layout():
    # Test with different memory layouts of X and y
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
Example #8
def multiple_mahalanobis(effect, covariance):
    """Returns the squared Mahalanobis distance for a given set of samples

    Parameters
    ----------
    effect: array of shape (n_features, n_samples),
        Each column represents a vector to be evaluated

    covariance: array of shape (n_features, n_features, n_samples),
        Corresponding covariance models stacked along the last axis

    Returns
    -------
    sqd: array of shape (n_samples,)
         the squared distances (one per sample)
    """ 
    # check size
    if effect.ndim == 1:
        effect = effect[:, np.newaxis]
    if covariance.ndim == 2:
        covariance = covariance[:, :, np.newaxis]
    if effect.shape[0] != covariance.shape[0]:
        raise ValueError('Inconsistent shape for effect and covariance')
    if covariance.shape[0] != covariance.shape[1]:
        raise ValueError('Inconsistent shape for covariance')

    # transpose and make contiguous for the sake of speed
    Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T)

    # compute the inverse of the covariances
    Kt = multiple_fast_inv(Kt)

    # derive the squared Mahalanobis distances
    sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1)
    return sqd
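A hedged sanity check for multiple_mahalanobis (it assumes multiple_fast_inv from the same module is importable): each result should equal x.T K^{-1} x computed directly.

import numpy as np

rng = np.random.RandomState(0)
effect = rng.randn(3, 5)                  # 3 features, 5 samples
A = rng.randn(3, 3)
K = A.dot(A.T) + 3 * np.eye(3)            # one SPD covariance
covariance = np.repeat(K[:, :, np.newaxis], 5, axis=2)

sqd = multiple_mahalanobis(effect, covariance)
x0 = effect[:, 0]
assert np.allclose(sqd[0], x0.dot(np.linalg.inv(K)).dot(x0))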
Example #9
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float64),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float64))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
Example #10
def propagate(image, labels, mask, weight):
    """Propagate the labels to the nearest pixels
    
    image - gives the Z height when computing distance
    labels - the labeled image pixels
    mask   - only label pixels within the mask
    weight - the weighting of x/y distance vs z distance
             high numbers favor x/y, low favor z
    
    returns a label matrix and the computed distances
    """
    if image.shape != labels.shape:
        raise ValueError("Image shape %s != label shape %s"%(repr(image.shape),repr(labels.shape)))
    if image.shape != mask.shape:
        raise ValueError("Image shape %s != mask shape %s"%(repr(image.shape),repr(mask.shape)))
    labels_out = np.zeros(labels.shape, np.int32)
    distances  = -np.ones(labels.shape,np.float64)
    distances[labels > 0] = 0
    labels_and_mask = np.logical_and(labels != 0, mask)
    coords = np.argwhere(labels_and_mask)
    i1,i2 = _propagate.convert_to_ints(0.0)
    ncoords = coords.shape[0]
    pq = np.column_stack((np.ones((ncoords,),int) * i1,
                             np.ones((ncoords,),int) * i2,
                             labels[labels_and_mask],
                             coords))
    _propagate.propagate(np.ascontiguousarray(image,np.float64),
                         np.ascontiguousarray(pq,np.int32),
                         np.ascontiguousarray(mask,np.int8),
                         labels_out, distances, float(weight))
    labels_out[labels > 0] = labels[labels > 0]
    return labels_out,distances
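A small, hedged example of propagate (it needs the compiled _propagate extension from the original package): two seed labels compete for the unlabeled pixels of a flat image.

import numpy as np

image = np.zeros((5, 5))                  # flat Z height
labels = np.zeros((5, 5), np.int32)
labels[0, 0] = 1                          # seed for label 1
labels[4, 4] = 2                          # seed for label 2
mask = np.ones((5, 5), bool)

labels_out, distances = propagate(image, labels, mask, weight=1.0)
print(labels_out)                         # every pixel assigned to the nearer seed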
Example #11
def sorted_points_and_ids(xin, yin, zin, xperiod, yperiod, zperiod,
    approx_xcell_size, approx_ycell_size, approx_zcell_size):
    """ Determine the cell_id of every point, sort the points 
    according to cell_id, and return the sorted points as well as 
    the cell id indexing array. 

    Notes 
    -----
    The x-coordinates of points with cell_id = icell are given by 
    xout[cell_id_indices[icell]:cell_id_indices[icell+1]]. 
    """
    npts = len(xin)
    num_xdivs, xcell_size = determine_cell_size(xperiod, approx_xcell_size)
    num_ydivs, ycell_size = determine_cell_size(yperiod, approx_ycell_size)
    num_zdivs, zcell_size = determine_cell_size(zperiod, approx_zcell_size)
    ncells = num_xdivs*num_ydivs*num_zdivs

    ix = digitized_position(xin, xcell_size, num_xdivs)
    iy = digitized_position(yin, ycell_size, num_ydivs)
    iz = digitized_position(zin, zcell_size, num_zdivs)

    cell_ids = cell_id_from_cell_tuple(ix, iy, iz, num_ydivs, num_zdivs)
    cell_id_sorting_indices = np.argsort(cell_ids)

    cell_id_indices = np.searchsorted(cell_ids, np.arange(ncells), 
        sorter = cell_id_sorting_indices)
    cell_id_indices = np.append(cell_id_indices, npts)

    xout = np.ascontiguousarray(xin[cell_id_sorting_indices], dtype=np.float64)
    yout = np.ascontiguousarray(yin[cell_id_sorting_indices], dtype=np.float64)
    zout = np.ascontiguousarray(zin[cell_id_sorting_indices], dtype=np.float64)

    cell_id_indices = np.ascontiguousarray(cell_id_indices, dtype=np.int64)

    return xout, yout, zout, cell_id_indices
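The Notes above describe how cell_id_indices slices the sorted output; a hedged sketch of that convention (the helper functions come from the same module, and the point data are invented):

import numpy as np

npts = 1000
x, y, z = np.random.uniform(0, 250.0, (3, npts))
xout, yout, zout, cell_id_indices = sorted_points_and_ids(
    x, y, z, 250.0, 250.0, 250.0, 25.0, 25.0, 25.0)

icell = 7
x_in_cell = xout[cell_id_indices[icell]:cell_id_indices[icell + 1]]
print(len(x_in_cell))    # number of points whose cell_id == 7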
Example #12
def dbscan(x, y, z, c, eps, min_points, z_factor = 0.5, verbose = True):
    n_peaks = x.size

    l = numpy.zeros(n_peaks, dtype = numpy.int32)

    c_x = numpy.ascontiguousarray(x.astype(numpy.float32))
    c_y = numpy.ascontiguousarray(y.astype(numpy.float32))
    c_z = numpy.ascontiguousarray(z.astype(numpy.float32))*z_factor
    c_c = numpy.ascontiguousarray(c.astype(numpy.int32))
    c_l = numpy.ascontiguousarray(l)
    lib_dbscan.dbscan(c_x,
                      c_y,
                      c_z,
                      c_c,
                      c_l,
                      n_peaks,
                      eps,
                      min_points,
                      int(verbose))

    # Print number of clusters
    if verbose:
        n_clusters_ = len(set(c_l)) - (1 if -1 in c_l else 0)
        print('Estimated number of clusters: %d' % n_clusters_)

    return c_l
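A hedged call sketch for dbscan; lib_dbscan is the compiled library loaded elsewhere in the original module, and the meaning of c is assumed here to be a per-peak category/channel.

import numpy

n = 200
x = numpy.random.uniform(0, 50.0, n)
y = numpy.random.uniform(0, 50.0, n)
z = numpy.random.uniform(-0.3, 0.3, n)    # z is down-weighted by z_factor
c = numpy.zeros(n)                        # assumed per-peak category
cluster_labels = dbscan(x, y, z, c, eps=0.5, min_points=5, verbose=True)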
Example #13
def _handle_input(image, selem, out, mask, out_dtype=None):

    if image.dtype not in (np.uint8, np.uint16):
        image = img_as_ubyte(image)

    selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
    image = np.ascontiguousarray(image)

    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)

    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty_like(image, dtype=out_dtype)

    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")

    is_8bit = image.dtype in (np.uint8, np.int8)

    if is_8bit:
        max_bin = 255
    else:
        max_bin = max(4, image.max())

    bitdepth = int(np.log2(max_bin))
    if bitdepth > 10:
        warnings.warn("Bitdepth of %d may result in bad rank filter "
                      "performance due to large number of bins." % bitdepth)

    return image, selem, out, mask, max_bin
Example #14
    def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
        """
        Set mesh data.

        Parameters
        ----------
        coors : array
            Coordinates of mesh nodes.
        ngroups : array
            Node groups.
        conns : list of arrays
            The array of mesh elements (connectivities) for each element group.
        mat_ids : list of arrays
            The array of material ids for each element group.
        descs : list of strings
            The element type for each element group.
        nodal_bcs : dict of arrays, optional
            The nodes defining regions for boundary conditions referred
            to by the dict keys in problem description files.
        """
        self.coors = nm.ascontiguousarray(coors)

        if ngroups is None:
            self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)

        else:
            self.ngroups = nm.ascontiguousarray(ngroups)

        self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
        self.mat_ids = [nm.asarray(mat_id, dtype=nm.int32)
                        for mat_id in mat_ids]
        self.descs = descs
        self.nodal_bcs = get_default(nodal_bcs, {})
Example #15
    def __iter__(self):
        ''' This is where all the fun starts '''
        x, y, z, g = self.a.shape
        # for all seeds
        for i in range(self.seed_no):
            if self.seed_list is None:
                rx = (x - 1) * np.random.rand()
                ry = (y - 1) * np.random.rand()
                rz = (z - 1) * np.random.rand()
                seed = np.ascontiguousarray(
                    np.array([rx, ry, rz]), dtype=np.float64)
            else:
                seed = np.ascontiguousarray(
                    self.seed_list[i], dtype=np.float64)
            # for all peaks
            for ref in range(g):
                track = eudx_both_directions(seed.copy(),
                                             ref,
                                             self.a,
                                             self.ind,
                                             self.odf_vertices,
                                             self.a_low,
                                             self.ang_thr,
                                             self.step_sz,
                                             self.total_weight,
                                             self.max_points)

                if track is not None and track.shape[0] > 1:
                    yield track + self.voxel_shift
Example #16
def connect_extrema(im_pos, target, markers, visualize=False):
	'''
	im_pos : XYZ positions of each point in image formation (n x m x 3)
	'''
	height, width,_ = im_pos.shape
	centroid = np.array(target)

	im_pos = np.ascontiguousarray(im_pos.astype(np.int16))
	cost_map = np.ascontiguousarray(np.zeros([height, width], dtype=np.uint16))

	extrema = dgn.geodesic_map_MPI(cost_map, im_pos, np.array(centroid, dtype=np.int16), 1, 1)
	cost_map = extrema[-1]

	trails = []
	for m in markers:
		trail = dgn.geodesic_trail(cost_map.copy()+(32000*(im_pos[:,:,2]==0)).astype(np.uint16), np.array(m, dtype=np.int16))
		trails += [trail.copy()]
	if visualize:
		cost_map = deepcopy(cost_map)
		circ = circle(markers[0][0],markers[0][1], 5)
		circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
		circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
		cost_map[circ[0], circ[1]] = 0
		for i,t in enumerate(trails[1:]):
			# embed()
			cost_map[t[:,0], t[:,1]] = 0
			circ = circle(markers[i+1][0],markers[i+1][1], 5)
			circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
			circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
			cost_map[circ[0], circ[1]] = 0
		return trails, cost_map
	else:
		return trails
Example #17
    def read_sparse_array(self, hdr):
        ''' Read sparse matrix type

        Matlab (TM) 4 real sparse arrays are saved in an N+1 by 3 array
        format, where N is the number of non-zero values.  Column 1 values
        [0:N] are the (1-based) row indices of each non-zero value,
        column 2 [0:N] are the column indices, column 3 [0:N] are the
        (real) values.  The last values [-1,0:2] of the rows, column
        indices are shape[0] and shape[1] respectively of the output
        matrix. The last value for the values column is a padding 0. mrows
        and ncols values from the header give the shape of the stored
        matrix, here [N+1, 3].  Complex data is saved as a 4 column
        matrix, where the fourth column contains the imaginary component;
        the last value is again 0.  Complex sparse data do _not_ have the
        header imagf field set to True; the fact that the data are complex
        is only detectable because there are 4 storage columns
        '''
        res = self.read_sub_array(hdr)
        tmp = res[:-1,:]
        dims = res[-1,0:2]
        I = np.ascontiguousarray(tmp[:,0],dtype='intc') #fixes byte order also
        J = np.ascontiguousarray(tmp[:,1],dtype='intc')
        I -= 1  # convert from MATLAB's 1-based to 0-based indexing
        J -= 1
        if res.shape[1] == 3:
            V = np.ascontiguousarray(tmp[:,2],dtype='float')
        else:
            V = np.ascontiguousarray(tmp[:,2],dtype='complex')
            V.imag = tmp[:,3]
        return scipy.sparse.coo_matrix((V,(I,J)), dims)
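A worked example of the storage format the docstring describes: a 2x3 real matrix with nonzeros at (1,1)=10 and (2,3)=20 (1-based) is stored as the N+1 by 3 array below, and decodes exactly as read_sparse_array does.

import numpy as np
import scipy.sparse

res = np.array([[1.0, 1.0, 10.0],    # row, col, value (1-based)
                [2.0, 3.0, 20.0],
                [2.0, 3.0, 0.0]])    # last row: shape (2, 3) plus padding 0
tmp, dims = res[:-1, :], res[-1, 0:2]
I = np.ascontiguousarray(tmp[:, 0], dtype='intc') - 1
J = np.ascontiguousarray(tmp[:, 1], dtype='intc') - 1
V = np.ascontiguousarray(tmp[:, 2], dtype='float')
m = scipy.sparse.coo_matrix((V, (I, J)), tuple(dims.astype(int)))
print(m.toarray())    # [[10.  0.  0.] [ 0.  0. 20.]]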
Example #18
    def _get_cache_key(self, solver_fn, solver, neuron_type, gain, bias,
                       x, targets, rng, E):
        h = hashlib.sha1()

        if PY2:
            h.update(str(Fingerprint(solver_fn)))
            h.update(str(Fingerprint(solver)))
            h.update(str(Fingerprint(neuron_type)))
        else:
            h.update(str(Fingerprint(solver_fn)).encode('utf-8'))
            h.update(str(Fingerprint(solver)).encode('utf-8'))
            h.update(str(Fingerprint(neuron_type)).encode('utf-8'))

        h.update(np.ascontiguousarray(gain).data)
        h.update(np.ascontiguousarray(bias).data)
        h.update(np.ascontiguousarray(x).data)
        h.update(np.ascontiguousarray(targets).data)

        # rng format doc:
        # noqa <http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.get_state.html#numpy.random.RandomState.get_state>
        state = rng.get_state()
        h.update(state[0].encode())  # string 'MT19937'
        h.update(state[1].data)  # 1-D array of 624 unsigned integer keys
        h.update(struct.pack('q', state[2]))  # integer pos
        h.update(struct.pack('q', state[3]))  # integer has_gauss
        h.update(struct.pack('d', state[4]))  # float cached_gaussian

        if E is not None:
            h.update(np.ascontiguousarray(E).data)
        return h.hexdigest()
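Why the ascontiguousarray calls above matter: hashlib consumes raw buffers, and a non-contiguous view does not expose one. A minimal sketch:

import hashlib
import numpy as np

a = np.arange(12).reshape(3, 4)[:, ::2]    # non-contiguous view
h = hashlib.sha1()
# h.update(a.data) would fail here because the buffer is not C-contiguous
h.update(np.ascontiguousarray(a).data)
print(h.hexdigest())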
Example #19
def equalize_lib_sizes(counts, groups, lib_size, dispersion=0, common_size=None):
    """ Equalize the library sizes
    """
    if not common_size:
        common_size = np.exp(np.mean(np.log(lib_size)))
    try:
        len(dispersion)
    except TypeError:
        dispersion = np.repeat(dispersion, counts.shape[0])
    input_mean = np.empty(counts.shape, dtype=np.double)
    output_mean = input_mean.copy()
    for key, group in groups.items():
        beta = glm_one_group_numba(
                np.ascontiguousarray(counts.iloc[:, group].values,
                    dtype=np.int32),
                dispersion,
                np.ascontiguousarray(np.log(lib_size[group]),
                    dtype=np.double)
                )
        beta = np.asarray(beta)
        bn_lambda = np.exp(beta).T.reshape(len(beta),1)
        temp_lib_size = np.array(lib_size[group]).reshape(1,
                len(lib_size[group]))
        out_size = np.repeat(common_size, len(group)).reshape(1,
                len(group))
        input_mean[:, group] = np.dot(bn_lambda, temp_lib_size)
        output_mean[:, group] = np.dot(bn_lambda, 
                out_size)
    pseudo = q2qnbinom(np.asarray(counts),
            input_mean, output_mean, dispersion)
    pseudo[pseudo < 0] = 0
    return pseudo, common_size
Example #20
    def fit(self, X, y, k=None, sorted=False):
        """
        Fit the estimator.

        :param X: sample data, shape (n_samples, n_features) or its transpose
        :param y: target values, shape (n_samples,)
        :param k: per-feature parameter list; defaults to 3 for every feature
        :param sorted: whether the input data are already sorted
        """
        X = np.asarray(X)
        y = np.asarray(y)
        if not X.flags['C_CONTIGUOUS']:
            X = np.ascontiguousarray(X)
        if not y.flags['C_CONTIGUOUS']:
            y = np.ascontiguousarray(y)
        assert y.ndim == 1
        if X.ndim > 1:
            assert X.ndim==2 and (X.shape[0] == y.shape[0] or X.shape[1] == y.shape[0])

            if(X.shape[0] != y.shape[0]):
                X = X.transpose()
            self.N = X.shape[1]
        else:
            assert X.shape[0] == y.shape[0]
            self.N = 1
        if k is None:
            k = [3] * self.N
        k = list(k)
        assert len(k) == self.N

        self.f.fit(X,y,k,sorted)
Example #21
def DoubleInPointerFilter_assign_fail_test():
  import numpy as np
  from ATK.Core import DoubleInPointerFilter
  d = np.ascontiguousarray(np.arange(1000, dtype=np.float64)[None,:])
  filter = DoubleInPointerFilter(d, False)
  d = np.ascontiguousarray(np.arange(1000, dtype=np.float64).reshape(2,-1))
  filter.set_pointer(d)
Example #22
def hist_3d_index(x, y, z, shape):
    """
    Fast 3D histogram of 3D indices with C++ inner-loop optimization.
    It is more than two orders of magnitude faster than np.histogramdd() and uses less RAM.
    The indices are given as x, y, z coordinates and have to fit into a histogram of the given dimensions (shape).
    
    Parameters
    ----------
    x : array like
    y : array like
    z : array like
    shape : tuple
        tuple with x,y,z dimensions: (x, y, z)

    Returns
    -------
    np.ndarray with given shape

    """
    if len(shape) != 3:
        raise ValueError('The shape has to describe a 3-d histogram')
    # change memory alignment for c++ library
    x = np.ascontiguousarray(x.astype(np.int32))
    y = np.ascontiguousarray(y.astype(np.int32))
    z = np.ascontiguousarray(z.astype(np.int32))
    result = np.zeros(shape=shape, dtype=np.uint32).ravel()  # ravel hist in c-style, 3D --> 1D
    compiled_analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result)
    return np.reshape(result, shape)  # rebuilt 3D hist from 1D hist
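For checking results, a pure-NumPy equivalent can be written with ravel_multi_index and bincount. This is a sketch for reference only, not part of the original module, and it is much slower than the C++ path:

import numpy as np

def hist_3d_numpy(x, y, z, shape):
    # map (x, y, z) index triples to linear C-order indices, then count them
    lin = np.ravel_multi_index((x.astype(np.int32),
                                y.astype(np.int32),
                                z.astype(np.int32)), shape)
    return np.bincount(lin, minlength=np.prod(shape)).astype(np.uint32).reshape(shape)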
Example #23
    def __init__(self, x, y, z, Lbox, cell_size):
        """
        Initialize the grid. 

        Parameters 
        ----------
        x, y, z : arrays
            Length-Npts arrays containing the spatial position of the Npts points. 
        
        Lbox : float
            Length scale defining the periodic boundary conditions

        cell_size : float 
            The approximate cell size into which the box will be divided. 
        """

        self.cell_size = cell_size.astype(float)
        self.Lbox = Lbox.astype(float)
        self.num_divs = np.floor(Lbox/cell_size).astype(int)
        self.dL = Lbox/self.num_divs
        
        #build grid tree
        idx_sorted, slice_array = self.compute_cell_structure(x, y, z)
        self.x = np.ascontiguousarray(x[idx_sorted],dtype=np.float64)
        self.y = np.ascontiguousarray(y[idx_sorted],dtype=np.float64)
        self.z = np.ascontiguousarray(z[idx_sorted],dtype=np.float64)
        self.slice_array = slice_array
        self.idx_sorted = idx_sorted
Example #24
def do_parameter_selection(argv):
    path, test_path = argv
    params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
              'min_samples_leaf': 1, 'random_state': None, 'do_consider_correct': 1,
              'learn_rate': 0.2, 'n1': 2000, 'n2': 1, 'tau': 0.01}
    print('loading data...')
    X, dr, sr, groups = load_dataset(path)
    test_X, test_rd, test_rs, test_groups = load_dataset(test_path)

#    test_X = np.asfortranarray(test_X, dtype=DTYPE)
    test_rd = np.ascontiguousarray(test_rd)
    test_rs = np.ascontiguousarray(test_rs)
    test_docpair_samples = DocPairSampler(np.random.RandomState()).sample(test_rd, test_groups, 20000)

    from sklearn.grid_search import IterGrid
    param_grid = IterGrid({'n_estimators': [200, 400, 600, 800, 1000], 'n1': [1000, 2000, 5000], 'learn_rate': [.1, .2, .3]})
    for param in param_grid:
        print(param)
        params.update(param)
        ranker = GradientBoostingRanker(**params)
        ranker.fit(X, dr, sr, groups)
        test_y_pred = ranker.predict(test_X)
        test_pred_sort_groups = PredictSortGroups(test_y_pred, test_groups)
        test_loss = ranker.loss_(test_rd, test_rs, test_y_pred, test_groups, test_pred_sort_groups, ranker.random_state, test_docpair_samples)
        print(ranker.train_score_[-1], test_loss)
Example #25
    def read(self):
        """Read the visibilities and return as a (data, weights) tuple."""
        print("Reading " + str(self.data_size()) + " samples...")
        data = numpy.ascontiguousarray(numpy.zeros(self._imagingdata.dataSize, dtype=numpy.complex128))
        weights = numpy.ascontiguousarray(numpy.zeros(self._imagingdata.dataSize, dtype=numpy.float64))
        _wsclean.read(self._userdata, data, weights)
        return data, weights
Example #26
    def initializeC(self, image):
        super(MultiFitterZ, self).initializeC(image)
        self.clib.daoInitializeZ(self.mfit,
                                 numpy.ascontiguousarray(self.wx_params),
                                 numpy.ascontiguousarray(self.wy_params),
                                 self.min_z,
                                 self.max_z)
Example #27
def tucker_als(idx, val, shape, core_shape, iters=25, growth_tol=0.01, batch_run=False):
    '''
    The function computes Tucker ALS decomposition of sparse tensor
    provided in COO format. Usage:
    u0, u1, u2, g = newtuck(idx, val, shape, core_shape)
    '''
    def log_status(msg):
        if not batch_run:
            print(msg)

    if not (idx.flags.c_contiguous and val.flags.c_contiguous):
        raise ValueError('Input arrays must be C-contiguous.')


    #TODO: it's better to implement check for future
    #if np.any(idx[1:, 0]-idx[:-1, 0]) < 0):
    #    print 'Warning! Index array must be sorted by first column in ascending order.'

    r0, r1, r2 = core_shape

    u1 = np.random.rand(shape[1], r1)
    u1 = np.linalg.qr(u1, mode='reduced')[0]

    u2 = np.random.rand(shape[2], r2)
    u2 = np.linalg.qr(u2, mode='reduced')[0]

    u1 = np.ascontiguousarray(u1)
    u2 = np.ascontiguousarray(u2)

    g_norm_old = 0

    for i in range(iters):
        log_status('Step %i of %i' % (i+1, iters))
        u0 = tensordot2(idx, val, shape, u2, u1, ((2, 0), (1, 0)))\
            .reshape(shape[0], r1*r2)
        uu = np.linalg.svd(u0, full_matrices=0)[0]
        u0 = np.ascontiguousarray(uu[:, :r0])

        u1 = tensordot2(idx, val, shape, u2, u0, ((2, 0), (0, 0)))\
            .reshape(shape[1], r0*r2)
        uu = np.linalg.svd(u1, full_matrices=0)[0]
        u1 = np.ascontiguousarray(uu[:, :r1])

        u2 = tensordot2(idx, val, shape, u1, u0, ((1, 0), (0, 0)))\
            .reshape(shape[2], r0*r1)
        uu, ss, vv = np.linalg.svd(u2, full_matrices=0)
        u2 = np.ascontiguousarray(uu[:, :r2])

        g_norm_new = np.linalg.norm(np.diag(ss[:r2]))
        g_growth = (g_norm_new - g_norm_old) / g_norm_new
        g_norm_old = g_norm_new
        log_status('growth of the core: %f' % g_growth)
        if g_growth < growth_tol:
            log_status('Core is no longer growing. Norm of the core: %f' % g_norm_old)
            break

    g = np.diag(ss[:r2]).dot(vv[:r2, :])
    g = g.reshape(r2, r1, r0).transpose(2, 1, 0)
    log_status('Done')
    return u0, u1, u2, g
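A hedged call sketch for tucker_als; tensordot2 from the original module must be available, and the index array has to be C-contiguous and sorted by its first column, as the checks above require. The random COO tensor here is invented.

import numpy as np

shape = (30, 20, 10)
nnz = 500
idx = np.column_stack([np.random.randint(0, s, nnz) for s in shape])
idx = np.ascontiguousarray(idx[np.argsort(idx[:, 0])])   # sort by first column
val = np.ascontiguousarray(np.random.rand(nnz))

u0, u1, u2, g = tucker_als(idx, val, shape, core_shape=(5, 4, 3), batch_run=True)
print(u0.shape, g.shape)    # (30, 5) and (5, 4, 3)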
Example #28
def optimum_reparam_pair(q, time, q1, q2, lam=0.0):
    """
    calculates the warping to align srsf pair q1 and q2 to q

    :param q: vector of size N or array of NxM samples of first SRSF
    :param time: vector of size N describing the sample points
    :param q1: vector of size N or array of NxM samples of the first SRSF of the pair
    :param q2: vector of size N or array of NxM samples of the second SRSF of the pair
    :param lam: controls the amount of elasticity (default = 0.0)

    :rtype: vector
    :return gam: describing the warping function used to align q2 with q1

    """
    if q1.ndim == 1 and q2.ndim == 1:
        q_c = column_stack((q1, q2))
        gam = orN.coptimum_reparam_pair(ascontiguousarray(q), time,
                                        ascontiguousarray(q_c), lam)

    elif q1.ndim == 2 and q2.ndim == 2:
        gam = orN.coptimum_reparamN2_pair(ascontiguousarray(q), time,
                                          ascontiguousarray(q1),
                                          ascontiguousarray(q2), lam)
    else:
        raise ValueError("q1 and q2 must both be 1-D or both be 2-D")

    return gam
Example #29
    def initializeC(self, image):
        """
        This initializes the C fitting library.

        It needs the image in order to know what size arrays to create
        as we won't always have SCMOS calibration data.
        """
        if self.scmos_cal is None:
            if self.verbose:
                print("Using zeros for sCMOS calibration data.")
            self.scmos_cal = numpy.ascontiguousarray(numpy.zeros(image.shape), dtype = numpy.float64)
        else:
            self.scmos_cal = numpy.ascontiguousarray(self.scmos_cal, dtype = numpy.float64)

        if self.rqe is None:
            if self.verbose:
                print("Using ones for relative quantum efficiency data.")
            self.rqe = numpy.ascontiguousarray(numpy.ones(image.shape), dtype = numpy.float64)
        else:
            self.rqe = numpy.ascontiguousarray(self.rqe, dtype = numpy.float64)

        if (image.shape[0] != self.scmos_cal.shape[0]) or (image.shape[1] != self.scmos_cal.shape[1]):
            raise MultiFitterException("Image shape and sCMOS calibration shape do not match.")

        if (image.shape[0] != self.rqe.shape[0]) or (image.shape[1] != self.rqe.shape[1]):
            raise MultiFitterException("Image shape and RQE shape do not match.")
        
        self.im_shape = self.scmos_cal.shape

        if self.verbose and self.als_fit:
            print("Anscombe least squares fitting requested.")
Example #30
def lasso(X, Y, B=None, lam=1., max_iter=None, tol=None):
    '''
    B = lasso(X, Y, B={np.zeros()}, lam=1., max_iter={1024}, tol={1e-5})

    Solve LASSO Optimisation

        B* = arg min_B ½/n || Y - BX ||₂² + λ||B||₁

    where $n$ is the number of samples.

    Milk uses coordinate descent, looping through the coordinates in order
    (with an active set strategy to update only non-zero βs, if possible). The
    problem is convex and the solution is guaranteed to be optimal (within
    floating point accuracy).

    Parameters
    ----------
    X : ndarray
        Design matrix
    Y : ndarray
        Matrix of outputs
    B : ndarray, optional
        Starting values for approximation. This can be used for a warm start if
        you have an estimate of where the solution should be. If used, the
        solution might be written in-place (if the array has the right format).
    lam : float, optional
        λ (default: 1.0)
    max_iter : int, optional
        Maximum nr of iterations (default: 1024)
    tol : float, optional
        Tolerance. Whenever a parameter is to be updated by a value smaller
        than ``tolerance``, that is considered a null update. Be careful that
        if the value is too small, performance will degrade horribly.
        (default: 1e-5)

    Returns
    -------
    B : ndarray
    '''
    X = np.ascontiguousarray(X, dtype=np.float32)
    Y = np.ascontiguousarray(Y, dtype=np.float32)
    if B is None:
        B = np.zeros((Y.shape[0],X.shape[0]), np.float32)
    else:
        B = np.ascontiguousarray(B, dtype=np.float32)
    if max_iter is None:
        max_iter = 1024
    if tol is None:
        tol = 1e-5
    if X.shape[0] != B.shape[1] or \
        Y.shape[0] != B.shape[0] or \
        X.shape[1] != Y.shape[1]:
        raise ValueError('milk.supervised.lasso: Dimensions do not match')
    if np.any(np.isnan(X)) or np.any(np.isnan(B)):
        raise ValueError('milk.supervised.lasso: NaNs are only supported in the ``Y`` matrix')
    W = np.ascontiguousarray(~np.isnan(Y), dtype=np.float32)
    Y = np.nan_to_num(Y)
    n = Y.size
    _lasso.lasso(X, Y, W, B, max_iter, float(2*n*lam), float(tol))
    return B
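A small usage example under the docstring's conventions (X has one column per sample, B has shape (outputs, features)); it assumes the compiled _lasso extension from milk is available, and the data are invented.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(5, 100).astype(np.float32)    # 5 features, 100 samples
B_true = np.zeros((2, 5), np.float32)
B_true[0, 1] = 1.5
B_true[1, 3] = -2.0
Y = B_true.dot(X)

B_hat = lasso(X, Y, lam=0.05)    # most entries of B_hat should be near zero
print(B_hat.shape)               # (2, 5)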
Example #31
def array_like(obj,
               name,
               dtype=np.double,
               ndim=1,
               maxdim=None,
               shape=None,
               order='C',
               contiguous=False,
               optional=False):
    """
    Convert array-like to a ndarray and check conditions

    Parameters
    ----------
    obj : array_like
         An array, any object exposing the array interface, an object whose
        __array__ method returns an array, or any (nested) sequence.
    name : str
        Name of the variable to use in exceptions
    dtype : {None, numpy.dtype, str}
        Required dtype. Default is double. If None, does not change the dtype
        of obj (if present) or uses NumPy to automatically detect the dtype
    ndim : {int, None}
        Required number of dimensions of obj. If None, no check is performed.
        If the number of dimensions of obj is less than ndim, additional axes
        are inserted on the right. See examples.
    maxdim : {int, None}
        Maximum allowed dimension.  Use ``maxdim`` instead of ``ndim`` when
        inputs are allowed to have ndim 1, 2, ..., or maxdim.
    shape : {tuple[int], None}
        Required shape of obj.  If None, no check is performed. Partially
        restricted shapes can be checked using None. See examples.
    order : {'C', 'F'}
        Order of the array
    contiguous : bool
        Ensure that the array's data is contiguous with order ``order``
    optional : bool
        Flag indicating whether None is allowed

    Returns
    -------
    ndarray
        The converted input.

    Examples
    --------
    Convert a list or pandas series to an array
    >>> import pandas as pd
    >>> x = [0, 1, 2, 3]
    >>> a = array_like(x, 'x', ndim=1)
    >>> a.shape
    (4,)

    >>> a = array_like(pd.Series(x), 'x', ndim=1)
    >>> a.shape
    (4,)
    >>> type(a.orig)
    pandas.core.series.Series

    Squeezes singleton dimensions when required
    >>> x = np.array(x).reshape((4, 1))
    >>> a = array_like(x, 'x', ndim=1)
    >>> a.shape
    (4,)

    Right-appends when required size is larger than actual
    >>> x = [0, 1, 2, 3]
    >>> a = array_like(x, 'x', ndim=2)
    >>> a.shape
    (4, 1)

    Check only the first and last dimension of the input
    >>> x = np.arange(4*10*4).reshape((4, 10, 4))
    >>> y = array_like(x, 'x', ndim=3, shape=(4, None, 4))

    Check only the first two dimensions
    >>> z = array_like(x, 'x', ndim=3, shape=(4, 10))

    Raises ValueError if constraints are not satisfied
    >>> z = array_like(x, 'x', ndim=2)
    Traceback (most recent call last):
     ...
    ValueError: x is required to have ndim 2 but has ndim 3

    >>> z = array_like(x, 'x', shape=(10, 4, 4))
    Traceback (most recent call last):
     ...
    ValueError: x is required to have shape (10, 4, 4) but has shape (4, 10, 4)

    >>> z = array_like(x, 'x', shape=(None, 4, 4))
    Traceback (most recent call last):
     ...
    ValueError: x is required to have shape (*, 4, 4) but has shape (4, 10, 4)
    """
    if optional and obj is None:
        return None
    arr = np.asarray(obj, dtype=dtype, order=order)
    if maxdim is not None:
        if arr.ndim > maxdim:
            msg = '{0} must have ndim <= {1}'.format(name, maxdim)
            raise ValueError(msg)
    elif ndim is not None:
        if arr.ndim > ndim:
            arr = _right_squeeze(arr, stop_dim=ndim)
        elif arr.ndim < ndim:
            arr = np.reshape(arr, arr.shape + (1, ) * (ndim - arr.ndim))
        if arr.ndim != ndim:
            msg = '{0} is required to have ndim {1} but has ndim {2}'
            raise ValueError(msg.format(name, ndim, arr.ndim))
    if shape is not None:
        for actual, req in zip(arr.shape, shape):
            if req is not None and actual != req:
                req_shape = str(shape).replace('None, ', '*, ')
                msg = '{0} is required to have shape {1} but has shape {2}'
                raise ValueError(msg.format(name, req_shape, arr.shape))
    if contiguous:
        arr = np.ascontiguousarray(arr, dtype=dtype)
    return arr
Example #32
def as_floatX(x):
    if not hasattr(x, '__len__'):
        return np.array(x, dtype=floatX)
    return np.ascontiguousarray(x, dtype=floatX)
Example #33
if ret_val:
    ret_val = cv2.imwrite(filename, img)
    print(ret_val)

pyyolo.init(datacfg, cfgfile, weightfile)

# from file
print('----- test original C using a file')
outputs = pyyolo.test(filename, thresh, hier_thresh)
for output in outputs:
    print(output)

# python API on raw image data (the camera capture below is stubbed out)
print('----- test python API using a file')
i = 1
while i < 2:
    # ret_val, img = cam.read()
    img = cv2.imread(filename)
    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print w, h, c
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    for output in outputs:
        print(output)
    i = i + 1

# free model
pyyolo.cleanup()
Example #34
    def _call(self):

        # compute the sampling frequency if necessary
        if 'dt' in self.data['Sensors']['Lumbar']:
            dt = self.data['Sensors']['Lumbar']['dt'][()]
        else:
            dt = mean(diff(self.data['Sensors']['Lumbar']['Unix Time'][:100]))
            self.data = ('Sensors/Lumbar/dt', dt)  # save for future use
        # set-up the filter that will be used
        sos = butter(self.lp_ord,
                     2 * self.lp_cut * dt,
                     btype='low',
                     output='sos')

        if 'Processed' in self.data:
            days = [
                i for i in self.data['Processed']['Sit2Stand'].keys()
                if 'Day' in i
            ]
        else:
            days = ['Day 1']
        for iday, day in enumerate(days):
            try:
                start, stop = self.data['Processed']['Sit2Stand'][day][
                    'Indices']
            except KeyError:
                start, stop = 0, self.data['Sensors']['Lumbar'][
                    'Accelerometer'].shape[0]
            # compute the magnitude of the acceleration
            m_acc = norm(
                self.data['Sensors']['Lumbar']['Accelerometer'][start:stop],
                axis=1)

            f_acc = ascontiguousarray(sosfiltfilt(sos, m_acc))

            # reconstructed acceleration
            if self.method == 'dwt':
                # deconstruct the filtered acceleration magnitude
                coefs = pywt.wavedec(f_acc, self.dwave, mode=self.ext_mode)

                # set all but the desired level of coefficients to be 0s
                if (len(coefs) - self.recon_level) < 1:
                    warn(
                        f'Chosen reconstruction level is too high, setting to {len(coefs) - 1}',
                        UserWarning)
                    ind = 1
                else:
                    ind = len(coefs) - self.recon_level

                for i in range(1, len(coefs)):
                    if i != ind:
                        coefs[i][:] = 0
                r_acc = pywt.waverec(coefs, self.dwave, mode=self.ext_mode)
            elif self.method == 'moving average':
                n_window = int(around(self.window / dt))
                r_acc, *_ = mov_stats(f_acc, n_window)

            # CWT power peak detection
            coefs, freqs = pywt.cwt(r_acc,
                                    arange(1, 65),
                                    self.cwave,
                                    sampling_period=dt)

            # sum the coefficients over the frequencies in the power band
            f_mask = (freqs <= self.power_end_f) & (freqs >=
                                                    self.power_start_f)
            power = sum(coefs[f_mask, :], axis=0)

            # find the peaks in the power data
            if self.std_height:
                if self.std_trim != 0:
                    trim = int(self.std_trim / dt)
                    self.power_peak_kw['height'] = std(power[trim:-trim],
                                                       ddof=1)
                else:
                    self.power_peak_kw['height'] = std(power, ddof=1)

            power_peaks, _ = find_peaks(power, **self.power_peak_kw)

            self.data = (PROC.format(day_n=iday + 1,
                                     value='Filtered Acceleration'), f_acc)
            self.data = (PROC.format(day_n=iday + 1,
                                     value='Reconstructed Acceleration'),
                         r_acc[:m_acc.size])
            self.data = (PROC.format(day_n=iday + 1, value='Power'), power)
            self.data = (PROC.format(day_n=iday + 1,
                                     value='Power Peaks'), power_peaks)
Example #35
def cythonize(*matrices):
    return tuple(np.ascontiguousarray(matrix, dtype=np.float64) for matrix in matrices)
Example #36
    def __next__(self):
        self.count += 1
        if self.count == self.nB:
            raise StopIteration

        ia = self.count * self.batch_size
        ib = min((self.count + 1) * self.batch_size, self.nF)

        img_all, labels_all, img_paths, img_shapes = [], [], [], []
        for index, files_index in enumerate(range(ia, ib)):
            img_path = self.img_files[self.shuffled_vector[files_index]]
            label_path = self.label_files[self.shuffled_vector[files_index]]

            img = cv2.imread(img_path)  # BGR
            assert img is not None, 'File Not Found ' + img_path

            augment_hsv = True
            if self.augment and augment_hsv:
                # SV augmentation by 50%
                fraction = 0.50
                img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                S = img_hsv[:, :, 1].astype(np.float32)
                V = img_hsv[:, :, 2].astype(np.float32)

                a = (random.random() * 2 - 1) * fraction + 1
                S *= a
                if a > 1:
                    np.clip(S, a_min=0, a_max=255, out=S)

                a = (random.random() * 2 - 1) * fraction + 1
                V *= a
                if a > 1:
                    np.clip(V, a_min=0, a_max=255, out=V)

                img_hsv[:, :, 1] = S.astype(np.uint8)
                img_hsv[:, :, 2] = V.astype(np.uint8)
                cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)

            h, w, _ = img.shape
            img, ratio, padw, padh = letterbox(img, height=self.img_size)

            # Load labels
            if os.path.isfile(label_path):
                labels0 = np.loadtxt(label_path,
                                     dtype=np.float32).reshape(-1, 5)

                # Normalized xywh to pixel xyxy format
                labels = labels0.copy()
                labels[:, 1] = ratio * w * (labels0[:, 1] -
                                            labels0[:, 3] / 2) + padw
                labels[:, 2] = ratio * h * (labels0[:, 2] -
                                            labels0[:, 4] / 2) + padh
                labels[:, 3] = ratio * w * (labels0[:, 1] +
                                            labels0[:, 3] / 2) + padw
                labels[:, 4] = ratio * h * (labels0[:, 2] +
                                            labels0[:, 4] / 2) + padh
            else:
                labels = np.array([])

            # Augment image and labels
            if self.augment:
                img, labels, M = random_affine(img,
                                               labels,
                                               degrees=(-5, 5),
                                               translate=(0.10, 0.10),
                                               scale=(0.90, 1.10))

            plotFlag = False
            if plotFlag:
                import matplotlib.pyplot as plt
                plt.figure(figsize=(10, 10)) if index == 0 else None
                plt.subplot(4, 4, index + 1).imshow(img[:, :, ::-1])
                plt.plot(labels[:, [1, 3, 3, 1, 1]].T,
                         labels[:, [2, 2, 4, 4, 2]].T, '.-')
                plt.axis('off')

            nL = len(labels)
            if nL > 0:
                # convert xyxy to xywh
                labels[:,
                       1:5] = xyxy2xywh(labels[:, 1:5].copy()) / self.img_size

            if self.augment:
                # random left-right flip
                lr_flip = True
                if lr_flip and random.random() > 0.5:
                    img = np.fliplr(img)
                    if nL > 0:
                        labels[:, 1] = 1 - labels[:, 1]

                # random up-down flip
                ud_flip = False
                if ud_flip and random.random() > 0.5:
                    img = np.flipud(img)
                    if nL > 0:
                        labels[:, 2] = 1 - labels[:, 2]

            if nL > 0:
                labels = np.concatenate((np.zeros(
                    (nL, 1), dtype='float32') + index, labels), 1)
                labels_all.append(labels)

            img_all.append(img)
            img_paths.append(img_path)
            img_shapes.append((h, w))

        # Normalize
        img_all = np.stack(img_all)[:, :, :, ::-1].transpose(
            0, 3, 1, 2)  # BGR to RGB and cv2 to pytorch
        img_all = np.ascontiguousarray(img_all, dtype=np.float32)
        img_all /= 255.0

        labels_all = torch.from_numpy(np.concatenate(labels_all, 0))
        return torch.from_numpy(img_all), labels_all, img_paths, img_shapes
Example #37
def imread(fname):
    im = np.asarray(Image.open(fname).convert('RGB'))
    if im.ndim == 3 and im.shape[2] > 3:  # defensive; convert('RGB') already yields 3 channels
        im = im[:, :, :3]
    return np.ascontiguousarray(im)
Example #38
#img = bpy.data.images["DummyImage4"]
#img_array = [1 for pix in range(len(img.pixels))] # fill with zeros
#img.pixels = img_array
##tex.image = img

#for i in range(3):
#    tex.voxel_data.resolution[i] = -512
for i in range(3):
    tex.voxel_data.resolution[i] = rez

vdp = ctypes.cast(bpy.data.textures[tex_name].voxel_data.as_pointer(),
                  ctypes.POINTER(VoxelData))

import yt
ds = yt.load("~/data/IsolatedGalaxy/galaxy0030/galaxy0030")
cg = ds.covering_grid(level, [0, 0, 0], (rez, rez, rez))
rho = np.log10(cg["density"])
rho = (rho - rho.min()) / (rho.max() - rho.min())
rho = rho.astype("float32").copy()
rho = np.ascontiguousarray(rho)
arr = (ctypes.c_float * rho.size)()
arr2 = np.ctypeslib.as_array(arr, rho.shape)
arr2[:] = rho.flat[:]
vdp.contents.dataset = ctypes.cast(arr, ctypes.POINTER(ctypes.c_float))

vdp.contents.ok = 1
vdp.contents.cachedframe = bpy.context.scene.frame_current
print(rho.strides)

# NOTE for a pretty image you want this setup with "over sampling" checked in the materials panel: http://blender.stackexchange.com/questions/15010/rendering-a-3d-volume
Example #39
def pointsToPolyData(points, copy_z=False):
    """Create ``vtkPolyData`` from a numpy array of XYZ points. If the points
    have more than 3 dimensions, then all dimensions after the third will be
    added as attributes. Assume the first three dimensions are the XYZ
    coordinates.

    Args:
        points (np.ndarray or pandas.DataFrame): The points and pointdata
        copy_z (bool): A flag on whether to append the z values as a PointData
            array

    Return:
        vtkPolyData : points with point-vertex cells
    """
    __displayname__ = 'Points to PolyData'
    __category__ = 'filter'
    # This prevents an error that occurs when only one point is passed
    if points.ndim < 2:
        points = points.reshape((1,-1))
    keys = ['Field %d' % i for i in range(points.shape[1] - 3)]
    # Check if input is anything other than a NumPy array and cast it
    # e.g. you could send a Pandas dataframe
    if not isinstance(points, np.ndarray):
        if isinstance(points, pd.DataFrame):
            # If a pandas data frame, lets grab the keys
            keys = points.keys()[3::]
        points = np.array(points)
    # If points are not 3D
    if points.shape[1] < 3:
        raise RuntimeError('Points must be 3D. Try adding a third dimension of zeros.')

    atts = points[:, 3::]
    points = points[:, 0:3]

    npoints = points.shape[0]

    # Make VTK cells array
    cells = np.hstack((np.ones((npoints, 1)),
                       np.arange(npoints).reshape(-1, 1)))
    cells = np.ascontiguousarray(cells, dtype=np.int64)
    cells = np.reshape(cells, (2*npoints))
    vtkcells = vtk.vtkCellArray()
    vtkcells.SetCells(npoints, nps.numpy_to_vtk(cells, deep=True, array_type=vtk.VTK_ID_TYPE))

    # Convert points to vtk object
    pts = vtk.vtkPoints()
    pts.SetData(convertArray(points))

    # Create polydata
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(pts)
    pdata.SetVerts(vtkcells)

    # Add attributes if given
    scalSet = False
    for i, key in enumerate(keys):
        data = convertArray(atts[:, i], name=key)
        pdata.GetPointData().AddArray(data)
        if not scalSet:
            pdata.GetPointData().SetActiveScalars(key)
            scalSet = True
    if copy_z:
        z = convertArray(points[:, 2], name='Elevation')
        pdata.GetPointData().AddArray(z)
    return wrapvtki(pdata)
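The cells array built above follows VTK's flat cell encoding: each vertex cell is stored as (point count, point id). A tiny illustration of just that step:

import numpy as np

npoints = 3
cells = np.hstack((np.ones((npoints, 1)),
                   np.arange(npoints).reshape(-1, 1)))
cells = np.ascontiguousarray(cells, dtype=np.int64).reshape(2 * npoints)
print(cells)    # [1 0 1 1 1 2] -> three cells of one point each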
Example #40
    def create_batches(self, batch_size, shuffle=True):
        # 1 batch = [(image, [([x, y, w, h], id), ([x, y, w, h], id), ...]), ...]
        batch = []
        self.counter_samples = 0  # running total of yielded samples

        while True:
            indices = range(len(self.img_ids))

            if shuffle:
                indices = np.random.permutation(indices)
            for index in indices:
                index += 1
                try:
                    img = self.coco.loadImgs(self.img_ids[index])[0]
                except (KeyError, IndexError):
                    print(index)
                    continue
                path = os.path.join(self.image_dir, self.get_image_path(id=img['id'], name=img['file_name']))
                I = cv2.imread(path).astype(np.uint8)[:, :, ::-1]
                I = np.ascontiguousarray(I)


                try:
                    if len(I.shape) != 3:
                        continue
                except Exception:
                    print("no image exists")
                    continue

                ann_ids = self.coco.getAnnIds(imgIds=img['id'], catIds=self.cat_ids, iscrowd=None)
                anns = self.coco.loadAnns(ann_ids)
                ann_list = []

                rles = []
                for ann in anns:
                    bb = [f for f in ann["bbox"]]
                    try:
                        rle = frPyObjects(ann['segmentation'], I.shape[0], I.shape[1])[0]
                        bbb = toBbox(rle)
                    except Exception:
                        continue
                    # make sure we don't include unknown classes
                    if self.id2i[ann["category_id"]] < 0 or self.id2i[ann["category_id"]] > config.TOTAL_CLASSES:
                        print("This class cannot be processed: %d ..." % self.id2i[ann["category_id"]])
                        continue

                    rles.append(rle)
                    ann_list.append((decode(rle).astype(float), bb, self.id2i[ann["category_id"]]))

                # print("------- RLEs-SHAPE")
                # print(len(rles))

                if len(rles) == 0:
                    print("NO RLE was extracted continue with next picture ...")
                    continue

                mask = decode(rles).astype(float)
                batch.append((I, mask, ann_list))

                # print("------- MASKS-SHAPE")
                # print(len(mask))
                # print(np.max(mask), np.min(mask))

                if len(batch) >= batch_size:
                    self.counter_samples += len(batch)
                    print("Getting new batch with %d elements ..." % (len(batch)))
                    yield batch
                    del mask
                    del ann_list
                    del rles
                    del batch
                    batch = []
Example #41
def get_img_label(img_name, img_dir, gt_dir):
    # get the labels and targets for the input image
    name = img_name.split('.')[0]
    img_path = os.path.join(img_dir, img_name)
    gt_path = os.path.join(gt_dir, name + '.txt')
    img = cv2.imread(img_path)
    img_size = img.shape
    h_feat = img_size[0] // 2 // 2 // 2 // 2  #feature map height
    w_feat = img_size[1] // 2 // 2 // 2 // 2  #feature map width
    #get the image data in bytes
    with open(img_path, 'rb') as ff:
        img_bytes = ff.read()
    ###read gt_box
    with open(gt_path, 'r') as ff:
        lines = ff.readlines()
    box_list = []
    side_flag_list = []
    for line in lines:
        line = line.strip().split(',')
        side_flag_list.append(int(line[-1]))
        line = line[0:-1]
        x1, y1, x2, y2 = map(int, line)
        box_list.append(np.array([x1, y1, x2, y2]))
    gt_boxes = np.stack(box_list)
    gt_side_flag = np.stack(side_flag_list)
    ###get all anchors
    base_anchors = gen_base_anchors()
    A = base_anchors.shape[0]
    K = h_feat * w_feat
    base_anchors = base_anchors.reshape(1, A, 4)
    shift_x = np.arange(w_feat) * _stripe  # x-direction anchor shift for every feature-map point
    shift_y = np.arange(h_feat) * _stripe  # y-direction anchor shift for every feature-map point
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)  # x/y shift for every point of the 2D grid
    shift_x = shift_x.ravel()  # flatten 2D to 1D
    shift_y = shift_y.ravel()
    shift = np.stack([shift_x, shift_y, shift_x, shift_y]).transpose()
    shift = shift.reshape(K, 1, 4)
    all_anchors = base_anchors + shift
    all_anchors = all_anchors.reshape((K * A, 4))
    total_anchors = K * A
    #remove the anchors that are out of the image
    index_inside = np.where((all_anchors[:, 0] >= 0) & (all_anchors[:, 1] >= 0)
                            & (all_anchors[:, 2] < img_size[1])
                            & (all_anchors[:, 3] < img_size[0]))[0]
    anchors = all_anchors[index_inside, :]

    ### get labels, 1=positive, 0=negative, -1=don't care
    labels = np.ones(anchors.shape[0], dtype=np.int64) * -1
    labels2 = np.ones(anchors.shape[0], dtype=np.int64) * -1
    overlaps = bbox_overlaps(np.ascontiguousarray(anchors, dtype=np.float64),
                             np.ascontiguousarray(gt_boxes, dtype=np.float64))
    max_ol_gt_index = overlaps.argmax(axis=1)
    max_ol_gt = overlaps[np.arange(anchors.shape[0]), max_ol_gt_index]
    max_ol_anchor_index = overlaps.argmax(axis=0)
    max_ol_anchor = overlaps[max_ol_anchor_index, np.arange(gt_boxes.shape[0])]

    labels[max_ol_gt < OVERLAP_NEGATIVE_THR] = 0
    labels[max_ol_gt >= OVERLAP_POSITIVE_THR] = 1
    labels2[max_ol_anchor_index] = 1  # there is at least one positive anchor for each gt_box
    # a "special" anchor is the best-overlap anchor for some gt_box, yet its own
    # maximum overlap (possibly with a different gt_box) falls below
    # OVERLAP_POSITIVE_THR, so it was not labeled positive above
    special_anchor_index = np.where((labels != 1) & (labels2 == 1))[0]
    special_anchor_gt_index = np.zeros(special_anchor_index.shape[0], dtype=np.int64)
    for ii in range(special_anchor_gt_index.shape[0]):
        special_anchor_gt_index[ii] = np.where(
            max_ol_anchor_index == special_anchor_index[ii])[0][0]

    labels[special_anchor_index] = 1

    ### side label: whether the anchor is on the left or right side of the text area
    side_labels = gt_side_flag[max_ol_gt_index]
    side_labels[special_anchor_index] = gt_side_flag[special_anchor_gt_index]

    side_labels[np.where(labels != 1)[0]] = 0

    ### get the targets
    gt_for_anchors = gt_boxes[max_ol_gt_index]
    gt_for_anchors[special_anchor_index] = gt_boxes[special_anchor_gt_index]
    box_targets_y, box_targets_offset = target_calc(anchors, gt_for_anchors,
                                                    side_labels)
    box_weights_y = np.zeros((anchors.shape[0], 2))
    box_weights_y[labels == 1, :] = 1
    box_targets_y = box_targets_y * box_weights_y

    ### get the anchor that are out of the image back
    labels = _unmap(labels, total_anchors, index_inside, fill=-1)
    side_labels = _unmap(side_labels, total_anchors, index_inside, fill=0)
    box_targets_y = _unmap(box_targets_y, total_anchors, index_inside, fill=0)
    box_targets_offset = _unmap(box_targets_offset,
                                total_anchors,
                                index_inside,
                                fill=0)

    ### reshape
    labels = labels.reshape((1, h_feat, w_feat, A))
    side_labels = side_labels.reshape(1, h_feat, w_feat, A)
    box_targets_y = box_targets_y.reshape((1, h_feat, w_feat, A * 2))
    box_targets_offset = box_targets_offset.reshape(1, h_feat, w_feat, A)

    return img_bytes, labels, side_labels, box_targets_y, box_targets_offset
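
# The meshgrid shift above is the standard trick for tiling A base anchors over
# every feature-map cell. A self-contained sketch with toy anchors and a toy
# stride (not the values produced by gen_base_anchors above):
import numpy as np

def tile_anchors(base_anchors, h_feat, w_feat, stride):
    """Tile (A, 4) base anchors over an h_feat x w_feat grid -> (K*A, 4)."""
    shift_x = np.arange(w_feat) * stride
    shift_y = np.arange(h_feat) * stride
    sx, sy = np.meshgrid(shift_x, shift_y)
    # one (x1, y1, x2, y2) shift per grid cell
    shifts = np.stack([sx.ravel(), sy.ravel(), sx.ravel(), sy.ravel()], axis=1)
    # broadcast (K, 1, 4) + (1, A, 4) -> (K, A, 4), then flatten
    return (shifts[:, None, :] + base_anchors[None, :, :]).reshape(-1, 4)

toy_anchors = np.array([[0, 0, 15, 15], [0, 0, 15, 31]])
print(tile_anchors(toy_anchors, h_feat=2, w_feat=3, stride=16).shape)  # (12, 4)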
Beispiel #42
0
    def __getitem__(self, index):
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            # if random.random() < 0.5:
            #     img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
            #     r = np.random.beta(0.3, 0.3)  # mixup ratio, alpha=beta=0.3
            #     img = (img * r + img2 * (1 - r)).astype(np.uint8)
            #     labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img,
                                        shape,
                                        auto=False,
                                        scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace
            if not self.mosaic:
                img, labels = random_affine(img,
                                            labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])

            # Augment colorspace
            augment_hsv(img,
                        hgain=hyp['hsv_h'],
                        sgain=hyp['hsv_s'],
                        vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
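
# The snippet above relies on a helper named xyxy2xywh; a hypothetical minimal
# version following the usual YOLO convention ([x_center, y_center, w, h]) --
# the repo's actual helper may differ in details.
import numpy as np

def xyxy2xywh(x):
    """Convert (N, 4) boxes from [x1, y1, x2, y2] to [xc, yc, w, h]."""
    y = np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

print(xyxy2xywh(np.array([[10., 20., 50., 80.]])))  # [[30. 50. 40. 60.]]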
Beispiel #43
0
    def load_from_table(self, data, b0_thr=0):
        """Build the structure from an input matrix.

        The first three columns represent the gradient directions.
        Then, we accept two formats to describe each gradient:
            - if the shape of data is Nx4, the 4^ column is the b-value;
            - if the shape of data is Nx7, the last 4 columns are, respectively, the gradient strength, big delta, small delta and TE.

        Parameters
        ----------
        data : numpy.ndarray
            Matrix containing all the values.
        b0_thr : float
            The threshold on the b-values to identify the b0 images (default: 0)
        """
        if data.ndim == 1:
            data = np.expand_dims(data, axis=0)
        self.raw = data

        # number of samples
        # self.nS = self.raw.shape[0]  # JL: incomplete getter/setter incompatible with 3.6; never used, as the getter always returns the derived value

        # set/calculate the b-values
        if self.raw.shape[1] == 4:
            self.version = 0
            self.b = self.raw[:, 3]
        elif self.raw.shape[1] == 7:
            self.version = 1
            self.b = (267.513e6 * self.raw[:, 3] * self.raw[:, 5])**2 * (
                self.raw[:, 4] - self.raw[:, 5] / 3.0) * 1e-6  # in s/mm^2
        else:
            ERROR('Unrecognized scheme format')

        # store information about the volumes
        self.b0_thr = b0_thr
        self.b0_idx = np.where(self.b <= b0_thr)[0]
        self.b0_count = len(self.b0_idx)
        self.dwi_idx = np.where(self.b > b0_thr)[0]
        self.dwi_count = len(self.dwi_idx)

        # ensure the directions are in the spherical range [0,180]x[0,180]
        idx = np.where(self.raw[:, 1] < 0)[0]
        self.raw[idx, 0:3] = -self.raw[idx, 0:3]

        # store information about each shell in a dictionary
        self.shells = []

        tmp = np.ascontiguousarray(self.raw[:, 3:])
        schemeUnique, schemeUniqueInd = np.unique(tmp.view([('', tmp.dtype)] *
                                                           tmp.shape[1]),
                                                  return_index=True)
        schemeUnique = schemeUnique.view(tmp.dtype).reshape(
            (schemeUnique.shape[0], tmp.shape[1]))
        schemeUnique = [tmp[index] for index in sorted(schemeUniqueInd)]
        bUnique = [self.b[index] for index in sorted(schemeUniqueInd)]
        for i in range(len(schemeUnique)):
            if bUnique[i] <= b0_thr:
                continue
            shell = {}
            shell['b'] = bUnique[i]
            if self.version == 0:
                shell['G'] = None
                shell['Delta'] = None
                shell['delta'] = None
                shell['TE'] = None
            else:
                shell['G'] = schemeUnique[i][0]
                shell['Delta'] = schemeUnique[i][1]
                shell['delta'] = schemeUnique[i][2]
                shell['TE'] = schemeUnique[i][3]

            shell['idx'] = np.where((tmp == schemeUnique[i]).all(axis=1))[0]
            shell['grad'] = self.raw[shell['idx'], 0:3]
            self.shells.append(shell)
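
# The Nx7 branch above computes the standard PGSE b-value,
# b = (gamma * G * delta)^2 * (Delta - delta / 3). A quick numeric check with
# hypothetical but typical values (assuming G in T/m and times in seconds,
# which is what the 1e-6 conversion to s/mm^2 implies):
gamma = 267.513e6                      # proton gyromagnetic ratio [rad/(s*T)]
G, Delta, delta = 0.04, 0.030, 0.020   # 40 mT/m, 30 ms, 20 ms
b = (gamma * G * delta) ** 2 * (Delta - delta / 3.0) * 1e-6
print(round(b))                        # ~1069, a typical DWI shell in s/mm^2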
Beispiel #44
0
def denoise_nl_means(image,
                     patch_size=7,
                     patch_distance=11,
                     h=0.1,
                     multichannel=False,
                     fast_mode=True,
                     sigma=0.,
                     *,
                     preserve_range=None,
                     channel_axis=None):
    """Perform non-local means denoising on 2-D or 3-D grayscale images, and
    2-D RGB images.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input image to be denoised, which can be 2D or 3D, and grayscale
        or RGB (for 2D images only, see ``multichannel`` parameter).
    patch_size : int, optional
        Size of patches used for denoising.
    patch_distance : int, optional
        Maximal distance in pixels where to search patches used for denoising.
    h : float, optional
        Cut-off distance (in gray levels). The higher h, the more permissive
        one is in accepting patches. A higher h results in a smoother image,
        at the expense of blurring features. For a Gaussian noise of standard
        deviation sigma, a rule of thumb is to choose the value of h to be
        sigma or slightly less.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension. This argument is deprecated:
        specify `channel_axis` instead.
    fast_mode : bool, optional
        If True (default value), a fast version of the non-local means
        algorithm is used. If False, the original version of non-local means is
        used. See the Notes section for more details about the algorithms.
    sigma : float, optional
        The standard deviation of the (Gaussian) noise.  If provided, a more
        robust computation of patch weights is computed that takes the expected
        noise variance into account (see Notes below).
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    result : ndarray
        Denoised image, of same shape as `image`.

    Notes
    -----

    The non-local means algorithm is well suited for denoising images with
    specific textures. The principle of the algorithm is to average the value
    of a given pixel with values of other pixels in a limited neighbourhood,
    provided that the *patches* centered on the other pixels are similar enough
    to the patch centered on the pixel of interest.

    In the original version of the algorithm [1]_, corresponding to
    ``fast_mode=False``, the computational complexity is::

        image.size * patch_size ** image.ndim * patch_distance ** image.ndim

    Hence, changing the size of patches or their maximal distance has a
    strong effect on computing times, especially for 3-D images.

    However, the default behavior corresponds to ``fast_mode=True``, for which
    another version of non-local means [2]_ is used, corresponding to a
    complexity of::

        image.size * patch_distance ** image.ndim

    The computing time depends only weakly on the patch size, thanks to
    the computation of the integral of patches distances for a given
    shift, that reduces the number of operations [1]_. Therefore, this
    algorithm executes faster than the classic algorithm
    (``fast_mode=False``), at the expense of using twice as much memory.
    This implementation has been proven to be more efficient compared to
    other alternatives, see e.g. [3]_.

    Compared to the classic algorithm, all pixels of a patch contribute
    to the distance to another patch with the same weight, no matter
    their distance to the center of the patch. This coarser computation
    of the distance can result in a slightly poorer denoising
    performance. Moreover, for small images (images with a linear size
    that is only a few times the patch size), the classic algorithm can
    be faster due to boundary effects.

    The image is padded using the `reflect` mode of `skimage.util.pad`
    before denoising.

    If the noise standard deviation, `sigma`, is provided a more robust
    computation of patch weights is used.  Subtracting the known noise variance
    from the computed patch distances improves the estimates of patch
    similarity, giving a moderate improvement to denoising performance [4]_.
    It was also mentioned as an option for the fast variant of the algorithm in
    [3]_.

    When `sigma` is provided, a smaller `h` should typically be used to
    avoid oversmoothing.  The optimal value for `h` depends on the image
    content and noise level, but a reasonable starting point is
    ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when
    `fast_mode` is `False`.

    References
    ----------
    .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image
           denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE.
           :DOI:`10.1109/CVPR.2005.38`

    .. [2] J. Darbon, A. Cunha, T.F. Chan, S. Osher, and G.J. Jensen, Fast
           nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE
           International Symposium on Biomedical Imaging: From Nano to Macro,
           2008, pp. 1331-1334.
           :DOI:`10.1109/ISBI.2008.4541250`

    .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means
           Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326.
           :DOI:`10.5201/ipol.2014.120`

    .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising.
           Image Processing On Line, 2011, vol. 1, pp. 208-212.
           :DOI:`10.5201/ipol.2011.bcm_nlm`

    Examples
    --------
    >>> a = np.zeros((40, 40))
    >>> a[10:-10, 10:-10] = 1.
    >>> rng = np.random.default_rng()
    >>> a += 0.3 * rng.standard_normal(a.shape)
    >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)

    """
    if image.ndim == 2:
        image = image[..., np.newaxis]
        channel_axis = -1
    if image.ndim != 3:
        raise NotImplementedError(
            "Non-local means denoising is only implemented for 2D grayscale "
            "and RGB images or 3-D grayscale images.")

    if preserve_range is None and np.issubdtype(image.dtype, np.integer):
        warn(
            'Image dtype is not float. By default denoise_nl_means will '
            'assume you want to preserve the range of your image '
            '(preserve_range=True). In scikit-image 0.19 this behavior will '
            'change to preserve_range=False. To avoid this warning, '
            'explicitly specify the preserve_range parameter.',
            stacklevel=2)
        preserve_range = True

    image = convert_to_float(image, preserve_range)
    if not image.flags.c_contiguous:
        image = np.ascontiguousarray(image)

    kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)
    if channel_axis is not None:  # 2-D images
        if fast_mode:
            return _fast_nl_means_denoising_2d(image, **kwargs)
        else:
            return _nl_means_denoising_2d(image, **kwargs)
    else:  # 3-D grayscale
        if fast_mode:
            return _fast_nl_means_denoising_3d(image, **kwargs)
        else:
            return _nl_means_denoising_3d(image, **kwargs)
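
# Hedged usage sketch of denoise_nl_means above, combining sigma-based patch
# weighting with the h ~ 0.8 * sigma rule of thumb from the Notes
# (estimate_sigma is scikit-image's noise estimator).
import numpy as np
from skimage import data, img_as_float
from skimage.restoration import denoise_nl_means, estimate_sigma

astro = img_as_float(data.astronaut())
noisy = np.clip(astro + 0.1 * np.random.standard_normal(astro.shape), 0, 1)

sigma_est = np.mean(estimate_sigma(noisy, channel_axis=-1))
denoised = denoise_nl_means(noisy, patch_size=7, patch_distance=11,
                            h=0.8 * sigma_est, sigma=sigma_est,
                            fast_mode=True, channel_axis=-1)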
Beispiel #45
0
def train(train_A_dir, train_B_dir, model_dir, model_name, random_seed, validation_A_dir, validation_B_dir, output_dir, tensorboard_log_dir):

    np.random.seed(random_seed)

    num_epochs = 5000
    mini_batch_size = 1 # mini_batch_size = 1 is better
    generator_learning_rate = 0.0002
    generator_learning_rate_decay = generator_learning_rate / 200000
    discriminator_learning_rate = 0.0001
    discriminator_learning_rate_decay = discriminator_learning_rate / 200000
    sampling_rate = 16000
    num_mcep = 24
    frame_period = 5.0
    n_frames = 128
    lambda_cycle = 10
    lambda_identity = 5
    max_samples = 1000

    print('Data Preprocessing...')

    start_time = time.time()

    wavs_A = load_wavs(wav_dir = train_A_dir, sr = sampling_rate)
    wavs_B = load_wavs(wav_dir = train_B_dir, sr = sampling_rate)

    f0s_A, timeaxes_A, sps_A, aps_A, coded_sps_A = world_encode_data(wavs = wavs_A, fs = sampling_rate, frame_period = frame_period, coded_dim = num_mcep)
    f0s_B, timeaxes_B, sps_B, aps_B, coded_sps_B = world_encode_data(wavs = wavs_B, fs = sampling_rate, frame_period = frame_period, coded_dim = num_mcep)

    log_f0s_mean_A, log_f0s_std_A = logf0_statistics(f0s_A)
    log_f0s_mean_B, log_f0s_std_B = logf0_statistics(f0s_B)

    print('Log Pitch A: Mean %f, Std %f' %(log_f0s_mean_A, log_f0s_std_A))
    print('Log Pitch B: Mean %f, Std %f' %(log_f0s_mean_B, log_f0s_std_B))

    coded_sps_A_transposed = transpose_in_list(lst = coded_sps_A)
    coded_sps_B_transposed = transpose_in_list(lst = coded_sps_B)

    coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std = coded_sps_normalization_fit_transoform(coded_sps = coded_sps_A_transposed)
    coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std = coded_sps_normalization_fit_transoform(coded_sps = coded_sps_B_transposed)
    print("Input data loaded.")

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    np.savez(os.path.join(model_dir, 'logf0s_normalization.npz'), mean_A = log_f0s_mean_A, std_A = log_f0s_std_A, mean_B = log_f0s_mean_B, std_B = log_f0s_std_B)
    np.savez(os.path.join(model_dir, 'mcep_normalization.npz'), mean_A = coded_sps_A_mean, std_A = coded_sps_A_std, mean_B = coded_sps_B_mean, std_B = coded_sps_B_std)

    if validation_A_dir is not None:
        validation_A_output_dir = os.path.join(output_dir, 'converted_A')
        if not os.path.exists(validation_A_output_dir):
            os.makedirs(validation_A_output_dir)

    if validation_B_dir is not None:
        validation_B_output_dir = os.path.join(output_dir, 'converted_B')
        if not os.path.exists(validation_B_output_dir):
            os.makedirs(validation_B_output_dir)

    end_time = time.time()
    time_elapsed = end_time - start_time

    print('Preprocessing Done.')

    print('Time Elapsed for Data Preprocessing: %02d:%02d:%02d' % (time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))

    model = CycleGAN(num_features = num_mcep)

    num_iterations = 0

    for epoch in range(num_epochs):
        print('Epoch: %d' % epoch)
        '''
        if epoch > 60:
            lambda_identity = 0
        if epoch > 1250:
            generator_learning_rate = max(0, generator_learning_rate - 0.0000002)
            discriminator_learning_rate = max(0, discriminator_learning_rate - 0.0000001)
        '''

        start_time_epoch = time.time()

        pool_A, pool_B = list(coded_sps_A_norm), list(coded_sps_B_norm)
        f0sA, f0sB = list(f0s_A), list(f0s_B)
        dataset_A, dataset_B = sample_train_data(pool_A=pool_A, pool_B=pool_B, f0s_A=f0sA, f0s_B=f0sB, n_frames=n_frames, max_samples=max_samples)
        # dataset_A, dataset_B = sample_train_data(dataset_A = coded_sps_A_norm, dataset_B = coded_sps_B_norm, n_frames = n_frames)
        print('dataset_A', np.shape(dataset_A), 'dataset_B', np.shape(dataset_B))

        n_samples = dataset_A.shape[0]

        for i in range(n_samples // mini_batch_size):

            num_iterations += 1

            if num_iterations > 10000:
                lambda_identity = 0
            if num_iterations > 200000:
                generator_learning_rate = max(0, generator_learning_rate - generator_learning_rate_decay)
                discriminator_learning_rate = max(0, discriminator_learning_rate - discriminator_learning_rate_decay)

            start = i * mini_batch_size
            end = (i + 1) * mini_batch_size

            generator_loss, discriminator_loss = model.train(input_A = dataset_A[start:end], input_B = dataset_B[start:end], lambda_cycle = lambda_cycle, lambda_identity = lambda_identity, generator_learning_rate = generator_learning_rate, discriminator_learning_rate = discriminator_learning_rate)

            if i % 50 == 0:
                #print('Iteration: %d, Generator Loss : %f, Discriminator Loss : %f' % (num_iterations, generator_loss, discriminator_loss))
                print('Iteration: {:07d}, Generator Learning Rate: {:.7f}, Discriminator Learning Rate: {:.7f}, Generator Loss : {:.3f}, Discriminator Loss : {:.3f}'.format(num_iterations, generator_learning_rate, discriminator_learning_rate, generator_loss, discriminator_loss))

        model.save(directory = model_dir, filename = model_name)

        end_time_epoch = time.time()
        time_elapsed_epoch = end_time_epoch - start_time_epoch

        print('Time Elapsed for This Epoch: %02d:%02d:%02d' % (time_elapsed_epoch // 3600, (time_elapsed_epoch % 3600 // 60), (time_elapsed_epoch % 60 // 1)))

        if generator_learning_rate <= 0:
            print('training end')
            break

        if validation_A_dir is not None:
            if epoch % 50 == 0:
                print('Generating Validation Data B from A...')
                for file in os.listdir(validation_A_dir):
                    filepath = os.path.join(validation_A_dir, file)
                    wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)
                    wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)
                    f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)
                    f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_A, std_log_src = log_f0s_std_A, mean_log_target = log_f0s_mean_B, std_log_target = log_f0s_std_B)
                    coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)
                    coded_sp_transposed = coded_sp.T
                    coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std
                    coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]), direction = 'A2B')[0]
                    coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean
                    coded_sp_converted = coded_sp_converted.T
                    coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
                    decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)
                    wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)
                    librosa.output.write_wav(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed, sampling_rate)

        if validation_B_dir is not None:
            if epoch % 50 == 0:
                print('Generating Validation Data A from B...')
                for file in os.listdir(validation_B_dir):
                    filepath = os.path.join(validation_B_dir, file)
                    wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)
                    wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)
                    f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)
                    f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_B, std_log_src = log_f0s_std_B, mean_log_target = log_f0s_mean_A, std_log_target = log_f0s_std_A)
                    coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)
                    coded_sp_transposed = coded_sp.T
                    coded_sp_norm = (coded_sp_transposed - coded_sps_B_mean) / coded_sps_B_std
                    coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]), direction = 'B2A')[0]
                    coded_sp_converted = coded_sp_converted_norm * coded_sps_A_std + coded_sps_A_mean
                    coded_sp_converted = coded_sp_converted.T
                    coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
                    decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)
                    wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)
                    librosa.output.write_wav(os.path.join(validation_B_output_dir, os.path.basename(file)), wav_transformed, sampling_rate)
Beispiel #46
0
def _find_binning_thresholds(data, max_bins, subsample, random_state):
    """Extract feature-wise quantiles from numerical data.

    Missing values are ignored for finding the thresholds.

    Parameters
    ----------
    data : array-like, shape (n_samples, n_features)
        The data to bin.
    max_bins : int
        The maximum number of bins to use for non-missing values. If for a
        given feature the number of unique values is less than ``max_bins``,
        then those unique values will be used to compute the bin thresholds,
        instead of the quantiles.
    subsample : int or None
        If ``n_samples > subsample``, then ``subsample`` samples will be
        randomly chosen to compute the quantiles. If ``None``, the whole data
        is used.
    random_state : int or numpy.random.RandomState or None
        Pseudo-random number generator to control the random sub-sampling.
        See :term:`random_state`.

    Returns
    -------
    binning_thresholds : list of arrays
        For each feature, stores the increasing numeric values that can
        be used to separate the bins. Thus ``len(binning_thresholds) ==
        n_features``.
    """
    rng = check_random_state(random_state)
    if subsample is not None and data.shape[0] > subsample:
        subset = rng.choice(np.arange(data.shape[0]), subsample, replace=False)
        data = data.take(subset, axis=0)

    binning_thresholds = []
    for f_idx in range(data.shape[1]):
        col_data = data[:, f_idx]
        # ignore missing values when computing bin thresholds
        missing_mask = np.isnan(col_data)
        if missing_mask.any():
            col_data = col_data[~missing_mask]
        col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE)
        distinct_values = np.unique(col_data)
        if len(distinct_values) <= max_bins:
            midpoints = distinct_values[:-1] + distinct_values[1:]
            midpoints *= .5
        else:
            # We sort again the data in this case. We could compute
            # approximate midpoint percentiles using the output of
            # np.unique(col_data, return_counts) instead but this is more
            # work and the performance benefit will be limited because we
            # work on a fixed-size subsample of the full data.
            percentiles = np.linspace(0, 100, num=max_bins + 1)
            percentiles = percentiles[1:-1]
            midpoints = np.percentile(col_data,
                                      percentiles,
                                      interpolation='midpoint').astype(X_DTYPE)
            assert midpoints.shape[0] == max_bins - 1

        # We avoid having +inf thresholds: +inf thresholds are only allowed in
        # a "split on nan" situation.
        np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)

        binning_thresholds.append(midpoints)

    return binning_thresholds
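
# Small check of the behaviour described in the docstring above: with fewer
# distinct values than max_bins, the thresholds are just the midpoints between
# consecutive distinct values (toy data, independent of scikit-learn internals).
import numpy as np

col = np.array([0.0, 1.0, 1.0, 3.0, 7.0, np.nan])
col = col[~np.isnan(col)]              # missing values are ignored
distinct = np.unique(col)              # [0. 1. 3. 7.]
print((distinct[:-1] + distinct[1:]) * 0.5)   # [0.5 2.  5. ]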
Beispiel #47
0
def hash_array(s):
    s = np.ascontiguousarray(s)
    byte_view = s.view(np.uint8)
    return hashlib.sha1(byte_view).hexdigest()
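
# Note on hash_array above: the digest covers the raw bytes, so dtype (and
# byte layout) matters even when the values compare equal.
import numpy as np

a32 = np.array([1, 2, 3], dtype=np.int32)
a64 = np.array([1, 2, 3], dtype=np.int64)
print(hash_array(a32) == hash_array(a64))          # False: different bytes
print(hash_array(a64) == hash_array(a64.copy()))   # True: same bytes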
Beispiel #48
0
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
                         return_code=True, dict_init=None, callback=None,
                         batch_size=3, verbose=False, shuffle=True, n_jobs=1,
                         method='lars', iter_offset=0, random_state=None,
                         n_atoms=None, chunk_size=None):
    """Solves a dictionary learning matrix factorization problem online.

    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::

        (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
                     (U,V)
                     with || V_k ||_2 = 1 for all  0 <= k < n_components

    where V is the dictionary and U is the sparse code. This is
    accomplished by repeatedly iterating over mini-batches by slicing
    the input data.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        Data matrix.

    n_components : int,
        Number of dictionary atoms to extract.

    alpha : int,
        Sparsity controlling parameter.

    n_iter : int,
        Number of iterations to perform.

    return_code : boolean,
        Whether to also return the code U or just the dictionary V.

    dict_init : array of shape (n_components, n_features),
        Initial value for the dictionary for warm restart scenarios.

    callback :
        Callable that gets invoked every five iterations.

    batch_size : int,
        The number of samples to take in each batch.

    verbose :
        Degree of output the procedure will print.

    shuffle : boolean,
        Whether to shuffle the data before splitting it in batches.

    n_jobs : int,
        Number of parallel jobs to run, or -1 to autodetect.

    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    iter_offset : int, default 0
        Number of previous iterations completed on the dictionary used for
        initialization.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    code : array of shape (n_samples, n_components),
        the sparse code (only returned if `return_code=True`)

    dictionary : array of shape (n_components, n_features),
        the solutions to the dictionary learning problem

    See also
    --------
    dict_learning
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA

    """

    if n_atoms is not None:
        n_components = n_atoms
        warnings.warn("Parameter n_atoms has been renamed to "
                      "'n_components' and will be removed in release 0.14.",
                      DeprecationWarning, stacklevel=2)

    if chunk_size is not None:
        chunk_size = batch_size
        warnings.warn("Parameter chunk_size has been renamed to "
                      "'batch_size' and will be removed in release 0.14.",
                      DeprecationWarning, stacklevel=2)

    if method not in ('lars', 'cd'):
        raise ValueError('Coding method not supported as a fit algorithm.')
    method = 'lasso_' + method

    t0 = time.time()
    n_samples, n_features = X.shape
    # Avoid integer division problems
    alpha = float(alpha)
    random_state = check_random_state(random_state)

    if n_jobs == -1:
        n_jobs = cpu_count()

    # Init V with SVD of X
    if dict_init is not None:
        dictionary = dict_init
    else:
        _, S, dictionary = randomized_svd(X, n_components)
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    if n_components <= r:
        dictionary = dictionary[:n_components, :]
    else:
        dictionary = np.r_[dictionary,
                           np.zeros((n_components - r, dictionary.shape[1]))]
    dictionary = np.ascontiguousarray(dictionary.T)

    if verbose == 1:
        print('[dict_learning]', end='')

    n_batches = floor(float(len(X)) / batch_size)
    if shuffle:
        X_train = X.copy()
        random_state.shuffle(X_train)
    else:
        X_train = X
    batches = np.array_split(X_train, n_batches)
    batches = itertools.cycle(batches)

    # The covariance of the dictionary
    A = np.zeros((n_components, n_components))
    # The data approximation
    B = np.zeros((n_features, n_components))

    for ii, this_X in zip(range(iter_offset, iter_offset + n_iter),
                          batches):
        dt = (time.time() - t0)
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            if verbose > 10 or ii % ceil(100. / verbose) == 0:
                print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
                       % (ii, dt, dt / 60))

        this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
                                  alpha=alpha).T

        # Update the auxiliary variables
        if ii < batch_size - 1:
            theta = float((ii + 1) * batch_size)
        else:
            theta = float(batch_size ** 2 + ii + 1 - batch_size)
        beta = (theta + 1 - batch_size) / (theta + 1)

        A *= beta
        A += np.dot(this_code, this_code.T)
        B *= beta
        B += np.dot(this_X.T, this_code.T)

        # Update dictionary
        dictionary = _update_dict(dictionary, B, A, verbose=verbose,
                                  random_state=random_state)
        # XXX: Can the residuals be of any use?

        # Maybe we need a stopping criteria based on the amount of
        # modification in the dictionary
        if callback is not None:
            callback(locals())

    if return_code:
        if verbose > 1:
            print('Learning code...', end='')
        elif verbose == 1:
            print('|', end='')
        code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
                             n_jobs=n_jobs)
        if verbose > 1:
            dt = (time.time() - t0)
            print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
        return code, dictionary.T

    return dictionary.T
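
# Hedged usage sketch for dict_learning_online above (an old scikit-learn
# signature; assumes sparse_encode, _update_dict and randomized_svd are
# importable as in the original module). Modern scikit-learn exposes the same
# idea as sklearn.decomposition.MiniBatchDictionaryLearning.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 8)
code, dictionary = dict_learning_online(X, n_components=4, alpha=1.0,
                                        n_iter=50, batch_size=5,
                                        random_state=0)
print(code.shape, dictionary.shape)  # (100, 4) (4, 8)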
Beispiel #49
0
def get_mnist_images(max_images=0, fold='train'):
  """Returns mnist images, batch dimension last."""
  
  import gzip
  from tensorflow.contrib.learn.python.learn.datasets import base
  import numpy
  
  def extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].
    Args:
      f: A file object that can be passed into a gzip reader.
    Returns:
      data: A 4D uint8 numpy array [index, y, x, depth].
    Raises:
      ValueError: If the bytestream does not start with 2051.
    """
    #    print('Extracting', f.name) # todo: remove
    with gzip.GzipFile(fileobj=f) as bytestream:
      magic = _read32(bytestream)
      if magic != 2051:
        raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                         (magic, f.name))
      num_images = _read32(bytestream)
      if max_images:
        num_images = max_images
      rows = _read32(bytestream)
      cols = _read32(bytestream)
      buf = bytestream.read(rows * cols * num_images)
      data = numpy.frombuffer(buf, dtype=numpy.uint8)
      data = data.reshape(num_images, rows, cols, 1)
      return data

  def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]

  if fold == 'train': # todo: rename
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  elif fold == 'test':
    TRAIN_IMAGES = 't10k-images-idx3-ubyte.gz'
  else:
    assert False, 'unknown fold %s'%(fold)
    
  source_url = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
  local_file = base.maybe_download(TRAIN_IMAGES, '/tmp',
                                     source_url + TRAIN_IMAGES)
  train_images = extract_images(open(local_file, 'rb'))
  dsize = train_images.shape[0]
  if fold == 'train':
    if not max_images:
      dsize = 60000
    else:
      dsize = max_images
      assert dsize <= 60000
  else:
    if not max_images:
      dsize = 10000
    else:
      dsize = max_images
      assert dsize <= 10000

  train_images = train_images.reshape(dsize, 28**2).T.astype(np.float64)/255
  train_images = np.ascontiguousarray(train_images)
  return train_images.astype(default_np_dtype)
Beispiel #50
0
def tensor_from_rgb_image(image: np.ndarray) -> torch.Tensor:
    image = np.moveaxis(image, -1, 0)
    image = np.ascontiguousarray(image)
    image = torch.from_numpy(image)
    return image
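
# Quick check of tensor_from_rgb_image above: HWC numpy image in, contiguous
# CHW torch tensor out (toy all-zero image).
import numpy as np
import torch

hwc = np.zeros((480, 640, 3), dtype=np.uint8)
chw = tensor_from_rgb_image(hwc)
print(chw.shape)  # torch.Size([3, 480, 640])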
Beispiel #51
0
def _pose_flip_lr(x):
    """Flip `x` horizontally."""
    if isinstance(x, Pose):
        return x.flip_lr()
    return tensor(np.ascontiguousarray(np.array(x)[..., ::-1]))
Beispiel #52
0
''' (opening of this snippet was lost in extraction; the block below had been commented out in the original)
        node.outputs[0].shape= ['B',t1,'t4',t0]
        graph.inputs = [node.outputs[0]]
    if node.op == 'Add' and node.name == 'Add_62':
        graph.outputs = [node.outputs[0]]
graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile0)
'''

graph = gs.import_onnx(onnx.load(onnxFile0))

for node in graph.nodes:
    if node.op == "MatMul" and node.name == 'MatMul_61':
        convKernel = node.inputs[1].values.transpose(1, 0).reshape(
            256, t1, 1, t0).astype(np.float32)
        convKernelV = gs.Constant("ConvKernelV",
                                  np.ascontiguousarray(convKernel))
        continue

    if node.op == "Add" and node.name == 'Add_62':
        convBias = node.inputs[0].values
        convBiasV = gs.Constant("ConvBiasV", np.ascontiguousarray(convBias))
        continue

convV = gs.Variable("ConvV", np.dtype(np.float32), ['B', t1, 't4', 1])
convN = gs.Node("Conv",
                "ConvN",
                inputs=[graph.inputs[0], convKernelV, convBiasV],
                outputs=[convV])
convN.attrs = OrderedDict([
    ('dilations', [1, 1]),
    ('kernel_shape', [1, t0]),
Beispiel #53
0
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image,
                 num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    """

    _bbox_para_num = 5

    # overlaps: (rois x gt_boxes)
    overlaps = rbbx_overlaps(  # D
        np.ascontiguousarray(all_rois[:, 1:_bbox_para_num + 1],
                             dtype=np.float32),  # D
        np.ascontiguousarray(gt_boxes[:, :_bbox_para_num], dtype=np.float32),
        cfg.GPU_ID)  # D

    an_gt_diffs = angle_distance(all_rois[:, 1:_bbox_para_num + 1],
                                 gt_boxes[:, :_bbox_para_num])  # D

    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)

    max_overlaps_angle_diff = an_gt_diffs[np.arange(len(gt_assignment)),
                                          gt_assignment]  # D

    labels = gt_boxes[gt_assignment, 5]  # D: label is in the last column

    # Select foreground RoIs as those with >= FG_THRESH overlap

    #################### angle filter
    fg_inds = np.where((max_overlaps >= cfg.TRAIN.FG_THRESH) & (
        max_overlaps_angle_diff <= cfg.TRAIN.R_POSITIVE_ANGLE_FILTER))[0]  # D
    ####################

    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = int(min(fg_rois_per_image, fg_inds.size))
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds,
                             size=fg_rois_per_this_image,
                             replace=False)

    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)

    ####################
    bg_inds = np.where(
        ((max_overlaps < cfg.TRAIN.BG_THRESH_HI)
         & (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))
        | ((max_overlaps >= cfg.TRAIN.FG_THRESH)
           & (max_overlaps_angle_diff > cfg.TRAIN.R_NEGATIVE_ANGLE_FILTER)))[0]
    ####################

    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = int(min(bg_rois_per_this_image, bg_inds.size))
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds,
                             size=bg_rois_per_this_image,
                             replace=False)

    # The indices that we're selecting (both fg and bg)
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    rois = all_rois[keep_inds]

    bbox_target_data = _compute_targets(
        rois[:, 1:_bbox_para_num + 1],
        gt_boxes[gt_assignment[keep_inds], :_bbox_para_num], labels)  # D

    bbox_targets, bbox_inside_weights = \
        _get_bbox_regression_labels(bbox_target_data, num_classes)

    return labels, rois, bbox_targets, bbox_inside_weights
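
# The fg/bg balancing pattern used by _sample_rois above, in isolation: cap
# each quota by availability, then sample without replacement. A minimal
# sketch with toy indices (not the detector's actual config values).
import numpy as np

def sample_balanced(fg_inds, bg_inds, fg_quota, total, rng):
    n_fg = int(min(fg_quota, fg_inds.size))
    fg = rng.choice(fg_inds, size=n_fg, replace=False) if fg_inds.size > 0 else fg_inds
    n_bg = int(min(total - n_fg, bg_inds.size))
    bg = rng.choice(bg_inds, size=n_bg, replace=False) if bg_inds.size > 0 else bg_inds
    return np.append(fg, bg)

rng = np.random.default_rng(0)
keep = sample_balanced(np.arange(3), np.arange(10, 40), fg_quota=8, total=16, rng=rng)
print(len(keep))  # 16 -> 3 foreground + 13 background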
Beispiel #54
0
def extrude_triangulation(vertices, faces, height, transform=None, **kwargs):
    """
    Extrude a 2D triangulation into a watertight mesh.

    Parameters
    ----------
    vertices : (n, 2) float
      2D vertices
    faces : (m, 3) int
      Triangle indexes of vertices
    height : float
      Distance to extrude triangulation
    transform : None or (4, 4) float
      Homogeneous transform to apply to the resulting mesh
    **kwargs : dict
      Passed to Trimesh constructor

    Returns
    ---------
    mesh : trimesh.Trimesh
      Mesh created from extrusion
    """
    vertices = np.asanyarray(vertices, dtype=np.float64)
    height = float(height)
    faces = np.asanyarray(faces, dtype=np.int64)

    if not util.is_shape(vertices, (-1, 2)):
        raise ValueError('Vertices must be (n,2)')
    if not util.is_shape(faces, (-1, 3)):
        raise ValueError('Faces must be (n,3)')
    if np.abs(height) < tol.merge:
        raise ValueError('Height must be nonzero!')

    # make sure triangulation winding is pointing up
    normal_test = triangles.normals([util.stack_3D(vertices[faces[0]])])[0]

    normal_dot = np.dot(normal_test, [0.0, 0.0, np.sign(height)])[0]

    # make sure the triangulation is aligned with the sign of
    # the height we've been passed
    if normal_dot < 0.0:
        faces = np.fliplr(faces)

    # stack the (n,3) faces into (3*n, 2) edges
    edges = faces_to_edges(faces)
    edges_sorted = np.sort(edges, axis=1)
    # edges which only occur once are on the boundary of the polygon
    # since the triangulation may have subdivided the boundary of the
    # shapely polygon, we need to find it again
    edges_unique = grouping.group_rows(edges_sorted, require_count=1)

    # (n, 2, 2) set of line segments (positions, not references)
    boundary = vertices[edges[edges_unique]]

    # we are creating two vertical  triangles for every 2D line segment
    # on the boundary of the 2D triangulation
    vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
    vertical = np.column_stack(
        (vertical, np.tile([0, height, 0, height], len(boundary))))
    vertical_faces = np.tile([3, 1, 2, 2, 1, 0], (len(boundary), 1))
    vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
    vertical_faces = vertical_faces.reshape((-1, 3))

    # stack the (n,2) vertices with zeros to make them (n, 3)
    vertices_3D = util.stack_3D(vertices)

    # a sequence of zero-indexed faces, which will then be appended
    # with offsets to create the final mesh
    faces_seq = [faces[:, ::-1], faces.copy(), vertical_faces]
    vertices_seq = [
        vertices_3D,
        vertices_3D.copy() + [0.0, 0, height], vertical
    ]

    # append sequences into flat nicely indexed arrays
    vertices, faces = util.append_faces(vertices_seq, faces_seq)
    if transform is not None:
        # apply transform here to avoid later bookkeeping
        vertices = tf.transform_points(vertices, transform)
        # if the transform flips the winding flip faces back
        # so that the normals will be facing outwards
        if tf.flips_winding(transform):
            # fliplr makes arrays non-contiguous
            faces = np.ascontiguousarray(np.fliplr(faces))
    # create mesh object with passed keywords
    mesh = Trimesh(vertices=vertices, faces=faces, **kwargs)
    # only check in strict mode (unit tests)
    if tol.strict:
        assert mesh.volume > 0.0

    return mesh
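
# Hedged usage sketch for extrude_triangulation above (it mirrors
# trimesh.creation.extrude_triangulation): extrude a unit square, given as two
# triangles, into a watertight box of height 1.
import numpy as np

square_vertices = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
square_faces = np.array([[0, 1, 2], [0, 2, 3]])

box = extrude_triangulation(square_vertices, square_faces, height=1.0)
print(box.is_watertight, box.volume)  # True, ~1.0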
Beispiel #55
0
    def smooth(self,
               positions,
               fields=None,
               index_fields=None,
               method=None,
               create_octree=False,
               nneighbors=64,
               kernel_name='cubic'):
        r"""Operate on the mesh, in a particle-against-mesh fashion, with
        non-local input.

        This uses the octree indexing system to call a "smoothing" operation
        (defined in yt/geometry/particle_smooth.pyx) that can take input from
        several (non-local) particles and construct some value on the mesh.
        The canonical example is to conduct a smoothing kernel operation on the
        mesh.

        Parameters
        ----------
        positions : array_like (Nx3)
            The positions of all of the particles to be examined.  A new
            indexed octree will be constructed on these particles.
        fields : list of arrays
            All the necessary fields for computing the particle operation.  For
            instance, this might include mass, velocity, etc.
        index_fields : list of arrays
            All of the fields defined on the mesh that may be used as input to
            the operation.
        method : string
            This is the "method name" which will be looked up in the
            `particle_smooth` namespace as `methodname_smooth`.  Current
            methods include `volume_weighted`, `nearest`, `idw`,
            `nth_neighbor`, and `density`.
        create_octree : bool
            Should we construct a new octree for indexing the particles?  In
            cases where we are applying an operation on a subset of the
            particles used to construct the mesh octree, this will ensure that
            we are able to find and identify all relevant particles.
        nneighbors : int, default 64
            The number of neighbors to examine during the process.
        kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. Current supported
            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
            `wendland4`, and `wendland6`.

        Returns
        -------
        List of fortran-ordered, mesh-like arrays.
        """
        # Here we perform our particle deposition.
        positions.convert_to_units("code_length")
        if create_octree:
            morton = compute_morton(positions[:, 0], positions[:, 1],
                                    positions[:, 2], self.ds.domain_left_edge,
                                    self.ds.domain_right_edge)
            morton.sort()
            particle_octree = ParticleOctreeContainer(
                [1, 1, 1],
                self.ds.domain_left_edge,
                self.ds.domain_right_edge,
                over_refine=self._oref)
            # This should ensure we get everything within one neighbor of home.
            particle_octree.n_ref = nneighbors * 2
            particle_octree.add(morton)
            particle_octree.finalize()
            pdom_ind = particle_octree.domain_ind(self.selector)
        else:
            particle_octree = self.oct_handler
            pdom_ind = self.domain_ind
        if fields is None: fields = []
        if index_fields is None: index_fields = []
        cls = getattr(particle_smooth, "%s_smooth" % method, None)
        if cls is None:
            raise YTParticleDepositionNotImplemented(method)
        nz = self.nz
        mdom_ind = self.domain_ind
        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
        op = cls(nvals, len(fields), nneighbors, kernel_name)
        op.initialize()
        mylog.debug("Smoothing %s particles into %s Octs", positions.shape[0],
                    nvals[-1])
        # Pointer operations within 'process_octree' require arrays to be
        # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
        op.process_octree(self.oct_handler, mdom_ind, positions, self.fcoords,
                          fields, self.domain_id, self._domain_offset,
                          self.ds.periodicity, index_fields, particle_octree,
                          pdom_ind, self.ds.geometry)
        # If there are 0s in the smoothing field this will not throw an error,
        # but silently return nans for vals where dividing by 0
        # Same as what is currently occurring, but suppressing the div by zero
        # error.
        with np.errstate(invalid='ignore'):
            vals = op.finalize()
        if vals is None: return
        if isinstance(vals, list):
            vals = [np.asfortranarray(v) for v in vals]
        else:
            vals = np.asfortranarray(vals)
        return vals
Beispiel #56
0
    def agruparTabla(self, tab):
        # tab = np.array(tab)
        tab = np.ascontiguousarray(tab)
        unique_a = np.unique(tab.view([('', tab.dtype)] * tab.shape[1]))
        return unique_a.view(tab.dtype).reshape(
            (unique_a.shape[0], tab.shape[1]))
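
# Note on agruparTabla above: the structured-view trick predates NumPy's
# axis-aware unique. On NumPy >= 1.13 the same row deduplication is simply:
import numpy as np

tab = np.array([[1, 2], [3, 4], [1, 2]])
print(np.unique(tab, axis=0))  # [[1 2]
                               #  [3 4]]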
Beispiel #57
0
def test_ascontiguousarray():
    a = afnumpy.random.random((2, 3))
    b = numpy.array(a)
    fassert(afnumpy.ascontiguousarray(a), numpy.ascontiguousarray(b))
Beispiel #58
0
# 8.99218
# 5.38 Earth, 5.13
# a=0.08036 au

if __name__ == '__main__':
    with fits.open(
            "hlsp_everest_k2_llc_205071984-c02_kepler_v2.0_lc.fits") as hdus:
        data = hdus[1].data
    t = data["TIME"]
    y = data["FLUX"]
    q = data["QUALITY"]
    # Remove flagged EVEREST data points
    m = numpy.isfinite(t) & numpy.isfinite(y)
    for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]:
        m &= (q & (2**(b - 1))) == 0
    t = numpy.ascontiguousarray(t[m], dtype=numpy.float64)
    y = numpy.ascontiguousarray(y[m], dtype=numpy.float64)
    y = y / numpy.median(y)
    skip = 64
    t = t[skip:]
    y = y[skip:]
    #t, y = cleaned_array(t, y)
    print(min(t), max(t), max(t) - min(t), len(t))
    trend = scipy.signal.medfilt(y, 31)
    trend = scipy.signal.savgol_filter(trend, 25, 2)
    y_filt = y / trend
    y_filt = sigma_clip(y_filt, sigma_upper=2, sigma_lower=float('inf'))
    #for i in range(len(y_filt)):
    #    print(t[i], ',', y_filt[i])
    # Periods
    # Period 8.99198 d  2067.92701
Beispiel #59
0
def greycomatrix_with_nan(image,
                          distances,
                          angles,
                          levels=None,
                          symmetric=False,
                          normed=False):
    """Calculate the grey-level co-occurrence matrix with nan values."""

    assert_nD(image, 2)
    assert_nD(distances, 1, 'distances')
    assert_nD(angles, 1, 'angles')

    image = np.ascontiguousarray(image)

    image_max = image.max()

    if levels is None:
        levels = 256

    if image_max >= levels:
        raise ValueError("The maximum grayscale value in the image should be "
                         "smaller than the number of levels.")

    distances = np.ascontiguousarray(distances, dtype=np.float64)
    angles = np.ascontiguousarray(angles, dtype=np.float64)

    P = np.zeros((levels, levels, len(distances), len(angles)),
                 dtype=np.uint32,
                 order='C')

    # count co-occurrences
    rows = image.shape[0]
    cols = image.shape[1]

    for a_idx in range(angles.shape[0]):
        angle = angles[a_idx]
        for d_idx in range(distances.shape[0]):
            distance = distances[d_idx]
            offset_row = round(np.sin(angle) * distance)
            offset_col = round(np.cos(angle) * distance)
            start_row = np.uint(max(0, -offset_row))
            end_row = np.uint(min(rows, rows - offset_row))
            start_col = np.uint(max(0, -offset_col))
            end_col = np.uint(min(cols, cols - offset_col))
            for r in range(start_row, end_row):
                for c in range(start_col, end_col):
                    i = image[r, c]
                    # compute the location of the offset pixel
                    row = np.uint(r + offset_row)
                    col = np.uint(c + offset_col)
                    j = image[row, col]
                    if 0 <= i < levels and 0 <= j < levels and not np.isnan(
                            i) and not np.isnan(j):
                        P[np.uint8(i), np.uint8(j), d_idx, a_idx] += 1

    # make each GLCM symmetric
    if symmetric:
        Pt = np.transpose(P, (1, 0, 2, 3))
        P = P + Pt

    # normalize each GLCM
    if normed:
        P = P.astype(np.float64)
        glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
        glcm_sums[glcm_sums == 0] = 1
        P /= glcm_sums

    return P
Beispiel #60
0
    mesh_refine_end = time.time()
    mesh_refine_time = run_time(mesh_refine_end - mesh_refine_start, 'Mesh Refinement')
    section_times.append(mesh_refine_time)
    print('Mesh refined in {} s \n'.format(mesh_refine_time.time))

    mesh = new_mesh

    # Gathering all the data from the mesh AFTER having done the mesh refinement and defined the mesh for plotting

    print('Rearranging mesh data\n')
    rearrange_start = time.time()

    V, vertex_number, x_coords, y_coords, z_coords, r_coords, sorting_index, x_sorted, y_sorted, z_sorted, r_sorted = rearrange_mesh_data(mesh, center_of_mass, degree_PDE)

    # To be able to gather the coordinate arrays with MPI, the coordinates need to be C-contiguous
    x_coords, y_coords, z_coords, r_coords = [np.ascontiguousarray(coord_array) for coord_array in [x_coords, y_coords, z_coords, r_coords]]

    rearrange_end = time.time()
    rearrange_time = run_time(rearrange_end - rearrange_start, 'Mesh data rearrange')
    section_times.append(rearrange_time)
    print('Mesh data rearranged in {} s \n'.format(rearrange_time.time))

    # Defining a few BVPs from combinations we use often. Naming scheme: 'weak form_source'

    #BVPs for a discrete dirac mass distribution, for Newton and MOND with/out interpolations
    newton_dirac = BVP(F_Newton, u_Newton, f_multiple_dirac, 'Newton, discrete dirac')
    mond_deep_dirac = BVP(F_MOND_deep, u_displaced_cpp, f_multiple_dirac, 'Deep MOND, discrete dirac')
    mond_simple_dirac = BVP(F_MOND_simple, u_displaced_cpp, f_multiple_dirac, 'Simple MOND, discrete dirac')
    mond_standard_dirac = BVP(F_MOND_standard, u_displaced_cpp, f_multiple_dirac, 'Standard MOND, discrete dirac')

    #BVPs for a discrete gauss mass distribution.