Example #1
def get_cut_coords(map3d, n_axials=12, delta_z_axis=3):
    """
    Heuristically computes optimal cut_coords for plot_map(...) call.

    Parameters
    ----------
    map3d: 3D array
        the data under consideration
    n_axials: int, optional (default 12)
        number of axials in the plot
    delta_z_axis: int, optional (default 3)
        z-axis spacing

    Returns
    -------
    cut_coords: 1D array of length n_axials
        the computed cut_coords

    """

    z_axis_max = np.unravel_index(np.abs(map3d).argmax(), map3d.shape)[2]
    z_axis_min = np.unravel_index((-np.abs(map3d)).argmin(), map3d.shape)[2]
    z_axis_min, z_axis_max = (min(z_axis_min, z_axis_max), max(z_axis_max, z_axis_min))
    z_axis_min = min(z_axis_min, z_axis_max - delta_z_axis * n_axials)

    cut_coords = np.linspace(z_axis_min, z_axis_max, n_axials)

    return cut_coords
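
A minimal usage sketch of the helper above, assuming numpy is imported as np and using a made-up dummy map (plot_map itself belongs to the original plotting code and is only referenced in a comment):

import numpy as np

map3d = np.random.randn(40, 48, 36)              # dummy 3D statistical map
cut_coords = get_cut_coords(map3d, n_axials=6)   # six heuristically chosen z-coordinates
print(cut_coords)
# plot_map(..., cut_coords=cut_coords)           # as in the original call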
Example #2
def test_pad_input():
    """Test `match_template` when `pad_input=True`.

    This test places two full templates (one with values lower than the image
    mean, the other higher) and two half templates, which are on the edges of
    the image. The two full templates should score the top (positive and
    negative) matches and the centers of the half templates should score 2nd.
    """
    # Float prefactors ensure that image range is between 0 and 1
    template = 0.5 * diamond(2)
    image = 0.5 * np.ones((9, 19))
    mid = slice(2, 7)
    image[mid, :3] -= template[:, -3:]  # half min template centered at 0
    image[mid, 4:9] += template         # full max template centered at 6
    image[mid, -9:-4] -= template       # full min template centered at 12
    image[mid, -3:] += template[:, :3]  # half max template centered at 18

    result = match_template(image, template, pad_input=True,
                            constant_values=image.mean())

    # get the max and min results.
    sorted_result = np.argsort(result.flat)
    i, j = np.unravel_index(sorted_result[:2], result.shape)
    assert_equal(j, (12, 0))
    i, j = np.unravel_index(sorted_result[-2:], result.shape)
    assert_equal(j, (18, 6))
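
Outside the test, the same argmax-plus-unravel_index pattern recovers the best-match location from match_template; a minimal sketch, assuming scikit-image and numpy are available (the image contents are made up):

import numpy as np
from skimage.feature import match_template
from skimage.morphology import diamond

image = np.zeros((50, 50))
template = diamond(3).astype(float)
image[20:27, 30:37] = template                   # paste the template into the image
result = match_template(image, template, pad_input=True)
row, col = np.unravel_index(np.argmax(result), result.shape)
print(row, col)                                  # with pad_input=True this is the template centre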
Example #3
def ensemble_tuning(X_train, y_train):
    kf = sklearn.cross_validation.KFold(X_train.shape[0], n_folds=5,
                                        shuffle=True, random_state=1234)

    model1 = sklearn.ensemble.RandomForestClassifier(criterion="entropy", max_depth=15, n_estimators=500,
                                                    min_samples_leaf=4, min_samples_split=16, random_state=1234)
    #model2 = sklearn.neighbors.KNeighborsClassifier(n_neighbors=170, algorithm="kd_tree", weights="distance")
    model2 = sklearn.linear_model.LogisticRegression(C=2, penalty="l2")

    scores = np.zeros((5, 101), dtype=np.float32)

    for fold, (train_index, test_index) in enumerate(kf):
        model1.fit(X_train[train_index], y_train[train_index])
        pred1 = model1.predict_proba(X_train[test_index])[:, 1]

        model2.fit(X_train[train_index], y_train[train_index])
        pred2 = model2.predict_proba(X_train[test_index])[:, 1]

        print("Calculating scores...")
        for alpha in np.ndindex(101):
            scores[fold][alpha] = sklearn.metrics.roc_auc_score(y_train[test_index], 0.01*alpha[0]*pred1 + np.maximum(1 - 0.01*alpha[0], 0)*pred2)

        sc1 = np.mean(scores, axis = 0) * 1.0 / (fold+1) * 5
        print(np.max(sc1), np.unravel_index(sc1.argmax(), sc1.shape), sc1[0], sc1[100])

    scores1 = np.mean(scores, axis = 0)
    print(np.max(scores1), np.unravel_index(scores1.argmax(), scores1.shape), scores1[0], scores1[100])
Example #4
def agg_lut(lathi,lonhi,latlo,lonlo):
    # outputlut=np.zeros((lathi.shape[0],lathi.shape[1],2))-9999
    print("Computing Geographic Look Up Table")
    outputlut=np.empty((latlo.shape[0],latlo.shape[1],2),dtype=list)
    nx=lathi.shape[1]
    ny=lathi.shape[0]

    maxdist=((latlo[0,0]-latlo[1,1])**2 + (lonlo[0,0]-lonlo[1,1])**2)/1.5
    
    for i in range(ny):
        dists=(latlo-lathi[i,0])**2+(lonlo-lonhi[i,0])**2
        y,x=np.unravel_index(dists.argmin(),dists.shape)
        # print(i,ny,y,x,lathi[i,0],lonhi[i,0],latlo[y,x],lonlo[y,x])
        for j in range(nx):
            xmax=min(nx,x+3)
            xmin=max(0,x-3)
            ymax=min(ny,y+3)
            ymin=max(0,y-3)
            
            windists=((latlo[ymin:ymax,xmin:xmax]-lathi[i,j])**2+
                      (lonlo[ymin:ymax,xmin:xmax]-lonhi[i,j])**2)
            yoff,xoff=np.unravel_index(windists.argmin(),windists.shape)
            x=xoff+xmin
            y=yoff+ymin
            if not outputlut[y,x,0]:
                outputlut[y,x,0]=list()
                outputlut[y,x,1]=list()
            
            if windists[yoff,xoff]<maxdist:
                outputlut[y,x,0].append(i)
                outputlut[y,x,1].append(j)
                
    return outputlut
Example #5
def test_normalization():
    """Test that `match_template` gives the correct normalization.

    Normalization gives 1 for a perfect match and -1 for an inverted-match.
    This test adds positive and negative squares to a zero-array and matches
    the array with a positive template.
    """
    n = 5
    N = 20
    ipos, jpos = (2, 3)
    ineg, jneg = (12, 11)
    image = np.full((N, N), 0.5)
    image[ipos:ipos + n, jpos:jpos + n] = 1
    image[ineg:ineg + n, jneg:jneg + n] = 0

    # white square with a black border
    template = np.zeros((n + 2, n + 2))
    template[1:1 + n, 1:1 + n] = 1

    result = match_template(image, template)

    # get the max and min results.
    sorted_result = np.argsort(result.flat)
    iflat_min = sorted_result[0]
    iflat_max = sorted_result[-1]
    min_result = np.unravel_index(iflat_min, result.shape)
    max_result = np.unravel_index(iflat_max, result.shape)

    # shift result by 1 because of template border
    assert np.all((np.array(min_result) + 1) == (ineg, jneg))
    assert np.all((np.array(max_result) + 1) == (ipos, jpos))

    assert np.allclose(result.flat[iflat_min], -1)
    assert np.allclose(result.flat[iflat_max], 1)
Example #6
    def test_dtypes(self):
        # Test with different data types
        for dtype in [np.int16, np.uint16, np.int32,
                      np.uint32, np.int64, np.uint64]:
            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
            shape = (5, 8)
            uncoords = 8*coords[0]+coords[1]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*coords[1]
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
                dtype=dtype)
            shape = (5, 8, 10)
            uncoords = 10*(8*coords[0]+coords[1])+coords[2]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*(coords[1]+8*coords[2])
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
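
The row-major arithmetic written out above (uncoords = 8*coords[0] + coords[1]) is exactly what ravel_multi_index computes, and unravel_index inverts it; a minimal standalone sketch, assuming only numpy:

import numpy as np

shape = (5, 8)
coords = (np.array([1, 0, 2]), np.array([1, 6, 3]))
flat = np.ravel_multi_index(coords, shape)       # 8*row + col -> array([ 9,  6, 19])
back = np.unravel_index(flat, shape)             # recovers the (row, col) arrays
assert all(np.array_equal(a, b) for a, b in zip(coords, back))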
Example #7
    def test_grad(self):
        eps = 1e-7
        f, args, vals = self.get_args()
        output0 = f(*vals)

        # Go through and backpropagate all of the gradients from the outputs
        grad0 = []
        for i in range(len(output0) - 2):
            grad0.append([])
            for j in range(output0[i].size):
                ind = np.unravel_index(j, output0[i].shape)

                g = theano.function(
                    args, theano.grad(self.op(*args)[i][ind], args))
                grad0[-1].append(g(*vals))

        # Loop over each input and numerically compute the gradient
        for k in range(len(vals)):
            for l in range(vals[k].size):
                inner = np.unravel_index(l, vals[k].shape)
                vals[k][inner] += eps
                plus = f(*vals)
                vals[k][inner] -= 2*eps
                minus = f(*vals)
                vals[k][inner] += eps

                # Compare to the backpropagated gradients
                for i in range(len(output0) - 2):
                    for j in range(output0[i].size):
                        ind = np.unravel_index(j, output0[i].shape)
                        delta = 0.5 * (plus[i][ind] - minus[i][ind]) / eps
                        ref = grad0[i][j][k][inner]
                        assert np.abs(delta - ref) < 2*eps, \
                            "{0}".format((k, l, i, j, delta, ref, delta-ref))
Example #8
def find_subset(target_grid,lon_src,lat_src):

	ny,nx = lon_src.shape

	lon_bl_tgt = target_grid.coords[0][0][0,0] ; lat_bl_tgt = target_grid.coords[0][1][0,0] # bottom left
	lon_br_tgt = target_grid.coords[0][0][-1,0] ; lat_br_tgt = target_grid.coords[0][1][-1,0] # bottom right
	lon_ul_tgt = target_grid.coords[0][0][0,-1] ; lat_ul_tgt = target_grid.coords[0][1][0,-1] # upper left
	lon_ur_tgt = target_grid.coords[0][0][-1,-1] ; lat_ur_tgt = target_grid.coords[0][1][-1,-1] # upper right
	
	dist_2_bottom_left_corner = distance_on_unit_sphere(lon_bl_tgt,lat_bl_tgt,lon_src,lat_src)
	j_bl_src, i_bl_src = _np.unravel_index(dist_2_bottom_left_corner.argmin(),dist_2_bottom_left_corner.shape)

	dist_2_bottom_right_corner = distance_on_unit_sphere(lon_br_tgt,lat_br_tgt,lon_src,lat_src)
	j_br_src, i_br_src = _np.unravel_index(dist_2_bottom_right_corner.argmin(),dist_2_bottom_right_corner.shape)

	dist_2_upper_left_corner = distance_on_unit_sphere(lon_ul_tgt,lat_ul_tgt,lon_src,lat_src)
	j_ul_src, i_ul_src = _np.unravel_index(dist_2_upper_left_corner.argmin(),dist_2_upper_left_corner.shape)

	dist_2_upper_right_corner = distance_on_unit_sphere(lon_ur_tgt,lat_ur_tgt,lon_src,lat_src)
	j_ur_src, i_ur_src = _np.unravel_index(dist_2_upper_right_corner.argmin(),dist_2_upper_right_corner.shape)

	imin = min(i_bl_src,i_ul_src) ; imax = max(i_br_src,i_ur_src)
	jmin = min(j_bl_src,j_br_src) ; jmax = max(j_ul_src,j_ur_src)

	# for safety
	imin = max(imin-2,0)
	jmin = max(jmin-2,0)
	imax = min(imax+2,nx)
	jmax = min(jmax+2,ny)

	print('Subset source grid : full dimension is ', nx , ny, ' subset is ', imin, imax, jmin, jmax)

	return imin, imax, jmin, jmax
Example #9
def get_x_y_wind(lat, lon, dataset, model):
    indices = []
    # m = Basemap(rsphere=(6371229.00, 6356752.3142), projection='merc',
    #             llcrnrlat=40.5833284543, urcrnrlat=47.4999927992,
    #                    llcrnrlon=-129, urcrnrlon=-123.7265625)
    # xpt, ypt = m(lon,lat)
    # print "x ", xpt/100
    # print "y ", ypt/100
    print "lat ", lat
    print "lon ", lon
    lat_name = 'lat'
    lon_name = 'lon'
    file_lats = dataset.variables[lat_name][:]
    file_lons = dataset.variables[lon_name][:]
    file_lat = np.abs(file_lats - lat).argmin()
    file_lon = np.abs(file_lons - lon).argmin()
    print "Argmin lat", file_lat
    print "Argmin lon", file_lon
    file_lat = np.unravel_index(file_lat, file_lats.shape)
    file_lon = np.unravel_index(file_lon, file_lons.shape)
    print "Unravel lat", file_lat
    print "Unravel lon", file_lon
    file_lat_y = file_lat[0]
    file_lat_x = file_lat[1]
    print "lat y x", file_lat_y, file_lat_x
    file_lon_y = file_lon[0]
    file_lon_x = file_lon[1]
    print "lon y x", file_lon_y, file_lon_x
    print "lat ", file_lats[file_lat_y][file_lat_x]
    print "lon ", file_lons[file_lon_y][file_lon_x]
    indices.append(file_lat_y)
    indices.append(file_lat_x)
    return indices
Example #10
def move(_particles, _flighttime, clean = False):
    '''
    Move the particles until the first collision.
    Returns:
    the moved particles
    the updated array of flight times
    the flight time until the collision
    a tuple of pairs of colliding particles
    '''
    if clean :
        particles = copy.copy(_particles)
        flighttime = copy.copy(_flighttime)
    else :
        particles = _particles
        flighttime = _flighttime
    # Motion
    t = flighttime.min()

    for p in particles:
        p.r += p.v*t

    flighttime -= t
    # Find the colliding pairs
    z = flighttime.flatten() < almostzero
    indx =  np.where(np.logical_and(z.data,np.logical_not(z.mask)))[0]
    n = len(particles)
    # Optimization for the most common case
    if len(indx) == 1:
        return particles, flighttime, t, [np.unravel_index(indx[0], (n,n)),]
    pairs = [np.unravel_index(i,(n,n)) for i in indx]
    collidingparts = np.array(pairs).flatten()
    unique_collidingparts = np.unique(collidingparts)
    if len(collidingparts) != len(unique_collidingparts):
        raise ArithmeticError("Multibody collision detected!!!")
    return particles, flighttime,t,pairs
Example #11
 def classify_obj(self, zchi2arr1, zfit1, flags1, zchi2arr2, zfit2, flags2):
     flag_val = int('0b100',2) # From BOSS zwarning flag definitions
     for ifiber in xrange(zchi2arr1.shape[0]):
         self.minrchi2[ifiber,0] = n.min(zchi2arr1[ifiber]) / (self.npixflux) # Calculate reduced chi**2 values to compare templates of differing lengths
         if zchi2arr2 is not None: self.minrchi2[ifiber,1] = n.min(zchi2arr2[ifiber]) / (self.npixflux)
         minpos = self.minrchi2[ifiber].argmin() # Location of best chi2 of array of best (individual template) chi2s
         
         if minpos == 0: # Means overall chi2 minimum came from template 1
             self.type.append('GALAXY')
             self.minvector.append(zfit1.minvector[ifiber])
             self.z[ifiber] = zfit1.z[ifiber]
             self.z_err[ifiber] = zfit1.z_err[ifiber]
             minloc = n.unravel_index(zchi2arr1[ifiber].argmin(), zchi2arr1[ifiber].shape)[:-1]
             self.zwarning = n.append(self.zwarning, flags1[ifiber])
             argsort = self.minrchi2[ifiber].argsort()
             if len(argsort) > 1:
                 if argsort[1] == 1:
                     if ( n.min(zchi2arr2[ifiber]) - n.min(zchi2arr1[ifiber]) ) < zfit1.threshold: self.zwarning[ifiber] = int(self.zwarning[ifiber]) | flag_val #THIS THRESHOLD PROBABLY ISN'T RIGHT AND NEEDS TO BE CHANGED
         
         elif minpos == 1: # Means overall chi2 minimum came from template 2
             self.type.append('STAR')
             self.minvector.append(zfit2.minvector[ifiber])
             self.z[ifiber] = zfit2.z[ifiber]
             self.z_err[ifiber] = zfit2.z_err[ifiber]
             minloc = n.unravel_index(zchi2arr2[ifiber].argmin(), zchi2arr2[ifiber].shape)[:-1]
             self.zwarning = n.append(self.zwarning, flags2[ifiber])
             argsort = self.minrchi2[ifiber].argsort()
             if argsort[1] == 0:
                 if ( n.min(zchi2arr1[ifiber]) - n.min(zchi2arr2[ifiber]) ) < zfit2.threshold: self.zwarning[ifiber] = int(self.zwarning[ifiber]) | flag_val
Example #12
    def maxmindam(self,probsize,meanvasdam):

        m = probsize[0]
        n = probsize[1]

        damage = vasdam.damcalc(self,probsize,meanvasdam)

        maxdam = np.argmax(damage)
        mindam = np.argmin(damage)

        maxdamval = np.max(damage)
        mindamval = np.min(damage)
        
        dimsdam = damage.shape
        
        maxidx = np.unravel_index(maxdam,dimsdam)
        minidx = np.unravel_index(mindam,dimsdam)

        maxvasdam = np.array([maxidx[0],m+maxidx[0],maxidx[1],n+maxidx[1]]).reshape([2,2])
        minvasdam = np.array([minidx[0],m+minidx[0],minidx[1],n+minidx[1]]).reshape([2,2])

        location = np.array([maxvasdam,int(maxdamval),minvasdam,int(mindamval)])
        

        return  location
Example #13
def nearest(array, value):
    """
    Find the position in `array` nearest to `value`.

    Parameters
    ----------
    array : array_like
    value : float or list

    Returns
    -------
    Index of the nearest element: a flat index, or a tuple of indices from
    np.unravel_index when a float is searched in a 2D array or a list in a 3D array.
    Searches n x 1 and n x m x 1 arrays for floats.
    Searches n x m and n x m x p arrays for lists of size m or p.
    """

    if isinstance(array, (list, tuple)):
        array = np.asarray(array)

    if isinstance(value, (float, int)):
        pos = (np.abs(array - value)).argmin()
        if len(array.shape) == 2:
            return np.unravel_index(pos, array.shape)
        else:
            return pos
    else:
        pos = (np.sum((array - value)**2, axis=1)**(1 / 2)).argmin()
        if len(array.shape) == 3:
            return np.unravel_index(pos, array.shape)
        else:
            return pos
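
A minimal usage sketch of the two branches above, with made-up data and assuming numpy is imported as np:

import numpy as np

grid = np.array([[0.0, 1.5],
                 [3.0, 4.5]])
print(nearest(grid, 3.2))         # float on a 2D array -> (row, col) via np.unravel_index, here (1, 0)
print(nearest(grid, [0.9, 1.4]))  # list on a 2D array  -> index of the closest row, here 0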
Example #14
def split_quater(img1,img2,n):
    iq1 = img1
    iq2 = img2
    if n == 1:
        iq1 = np.copy(iq1[0:239,0:239])
        iq2 = np.copy(iq2[0:239,0:239])
    elif n == 2:
        iq1 = np.copy(iq1[0:239,240:479])
        iq2 = np.copy(iq2[0:239,240:479])
    elif n == 3:
        iq1 = np.copy(iq1[240:479,0:239])
        iq2 = np.copy(iq2[240:479,0:239])
    elif n == 4:
        iq1 = np.copy(iq1[240:479,240:479])
        iq2 = np.copy(iq2[240:479,240:479])
    elif n == 5:
        iq1 = img1
        iq2 = img2
    iq1 = cv2.GaussianBlur(iq1,(5,5),0)
    iq2 = cv2.GaussianBlur(iq2,(5,5),0)

    corr_img11 = cross_image(iq1,iq1)
    corr_img12 = cross_image(iq1,iq2)

    #cv2.imwrite('corr_img11.jpg',corr_img11)
    #cv2.imwrite('corr_img12.jpg',corr_img22)


    y_self,x_self = np.unravel_index(np.argmax(corr_img11), corr_img11.shape)
    y,x = np.unravel_index(np.argmax(corr_img12), corr_img12.shape)

    y_diff = y - y_self
    x_diff = x - x_self
    print(y_diff,x_diff)
    return (y_diff,x_diff)
Example #15
    def _fixEvenKernel(kernel):
        """Take a kernel with even dimensions and make them odd, centered correctly.

        Parameters
        ----------
        kernel : `numpy.array`
            a numpy.array

        Returns
        -------
        out : `numpy.array`
            a fixed kernel numpy.array
        """
        # Make sure the peak (close to a delta-function) is in the center!
        maxloc = np.unravel_index(np.argmax(kernel), kernel.shape)
        out = np.roll(kernel, kernel.shape[0]//2 - maxloc[0], axis=0)
        out = np.roll(out, out.shape[1]//2 - maxloc[1], axis=1)
        # Make sure it is odd-dimensioned by trimming it.
        if (out.shape[0] % 2) == 0:
            maxloc = np.unravel_index(np.argmax(out), out.shape)
            if out.shape[0] - maxloc[0] > maxloc[0]:
                out = out[:-1, :]
            else:
                out = out[1:, :]
            if out.shape[1] - maxloc[1] > maxloc[1]:
                out = out[:, :-1]
            else:
                out = out[:, 1:]
        return out
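
A minimal sketch of what the helper above does, on a made-up even-sized kernel and assuming it is callable as a standalone function with numpy imported as np:

import numpy as np

kernel = np.zeros((4, 4))
kernel[3, 0] = 1.0                                      # off-centre peak in an even-sized kernel
fixed = _fixEvenKernel(kernel)
print(fixed.shape)                                      # (3, 3): trimmed to odd dimensions
print(np.unravel_index(np.argmax(fixed), fixed.shape))  # (1, 1): the peak ends up centred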
Example #16
    def reshape(self, *args, **kwargs):
        shape = check_shape(args, self.shape)
        order, copy = check_reshape_kwargs(kwargs)

        # Return early if reshape is not required
        if shape == self.shape:
            if copy:
                return self.copy()
            else:
                return self

        new = lil_matrix(shape, dtype=self.dtype)

        if order == 'C':
            ncols = self.shape[1]
            for i, row in enumerate(self.rows):
                for col, j in enumerate(row):
                    new_r, new_c = np.unravel_index(i * ncols + j, shape)
                    new[new_r, new_c] = self[i, j]
        elif order == 'F':
            nrows = self.shape[0]
            for i, row in enumerate(self.rows):
                for col, j in enumerate(row):
                    new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
                    new[new_r, new_c] = self[i, j]
        else:
            raise ValueError("'order' must be 'C' or 'F'")

        return new
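
A quick behavioural check of the reshape above, assuming scipy is available (the matrix contents are arbitrary); C-order reshaping of the sparse matrix should match the dense equivalent:

import numpy as np
from scipy.sparse import lil_matrix

m = lil_matrix(np.arange(12).reshape(3, 4))
r = m.reshape((2, 6))                            # C order by default
assert np.array_equal(r.toarray(), np.arange(12).reshape(2, 6))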
Example #17
def calculateArea(flowDirectionGrid,flowAcc,areas,noDataValue,mask = None):

    # Get the indices that sort the flow accumulation array in ascending order
    idcs = flowAcc.argsort(axis=None)

    #Mask out the indexes outside of the mask
    if not mask is None:
        allRows,allCols = np.unravel_index(idcs,flowAcc.shape)
        gdIdcs = mask[allRows,allCols]
        idcs = idcs[gdIdcs]

    #Loop through all the indices in sorted order
    for idx in idcs:
        #Get the current row/column index
        [i, j] = np.unravel_index(idx, flowAcc.shape)  # Get the row/column indices

        #Get the index of the point downstream of this and the distance to that point
        [downI,downJ,inBounds] = getFlowToCell(i,j,flowDirectionGrid[i,j],flowAcc.shape[0],flowAcc.shape[1])

        #So long as we are not draining to the outskirts, proceed
        if inBounds and not flowAcc[downI,downJ] == noDataValue:

            #Accumulate area
            areas[downI,downJ] += areas[i,j]

    return areas
Example #18
def estrampcpx(inData, estData=None):
    if estData is not None:
        if np.any(np.iscomplex(estData)):
            estData=estData.conj()        
            inData=inData*estData
        else:
            estData=np.exp(-1.j*estData)
            inData=inData*estData
    indx=np.log10(abs(np.fft.fftshift(np.fft.fft2(np.angle(inData))))).argmax() #to display fft you may want to take log10 of the value here
    subi=np.unravel_index(indx, inData.shape)
    X,Y=np.meshgrid(np.r_[0:inData.shape[1]], np.r_[0:inData.shape[0]])
    fxmax=inData.shape[1]/2.
    fymax=inData.shape[0]/2.
    surface=np.pi*( (subi[0]-fymax)*Y/fymax + (subi[1]-fxmax)*X/fxmax )
    #print [ subi, fxmax, fymax ]
    outData=inData*np.exp(-1.j*surface)
    outVal=[(subi[0]-fymax)/fymax , (subi[1]-fxmax)/fxmax]
    #test if we get the direction right.
    indx2=np.log10(abs(np.fft.fftshift(np.fft.fft2(np.angle(outData))))).argmax() 
    subi2=np.unravel_index(indx2, inData.shape)
    dist1=np.sqrt( (subi[0]-fymax)**2. + (subi[1]-fxmax)**2.)
    dist2=np.sqrt( (subi2[0]-fymax)**2. + (subi2[1]-fxmax)**2.)
    #print [dist1, dist2]
    if dist1<dist2:
        #we got the direction wrong
        outVal=[-(subi[0]-fymax)/fymax , -(subi[1]-fxmax)/fxmax]
    #outData=np.exp(-1.j*surface)
    return outVal
Example #19
def diagnose_performance(output_parallel, output_serial, print_extradeets_or_not):
    error_perc = 100 * np.abs(output_serial - output_parallel) / np.abs(output_serial)
    error = np.abs(output_serial - output_parallel)

    # Relative error
    print('Relative error')
    worst_ind = np.unravel_index(np.argmax(error_perc), output_parallel.shape)
    print('Max error of {}% at {}'.format(error_perc[worst_ind], worst_ind))
    print('Worst match: naive = {}, parallel = {}'
          .format(output_serial[worst_ind],
                  output_parallel[worst_ind]))
                  
    # Absolute error
    print('Absolute error')
    worst_ind = np.unravel_index(np.argmax(error), output_parallel.shape)
    print('Max error of {} at {}'.format(error[worst_ind], worst_ind))
    print('Worst match: naive = {}, parallel = {}'
          .format(output_serial[worst_ind],
                  output_parallel[worst_ind]))

    if print_extradeets_or_not:
        # NOTE: `inputs` and `weights` are assumed to be defined in the enclosing scope.
        print('Inputs:')
        print(inputs)
        print('Weights:')
        print(weights)
        print('Outputs (dim: {})'.format(output_parallel.shape))
        print(output_parallel)
        print('Naive output (dim: {})'.format(output_serial.shape))
        print(output_serial)

    correctness = np.allclose(output_serial, output_parallel, rtol=0, atol=1e-3)
    print('Outputs match? {}'.format(correctness))
    return correctness
Example #20
 def iterkeys(self,index):
     #import pdb; pdb.set_trace()
     if not isinstance(index,tuple) and self.shape[0] == 1:
         index = (1,index)
     if isinstance(index, int):
         key = np.unravel_index(index-1, self.shape, order='F')
         yield tuple(k+1 for k in key)
     elif isinstance(index,slice):
         index = range((index.start or 1)-1,
                       index.stop or np.prod(self.shape),
                       index.step or 1)
         for key in np.transpose(np.unravel_index(index, self.shape, order='F')): # 0-based
             yield tuple(k+1 for k in key)
     elif isinstance(index,(list,np.ndarray)):
         index = np.asarray(index)-1
         for key in np.transpose(np.unravel_index(index, self.shape, order='F')):
             yield tuple(k+1 for k in key)
     else:
         assert isinstance(index,tuple),index.__class__
         indices = []  # 1-based
         for i,ix in enumerate(index):
             if isinstance(ix,slice):
                 indices.append(np.arange((ix.start or 1),
                                          (ix.stop  or self.shape[i]) + 1,
                                          ix.step   or 1,
                                          dtype=int))
             else:
                 indices.append(np.asarray(ix))
         assert len(index) == 2
         indices[0].shape = (-1,1)
         for key in np.broadcast(*indices):
             yield tuple(map(int,key))
Example #21
 def testPredict(self): 
     #Create a set of indices 
     lmbda = 0.0 
     
     iterativeSoftImpute = IterativeSoftImpute(lmbda, k=10)
     matrixIterator = iter(self.matrixList)
     ZList = iterativeSoftImpute.learnModel(matrixIterator)
     
     XhatList = iterativeSoftImpute.predict(ZList, self.indsList)
     
     #Check we get the exact matrices returned 
     for i, Xhat in enumerate(XhatList): 
         nptst.assert_array_almost_equal(numpy.array(Xhat.todense()), self.matrixList[i].todense())
         
         self.assertEquals(Xhat.nnz, self.indsList[i].shape[0])
         
         self.assertAlmostEquals(MCEvaluator.meanSqError(Xhat, self.matrixList[i]), 0)
         self.assertAlmostEquals(MCEvaluator.rootMeanSqError(Xhat, self.matrixList[i]), 0)
         
     #Try moderate lambda 
     lmbda = 0.1 
     iterativeSoftImpute = IterativeSoftImpute(lmbda, k=10)
     matrixIterator = iter(self.matrixList)
     ZList = list(iterativeSoftImpute.learnModel(matrixIterator)) 
     
     XhatList = iterativeSoftImpute.predict(iter(ZList), self.indsList)
     
     for i, Xhat in enumerate(XhatList): 
         for ind in self.indsList[i]:
             U, s, V = ZList[i]
             Z = (U*s).dot(V.T)
             self.assertEquals(Xhat[numpy.unravel_index(ind, Xhat.shape)], Z[numpy.unravel_index(ind, Xhat.shape)])
         
         self.assertEquals(Xhat.nnz, self.indsList[i].shape[0])
Example #22
def grid_maximum(matrix):
    """ Find where in grid highest probability lies """
    
    arr = np.copy(matrix)
    # 1st maximum
    i, j = np.unravel_index(arr.argmax(), arr.shape)
        
    lon_bins = np.linspace(llcrnrlon, urcrnrlon+2, xBins)
    lat_bins = np.linspace(llcrnrlat, urcrnrlat+1, xBins*xyRatio)

    lon_corr = lon_bins[1]-lon_bins[0]
    lat_corr = lat_bins[1]-lat_bins[0]
    
    #for lon in lon_bins:
    #    for lat in lat_bins:
    #        coord = lat+lat_corr*0.5, lon+lon_corr*0.5
    #        print rg.get(coord)['admin1']

    maximum = lat_bins[i]+lat_corr*0.5, lon_bins[j]+lon_corr*0.5

    # 2nd maximum
    arr[i, j] = 0
    i, j = np.unravel_index(arr.argmax(), arr.shape)
    second_maximum = lat_bins[i]+lat_corr*0.5, lon_bins[j]+lon_corr*0.5

    # 3rd maximum
    arr[i, j] = 0
    i, j = np.unravel_index(arr.argmax(), arr.shape)
    third_maximum = lat_bins[i]+lat_corr*0.5, lon_bins[j]+lon_corr*0.5

    return maximum, second_maximum, third_maximum
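
The top-k pattern above (zero out the current maximum and take the next) can also be expressed with a single argsort over the flattened grid; a minimal sketch assuming only numpy, with made-up data and without the module-level lon/lat globals used above:

import numpy as np

matrix = np.random.rand(6, 8)
flat_top3 = np.argsort(matrix, axis=None)[-3:][::-1]           # flat indices, largest first
top3 = [np.unravel_index(f, matrix.shape) for f in flat_top3]  # [(i1, j1), (i2, j2), (i3, j3)]
print(top3)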
Example #23
    def write_cv_results_toFile(self, scores, precision_scores, recall_scores, param_grid, result_path):
        
        final_results_dict = {}
        all_scores_dict = {'error' : scores, 'precision' : precision_scores, 'recall' : recall_scores}
        for score in all_scores_dict:
            avg_score = np.mean(all_scores_dict[score], axis=1)
            std_score = np.std(all_scores_dict[score], axis=1)

            if score == 'error':
                opt_ind = np.unravel_index(np.argmin(avg_score), avg_score.shape)
            else:
                opt_ind = np.unravel_index(np.argmax(avg_score), avg_score.shape)
            
            final_results_dict[score] = avg_score[opt_ind]
            final_results_dict[score + '_std'] = std_score[opt_ind]
            params_dict = {}
            for element in self.grid_dictionary:
                params_dict[element] =  param_grid[opt_ind[0]][self.grid_dictionary[element]]
            final_results_dict[score + '_params'] = params_dict
            n_estimators = opt_ind[1] + 1
            final_results_dict[score + '_params'].update(dict(n_trees = str(n_estimators)))
            if not 'fe_params' in params_dict:
                final_results_dict[score + '_params'].update(dict(fe_params = None))
        
        final_results_dict['channel_type'] = self.config.channel_type

        json.dump(final_results_dict, open(result_path,'w'))  
Example #24
  def reorder_in_x(self,xmat):

    xfactors=self.xfactors
    tdim=self.xmatrix.shape
    ndim=len(tdim)
    mf=xfactors.shape[0]
    xmatredim=np.zeros(mf,dtype=int)
    for i in range(mf):
      xmatredim[i]=np.prod(xfactors[i,:])
    xmatre=np.zeros(xmatredim)

    pdim=np.prod(tdim)
    i_in=np.arange(pdim)
    i_inx=np.array(np.unravel_index(i_in,tdim))
    i_outx=[None]*ndim
    for i in range(ndim):
      i_outx[i]=np.array(np.unravel_index(i_inx[i],xfactors[:,i]))
    i_outx=np.array(i_outx)
    
    i_out=[None]*mf
    for i in range(mf):
      i_out[i]=np.array(np.ravel_multi_index(i_outx[:,i],xfactors[i]))
    i_out=np.array(i_out)

    xmatre[tuple(i_out)]=self.xmatrix[tuple(i_inx)]

    return(xmatre)
Example #25
  def invert_reorder_in_x(self,xmatre,xfactors):

    (mf,nf)=xfactors.shape
    ## tdim2=xmatre.shape
    ## ndim2=len(tdim2)

    ndim=nf
    tdim=np.zeros(nf,dtype=int)
    for i in range(nf):
      tdim[i]=np.prod(xfactors[:,i])
    xmatrix=np.zeros(tdim)

    pdim=np.prod(tdim)
    i_in=np.arange(pdim)
    i_inx=np.array(np.unravel_index(i_in,tdim))

    i_outx=[None]*ndim
    for i in range(ndim):
      i_outx[i]=np.array(np.unravel_index(i_inx[i],xfactors[:,i]))
    i_outx=np.array(i_outx)
    
    i_out=[None]*mf
    for i in range(mf):
      i_out[i]=np.array(np.ravel_multi_index(i_outx[:,i],xfactors[i]))
    i_out=np.array(i_out)

    xmatrix[tuple(i_inx)]=xmatre[tuple(i_out)]
    xmatrix=xmatrix.astype(np.uint8)

    return(xmatrix)
Example #26
def klst(X):
    history=[]
    add_vec =[]    
    print("Size of the data matrix ", X.shape)
    count = X.shape[0]
    dist = calc(X)
    print(dist.shape)
    exit()
    i,j = np.unravel_index(dist.argmax(), dist.shape)
    max = dist[i,j]
    ind=[x for x in range(0,count)]
    
    while dist.shape[0] > 1:
        dist = pd.DataFrame(dist, index = ind ,columns = ind)

        p =slm(dist.values)
        print("Shortest-distance vector", p)
    #     print(dist)
        dist, add_vec = cut_n_comb(dist, p, add_vec)
        ind= dist.index
        dist = dist.values
        i,j = np.unravel_index(dist.argmax(), dist.shape)
        max = dist[i,j]
        if max == 0:  # exception handling needed for this case
            break
        print("Merge result:", add_vec)
        count -= 1
        print(count-1, "iterations remaining")
        print("-----------------------------")
Example #27
def seuillage_80(masque,image,handle=False):
	"""
		Removes the brightest 20 percent of the pixels from the mask.
	"""
	nb_pixel=sum(sum(masque))
	seuil=nb_pixel*20/100
	image=humuscle(image)
	if handle:
		print('number of pixels before thresholding')
		print(nb_pixel)
		print('number of pixels to remove')
		print(seuil)

	if handle:
		imshow('initial mask',masque)
	k=0
	while(k<seuil):
		masque[np.unravel_index(np.argmax(image),image.shape)]=0
		image[np.unravel_index(np.argmax(image),image.shape)]=image.min()
		k+=1

	if handle:
		imshow('mask after thresholding at 80%',masque)
		print('number of pixels after thresholding')
		print(sum(sum(masque)))
	return masque
Example #28
def nonMaximumSuppression(I, dims, pos, c, maxima):
    # if pos== true, find local maximum, else - find local minimum
    #copy image
    I_temp = I
    w = dims[0]
    h = dims[1]
    n = 5
    margin = 5
    tau = 50
#try np here too
    for i in range(n + margin, w - n - margin):
        #    print "1 entered 1st cycle i=", i
        for j in range(n + margin, h - n - margin):
            window = I_temp[i:i+n, j:j+n]

            if np.sum(window) < 0:
                mval = 0
            elif pos:
                mval = np.amax(window)
                mcoords = np.unravel_index(np.argmax(window), window.shape) + np.array((i,j))
            else:
                mval = np.amin(window)
                mcoords = np.unravel_index(np.argmin(window), window.shape) + np.array((i,j))
            I_temp [i:i+n, j:j+n] = -1
            print mcoords, "mval= ", mval
            if pos:
                if mval >= tau:
                    maxima.append(maximum(mcoords[0],mcoords[1], mval, c))

            else:
                if mval <= tau and mval > 0:
                    maxima.append(maximum(mcoords[0],mcoords[1], mval, c))
Example #29
def plot_drain_pit(self, pit, drain, prop, s, elev, area):
    from matplotlib import pyplot
    
    cmap = 'Greens'

    ipit, jpit = np.unravel_index(pit, elev.shape)
    Idrain, Jdrain = np.unravel_index(drain, elev.shape)
    Iarea, Jarea = np.unravel_index(area, elev.shape)

    imin = max(0, min(ipit, Idrain.min(), Iarea.min())-1)
    imax = min(elev.shape[0], max(ipit, Idrain.max(), Iarea.max()) + 2)
    jmin = max(0, min(jpit, Jdrain.min(), Jarea.min())-1)
    jmax = min(elev.shape[1], max(jpit, Jdrain.max(), Jarea.max()) + 2)
    roi = (slice(imin, imax), slice(jmin, jmax))

    pyplot.figure()
    
    ax = pyplot.subplot(221);
    pyplot.axis('off')
    im = pyplot.imshow(elev[roi], interpolation='none'); im.set_cmap(cmap)
    pyplot.plot(jpit-jmin, ipit-imin, lw=0, color='k', marker='$P$', ms=10)
    pyplot.plot(Jarea-jmin, Iarea-imin, 'k.')
    pyplot.plot(Jdrain-jmin, Idrain-imin, lw=0, color='k', marker='$D$', ms=10)

    pyplot.subplot(223, sharex=ax, sharey=ax); pyplot.axis('off')
    im = pyplot.imshow(elev[roi], interpolation='none'); im.set_cmap(cmap)
    for i, j, val in zip(Idrain, Jdrain, prop):
        pyplot.plot(j-jmin, i-imin, lw=0, color='k', marker='$%.2f$' % val, ms=16)

    pyplot.subplot(224, sharex=ax, sharey=ax); pyplot.axis('off')
    im = pyplot.imshow(elev[roi], interpolation='none'); im.set_cmap(cmap)
    for i, j, val in zip(Idrain, Jdrain, s):
        pyplot.plot(j-jmin, i-imin, lw=0, color='k', marker='$%.2f$' % val, ms=16)

    pyplot.tight_layout()
Example #30
    def from_hdu(cls, hdu, hdu_bands=None):
        """Make a WcsNDMap object from a FITS HDU.

        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU` or `~astropy.io.fits.ImageHDU`
            The map FITS HDU.
        hdu_bands : `~astropy.io.fits.BinTableHDU`
            The BANDS table HDU.
        """
        geom = WcsGeom.from_header(hdu.header, hdu_bands)
        shape = tuple([ax.nbin for ax in geom.axes])
        shape_wcs = tuple([np.max(geom.npix[0]),
                           np.max(geom.npix[1])])
        meta = cls._get_meta_from_header(hdu.header)
        map_out = cls(geom, meta=meta)

        # TODO: Should we support extracting slices?
        if isinstance(hdu, fits.BinTableHDU):
            pix = hdu.data.field('PIX')
            pix = np.unravel_index(pix, shape_wcs[::-1])
            vals = hdu.data.field('VALUE')
            if 'CHANNEL' in hdu.data.columns.names and shape:
                chan = hdu.data.field('CHANNEL')
                chan = np.unravel_index(chan, shape[::-1])
                idx = chan + pix
            else:
                idx = pix

            map_out.set_by_idx(idx[::-1], vals)
        else:
            map_out.data = hdu.data

        return map_out
Example #31
    def forward(self,
                forest,
                features,
                num_obj,
                labels=None,
                boxes_for_nms=None,
                batch_size=0):
        # generate dropout
        if self.dropout > 0.0:
            tree_dropout_mask = get_dropout_mask(self.dropout,
                                                 self.hidden_size / 2)
            chain_dropout_mask = get_dropout_mask(self.dropout,
                                                  self.hidden_size / 2)
        else:
            tree_dropout_mask = None
            chain_dropout_mask = None

        # generate tree lstm input/output class
        tree_out_h = None
        tree_out_dists = None
        tree_out_commitments = None
        tree_h_order = Variable(torch.LongTensor(num_obj).zero_().cuda())
        tree_order_idx = 0
        tree_lstm_io = tree_utils.TreeLSTM_IO(tree_out_h, tree_h_order,
                                              tree_order_idx, tree_out_dists,
                                              tree_out_commitments,
                                              tree_dropout_mask)

        #chain_out_h = None
        #chain_out_dists = None
        #chain_out_commitments = None
        #chain_h_order = Variable(torch.LongTensor(num_obj).zero_().cuda())
        #chain_order_idx = 0
        #chain_lstm_io = tree_utils.TreeLSTM_IO(chain_out_h, chain_h_order, chain_order_idx, chain_out_dists,
        #                                       chain_out_commitments, chain_dropout_mask)
        for idx in range(len(forest)):
            self.decoderTreeLSTM(forest[idx], features, tree_lstm_io)
            #self.decoderChainLSTM(forest[idx], features, chain_lstm_io)

        out_tree_h = torch.index_select(tree_lstm_io.hidden, 0,
                                        tree_lstm_io.order.long())
        out_dists = torch.index_select(tree_lstm_io.dists, 0,
                                       tree_lstm_io.order.long())[:-batch_size]
        out_commitments = torch.index_select(
            tree_lstm_io.commitments, 0,
            tree_lstm_io.order.long())[:-batch_size]

        # Do NMS here as a post-processing step
        if boxes_for_nms is not None and not self.training:
            is_overlap = nms_overlaps(boxes_for_nms.data).view(
                boxes_for_nms.size(0), boxes_for_nms.size(0),
                boxes_for_nms.size(1)).cpu().numpy() >= self.nms_thresh
            # is_overlap[np.arange(boxes_for_nms.size(0)), np.arange(boxes_for_nms.size(0))] = False

            out_dists_sampled = F.softmax(out_dists, 1).data.cpu().numpy()
            out_dists_sampled[:, 0] = 0

            out_commitments = out_commitments.data.new(
                out_commitments.shape[0]).fill_(0)

            for i in range(out_commitments.size(0)):
                box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(),
                                                    out_dists_sampled.shape)
                out_commitments[int(box_ind)] = int(cls_ind)
                out_dists_sampled[is_overlap[box_ind, :, cls_ind],
                                  cls_ind] = 0.0
                out_dists_sampled[
                    box_ind] = -1.0  # This way we won't re-sample

            out_commitments = Variable(out_commitments.view(-1))
        else:
            out_commitments = out_commitments.view(-1)

        if self.training and (labels is not None):
            out_commitments = labels.clone()
        else:
            out_commitments = torch.cat(
                (out_commitments,
                 Variable(
                     torch.randn(batch_size).long().fill_(0).cuda()).view(-1)),
                0)

        return out_dists, out_commitments
Example #32
def semi_automatic_training():
    """ This application lets you maneuver through windows selected by the OW process.
    In short: windows with OW below the OW_start threshold that pass the R2_criterion are considered eddies;
    have a look at https://github.com/JASaa/eddies-R2 for more info about the sample selection process"""

    # Loop through every netcdf file in directory, usually they are spaced by 5 days
    for fName in os.listdir(args.fDir):
        
        if not fName.endswith(".nc"):
            continue
            
        logger.info("loading netcdf")

        # load data
        (ds,t,lon,lat,depth,uvel_full,vvel_full,sst_full,ssl_full) =  load_netcdf4(args.fDir + fName)

        # Confidence level, usually 90%
        R2_criterion = 0.90

        # OW value at which to begin the evaluation of R2, default was -1, want to use -8 to be absolutely sure
        OW_start = -6.0

        # Number of local minima to evaluate using R2 method.
        # Set low (like 20) to see a few R2 eddies quickly.
        # Set high (like 1e5) to find all eddies in domain.
        max_evaluation_points = 100000 

        # Minimum number of cells required to be identified as an eddy.
        min_eddie_cells = 3 # set to 3 to be coherent with the use of the R2 method; 3 points seems like a reasonable minimum for a correlation

        # z-level to plot.  Usually set to 0 for the surface.
        k_plot = 0

        dlon = abs(lon[0]-lon[1])
        dlat = abs(lat[0]-lat[1])

            # Create eddy images for each day in the dataset
        #for day, time in enumerate(t):
        # Shuffle the time so that the expert won't see the same long-lasting eddies

        for day in random.sample(range(0, len(t)), len(t)): 

            dateStr = "{:%d-%m-%Y}".format(datetime.date(1950, 1, 1) + datetime.timedelta(hours=float(t[day])) )
            logger.info(f"Creating images for dataset {dateStr}")

            # create a text trap
            text_trap = io.StringIO()
            sys.stdout = text_trap

            # Run the OW-R2 algorithm
            lon,lat,u,v,vorticity,OW,OW_eddies,eddie_census,nEddies,circulation_mask = eddy_detection(
                    lon,lat,depth,uvel_full,vvel_full,day,R2_criterion,OW_start,max_evaluation_points,min_eddie_cells)

            # restore stdout
            sys.stdout = sys.__stdout__

            sst_train = []
            ssl_train = []
            uvel_train = []
            vvel_train = []
            phase_train = []
            nDataset = 5

            # =========================================================
            # ============== Prepare datasets and lists ===============
            # =========================================================

            eddyCtrIdx = []
            for i in range(0,nEddies):
                lonIdx = np.argmax(lon>eddie_census[2,i])-1
                latIdx = np.argmax(lat>eddie_census[3,i])-1
                eddyCtrIdx.append( (lonIdx, latIdx) )

            # Netcdf uses (lat,lon) we want to use (lon,lat) and discard the depth
            sst = sst_full[day,:,:].T
            ssl = ssl_full[day,:,:].T
            uvel = uvel_full[day,0,:,:].T
            vvel = vvel_full[day,0,:,:].T
            # Calculate the phase angle (direction) of the current
            with np.errstate(all='ignore'): # Disable zero div warning
                phase = xr.ufuncs.rad2deg( xr.ufuncs.arctan2(vvel, uvel) ) + 180
            OW = OW[:,:,0]
            nLon = len(lon)
            nLat = len(lat)

            datasets = (sst, ssl, uvel, vvel, phase, OW) 
            
            # =========================================================
            # ======= Create rectangular patches around eddies ========
            # =========================================================

            logger.info(f"+ Creating rectangles for {nEddies} eddies")

            savedImgCounter = 0 # saved image counter for file ID
            for eddyId, ctrIdx in enumerate(eddyCtrIdx): # nEddies

                ctrCoord = lon[ctrIdx[0]], lat[ctrIdx[1]]
                diameter_km = eddie_census[5][eddyId]

                bfs_diameter_km, bfs_center = eddy_metrics(OW_eddies, ctrIdx, lon, lat)

                # Positive rotation (counter-clockwise) is a cyclone in the northern hemisphere because of the coriolis effect
                if (eddie_census[1][eddyId] > 0.0): cyclone = 1 # 1 is a cyclone, 0 is nothing and -1 is anti-cyclone (negative rotation)
                else: cyclone = -1

                logger.info(f"+++ Creating rectangles for {check_cyclone(cyclone)} with center {ctrCoord} and diameter {diameter_km}")
                
                # Find rectangle metrics
                height = args.size * abs(diameter_km / 110.54) # 1 deg = 110.54 km, 1.2 to be sure the image covers the eddy
                width = args.size * abs(diameter_km / (111.320 * cos(lat[ctrIdx[1]]))) # 1 deg = 111.320*cos(latitude) km, using center latitude as ref

                lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0
                lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0
                
                # Indices of current eddy image
                lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]
                latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]

                eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)])
    
                # Plot and flag to save eddy
                #add = plot_grids(eddy_data, lo, la, title)

                #-------- Move closer to center of eddy ------------

                title = dateStr + "_" + check_cyclone(cyclone)

                choices = ('Center', 'incLon', 'incLat', 'decLon', 'decLat')
                response = 'Center'
                #response = 'Yes' # Skip this section for debugging non-eddy section
                while response in choices:

                    lo = lon[lonIdxs]
                    la = lat[latIdxs]

                    for i, loIdx in enumerate(lonIdxs):
                        for j, laIdx in enumerate(latIdxs):
                            for k, measurement in enumerate(datasets): # for every measurement type in datasets
                                eddy_data[k,i,j] = measurement[loIdx,laIdx]

                    # Store a larger grid to make it easier to see if we have an eddy and if we should center image 
                    if (lonIdxs[0]-5 < 0 or lonIdxs[-1]+5 >= nLon) or (latIdxs[0]-3 < 0 or latIdxs[-1]+3 >= nLat):
                        larger_grid = None
                    else:
                        larger_grid = [ np.zeros(lonIdxs.size+10), np.zeros(latIdxs.size+6), 
                                        np.zeros((lonIdxs.size+10,latIdxs.size+6)), ]
                        for i, loIdx in enumerate(range(lonIdxs[0]-5, lonIdxs[-1]+6)):
                            for j, laIdx in enumerate(range(latIdxs[0]-3, latIdxs[-1]+4)):
                                larger_grid[0][i] = lon[loIdx]
                                larger_grid[1][j] = lat[laIdx]
                                larger_grid[2][i,j] = ssl[loIdx,laIdx]

                    response = plot_grids(eddy_data, lo, la, larger_grid, title)
                    if response not in choices: # TODO: feel like this is a silly way of doing this
                        break
                    if response == 'Center':
                        # Find the center from water level
                        logger.info(f"+++ Centering eddy towards a minima/maxima depending on eddy type")
                        if cyclone==1:
                            idx = np.unravel_index(eddy_data[1].argmax(), eddy_data[1].shape)
                            ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]
                            logger.info(f"+++ Argmax center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}")
                        else:
                            idx = np.unravel_index(eddy_data[1].argmin(), eddy_data[1].shape)
                            ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]
                            logger.info(f"+++ Argmin center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}")

                        # New width and height in case we've moved in lon/lat direction
                        width, height = abs(lo[0]-lo[-1])+dlon, abs(la[0]-la[-1])+dlat

                        lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0
                        lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0

                        # Indices of current eddy image
                        lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]
                        latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]

                    elif response == 'incLon':
                        if (lonIdxs[0] <= 0 or lonIdxs[-1] >= nLon-1): 
                            logger.info(f"+++ Longitude can't be increased further")
                        else:
                            lonIdxs = np.arange(lonIdxs[0]-1, lonIdxs[-1]+2)
                            logger.info(f"+++ Increasing longitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})")
                    elif response == 'incLat':
                        if (latIdxs[0] <= 0 or latIdxs[-1] >= nLat-1): 
                            logger.info(f"+++ Latitude can't be increased further")
                        else:
                            latIdxs = np.arange(latIdxs[0]-1, latIdxs[-1]+2)
                            logger.info(f"+++ Increasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})")
                    elif response == 'decLon':
                        lonIdxs = np.arange(lonIdxs[0]+1, lonIdxs[-1])
                        logger.info(f"+++ Decreasing longitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})")
                    elif response == 'decLat':
                        latIdxs = np.arange(latIdxs[0]+1, latIdxs[-1])
                        logger.info(f"+++ Decreasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})")
                    eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)])      

                #----------------------------------------------------------
                
                lo = lon[lonIdxs]
                la = lat[latIdxs]

                #guiEvent, guiValues = show_figure(fig)
                #add = 'Yes' # Bypass GUI selection
                if response=='Yes':
                    savedImgCounter = savedImgCounter + 1
                    # Create images?
                    '''
                    dirPath = 'C:/Master/TTK-4900-Master/images/'+dateStr+'/'
                    if not os.path.exists(dirPath):
                        os.makedirs(dirPath)
                    imPath = dirPath + title + f"_{savedImgCounter}.png"   
                    plt.savefig(imPath, bbox_inches='tight')
                    '''

                    sst_train.append([eddy_data[0], cyclone]) # [data, label]
                    ssl_train.append([eddy_data[1], cyclone]) 
                    uvel_train.append([eddy_data[2], cyclone]) 
                    vvel_train.append([eddy_data[3], cyclone]) 
                    phase_train.append([eddy_data[4], cyclone]) 

                    logger.info(f"+++++ Saving image {eddyId} as an eddy")   

                else: 
                    logger.info(f"+++++ Discarding image {eddyId}")
                
            # =========================================================
            # ================ Select non-eddy images =================
            # =========================================================

            if savedImgCounter <= 0:
                logger.info(f"+++++ No eddies found")
                continue   

            # Subgrid (sg) longitude and latitude length
            sgLon, sgLat = dim.find_avg_dim(sst_train, start_axis=0) 
            logger.info(f"+++++ Using average dimensions ({sgLon}, {sgLat}) for non-eddy")

            loRange, laRange = range(0, nLon, sgLon), range(0, nLat, sgLat)
        
            # Create OW array of compatible dimensions for comparing masks
            OW_noeddy = OW[:loRange[-1],:laRange[-1]]
            OW_noeddy = create_subgrids( np.ma.masked_where(OW_noeddy < -0.8, OW_noeddy), sgLon, sgLat, 1 )

            # Get a 2d grid of indices -> make it moldable to the average grid -> convert to subgrids
            idx_subgrids = create_subgrids( np.array( index_list(nLon, nLat) )[:loRange[-1],:laRange[-1]], sgLon, sgLat, 2 )

            noneddy_idx_subgrids = []
            for i, grid in enumerate(OW_noeddy):
                if not np.ma.is_masked(grid):
                    noneddy_idx_subgrids.append(idx_subgrids[i])

            nNoneddies = len(noneddy_idx_subgrids)
            data_noeddy = np.array([[np.zeros((sgLon,sgLat)) for _ in range(nNoneddies)] for _ in range(6)])
            
            # Shuffle the non-eddies and loop through until we have chosen the same number of non-eddies as eddies
            random.shuffle(noneddy_idx_subgrids)
            added = 0
            for grid_id, idx_grid in enumerate(noneddy_idx_subgrids):
                OW_ = np.zeros((idx_grid.shape[:2]))
                for i in range(len(idx_grid)):
                    for j in range(len(idx_grid[0])):
                        idx = idx_grid[i,j][0], idx_grid[i,j][1]
                        for k in range(len(data_noeddy)):
                            data_noeddy[k,grid_id,i,j] = datasets[k][idx]
                #print(idx_grid)
                lo, la = lon[idx_grid[:,0,0]], lat[idx_grid[0,:,1]]
                title = dateStr + "_noneddy"
                add = plot_grids(data_noeddy[:,grid_id,:,:], lo, la, None, title)
                if add=='Yes':
                    added = added + 1
                    sst_train.append([data_noeddy[0,grid_id,:,:], 0]) # [data, label]
                    ssl_train.append([data_noeddy[1,grid_id,:,:], 0])
                    uvel_train.append([data_noeddy[2,grid_id,:,:], 0])
                    vvel_train.append([data_noeddy[3,grid_id,:,:], 0])
                    phase_train.append([data_noeddy[4,grid_id,:,:], 0])
                    logger.info(f"+++++ Saving noneddy")       
                if added >= savedImgCounter:
                    break

            # =========================================================
            # ============== Interpolate ==============
            # =========================================================

            #sst_out = np.array(sst_train)
            #ssl_out = np.array(ssl_train)
            #uvel_out = np.array(uvel_train)
            #vvel_out = np.array(vvel_train)
            #phase_out = np.array(phase_train)
            #nTeddies = sst_out.shape[0]


            logger.info(f"Compressing and storing training data so far")


            # =========================================================
            # ========== Save data as compressed numpy array ==========
            # =========================================================

            save_npz_array( (sst_train, ssl_train, uvel_train, vvel_train, phase_train) )
Example #33
def search(tile):

    if os.path.exists('rogue-%s-02.png' %
                      tile) and not os.path.exists('rogue-%s-03.png' % tile):
        print 'Skipping', tile
        return

    fn = os.path.join(tile[:3], tile, 'unwise-%s-w2-%%s-m.fits' % tile)

    try:
        II = [fitsio.read(os.path.join('e%i' % e, fn % 'img')) for e in [1, 2]]
        PP = [fitsio.read(os.path.join('e%i' % e, fn % 'std')) for e in [1, 2]]
        wcs = Tan(os.path.join('e%i' % 1, fn % 'img'))
    except:
        import traceback
        print
        print 'Failed to read data for tile', tile
        traceback.print_exc()
        print
        return
    H, W = II[0].shape

    ps = PlotSequence('rogue-%s' % tile)

    aa = dict(interpolation='nearest', origin='lower')
    ima = dict(interpolation='nearest', origin='lower', vmin=-100, vmax=500)

    plt.clf()
    plt.imshow(II[0], **ima)
    plt.title('Epoch 1')
    ps.savefig()
    plt.clf()
    plt.imshow(II[1], **ima)
    plt.title('Epoch 2')
    ps.savefig()

    # X = gaussian_filter(np.abs((II[0] - II[1]) / np.hypot(PP[0], PP[1])), 1.0)
    # plt.clf()
    # plt.imshow(X, interpolation='nearest', origin='lower')
    # plt.title('Blurred abs difference / per-pixel-std')
    # ps.savefig()

    # Y = (II[0] - II[1]) / reduce(np.hypot, [PP[0], PP[1], np.hypot(100,II[0]), np.hypot(100,II[1]) ])
    Y = (II[0] - II[1]) / reduce(np.hypot, [PP[0], PP[1]])
    X = gaussian_filter(np.abs(Y), 1.0)

    xthresh = 3.

    print 'Value at rogue:', X[1452, 1596]

    print 'pp at rogue:', [pp[1452, 1596] for pp in PP]

    plt.clf()
    plt.imshow(X, interpolation='nearest', origin='lower')
    plt.title('X')
    ps.savefig()

    # plt.clf()
    # plt.hist(np.minimum(100, PP[0].ravel()), 100, range=(0,100),
    #          histtype='step', color='r')
    # plt.hist(np.minimum(100, PP[1].ravel()), 100, range=(0,100),
    #          histtype='step', color='b')
    # plt.title('Per-pixel std')
    # ps.savefig()

    #Y = ((II[0] - II[1]) / np.hypot(PP[0], PP[1]))
    #Y = gaussian_filter(
    #    (II[0] - II[1]) / np.hypot(100, np.hypot(II[0], II[1]))
    #    , 1.0)

    #I = np.argsort(-X.ravel())
    #yy,xx = np.unravel_index(I[:25], X.shape)
    #print 'xx', xx
    #print 'yy', yy

    hot = (X > xthresh)
    peak = find_peaks(hot, X)
    dilate = 2
    hot = binary_dilation(hot, structure=np.ones((3, 3)), iterations=dilate)
    blobs, nblobs = label(hot, np.ones((3, 3), int))
    blobslices = find_objects(blobs)
    # Find maximum pixel within each blob.
    BX, BY = [], []
    BV = []
    for b, slc in enumerate(blobslices):
        sy, sx = slc
        y0, y1 = sy.start, sy.stop
        x0, x1 = sx.start, sx.stop
        bl = blobs[slc]
        i = np.argmax((bl == (b + 1)) * X[slc])
        iy, ix = np.unravel_index(i, bl.shape)
        by = iy + y0
        bx = ix + x0
        BX.append(bx)
        BY.append(by)
        BV.append(X[by, bx])
    BX = np.array(BX)
    BY = np.array(BY)
    BV = np.array(BV)
    I = np.argsort(-BV)
    xx, yy = BX[I], BY[I]

    keep = []
    S = 15
    for i, (x, y) in enumerate(zip(xx, yy)):
        #print x,y
        if x < S or y < S or x + S >= W or y + S >= H:
            continue

        slc = slice(y - S, y + S + 1), slice(x - S, x + S + 1)
        slc2 = slice(y - 3, y + 3 + 1), slice(x - 3, x + 3 + 1)

        mx = np.max((II[0][slc] + II[1][slc]) / 2.)
        #print 'Max within slice:', mx
        #if mx > 5e3:
        if mx > 2e3:
            continue

        mx2 = np.max((II[0][slc2] + II[1][slc2]) / 2.)
        print('Flux near object:', mx2)
        if mx2 < 250:
            continue

        #miny = np.min(Y[slc2])
        #maxy = np.max(Y[slc2])
        keep.append(i)

    keep = np.array(keep)
    if len(keep) == 0:
        print('No objects passed cuts')
        return
    xx = xx[keep]
    yy = yy[keep]

    plt.clf()
    plt.imshow(X, interpolation='nearest', origin='lower', cmap='gray')
    plt.title('X')
    ax = plt.axis()
    plt.plot(xx, yy, 'r+')
    plt.plot(1596, 1452, 'o', mec=(0, 1, 0), mfc='none')
    plt.axis(ax)
    ps.savefig()

    ylo, yhi = [], []
    for i in range(min(len(xx), 100)):
        x, y = xx[i], yy[i]
        slc2 = slice(y - 3, y + 3 + 1), slice(x - 3, x + 3 + 1)
        ylo.append(np.min(Y[slc2]))
        yhi.append(np.max(Y[slc2]))
    plt.clf()
    plt.plot(ylo, yhi, 'r.')
    plt.axis('scaled')
    ps.savefig()

    for i, (x, y) in enumerate(list(zip(xx, yy))[:50]):
        print(x, y)
        rows, cols = 2, 3
        ra, dec = wcs.pixelxy2radec(x + 1, y + 1)

        slc = slice(y - S, y + S + 1), slice(x - S, x + S + 1)
        slc2 = slice(y - 3, y + 3 + 1), slice(x - 3, x + 3 + 1)

        mx = max(np.max(II[0][slc]), np.max(II[1][slc]))
        print('Max within slice:', mx)
        miny = np.min(Y[slc2])
        maxy = np.max(Y[slc2])

        plt.clf()

        plt.subplot(rows, cols, 1)
        plt.imshow(II[0][slc], **ima)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('epoch 1')

        plt.subplot(rows, cols, 2)
        plt.imshow(II[1][slc], **ima)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('epoch 2')

        plt.subplot(rows, cols, 3)
        plt.imshow(PP[0][slc], **aa)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('std 1')

        plt.subplot(rows, cols, 6)
        plt.imshow(PP[1][slc], **aa)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('std 2')

        plt.subplot(rows, cols, 4)
        plt.imshow(X[slc], **aa)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('X')

        plt.subplot(rows, cols, 5)
        plt.imshow(Y[slc], **aa)
        plt.xticks([])
        plt.yticks([])
        plt.colorbar()
        plt.title('Y')

        #plt.suptitle('Tile %s, Flux: %4.0f, Range: %.2g %.2g' % (tile,mx,miny,maxy))
        plt.suptitle('Tile %s, RA,Dec (%.4f, %.4f)' % (tile, ra, dec))

        ps.savefig()
Пример #34
0
def giveShipOrders(ship, currentOrders, collectingStop):
    # build ship status
    global GLOBAL_DEPO
    global GLOBAL_DEPO_BUILD_OK
    global SAVE_UP_FOR_DEPO
    global DEPO_ONE_SHIP_AT_A_TIME
    global FIRST_DEPO_BUILT
    global WAIT_TO_BUILD_DEPOT
    global ATTACK_CURRENT_HALITE
    global ATTACK_TARGET_HALITE
    global BUILD_DEPO_TIMER
    global DEPO_MIN_SHIPS

    moveFlag = False

    turns_left = (constants.MAX_TURNS - game.turn_number)
    #logging.info("Ship {} was {}".format(ship, currentOrders))

    # enemy locations, look if they are next to you
    runFlag = False
    attackFlag = False
    surroundings = game_map.get_normalized_cardinals(ship.position)

    #logging.info("Enemy ship halite \n {}".format(game_map.enemyShipHalite))

    # check to run
    shipX = ship.position.x
    shipY = ship.position.y

    distanceRule = 1

    dist = game_map.distanceMatrixNonZero[shipX][shipY].copy()
    dist[dist > distanceRule] = 0
    enemyInSight = dist * game_map.shipFlag
    #logging.info("ship {} dist {} enemy in sight {}".format(ship.id, dist, enemyInSight))

    # is an enemy in zone
    if np.sum(enemyInSight) > 0:
        enemyHalite = game_map.enemyShipHalite * dist
        enemyMA = np.ma.masked_equal(enemyHalite, 0, copy=False)
        if len(game.players) == 2:
            fightHalite = dist * (game_map.enemyShipHalite +
                                  game_map.shipFlag * game_map.npMap *
                                  (0.25 + 0.5 * game_map.negInspirationBonus))
        else:
            fightHalite = dist * 1

        # check if we should run
        if enemyMA.min() < 300 and \
           ship.halite_amount > 700 and \
           len(game.players) == 2:
            logging.info("ship {} runs!!!".format(ship.id))
            runFlag = True
        elif enemyMA.min() < 500 and \
             ship.halite_amount>700 and \
             len(game.players) == 4 and \
             turns_left < 50:
            runFlag = True
        elif len(game.players) == 2 and \
             (ship.halite_amount + game_map.npMap[shipY,shipX] *(0.25 + 0.5 * game_map.inspirationBonus[shipY,shipX]))>500 and \
             game_map.friendlyShipCount[shipY,shipX] <= game_map.enemyShipCount[shipY,shipX] and \
             enemyMA.min() < 500:
            logging.info("ship {} needs to move!".format(ship.id))
            moveFlag = np.unravel_index(enemyMA.argmin(), enemyMA.shape)
        # check if we should fight
        elif np.max(fightHalite) > ship.halite_amount and \
             len(game.players)==2 and \
             game_map.friendlyShipCount[shipY,shipX] > game_map.enemyShipCount[shipY,shipX]:
            logging.info("ship {} attacks!!!".format(ship.id))
            attackFlag = True
        elif np.max(fightHalite) > 750 and \
             ship.halite_amount < 200 and \
             game_map.friendlyShipCount[shipY,shipX] == game_map.enemyShipCount[shipY,shipX]:
            attackFlag = True

    okToBuildDepo = False
    # we wait if we just built a depo
    if FIRST_DEPO_BUILT == False:
        okToBuildDepo = True
    elif WAIT_TO_BUILD_DEPOT < 1:
        okToBuildDepo = True

    #logging.info("ship {} in max zone {}".format(ship.id, game_map.dropCalc.inMaxZone(ship.position)))
    #logging.info("ship {} friendly count {}".format(ship.id, game_map.returnFriendlyCount(ship.position, 7)))

    status = None
    if currentOrders is None:  #new ship
        status = "exploring"
    elif currentOrders == 'build depo' and BUILD_DEPO_TIMER < 45:
        status = 'build depo'
        BUILD_DEPO_TIMER += 1
    elif GLOBAL_DEPO < MAX_DEPO and \
         min(GLOBAL_DEPO+1,2) * 11 < game.me.get_ship_count() and \
         game.turn_number > shipBuildingTurns and \
         game_map.dropCalc.inMaxZone(ship.position) and \
         min([game_map.calculate_distance(ship.position, i) for i in me.get_all_drop_locations()]) >= DEPO_DISTANCE-6 and \
         GLOBAL_DEPO_BUILD_OK == True and \
         ship.position not in game.return_all_drop_locations() and \
         DEPO_ONE_SHIP_AT_A_TIME == False and\
         okToBuildDepo == True and \
         turns_left > 75 and \
         game_map.returnFriendlyCount(ship.position, 7) > DEPO_MIN_SHIPS:
        status = 'build depo'
        SAVE_UP_FOR_DEPO = True
        DEPO_ONE_SHIP_AT_A_TIME = True
    elif ship.halite_amount < game_map[ship.position].halite_amount * 0.1:
        status = 'mining'
    elif min([
            game_map.calculate_distance(ship.position, i)
            for i in me.get_all_drop_locations()
    ]) >= turns_left - SUICIDE_TURN_FLAG:
        status = "returnSuicide"
    elif currentOrders == "returning":
        status = "returning"
        if ship.position == me.shipyard.position or \
           ship.position in me.get_dropoff_locations():
            status = "exploring"
    elif ship.halite_amount >= returnHaliteFlag or runFlag == True:
        status = "returning"
    #elif ship.halite_amount < game_map[ship.position].halite_amount * 0.1 or game_map[ship.position].halite_amount > collectingStop:
    #    status = 'mining'
    #create attack squad near end
    #elif (ship.halite_amount < 50 and game_map.averageHalite < 50 and game_map.width < 48 and len(game.players)==2) or attackFlag == True:
    elif attackFlag == True:
        status = 'attack'
    elif currentOrders == "exploring":
        status = "exploring"
    else:
        status = 'exploring'
    #logging.info("ship {} status is {}".format(ship.id, status))
    return status, moveFlag
Пример #35
0
def get_best_parameters(w0, w1, losses):
    """Get the best w from the result of grid search."""
    min_row, min_col = np.unravel_index(np.argmin(losses), losses.shape)
    return losses[min_row, min_col], w0[min_row], w1[min_col]
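A small usage sketch for `get_best_parameters`, assuming `losses` comes from an exhaustive grid search over candidate intercepts `w0` and slopes `w1`; the toy data and MSE loss below are illustrative, not from the original project.

import numpy as np

# Toy linear data and a brute-force MSE grid (illustrative only).
x = np.linspace(0, 1, 50)
y = 1.5 + 2.0 * x
w0 = np.linspace(0, 3, 61)   # candidate intercepts
w1 = np.linspace(0, 4, 81)   # candidate slopes
losses = np.array([[np.mean((y - (a + b * x)) ** 2) for b in w1] for a in w0])

best_loss, best_w0, best_w1 = get_best_parameters(w0, w1, losses)
print(best_loss, best_w0, best_w1)   # expect w0 ~ 1.5 and w1 ~ 2.0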
Пример #36
0
def stack_registration(s,
                       align_to_this_slice=0,
                       refinement='spike_interpolation',
                       register_in_place=True,
                       fourier_cutoff_radius=None,
                       debug=False):
    """Calculate shifts which would register the slices of a
    three-dimensional stack `s`, and optionally register the stack in-place.

    Axis 0 is the "z-axis", axis 1 is the "up-down" (Y) axis, and axis 2
    is the "left-right" (X) axis. For each XY slice, we calculate the
    shift in the XY plane which would line that slice up with the slice
    specified by `align_to_this_slice`. If `align_to_this_slice` is a
    number, it indicates which slice of `s` to use as the reference
    slice. If `align_to_this_slice` is a numpy array, it is used as the
    reference slice, and must be the same shape as a 2D slice of `s`.

    `refinement` is one of `integer`, `spike_interpolation`, or
    `phase_fitting`, in order of increasing precision/slowness. I don't
    yet have any evidence that my implementation of phase fitting gives
    any improvement over (faster, simpler) spike interpolation, so
    caveat emptor.

    `register_in_place`: If `True`, modify the input stack `s` by
    shifting its slices to line up with the reference slice.

    `fourier_cutoff_radius`: Ignore the Fourier phases of spatial
    frequencies higher than this cutoff, since they're probably lousy
    due to aliasing and noise anyway. If `None`, attempt to estimate a
    reasonable cutoff.
    """
    assert len(s.shape) == 3
    try:
        assert align_to_this_slice in range(s.shape[0])
        align_to_this_slice = s[align_to_this_slice, :, :]
    except ValueError:
        align_to_this_slice = np.squeeze(align_to_this_slice)
    assert align_to_this_slice.shape == s.shape[-2:]
    assert refinement in ('integer', 'spike_interpolation', 'phase_fitting')
    if refinement == 'phase_fitting' and minimize is None:
        raise UserWarning("Failed to import scipy minimize; no phase fitting.")
    assert register_in_place in (True, False)
    assert debug in (True, False)
    if fourier_cutoff_radius is None:
        fourier_cutoff_radius = estimate_fourier_cutoff_radius(s, debug)
    assert (0 < fourier_cutoff_radius <= 0.5)
    if debug and np_tif is None:
        raise UserWarning("Failed to import np_tif; no debug mode.")
    ## Multiply each slice of the stack by an XY mask that goes to zero
    ## at the edges, to prevent periodic boundary artifacts when we
    ## Fourier transform.
    mask_ud = np.sin(np.linspace(0, np.pi, s.shape[1])).reshape(s.shape[1], 1)
    mask_lr = np.sin(np.linspace(0, np.pi, s.shape[2])).reshape(1, s.shape[2])
    masked_reference_slice = align_to_this_slice * mask_ud * mask_lr
    ## We'll base our registration on the phase of the low spatial
    ## frequencies of the cross-power spectrum. We'll need the complex
    ## conjugate of the Fourier transform of the masked reference slice,
    ## and a mask in the Fourier domain to pick out the low spatial
    ## frequencies:
    ref_slice_ft_conj = np.conj(np.fft.rfftn(masked_reference_slice))
    k_ud = np.fft.fftfreq(s.shape[1]).reshape(ref_slice_ft_conj.shape[0], 1)
    k_lr = np.fft.rfftfreq(s.shape[2]).reshape(1, ref_slice_ft_conj.shape[1])
    fourier_mask = (k_ud**2 + k_lr**2) < (fourier_cutoff_radius)**2
    ## Now we'll loop over each slice of the stack, calculate our
    ## registration shifts, and optionally apply the shifts to the
    ## original stack.
    registration_shifts = []
    if debug:
        ## Save some intermediate data to help with debugging
        masked_stack = np.zeros_like(s)
        masked_stack_ft = np.zeros((s.shape[0], ) + ref_slice_ft_conj.shape,
                                   dtype=np.complex128)
        masked_stack_ft_vs_ref = np.zeros_like(masked_stack_ft)
        cross_power_spectra = np.zeros_like(masked_stack_ft)
        spikes = np.zeros(s.shape, dtype=np.float64)
    for which_slice in range(s.shape[0]):
        if debug: print("Calculating registration for slice", which_slice)
        ## Compute the cross-power spectrum of our slice, and mask out
        ## the high spatial frequencies.
        current_slice = s[which_slice, :, :] * mask_ud * mask_lr
        current_slice_ft = np.fft.rfftn(current_slice)
        cross_power_spectrum = current_slice_ft * ref_slice_ft_conj
        cross_power_spectrum = (fourier_mask * cross_power_spectrum /
                                np.abs(cross_power_spectrum))
        ## Inverse transform to get a 'spike' in real space. The
        ## location of this spike gives the desired registration shift.
        ## Start by locating the spike to the nearest integer:
        spike = np.fft.irfftn(cross_power_spectrum, s=current_slice.shape)
        loc = np.array(np.unravel_index(np.argmax(spike), spike.shape))
        if refinement in ('spike_interpolation', 'phase_fitting'):
            ## Use (very simple) three-point polynomial interpolation to
            ## refine the location of the peak of the spike:
            neighbors = np.array([-1, 0, 1])
            ud_vals = spike[(loc[0] + neighbors) % spike.shape[0], loc[1]]
            lr_vals = spike[loc[0], (loc[1] + neighbors) % spike.shape[1]]
            lr_fit = np.poly1d(np.polyfit(neighbors, lr_vals, deg=2))
            ud_fit = np.poly1d(np.polyfit(neighbors, ud_vals, deg=2))
            lr_max_shift = -lr_fit[1] / (2 * lr_fit[2])
            ud_max_shift = -ud_fit[1] / (2 * ud_fit[2])
            loc = loc + (ud_max_shift, lr_max_shift)
        ## Convert our shift into a signed number near zero:
        loc = ((np.array(spike.shape) // 2 + loc) % np.array(spike.shape) -
               np.array(spike.shape) // 2)
        if refinement == 'phase_fitting':
            if debug: print("Phase fitting slice", which_slice, "...")

            ## (Attempt to) further refine our registration shift by
            ## fitting Fourier phases. I'm not sure this does any good,
            ## perhaps my implementation is lousy?
            def minimize_me(loc, cross_power_spectrum):
                disagreement = np.abs(
                    expected_cross_power_spectrum(loc, k_ud, k_lr) -
                    cross_power_spectrum)[fourier_mask].sum()
                if debug: print(" Shift:", loc, "Disagreement:", disagreement)
                return disagreement

            loc = minimize(minimize_me,
                           x0=loc,
                           args=(cross_power_spectrum, ),
                           method='Nelder-Mead').x
        registration_shifts.append(loc)
        if debug:
            ## Save some intermediate data to help with debugging
            masked_stack[which_slice, :, :] = current_slice
            masked_stack_ft[which_slice, :, :] = (np.fft.fftshift(
                current_slice_ft, axes=0))
            masked_stack_ft_vs_ref[which_slice, :, :] = (np.fft.fftshift(
                current_slice_ft * ref_slice_ft_conj, axes=0))
            cross_power_spectra[which_slice, :, :] = (np.fft.fftshift(
                cross_power_spectrum, axes=0))
            spikes[which_slice, :, :] = np.fft.fftshift(spike)
    if register_in_place:
        ## Modify the input stack in-place so it's registered.
        if refinement == 'integer':
            registration_type = 'nearest_integer'
        else:
            registration_type = 'fourier_interpolation'
        apply_registration_shifts(s,
                                  registration_shifts,
                                  registration_type=registration_type)
    if debug:
        np_tif.array_to_tif(masked_stack, 'DEBUG_masked_stack.tif')
        np_tif.array_to_tif(np.log(np.abs(masked_stack_ft)),
                            'DEBUG_masked_stack_FT_log_magnitudes.tif')
        np_tif.array_to_tif(np.angle(masked_stack_ft),
                            'DEBUG_masked_stack_FT_phases.tif')
        np_tif.array_to_tif(np.angle(masked_stack_ft_vs_ref),
                            'DEBUG_masked_stack_FT_phase_vs_ref.tif')
        np_tif.array_to_tif(np.angle(cross_power_spectra),
                            'DEBUG_cross_power_spectral_phases.tif')
        np_tif.array_to_tif(spikes, 'DEBUG_spikes.tif')
        if register_in_place:
            np_tif.array_to_tif(s, 'DEBUG_registered_stack.tif')
    return np.array(registration_shifts)
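A minimal usage sketch for `stack_registration`, assuming the module's helper functions (`estimate_fourier_cutoff_radius`, `apply_registration_shifts`) are importable alongside it; the synthetic stack below only demonstrates the calling convention.

import numpy as np

# Synthetic test stack: a Gaussian blob shifted by known integer offsets.
yy, xx = np.mgrid[:64, :64]
reference = np.exp(-((yy - 32) ** 2 + (xx - 32) ** 2) / 50.0)
stack = np.stack([np.roll(reference, (dy, dx), axis=(0, 1))
                  for dy, dx in [(0, 0), (3, -2), (-5, 4)]])

shifts = stack_registration(stack,
                            align_to_this_slice=0,
                            refinement='spike_interpolation',
                            register_in_place=False,
                            fourier_cutoff_radius=0.15)
print(shifts)   # roughly [[0, 0], [3, -2], [-5, 4]], up to the sign convention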
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of a histogramdd function.  A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin.  This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of D arrays of length N, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed.  This must be
        the same shape as `sample`, or a list of sequences - each with the same
        shape as `sample`.  If `values` is such a list, the statistic will be
        computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

          * 'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'median' : compute the median of values for points within each
            bin. Empty bins will be represented by NaN.
          * 'count' : compute the count of points within each bin.  This is
            identical to an unweighted histogram.  `values` array is not
            referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
            Empty bins will be represented by NaN.
          * function : a user-defined function which takes a 1D array of
            values, and outputs a single numerical statistic. This function
            will be called on the values in each bin.  Empty bins will be
            represented by function([]), or NaN if this returns an error.

    bins : sequence or int, optional
        The bin specification must be in one of the following forms:

          * A sequence of arrays describing the bin edges along each dimension.
          * The number of bins for each dimension (nx, ny, ... = bins).
          * The number of bins for all dimensions (nx = ny = ... = bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents the
        bin in which this observation falls.  The representation depends on the
        `expand_binnumbers` argument.  See `Notes` for details.


    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin is half-open in each dimension.  In
    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer that
    represents the bin in which it belongs.  The representation depends on the
    `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively.  In each
    dimension, a binnumber of `i` means the corresponding value is between
    (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std','min','max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic,))

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if(statistic != 'count' and Vlen != Dlen):
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    nbin = np.empty(Ndim, int)    # Number of bins in each dimension
    edges = Ndim * [None]         # Bin edges for each dim (will be 2D array)
    dedges = Ndim * [None]        # Spacing between edges (will be 2D array)

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        bins = Ndim * [bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(axis=0), float))
        smax = np.atleast_1d(np.array(sample.max(axis=0), float))
    else:
        smin = np.zeros(Ndim)
        smax = np.zeros(Ndim)
        for i in xrange(Ndim):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in xrange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in xrange(Ndim):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
        else:
            edges[i] = np.asarray(bins[i], float)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    # Compute the bin number each sample falls into, in each dimension
    sampBin = [
        np.digitize(sample[:, i], edges[i])
        for i in xrange(Ndim)
    ]

    # Using `digitize`, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in xrange(Ndim):
        # Find the rounding precision
        decimal = int(-np.log10(dedges[i].min())) + 6
        # Find which points are on the rightmost edge.
        on_edge = np.where(np.around(sample[:, i], decimal) ==
                           np.around(edges[i][-1], decimal))[0]
        # Shift these points one bin to the left.
        sampBin[i][on_edge] -= 1

    # Compute the sample indices in the flattened statistic matrix.
    binnumbers = np.ravel_multi_index(sampBin, nbin)

    result = np.empty([Vdim, nbin.prod()], float)

    if statistic == 'mean':
        result.fill(np.nan)
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
            result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
                                    (flatsum[a] / flatcount[a]) ** 2)
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic == 'sum':
        result.fill(0)
        for vv in xrange(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.median(values[vv, binnumbers == i])
    elif statistic == 'min':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.min(values[vv, binnumbers == i])
    elif statistic == 'max':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = np.max(values[vv, binnumbers == i])
    elif callable(statistic):
        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        result.fill(null)
        for i in np.unique(binnumbers):
            for vv in xrange(Vdim):
                result[vv, i] = statistic(values[vv, binnumbers == i])

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if(expand_binnumbers and Ndim > 1):
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin-2))

    return BinnedStatisticddResult(result, edges, binnumbers)
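A minimal usage sketch for this vendored `binned_statistic_dd`, assuming its module-level helpers (`BinnedStatisticddResult`, `suppress_warnings`, the NumPy import) are present; `scipy.stats.binned_statistic_dd` exposes the same interface if only the behaviour is needed.

import numpy as np

# Mean of a per-point value on a 3x3 grid over the unit square.
rng = np.random.default_rng(1)
sample = rng.random((500, 2))            # (N, D) point positions
values = sample[:, 0] + sample[:, 1]     # one value per point

stat, edges, binnumber = binned_statistic_dd(
    sample, values, statistic='mean', bins=[3, 3],
    range=[(0, 1), (0, 1)], expand_binnumbers=True)

print(stat.shape)        # (3, 3) grid of bin means
print(binnumber.shape)   # (2, N): per-dimension bin index of each sample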
def nd_argmax(array):
    return np.unravel_index(np.argmax(array.flatten()), array.shape)
Пример #39
0
    def _laser_algo(self, params):
        time = params[0]
        dist_meas = params[1]
        if not dist_meas or dist_meas < self._laser_min_range or dist_meas > self._laser_max_range:
            raise PositioningException(
                'invalid laser distance measurement: %s' % dist_meas)

        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=2)

        q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        d1, d2 = self.asteroids
        ast = d2 if self._target_d2 else d1
        ast_v = d2_v if self._target_d2 else d1_v
        ast_q = d2_q if self._target_d2 else d1_q

        rel_rot_q = q * ast_q * ast.ast2sc_q.conj()
        rel_pos_v = tools.q_times_v(q, ast_v - sc_v) * 1000
        max_r = ast.max_radius
        max_diam = 2 * max_r / 1000

        # set orthographic projection
        self._onboard_renderer.set_orth_frustum(max_diam, max_diam,
                                                -max_diam / 2, max_diam / 2)

        # render orthographic depth image
        _, zz = self._onboard_renderer.render(self._target_obj_idx, [0, 0, 0],
                                              rel_rot_q, [1, 0, 0],
                                              get_depth=True,
                                              shadows=False,
                                              textures=False)

        # restore regular perspective projection
        self._onboard_renderer.set_frustum(self._sm.cam.x_fov,
                                           self._sm.cam.y_fov,
                                           self._sm.min_altitude * .1,
                                           self._sm.max_distance)

        zz[zz > max_diam / 2 * 0.999] = float('nan')
        zz = zz * 1000 - rel_pos_v[2]
        xx, yy = np.meshgrid(
            np.linspace(-max_r, max_r, self._sm.view_width) - rel_pos_v[0],
            np.linspace(-max_r, max_r, self._sm.view_height) - rel_pos_v[1])

        x_expected = np.clip(
            (rel_pos_v[0] + max_r) / max_r / 2 * self._sm.view_width + 0.5, 0,
            self._sm.view_width - 1.001)
        y_expected = np.clip(
            (rel_pos_v[1] + max_r) / max_r / 2 * self._sm.view_height + 0.5, 0,
            self._sm.view_height - 1.001)
        dist_expected = tools.interp2(zz,
                                      x_expected,
                                      y_expected,
                                      discard_bg=True)

        # mse cost function balances between adjusted location and measurement error
        adj_dist_sqr = (zz - dist_meas)**2 + xx**2 + yy**2
        cost = self._laser_adj_loc_weight * adj_dist_sqr \
             + (self._laser_meas_err_weight - self._laser_adj_loc_weight) * (zz - dist_meas)**2

        j, i = np.unravel_index(np.nanargmin(cost), cost.shape)
        if np.isnan(zz[j, i]):
            raise PositioningException(
                'laser algo results in off asteroid pointing')
        if math.sqrt(adj_dist_sqr[j, i]) >= self._laser_max_adj_dist:
            raise PositioningException(
                'laser algo solution too far (%.0fm, limit=%.0fm), spurious measurement assumed'
                % (math.sqrt(adj_dist_sqr[j, i]), self._laser_max_adj_dist))

        dx, dy, dz = xx[0, i], yy[j, 0], zz[j, i] - dist_meas

        if self._result_frame == ApiServer.FRAME_GLOBAL:
            # return global ast-sc vector
            est_sc_ast_v = ast_v * 1000 - tools.q_times_v(
                q.conj(), rel_pos_v + np.array([dx, dy, dz]))
        else:
            # return local sc-ast vector
            est_sc_ast_v = tools.q_times_v(SystemModel.sc2gl_q,
                                           rel_pos_v + np.array([dx, dy, dz]))
        dist_expected = float(
            dist_expected) if not np.isnan(dist_expected) else -1.0
        return json.dumps([list(est_sc_ast_v), dist_expected])
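The selection step above, which builds a weighted cost over a depth map and picks the best pixel with `np.nanargmin` plus `np.unravel_index`, can be exercised on its own; the tiny arrays and weights below are made up for illustration.

import numpy as np

# Toy depth map (NaN marks background) and a laser range measurement.
zz = np.array([[np.nan, 10.0, 11.0],
               [ 9.0,    8.0,  7.5],
               [np.nan,  9.5,  8.5]])
xx, yy = np.meshgrid(np.arange(3) - 1.0, np.arange(3) - 1.0)
dist_meas = 8.0

adj_loc_weight, meas_err_weight = 1.0, 3.0    # made-up weights
adj_dist_sqr = (zz - dist_meas) ** 2 + xx ** 2 + yy ** 2
cost = adj_loc_weight * adj_dist_sqr \
     + (meas_err_weight - adj_loc_weight) * (zz - dist_meas) ** 2

j, i = np.unravel_index(np.nanargmin(cost), cost.shape)
dx, dy, dz = xx[0, i], yy[j, 0], zz[j, i] - dist_meas
print(j, i, dx, dy, dz)   # the cell whose depth best matches the measurement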
def calc_beta_oris_from_boundary_misori(
        grain: ebsd.Grain,
        neighbour_network: nx.Graph,
        quat_array: np.ndarray,
        alpha_phase_id: int,
        burg_tol: float = 5
) -> Tuple[List[List[Quat]], List[float], List[Quat]]:
    """Calculate the possible beta orientations for pairs of alpha and
    neighbour orientations using the misorientation relation to neighbour
    orientations.

    Parameters
    ----------
    grain
        The grain currently being reconstructed

    neighbour_network
        A neighbour network mapping grain boundary connectivity

    quat_array
        Array of quaternions, representing the orientations of the pixels of the EBSD map

    burg_tol
        The threshold deviation from the ideal Burgers relation, in degrees,
        below which a neighbour relation is accepted

    Returns
    -------
    list of lists of defdap.Quat.quat
        Possible beta orientations, grouped by each neighbour. Any
        neighbour with deviation greater than the tolerance is excluded.
    list of float
        Deviations from perfect Burgers transformation
    list of Quat
        Alpha orientations

    """
    # This needs to be moved further up the calculation process
    unq_cub_sym_comps = Quat.extract_quat_comps(unq_cub_syms)

    beta_oris = []
    beta_devs = []
    alpha_oris = []

    neighbour_grains = neighbour_network.neighbors(grain)
    neighbour_grains = [
        grain for grain in neighbour_grains if grain.phaseID == alpha_phase_id
    ]
    for neighbour_grain in neighbour_grains:

        bseg = neighbour_network[grain][neighbour_grain]['boundary']
        # check sense of bseg
        if grain is bseg.grain1:
            ipoint = 0
        else:
            ipoint = 1

        for boundary_point_pair in bseg.boundaryPointPairsX:
            point = boundary_point_pair[ipoint]
            alpha_ori = quat_array[point[1], point[0]]

            point = boundary_point_pair[ipoint - 1]
            neighbour_ori = quat_array[point[1], point[0]]

            min_misoris, min_cub_sym_idxs = calc_misori_of_variants(
                alpha_ori.conjugate, neighbour_ori, unq_cub_sym_comps)

            # Find the hex symmetries (i, j) that give the minimum
            # deviation from the Burgers relation, then store that
            # deviation, the hex symmetries (i, j) and the cubic
            # symmetry. If the deviation is above the threshold, the
            # cubic symmetry is set to -1.
            min_misori_idx = np.unravel_index(np.argmin(min_misoris),
                                              min_misoris.shape)
            burg_dev = min_misoris[min_misori_idx]

            if burg_dev < burg_tol / 180 * np.pi:
                beta_oris.append(
                    beta_oris_from_cub_sym(alpha_ori,
                                           min_cub_sym_idxs[min_misori_idx],
                                           int(min_misori_idx[0])))
                beta_devs.append(burg_dev)
                alpha_oris.append(alpha_ori)

    return beta_oris, beta_devs, alpha_oris
def wind_diesel_hybrid(
        energy_per_hh,  # kWh/household/year as defined
        wind_speed, # annual average wind speed
        wind_curve,
        tier,
        start_year,
        end_year,
        wind_no=15,  # number of wind panel sizes simulated
        diesel_no=15,  # number of diesel generators simulated
        discount_rate=0.08,
        diesel_range=[0.7]
):
    n_chg = 0.92  # charge efficiency of battery
    n_dis = 0.92  # discharge efficiency of battery
    lpsp_max = 0.10  # maximum loss of load allowed over the year, in share of kWh
    battery_cost = 139  # battery capital cost, USD/kWh of storage capacity
    wind_cost = 2800  # Wind turbine capital cost, USD/kW peak power
    diesel_cost = 261  # diesel generator capital cost, USD/kW rated power
    wind_life = 20  # wind panel expected lifetime, years
    diesel_life = 10  # diesel generator expected lifetime, years
    wind_om = 0.015  # annual OM cost of wind panels
    diesel_om = 0.1  # annual OM cost of diesel generator
    k_t = 0.005  # temperature factor of wind panels
    inverter_cost = 80
    inverter_life = 10
    inverter_efficiency = 0.92
    charge_controller = 142

    wind_curve = wind_curve * wind_speed / np.average(wind_curve)

    hour_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) * 365
    dod_max = 0.8  # maximum depth of discharge of battery

    def load_curve(tier, energy_per_hh):
        # the values below define the load curve for the five tiers. The values reflect the share of the daily demand
        # expected in each hour of the day (sum of all values for one tier = 1)
        tier5_load_curve = [0.021008403, 0.021008403, 0.021008403, 0.021008403, 0.027310924, 0.037815126,
                            0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807,
                            0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.046218487, 0.050420168,
                            0.067226891, 0.084033613, 0.073529412, 0.052521008, 0.033613445, 0.023109244]
        tier4_load_curve = [0.017167382, 0.017167382, 0.017167382, 0.017167382, 0.025751073, 0.038626609,
                            0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455,
                            0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.0472103, 0.051502146,
                            0.068669528, 0.08583691, 0.075107296, 0.053648069, 0.034334764, 0.021459227]
        tier3_load_curve = [0.013297872, 0.013297872, 0.013297872, 0.013297872, 0.019060284, 0.034574468,
                            0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241,
                            0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.048758865, 0.053191489,
                            0.070921986, 0.088652482, 0.077570922, 0.055407801, 0.035460993, 0.019946809]
        tier2_load_curve = [0.010224949, 0.010224949, 0.010224949, 0.010224949, 0.019427403, 0.034764826,
                            0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796,
                            0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.04601227, 0.056237219,
                            0.081799591, 0.102249489, 0.089468303, 0.06390593, 0.038343558, 0.017893661]
        tier1_load_curve = [0, 0, 0, 0, 0.012578616, 0.031446541, 0.037735849, 0.037735849, 0.037735849,
                            0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849,
                            0.037735849, 0.044025157, 0.062893082, 0.100628931, 0.125786164, 0.110062893,
                            0.078616352, 0.044025157, 0.012578616]

        if tier == 1:
            load_curve = tier1_load_curve * 365
        elif tier == 2:
            load_curve = tier2_load_curve * 365
        elif tier == 3:
            load_curve = tier3_load_curve * 365
        elif tier == 4:
            load_curve = tier4_load_curve * 365
        else:
            load_curve = tier5_load_curve * 365

        return np.array(load_curve) * energy_per_hh / 365

    load_curve = load_curve(tier, energy_per_hh)

    def wind_diesel_capacities(wind_capacity, battery_size, diesel_capacity, wind_no, diesel_no, battery_no, wind_curve):
        dod = np.zeros(shape=(24, battery_no, wind_no, diesel_no))
        battery_use = np.zeros(shape=(24, battery_no, wind_no, diesel_no))  # Stores the amount of battery discharge during the day
        fuel_result = np.zeros(shape=(battery_no, wind_no, diesel_no))
        battery_life = np.zeros(shape=(battery_no, wind_no, diesel_no))
        soc = np.ones(shape=(battery_no, wind_no, diesel_no)) * 0.5
        unmet_demand = np.zeros(shape=(battery_no, wind_no, diesel_no))
        excess_gen = np.zeros(shape=(battery_no, wind_no, diesel_no)) # TODO
        annual_diesel_gen = np.zeros(shape=(battery_no, wind_no, diesel_no))
        dod_max = np.ones(shape=(battery_no, wind_no, diesel_no)) * 0.6

        p_rated = 600
        es = 0.85  # losses in wind electricity
        u_arr = range(1, 26)
        p_curve = [0, 0, 0, 0, 30, 77, 135, 208, 287, 371, 450, 514, 558,
                   582, 594, 598, 600, 600, 600, 600, 600, 600, 600, 600, 600]
        wind_power = np.zeros(8760)
        wind_curve = np.round(wind_curve)
        for i in range(len(p_curve)):
            #  wind_power = np.where(wind_curve == i, p_curve[i], wind_power)
            wind_curve = np.where(wind_curve == i, p_curve[i], wind_curve)
        # wind_power = wind_power[:, 0]
        wind_power = wind_curve

        for i in range(8760):

            # Battery self-discharge (0.02% per hour)
            battery_use[hour_numbers[i], :, :] = 0.0002 * soc
            soc *= 0.9998

            # Calculation of wind gen and net load
            wind_gen = wind_power[i] * wind_capacity / p_rated
            net_load = load_curve[hour_numbers[i]] - wind_gen  # remaining load not met by wind panels

            # Dispatchable energy from battery available to meet load
            battery_dispatchable = soc * battery_size * n_dis
            # Energy required to fully charge battery
            battery_chargeable = (1 - soc) * battery_size / n_chg

            # Below is the dispatch strategy for the diesel generator as described in word document

            if 4 < hour_numbers[i] <= 17:
                # During the morning and day, the batteries are dispatched primarily.
                # The diesel generator, if needed, is run at the lowest possible capacity

                # Minimum diesel capacity to cover the net load after batteries.
                # Diesel generator limited by lowest possible capacity (40%) and rated capacity
                min_diesel = np.minimum(
                    np.maximum(net_load - battery_dispatchable, 0.4 * diesel_capacity),
                    diesel_capacity)

                diesel_gen = np.where(net_load > battery_dispatchable, min_diesel, 0)

            elif 17 < hour_numbers[i] <= 23:
                # During the evening, the diesel generator is dispatched primarily, at max_diesel.
                # Batteries are dispatched if diesel generation is insufficient.

                #  Maximum amount of diesel needed to supply load and charge battery
                # Diesel generator limited by lowest possible capacity (40%) and rated capacity
                max_diesel = np.maximum(
                    np.minimum(net_load + battery_chargeable, diesel_capacity),
                    0.4 * diesel_capacity)

                diesel_gen = np.where(net_load > 0, max_diesel, 0)
            else:
                # During night, batteries are dispatched primarily.
                # The diesel generator is used at max_diesel if load is larger than battery capacity

                #  Maximum amount of diesel needed to supply load and charge battery
                # Diesel generator limited by lowest possible capacity (40%) and rated capacity
                max_diesel = np.maximum(
                    np.minimum(net_load + battery_chargeable, diesel_capacity),
                    0.4 * diesel_capacity)

                diesel_gen = np.where(net_load > battery_dispatchable, max_diesel, 0)

            fuel_result += np.where(diesel_gen > 0, diesel_capacity * 0.08145 + diesel_gen * 0.246, 0)
            annual_diesel_gen += diesel_gen

            # Remaining load after diesel generator
            net_load = net_load - diesel_gen

            # If diesel generation is larger than load, battery is charged
            # If diesel generation is smaller than load, battery is discharged
            soc -= np.where(net_load > 0,
                            net_load / n_dis / battery_size,
                            net_load * n_chg / battery_size)

            # The amount of battery discharge in the hour is stored (measured in State Of Charge)
            battery_use[hour_numbers[i], :, :] = \
                np.minimum(np.where(net_load > 0,
                                    net_load / n_dis / battery_size,
                                    0),
                           soc)

            # If State of charge is negative, that means there's demand that could not be met.
            unmet_demand += np.where(soc < 0,
                                     -soc / n_dis * battery_size,
                                     0)
            soc = np.maximum(soc, 0)

            # If State of Charge is larger than 1, that means there was excess wind/diesel generation
            excess_gen += np.where(soc > 1,
                                   (soc - 1) / n_chg * battery_size,
                                   0)
            # TODO
            soc = np.minimum(soc, 1)

            dod[hour_numbers[i], :, :] = 1 - soc  # The depth of discharge in every hour of the day is stored
            if hour_numbers[i] == 23:  # The battery wear during the last day is calculated
                battery_used = np.where(dod.max(axis=0) > 0, 1, 0)
                battery_life += battery_use.sum(axis=0) / (
                        531.52764 * np.maximum(0.1, dod.max(axis=0) * dod_max) ** -1.12297) * battery_used

        condition = unmet_demand / energy_per_hh  # LPSP is calculated
        excess_gen = excess_gen / energy_per_hh
        battery_life = np.round(1 / battery_life)
        diesel_share = annual_diesel_gen / energy_per_hh

        return diesel_share, battery_life, condition, fuel_result, excess_gen

    # This section creates the range of wind capacities, diesel capacities and battery sizes to be simulated
    ref = 5 * load_curve[19]

    battery_sizes = [0.5 * energy_per_hh / 365, energy_per_hh / 365, 2 * energy_per_hh / 365]
    wind_caps = []
    diesel_caps = []
    diesel_extend = np.ones(wind_no)
    wind_extend = np.ones(diesel_no)

    for i in range(wind_no):
        wind_caps.append(ref * (wind_no - i) / wind_no)

    for j in range(diesel_no):
        diesel_caps.append(j * max(load_curve) / diesel_no)

    wind_caps = np.outer(np.array(wind_caps), wind_extend)
    diesel_caps = np.outer(diesel_extend, np.array(diesel_caps))

    # This section creates 3D arrays (battery size x wind capacity x diesel capacity)
    # holding the candidate battery sizes, wind capacities and diesel capacities

    battery_size = np.ones((len(battery_sizes), wind_no, diesel_no))
    wind_panel_size = np.zeros((len(battery_sizes), wind_no, diesel_no))
    diesel_capacity = np.zeros((len(battery_sizes), wind_no, diesel_no))

    for j in range(len(battery_sizes)):
        battery_size[j, :, :] *= battery_sizes[j]
        wind_panel_size[j, :, :] = wind_caps
        diesel_capacity[j, :, :] = diesel_caps

    # For every combination of battery size, wind capacity and diesel capacity, the diesel share,
    # battery lifetime, LPSP, fuel usage and excess generation are calculated
    diesel_share, battery_life, lpsp, fuel_usage, excess_gen = \
        wind_diesel_capacities(wind_panel_size, battery_size, diesel_capacity, wind_no, diesel_no, len(battery_sizes), wind_curve)
    battery_life = np.minimum(20, battery_life)

    def calculate_hybrid_lcoe(diesel_price):
        # Necessary information for calculation of LCOE is defined
        project_life = end_year - start_year
        generation = np.ones(project_life) * energy_per_hh
        generation[0] = 0

        # Calculate LCOE
        sum_costs = np.zeros((len(battery_sizes), wind_no, diesel_no))
        sum_el_gen = np.zeros((len(battery_sizes), wind_no, diesel_no))
        investment = np.zeros((len(battery_sizes), wind_no, diesel_no))

        for year in range(project_life + 1):
            salvage = np.zeros((len(battery_sizes), wind_no, diesel_no))

            fuel_costs = fuel_usage * diesel_price
            om_costs = (wind_panel_size * wind_cost * wind_om + diesel_capacity * diesel_cost * diesel_om)

            inverter_investment = np.where(year % inverter_life == 0, max(load_curve) * inverter_cost, 0)
            diesel_investment = np.where(year % diesel_life == 0, diesel_capacity * diesel_cost, 0)
            wind_investment = np.where(year % wind_life == 0, wind_panel_size * wind_cost, 0)
            battery_investment = np.where(year % battery_life == 0, battery_size * battery_cost / dod_max, 0)  # TODO Include dod_max here?

            if year == project_life:
                salvage = (1 - (project_life % battery_life) / battery_life) * battery_cost * battery_size / dod_max + \
                          (1 - (project_life % diesel_life) / diesel_life) * diesel_capacity * diesel_cost + \
                          (1 - (project_life % wind_life) / wind_life) * wind_panel_size * wind_cost + \
                          (1 - (project_life % inverter_life) / inverter_life) * max(load_curve) * inverter_cost

            investment += diesel_investment + wind_investment + battery_investment + inverter_investment - salvage

            sum_costs += (fuel_costs + om_costs + battery_investment + diesel_investment + wind_investment - salvage) / ((1 + discount_rate) ** year)

            if year > 0:
                sum_el_gen += energy_per_hh / ((1 + discount_rate) ** year)

        return sum_costs / sum_el_gen, investment

    diesel_limit = 0.5

    min_lcoe_range = []
    investment_range = []
    capacity_range = []
    ren_share_range = []

    for d in diesel_range:
        lcoe, investment = calculate_hybrid_lcoe(d)
        lcoe = np.where(lpsp > lpsp_max, 99, lcoe)
        lcoe = np.where(diesel_share > diesel_limit, 99, lcoe)

        min_lcoe = np.min(lcoe)
        min_lcoe_combination = np.unravel_index(np.argmin(lcoe, axis=None), lcoe.shape)
        ren_share = 1 - diesel_share[min_lcoe_combination]
        capacity = wind_panel_size[min_lcoe_combination] + diesel_capacity[min_lcoe_combination]
        ren_capacity = wind_panel_size[min_lcoe_combination] / capacity
        # excess_gen = excess_gen[min_lcoe_combination]

        min_lcoe_range.append(min_lcoe)
        investment_range.append(investment[min_lcoe_combination])
        capacity_range.append(capacity)
        ren_share_range.append(ren_share)

    return min_lcoe_range, investment_range, capacity_range #, ren_share_range  # , ren_capacity, excess_gen

#wind_diesel_hybrid(1, 5, wind_curve, 1, 2018, 2030, diesel_price=0.3)
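A hedged usage sketch for `wind_diesel_hybrid`; the hourly wind profile and household demand below are synthetic, and the diesel prices in `diesel_range` are only placeholders.

import numpy as np

# Synthetic hourly wind-speed profile (m/s) for one year (8760 hours).
rng = np.random.default_rng(42)
wind_curve = np.clip(rng.normal(6.0, 2.0, 8760), 0.5, None)

lcoe_range, investment_range, capacity_range = wind_diesel_hybrid(
    energy_per_hh=1500,        # kWh/household/year (illustrative)
    wind_speed=6.5,            # annual average wind speed
    wind_curve=wind_curve,
    tier=3,
    start_year=2022,
    end_year=2030,
    diesel_range=[0.5, 0.7])   # candidate diesel prices to evaluate

print(lcoe_range)              # one least-cost LCOE per diesel price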
Пример #42
0
def main() -> None:
    st.title("Лабораторная работа №2")
    st.markdown("<h3 style='margin-bottom: 50px'>"
                "{task}"
                "</h3>".format(task=task),
                unsafe_allow_html=True)
    seller, customer = st.beta_columns(2)

    seller.text("Вопросы о продавце")
    customer.text("Вопросы о покупателе")
    seller_answers, customer_answers = [], []

    for question in seller_questions:
        seller_answers.append(seller.radio(question, default_choices))
    for question in customer_questions:
        customer_answers.append(customer.radio(question, default_choices))
    if st.button("Рассчитать результат сделки"):
        type_seller: Dict[int, float] = {k: 0 for k in range(1, 4)}
        type_customer: Dict[int, int] = {k: 0 for k in range(1, 4)}
        # Tally the scores for each seller type
        for idx, answer in enumerate(seller_answers):
            # If the answer is "No", the question is ignored
            # If the answer is "Yes", its coefficients are added
            if not dict_default_choices[answer]:
                continue
            if idx == 0:
                type_seller[1] += 5
                type_seller[3] += 2.5
            if idx == 1:
                type_seller[2] += 10
            if idx == 2:
                type_seller[1] += 10
            if idx == 3:
                type_seller[2] += 5
            if idx == 4:
                type_seller[2] += 2.5
                type_seller[3] += 5
            if idx == 5:
                type_seller[1] += 2.5
                type_seller[3] += 10
            if idx == 6:
                type_seller[1] += 5
            if idx == 7:
                type_seller[3] += 5
            if idx == 8:
                type_seller[2] += 5
                type_seller[3] += 2.5
            if idx == 9:
                type_seller[1] += 2.5
            if idx == 10:
                type_seller[2] += 2.5
        # Tally the scores for each customer type
        for idx, answer in enumerate(customer_answers):
            if not dict_default_choices[answer]:
                continue
            if idx == 0:
                type_customer[1] += 6
            if idx == 1:
                type_customer[1] += 7
            if idx == 2:
                type_customer[1] += 4
            if idx == 3:
                type_customer[1] += 3
            if idx == 4:
                type_customer[2] += 7
            if idx == 5:
                type_customer[2] += 3
            if idx == 6:
                type_customer[2] += 6
            if idx == 7:
                type_customer[2] += 4
            if idx == 8:
                type_customer[3] += 7
            if idx == 9:
                type_customer[3] += 6
            if idx == 10:
                type_customer[3] += 4
            if idx == 11:
                type_customer[3] += 3

        seller_prob = list(type_seller.values())
        customer_prob = list(type_customer.values())

        seller_sum = sum(seller_prob)
        customer_sum = sum(customer_prob)

        seller_prob = [elem / seller_sum * 100 for elem in seller_prob]
        customer_prob = [elem / customer_sum * 100 for elem in customer_prob]

        probabilities = np.zeros((3, 3))
        for idx in range(3):
            for jdx in range(3):
                probabilities[idx][jdx] = seller_prob[idx] * customer_prob[jdx] / 100

        df = pd.DataFrame(
            data=probabilities,
            index=[f"{idx} тип покупателя" for idx in range(1, 4)],
            columns=[f"{idx} тип продавца" for idx in range(1, 4)])
        st.write(df)
        row, col = np.unravel_index(probabilities.argmax(),
                                    probabilities.shape)
        st.text(result_messages[row][col])
        st.text(f"Вероятность сделки = {probabilities[row][col]:.1f}%")
Пример #43
0
def largest_indices(ary, n):
    """Returns the n largest indices from a numpy array."""
    flat = ary.flatten()
    indices = np.argpartition(flat, -n)[-n:]
    indices = indices[np.argsort(-flat[indices])]
    return np.unravel_index(indices, ary.shape)
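A quick usage sketch for `largest_indices`:

import numpy as np

a = np.array([[1, 9, 3],
              [7, 2, 8]])
rows, cols = largest_indices(a, 3)
print(rows, cols)       # row/column indices of the 3 largest values
print(a[rows, cols])    # -> [9 8 7], sorted in descending order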
Пример #44
0
def needle_affine(A,
                  B,
                  matrix=PAM250,
                  gap=-1,
                  opengap=-5,
                  EMBOSS=False,
                  global_align=True,
                  local_align=False):
    """
    Alignment implementation supporting both global and local alignments
    When running with the EMBOSS flag set to True, two things change:
        - Gaps in the beginning and at the end of the sequence are not 
          subject to the "opengap" penalty.
        - The penalty for extending a gap is not added when opening one
    EMBOSS mode only works reliably with the global alignment mode.
    """
    if local_align:
        global_align = False
    maybe_gap = True
    if EMBOSS:
        maybe_gap = False
        if not global_align:
            print(
                'WARNING: EMBOSS mode only works reliably with global alignments.'
            )
    #Initialising
    scores_matched = [[[-np.inf, (0, 0)]] * (len(B) + 1)
                      for i in range(len(A) + 1)]
    scores_hgap = [[[-np.inf, (0, 0)]] * (len(B) + 1)
                   for i in range(len(A) + 1)]
    scores_vgap = [[[-np.inf, (0, 0)]] * (len(B) + 1)
                   for i in range(len(A) + 1)]
    #Filling first rows and columns
    scores_matched[0][0] = [0, (0, 0)]
    for i in range(1, len(A) + 1):
        scores_hgap[i][0] = [opengap * maybe_gap + (i) * gap, (-1, 0)]
        if not global_align:
            scores_matched[i][0] = [0, (0, 0)]
    for j in range(1, len(B) + 1):
        scores_vgap[0][j] = [opengap * maybe_gap + (j) * gap, (0, -1)]
        if not global_align:
            scores_matched[0][j] = [0, (0, 0)]
    #Filling the rest of the matrix
    directions = [(-1, -1), (-1, 0), (0, -1), (0, 0)]
    zero_option = -np.inf
    if not global_align:
        zero_option = 0
    for i in range(1, len(A) + 1):
        for j in range(1, len(B) + 1):
            options = [
                scores_matched[i - 1][j - 1][0], scores_hgap[i - 1][j - 1][0],
                scores_vgap[i - 1][j - 1][0], zero_option
            ]
            scores_matched[i][j] = [
                matrix[A[i - 1]][B[j - 1]] + max(options),
                directions[np.argmax(options)]
            ]
            options = [
                scores_matched[i - 1][j][0] + opengap + gap * maybe_gap,
                scores_hgap[i - 1][j][0] + gap, -np.inf
            ]
            scores_hgap[i][j] = [max(options), directions[np.argmax(options)]]
            options = [
                scores_matched[i][j - 1][0] + opengap + gap * maybe_gap,
                -np.inf, scores_vgap[i][j - 1][0] + gap
            ]
            scores_vgap[i][j] = [max(options), directions[np.argmax(options)]]
    #Backtracking
    out_A = ''
    out_B = ''
    pos = [len(A), len(B)]
    if EMBOSS:
        scores_hgap[len(A)][len(B)][0] -= opengap
        scores_vgap[len(A)][len(B)][0] -= opengap
    options = [
        scores_matched[len(A)][len(B)], scores_hgap[len(A)][len(B)],
        scores_vgap[len(A)][len(B)]
    ]
    current_matrix = [scores_matched, scores_hgap,
                      scores_vgap][np.argmax([i[0] for i in options])]
    if not global_align:
        current_matrix = scores_matched
        temp = np.asarray([[i[0] for i in j] for j in current_matrix])
        temp = np.unravel_index(np.argmax(temp), temp.shape)
        pos[0] = temp[0]
        pos[1] = temp[1]
    step_pointers = {
        (-1, -1): scores_matched,
        (-1, 0): scores_hgap,
        (0, -1): scores_vgap,
        (0, 0): 'END'
    }
    step = current_matrix[pos[0]][pos[1]]
    final_score = step[0]
    step = step[1]
    while not step == (0, 0):
        # Processing data of current position
        if not current_matrix == scores_vgap:
            out_A += A[pos[0] - 1]
            pos[0] -= 1
        else:
            out_A += '-'
        if not current_matrix == scores_hgap:
            out_B += B[pos[1] - 1]
            pos[1] -= 1
        else:
            out_B += '-'
        # Getting new position
        current_matrix = step_pointers[step]
        step = current_matrix[pos[0]][pos[1]][1]
    return out_A[::-1], out_B[::-1], final_score
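For illustration only (not from the original source, which defaults to a PAM250 matrix): a toy call with a hand-made substitution matrix, assuming `needle_affine` and numpy are already imported.

alphabet = 'ACGT'
# hypothetical scores: +1 for a match, -1 for a mismatch
toy_matrix = {a: {b: (1 if a == b else -1) for b in alphabet} for a in alphabet}
aln_a, aln_b, score = needle_affine('GATTACA', 'GCATGCA', matrix=toy_matrix, gap=-1, opengap=-2)
print(aln_a)
print(aln_b)
print(score)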
Пример #45
0
def find_cut_slices(img, direction='z', n_cuts=7, spacing='auto'):
    """ Find 'good' cross-section slicing positions along a given axis.

    Parameters
    ----------
    img : 3D Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        the brain map.

    direction : string, optional
        Sectional direction; possible values are "x", "y", or "z".
        Default='z'.

    n_cuts : int, optional
        Number of cuts in the plot. Default=7.

    spacing : 'auto' or int, optional
        Minimum spacing between cuts (in voxels, not millimeters)
        if 'auto', the spacing is .5 / n_cuts * img_length.
        Default='auto'.

    Returns
    -------
    cut_coords : 1D array of length n_cuts
        The computed cut_coords.

    Notes
    -----
    This code works by iteratively locating peak activations that are
    separated by a distance of at least 'spacing'. If n_cuts is very
    large and all the activated regions are covered, cuts with a spacing
    less than 'spacing' will be returned.

    Warnings
    --------
    If a non-diagonal img is given, this function automatically reorders
    img to get it back to diagonal. This is to avoid finding the same cuts in
    the slices.

    """
    # misc
    if not direction in 'xyz':
        raise ValueError(
            "'direction' must be one of 'x', 'y', or 'z'. Got '%s'" %
            (direction))
    axis = 'xyz'.index(direction)
    img = check_niimg_3d(img)
    affine = img.affine
    if not np.alltrue(np.diag(affine)[:3]):
        warnings.warn(
            'A non-diagonal affine is found in the given '
            'image. Reordering the image to get diagonal affine '
            'for finding cuts in the slices.',
            stacklevel=2)
        # resample is set to avoid issues with an image having a non-diagonal
        # affine and rotation.
        img = reorder_img(img, resample='nearest')
        affine = img.affine
    # note: orig_data is a copy of img._data_cache thanks to np.abs
    orig_data = np.abs(_safe_get_data(img))
    this_shape = orig_data.shape[axis]

    if not isinstance(n_cuts, numbers.Number):
        raise ValueError("The number of cuts (n_cuts) must be an integer "
                         "greater than or equal to 1. "
                         "You provided a value of n_cuts=%s. " % n_cuts)

    # BF issue #575: Return all the slices along an axis if this axis
    # is the display mode and more cuts are requested (n_cuts) than
    # the axis has slices.
    if n_cuts > this_shape:
        warnings.warn('Too many cuts requested for the data: '
                      'n_cuts=%i, data size=%i' % (n_cuts, this_shape))
        return _transform_cut_coords(np.arange(this_shape), direction, affine)

    # To smooth data that might be np.int or np.uint,
    # first convert it to float.
    data = orig_data.copy()
    if data.dtype.kind in ('i', 'u'):
        data = data.astype(float)

    data = _smooth_array(data, affine, fwhm='fast')

    # to control floating point error problems
    # during given input value "n_cuts"
    epsilon = np.finfo(np.float32).eps
    difference = abs(round(n_cuts) - n_cuts)
    if round(n_cuts) < 1. or difference > epsilon:
        message = ("Image has %d slices in direction %s. "
                   "Therefore, the number of cuts must be between 1 and %d. "
                   "You provided n_cuts=%s " %
                   (this_shape, direction, this_shape, n_cuts))
        raise ValueError(message)
    else:
        n_cuts = int(round(n_cuts))

    if spacing == 'auto':
        spacing = max(int(.5 / n_cuts * data.shape[axis]), 1)

    slices = [slice(None, None), slice(None, None), slice(None, None)]

    cut_coords = list()

    for _ in range(n_cuts):
        # Find a peak
        max_along_axis = np.unravel_index(np.abs(data).argmax(),
                                          data.shape)[axis]

        # cancel out the surroundings of the peak
        start = max(0, max_along_axis - spacing)
        stop = max_along_axis + spacing
        slices[axis] = slice(start, stop)
        # We don't actually fully zero the neighborhood, to avoid ending
        # up with fully zeros if n_cuts is too big: we can do multiple
        # passes on the data
        data[tuple(slices)] *= 1.e-3

        cut_coords.append(max_along_axis)

    # We sometimes get duplicated cuts, so we add cuts at the beginning
    # and the end
    cut_coords = np.unique(cut_coords).tolist()
    while len(cut_coords) < n_cuts:
        # Candidates for new cuts:
        slice_below = min(cut_coords) - 2
        slice_above = max(cut_coords) + 2
        candidates = [slice_above]
        # One slice where there is the biggest gap in the existing
        # cut_coords
        if len(cut_coords) > 1:
            middle_idx = np.argmax(np.diff(cut_coords))
            slice_middle = int(
                .5 * (cut_coords[middle_idx] + cut_coords[middle_idx + 1]))
            if not slice_middle in cut_coords:
                candidates.append(slice_middle)
        if slice_below >= 0:
            # We need positive slice to avoid having negative
            # indices, which would work, but not the way we think of them
            candidates.append(slice_below)
        best_weight = -10
        for candidate in candidates:
            if candidate >= this_shape:
                this_weight = 0
            else:
                this_weight = np.sum(np.rollaxis(orig_data, axis)[candidate])
            if this_weight > best_weight:
                best_candidate = candidate
                best_weight = this_weight

        cut_coords.append(best_candidate)
        cut_coords = np.unique(cut_coords).tolist()

    cut_coords = np.array(cut_coords)
    cut_coords.sort()

    return _transform_cut_coords(cut_coords, direction, affine)
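The peak-then-suppress idea described in the Notes can be shown in isolation; the following is a simplified 1-D sketch of the same logic, not nilearn code.

import numpy as np

def greedy_peaks_1d(signal, n_peaks, spacing):
    # pick n_peaks positions of maximal |signal|, damping each neighbourhood so
    # the next iteration finds a different peak (same trick as the loop above)
    data = np.abs(np.asarray(signal, dtype=float))
    peaks = []
    for _ in range(n_peaks):
        idx = int(data.argmax())
        peaks.append(idx)
        data[max(0, idx - spacing):idx + spacing] *= 1.e-3
    return sorted(peaks)

print(greedy_peaks_1d(np.sin(np.linspace(0, 12, 200)), 3, spacing=20))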
Пример #46
0
    def grid_indices_to_coordinates(self, indices=None):
        if indices is None:
            # size = 48
            indices = np.arange(self.size)
        return np.unravel_index(indices, self.shape)
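A standalone sketch of the same mapping, with a hypothetical 6x8 grid (so size = 48) in place of `self.shape`:

import numpy as np

shape = (6, 8)                            # hypothetical grid, 48 cells in total
rows, cols = np.unravel_index(np.arange(np.prod(shape)), shape)
# e.g. flat cell 10 sits at (rows[10], cols[10]) == (1, 2)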
Пример #47
0
        def get_target(bbox, gt_box, score, ref_bbox, ref_gt_box, ref_score):

            global num_of_is_full_max
            num_boxes = bbox.shape[0]
            ref_num_boxes = ref_bbox.shape[0]
            score_list = get_scores(bbox, gt_box, score)
            ref_score_list = get_scores(ref_bbox, ref_gt_box, ref_score)

            output_list = []
            ref_output_list = []
            for cls_idx in range(0, num_fg_classes):

                valid_gt_mask = (gt_box[0, :, -1].astype(np.int32)==(cls_idx+1))
                valid_gt_box = gt_box[0, valid_gt_mask, :]
                num_valid_gt = len(valid_gt_box)

                ref_valid_gt_mask = (ref_gt_box[0, :, -1].astype(np.int32)==(cls_idx+1))
                ref_valid_gt_box = ref_gt_box[0, ref_valid_gt_mask, :]
                ref_num_valid_gt = len(ref_valid_gt_box)

                score_list_per_class = score_list[cls_idx]
                ref_score_list_per_class = ref_score_list[cls_idx]

                bbox_per_class = bbox[:, cls_idx, :]
                ref_bbox_per_class = ref_bbox[:, cls_idx, :]

                if num_valid_gt != ref_num_valid_gt:
                    if ref_num_valid_gt > num_valid_gt:
                        num_rm = ref_num_valid_gt - num_valid_gt
                        ref_num_valid_gt = num_valid_gt
                        gt_overlap_mat = bbox_overlaps(ref_valid_gt_box.astype(np.float), 
                            valid_gt_box.astype(np.float))
                        rm_indices = np.argsort(np.sum(gt_overlap_mat, axis=1))[:num_rm]
                        ref_valid_gt_box = np.delete(ref_valid_gt_box, rm_indices, axis=0)
                        # update ref_score_list_per_class
                        ref_score_list_per_class = get_scores_per_class(ref_bbox_per_class, ref_valid_gt_box, ref_score[:, cls_idx:cls_idx+1])
                        assert ref_valid_gt_box.shape == valid_gt_box.shape, "failed remove ref, {} -> {}".format(ref_valid_gt_box.shape[0], valid_gt_box.shape[0])
                        print "success remove ref"
                    else:
                        num_rm = num_valid_gt - ref_num_valid_gt
                        num_valid_gt = ref_num_valid_gt
                        gt_overlap_mat = bbox_overlaps(valid_gt_box.astype(np.float), 
                            ref_valid_gt_box.astype(np.float))
                        rm_indices = np.argsort(np.sum(gt_overlap_mat, axis=1))[:num_rm]
                        valid_gt_box = np.delete(valid_gt_box, rm_indices, axis=0)
                        # update score_list_per_class
                        score_list_per_class = get_scores_per_class(bbox_per_class, valid_gt_box, score[:, cls_idx:cls_idx+1])
                        assert ref_valid_gt_box.shape == valid_gt_box.shape, "failed remove, {} -> {}".format(ref_valid_gt_box.shape[0], valid_gt_box.shape[0])
                        print "success remove"

                assert num_valid_gt == ref_num_valid_gt, "gt num are not the same"


                if len(score_list_per_class) == 0 or len(ref_score_list_per_class) == 0:
                    output_list.append(get_max_socre_bboxes(score_list_per_class, num_boxes))
                    ref_output_list.append(get_max_socre_bboxes(ref_score_list_per_class, ref_num_boxes))

                else:
                    output_list_per_class = []
                    ref_output_list_per_class = []

                    for i in range(len(self._target_thresh)):
                        overlap_score = score_list_per_class[i]
                        ref_overlap_score = ref_score_list_per_class[i]
                        output = np.zeros((overlap_score.shape[0],))
                        ref_output = np.zeros((ref_overlap_score.shape[0],))
                        if np.count_nonzero(overlap_score) == 0 or np.count_nonzero(ref_overlap_score) == 0:
                            output_list_per_class.append(output)
                            ref_output_list_per_class.append(ref_output)
                            continue
                        for x in range(num_valid_gt):
                            overlap_score_per_gt = overlap_score[:, x]
                            ref_overlap_score_per_gt = ref_overlap_score[:, x]
                            valid_bbox_indices = np.where(overlap_score_per_gt)[0]
                            ref_valid_bbox_indices = np.where(ref_overlap_score_per_gt)[0]
                            target_gt_box = valid_gt_box[x:x+1, :-1]
                            ref_target_gt_box = ref_valid_gt_box[x:x+1, :-1]
                            if len(valid_bbox_indices) == 0 or len(ref_valid_bbox_indices) == 0:
                                continue
                            dist_mat = translation_dist(bbox_per_class[valid_bbox_indices], target_gt_box)[:, 0, :]
                            ref_dist_mat = translation_dist(ref_bbox_per_class[ref_valid_bbox_indices], ref_target_gt_box)[:, 0, :]
                            dist_mat_shape = (bbox_per_class[valid_bbox_indices].shape[0], 
                                ref_bbox_per_class[ref_valid_bbox_indices].shape[0], 4)
                            # print((np.tile(np.expand_dims(dist_mat, 1), (1, dist_mat_shape[1], 1)) - 
                                # np.tile(np.expand_dims(ref_dist_mat, 0), (dist_mat_shape[0], 1, 1)))**2)
                            bbox_dist_mat = np.sum((np.tile(np.expand_dims(dist_mat, 1), (1, dist_mat_shape[1], 1)) - 
                                np.tile(np.expand_dims(ref_dist_mat, 0), (dist_mat_shape[0], 1, 1)))**2, axis=2)
                            assert bbox_dist_mat.shape == (len(bbox_per_class[valid_bbox_indices]), len(ref_bbox_per_class[ref_valid_bbox_indices]))
                            # top_k = 10
                            # translation_thresh = 1.1*np.min(bbox_dist_mat)
                            # top_k = np.sum(bbox_dist_mat < translation_thresh)
                            top_k = int(0.1 * len(bbox_dist_mat.flatten()) + 0.5)
                            top_k = max(1, top_k)
                            top_k = min(top_k, len(bbox_dist_mat.flatten()))
                            top_k = 1
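                            # note: the assignment above pins top_k to 1, overriding the 10% heuristic computed just before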
                            if DEBUG:
                                print("{} of out {} stable pair".format(top_k, len(bbox_dist_mat.flatten())))
                            ind_list, ref_ind_list = np.unravel_index(np.argsort(bbox_dist_mat, axis=None)[:top_k], bbox_dist_mat.shape)
                            score_sum_list = []
                            rank_sum_list = []
                            for ind, ref_ind in zip(ind_list, ref_ind_list):
                                score_sum = overlap_score_per_gt[valid_bbox_indices[ind]] + ref_overlap_score_per_gt[ref_valid_bbox_indices[ref_ind]]
                                rank_sum = valid_bbox_indices[ind] + ref_valid_bbox_indices[ref_ind]
                                score_sum_list.append(score_sum)
                                rank_sum_list.append(rank_sum)
                            score_max_idx = np.argmax(np.array(score_sum_list))
                            rank_max_idx = np.argmin(np.array(rank_sum_list))
                            if DEBUG:
                                if score_max_idx == rank_max_idx:
                                    score_rank_max[0] += 1
                                score_rank_max[1] += 1
                            # max_idx = rank_max_idx
                            max_idx = score_max_idx
                            ind = ind_list[max_idx]
                            ref_ind = ref_ind_list[max_idx]
                            if DEBUG:
                                if ind == np.argmax(overlap_score_per_gt[valid_bbox_indices]):
                                    num_of_is_full_max[0] += 1
                                    print('cur takes the max')
                                if ref_ind == np.argmax(ref_overlap_score_per_gt[ref_valid_bbox_indices]):
                                    num_of_is_full_max[0] += 1
                                    print('ref takes the max')

                            output[valid_bbox_indices[ind]] = 1
                            ref_output[ref_valid_bbox_indices[ref_ind]] = 1
                        output_list_per_class.append(output)
                        ref_output_list_per_class.append(ref_output)
                    output_per_class = np.stack(output_list_per_class, axis=-1)
                    ref_output_per_class = np.stack(ref_output_list_per_class, axis=-1)
                    output_list.append(output_per_class)
                    ref_output_list.append(ref_output_per_class)
            # [num_boxes, num_fg_classes, num_thresh]
            blob = np.stack(output_list, axis=1).astype(np.float32, copy=False)
            ref_blob = np.stack(ref_output_list, axis=1).astype(np.float32, copy=False)
            return blob, ref_blob
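The pair-selection step above uses a general idiom worth isolating (illustrative sketch only): argsort over the flattened distance matrix plus unravel_index gives the (row, column) pairs of the k smallest entries.

import numpy as np

dist = np.array([[4.0, 0.5, 3.0],
                 [2.0, 9.0, 0.1]])
k = 2
rows, cols = np.unravel_index(np.argsort(dist, axis=None)[:k], dist.shape)
# rows -> [1 0], cols -> [2 1]: the two smallest distances are dist[1, 2] and dist[0, 1]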
Пример #48
0
def cross(function=lambda x: x,
          domain=None,
          tensors=None,
          function_arg='vectors',
          ranks_tt=None,
          kickrank=3,
          rmax=100,
          eps=1e-6,
          max_iter=25,
          val_size=1000,
          verbose=True,
          return_info=False,
          record_samples=False,
          _minimize=False,
          device=None,
          batch=False,
          suppress_warnings=False,
          detach_evaluations=False):
    """
    Cross-approximation routine that samples a black-box function and returns an N-dimensional tensor train approximating it. It accepts either:

    - A domain (tensor product of :math:`N` given arrays) and a function :math:`\\mathbb{R}^N \\to \\mathbb{R}`
    - A list of :math:`K` tensors of dimension :math:`N` and equal shape and a function :math:`\\mathbb{R}^K \\to \\mathbb{R}`

    :Examples:

    >>> tn.cross(function=lambda x: x**2, tensors=[t])  # Compute the element-wise square of `t` using 5 TT-ranks

    >>> domain = [torch.linspace(-1, 1, 32)]*5
    >>> tn.cross(function=lambda x, y, z, t, w: x**2 + y*z + torch.cos(t + w), domain=domain)  # Approximate a function over the rectangle :math:`[-1, 1]^5`

    >>> tn.cross(function=lambda x: torch.sum(x**2, dim=1), domain=domain, function_arg='matrix')  # An example where the function accepts a matrix

    References:

    - I. Oseledets, E. Tyrtyshnikov: `"TT-cross Approximation for Multidimensional Arrays" (2009) <http://www.mat.uniroma2.it/~tvmsscho/papers/Tyrtyshnikov5.pdf>`_
    - D. Savostyanov, I. Oseledets: `"Fast Adaptive Interpolation of Multi-dimensional Arrays in Tensor Train Format" (2011) <https://ieeexplore.ieee.org/document/6076873>`_
    - S. Dolgov, R. Scheichl: `"A Hybrid Alternating Least Squares - TT Cross Algorithm for Parametric PDEs" (2018) <https://arxiv.org/pdf/1707.04562.pdf>`_
    - A. Mikhalev's `maxvolpy package <https://bitbucket.org/muxas/maxvolpy>`_
    - I. Oseledets (and others)'s `ttpy package <https://github.com/oseledets/ttpy>`_

    :param function: should produce a vector of :math:`P` elements. Accepts either :math:`N` comma-separated vectors, or a matrix (see `function_arg`)
    :param domain: a list of :math:`N` vectors (incompatible with `tensors`)
    :param tensors: a :class:`Tensor` or list thereof (incompatible with `domain`)
    :param function_arg: if 'vectors', `function` accepts :math:`N` vectors of length :math:`P` each. If 'matrix', a matrix of shape :math:`P \\times N`.
    :param ranks_tt: int or list of :math:`N-1` ints. If None, will be determined adaptively
    :param kickrank: when adaptively found, ranks will be increased by this amount after every iteration (full sweep left-to-right and right-to-left)
    :param rmax: this rank will not be surpassed
    :param eps: the procedure will stop after this validation error is met (as measured after each iteration)
    :param max_iter: int
    :param val_size: size of the validation set
    :param verbose: default is True
    :param return_info: if True, will also return a dictionary with informative metrics about the algorithm's outcome
    :param device: PyTorch device
    :param batch: Boolean
    :param suppress_warnings: Boolean, if True, will hide the message about insufficient accuracy
    :param detach_evaluations: Boolean, if True, will remove gradient buffers for the function

    :return: an N-dimensional TT :class:`Tensor` (if `return_info`=True, also a dictionary)
    """
    if device is None and tensors is not None:
        if type(tensors) == list:
            device = tensors[0].cores[0].device
        else:
            device = tensors.cores[0].device

    if verbose:
        print('cross device is', device)

    try:
        import maxvolpy.maxvol
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "Functions that require cross-approximation require the optional maxvolpy package, which can be installed by 'pip install maxvolpy'. More info is available at https://bitbucket.org/muxas/maxvolpy"
        )

    assert domain is not None or tensors is not None
    assert function_arg in ('vectors', 'matrix')
    if function_arg == 'matrix':

        def f(*args):
            return function(torch.cat([arg[:, None] for arg in args], dim=1))
    else:
        f = function

    if detach_evaluations:

        def build_function_wrapper(func):
            def g(*args):
                res = func(*args)
                if hasattr(res,
                           '__len__') and not isinstance(res, torch.Tensor):
                    for i in range(len(res)):
                        if isinstance(res[i], torch.Tensor):
                            res[i] = res[i].detach()
                else:
                    if isinstance(res, torch.Tensor):
                        res = res.detach()
                return res

            return g

        f = build_function_wrapper(f)

    if tensors is None:
        tensors = tn.meshgrid(domain, batch=batch)

    if not hasattr(tensors, '__len__'):
        tensors = [tensors]
    tensors = [t.decompress_tucker_factors(_clone=False) for t in tensors]
    Is = list(tensors[0].shape)
    N = len(Is)

    # Process ranks and cap them, if needed
    if ranks_tt is None:
        ranks_tt = 1
    else:
        kickrank = None
    if not hasattr(ranks_tt, '__len__'):
        ranks_tt = [ranks_tt] * (N - 1)
    ranks_tt = [1] + list(ranks_tt) + [1]
    Rs = np.array(ranks_tt)
    for n in list(range(1, N)) + list(range(N - 1, -1, -1)):
        Rs[n] = min(Rs[n - 1] * Is[n - 1], Rs[n], Is[n] * Rs[n + 1])

    # Initialize cores at random
    cores = [torch.randn(Rs[n], Is[n], Rs[n + 1]).to(device) for n in range(N)]

    # Prepare left and right sets
    lsets = [np.array([[0]])] + [None] * (N - 1)
    randint = np.hstack(
        [np.random.randint(0, Is[n + 1], [max(Rs), 1])
         for n in range(N - 1)] + [np.zeros([max(Rs), 1], dtype=np.int)])
    rsets = [randint[:Rs[n + 1], n:] for n in range(N - 1)] + [np.array([[0]])]

    # Initialize left and right interfaces for `tensors`
    def init_interfaces():
        t_linterfaces = []
        t_rinterfaces = []
        for t in tensors:
            linterfaces = [torch.ones(1, t.ranks_tt[0]).to(device)
                           ] + [None] * (N - 1)
            rinterfaces = [None] * (N - 1) + [
                torch.ones(t.ranks_tt[t.dim()], 1).to(device)
            ]
            for j in range(N - 1):
                M = torch.ones(t.cores[-1].shape[-1], len(rsets[j])).to(device)
                for n in range(N - 1, j, -1):
                    if t.cores[n].dim() == 3:  # TT core
                        M = torch.einsum('iaj,ja->ia', [
                            t.cores[n][:, rsets[j][:,
                                                   n - 1 - j], :].to(device), M
                        ])
                    else:  # CP factor
                        M = torch.einsum('ai,ia->ia', [
                            t.cores[n][rsets[j][:, n - 1 - j], :].to(device), M
                        ])
                rinterfaces[j] = M
            t_linterfaces.append(linterfaces)
            t_rinterfaces.append(rinterfaces)
        return t_linterfaces, t_rinterfaces

    t_linterfaces, t_rinterfaces = init_interfaces()

    # Create a validation set
    Xs_val = [
        torch.as_tensor(np.random.choice(I, int(val_size))).to(device)
        for I in Is
    ]
    ys_val = f(*[t[Xs_val].torch() for t in tensors])
    if ys_val.dim() > 1:
        assert ys_val.dim() == 2
        assert ys_val.shape[1] == 1
        ys_val = ys_val[:, 0]
    assert len(ys_val) == val_size
    norm_ys_val = torch.norm(ys_val)

    if verbose:
        print(
            'Cross-approximation over a {}D domain containing {:g} grid points:'
            .format(N, tensors[0].numel()))
    start = time.time()
    converged = False

    info = {
        'nsamples': 0,
        'eval_time': 0,
        'val_epss': [],
        'min': 0,
        'argmin': None
    }
    if record_samples:
        info['sample_positions'] = torch.zeros(0, N).to(device)
        info['sample_values'] = torch.zeros(0).to(device)

    def evaluate_function(
            j
    ):  # Evaluate function over Rs[j] x Rs[j+1] fibers, each of size I[j]
        Xs = []
        for k, t in enumerate(tensors):
            if tensors[k].cores[j].dim() == 3:  # TT core
                V = torch.einsum('ai,ibj,jc->abc', [
                    t_linterfaces[k][j], tensors[k].cores[j],
                    t_rinterfaces[k][j]
                ])
            else:  # CP factor
                V = torch.einsum('ai,bi,ic->abc', [
                    t_linterfaces[k][j], tensors[k].cores[j],
                    t_rinterfaces[k][j]
                ])
            Xs.append(V.flatten())

        eval_start = time.time()
        evaluation = f(*Xs)
        if record_samples:
            info['sample_positions'] = torch.cat(
                (info['sample_positions'],
                 torch.cat([x[:, None] for x in Xs], dim=1)),
                dim=0)
            info['sample_values'] = torch.cat(
                (info['sample_values'], evaluation))
        info['eval_time'] += time.time() - eval_start
        if _minimize:
            evaluation = np.pi / 2 - torch.atan(
                (evaluation - info['min'])
            )  # Function used by I. Oseledets for TT minimization in ttpy
            evaluation_argmax = torch.argmax(evaluation)
            eval_min = torch.tan(np.pi / 2 -
                                 evaluation[evaluation_argmax]) + info['min']
            if info['min'] == 0 or eval_min < info['min']:
                coords = np.unravel_index(evaluation_argmax,
                                          [Rs[j], Is[j], Rs[j + 1]])
                info['min'] = eval_min
                info['argmin'] = tuple(lsets[j][coords[0]][1:]) + tuple(
                    [coords[1]]) + tuple(rsets[j][coords[2]][:-1])

        # Check for nan/inf values
        if evaluation.dim() == 2:
            evaluation = evaluation[:, 0]
        invalid = (torch.isnan(evaluation) | torch.isinf(evaluation)).nonzero()
        if len(invalid) > 0:
            invalid = invalid[0].item()
            raise ValueError(
                'Invalid return value for function {}: f({}) = {}'.format(
                    function,
                    ', '.join('{:g}'.format(x[invalid].detach().cpu().numpy())
                              for x in Xs),
                    f(*[x[invalid:invalid + 1][:, None] for x in Xs]).item()))

        V = torch.reshape(evaluation, [Rs[j], Is[j], Rs[j + 1]])
        info['nsamples'] += V.numel()
        return V

    # Sweeps
    for i in range(max_iter):

        if verbose:
            print('iter: {: <{}}'.format(i,
                                         len('{}'.format(max_iter)) + 1),
                  end='')
            sys.stdout.flush()

        left_locals = []

        # Left-to-right
        for j in range(N - 1):

            # Update tensors for current indices
            V = evaluate_function(j)

            # QR + maxvol towards the right
            V = torch.reshape(V, [-1, V.shape[2]])  # Left unfolding
            Q, R = torch.qr(V)
            if _minimize:
                local, _ = maxvolpy.maxvol.rect_maxvol(
                    Q.detach().cpu().numpy(), maxK=Q.shape[1])
            else:
                local, _ = maxvolpy.maxvol.maxvol(Q.detach().cpu().numpy())
            V = torch.lstsq(Q.t(), Q[local, :].t())[0].t()
            cores[j] = torch.reshape(V, [Rs[j], Is[j], Rs[j + 1]])
            left_locals.append(local)

            # Map local indices to global ones
            local_r, local_i = np.unravel_index(local, [Rs[j], Is[j]])
            lsets[j + 1] = np.c_[lsets[j][local_r, :], local_i]
            for k, t in enumerate(tensors):
                if t.cores[j].dim() == 3:  # TT core
                    t_linterfaces[k][j + 1] = torch.einsum(
                        'ai,iaj->aj', [
                            t_linterfaces[k][j][local_r, :],
                            t.cores[j][:, local_i, :]
                        ])
                else:  # CP factor
                    t_linterfaces[k][j + 1] = torch.einsum(
                        'ai,ai->ai', [
                            t_linterfaces[k][j][local_r, :],
                            t.cores[j][local_i, :]
                        ])

        # Right-to-left sweep
        for j in range(N - 1, 0, -1):

            # Update tensors for current indices
            V = evaluate_function(j)

            # QR + maxvol towards the left
            V = torch.reshape(V, [Rs[j], -1])  # Right unfolding
            Q, R = torch.qr(V.t())
            if _minimize:
                local, _ = maxvolpy.maxvol.rect_maxvol(
                    Q.detach().cpu().numpy(), maxK=Q.shape[1])
            else:
                local, _ = maxvolpy.maxvol.maxvol(Q.detach().cpu().numpy())
            V = torch.lstsq(Q.t(), Q[local, :].t())[0]
            cores[j] = torch.reshape(torch.as_tensor(V),
                                     [Rs[j], Is[j], Rs[j + 1]])

            # Map local indices to global ones
            local_i, local_r = np.unravel_index(local, [Is[j], Rs[j + 1]])
            rsets[j - 1] = np.c_[local_i, rsets[j][local_r, :]]
            for k, t in enumerate(tensors):
                if t.cores[j].dim() == 3:  # TT core
                    t_rinterfaces[k][j - 1] = torch.einsum(
                        'iaj,ja->ia', [
                            t.cores[j][:, local_i, :],
                            t_rinterfaces[k][j][:, local_r]
                        ])
                else:  # CP factor
                    t_rinterfaces[k][j - 1] = torch.einsum(
                        'ai,ia->ia', [
                            t.cores[j][local_i, :],
                            t_rinterfaces[k][j][:, local_r]
                        ])

        # Leave the first core ready
        V = evaluate_function(0)
        cores[0] = V

        # Evaluate validation error
        val_eps = torch.norm(ys_val -
                             tn.Tensor(cores)[Xs_val].torch()) / norm_ys_val
        info['val_epss'].append(val_eps)
        if val_eps < eps:
            converged = True

        if verbose:  # Print status
            if _minimize:
                print('| best: {:.8g}'.format(info['min']), end='')
            else:
                print('| eps: {:.3e}'.format(val_eps), end='')
            print(' | total time: {:8.4f} | largest rank: {:3d}'.format(
                time.time() - start, max(Rs)),
                  end='')
            if converged:
                print(' <- converged: eps < {}'.format(eps))
            elif i == max_iter - 1:
                print(' <- max_iter was reached: {}'.format(max_iter))
            else:
                print()
        if converged:
            break
        elif i < max_iter - 1 and kickrank is not None:  # Augment ranks
            newRs = Rs.copy()
            newRs[1:-1] = np.minimum(rmax, newRs[1:-1] + kickrank)
            for n in list(range(1, N)) + list(range(N - 1, 0, -1)):
                newRs[n] = min(newRs[n - 1] * Is[n - 1], newRs[n],
                               Is[n] * newRs[n + 1])
            extra = np.hstack([
                np.random.randint(0, Is[n + 1], [max(newRs), 1])
                for n in range(N - 1)
            ] + [np.zeros([max(newRs), 1], dtype=np.int)])
            for n in range(N - 1):
                if newRs[n + 1] > Rs[n + 1]:
                    rsets[n] = np.vstack(
                        [rsets[n], extra[:newRs[n + 1] - Rs[n + 1], n:]])
            Rs = newRs
            t_linterfaces, t_rinterfaces = init_interfaces(
            )  # Recompute interfaces

    if val_eps > eps and not _minimize and not suppress_warnings:
        logging.warning(
            'eps={:g} (larger than {}) when cross-approximating {}'.format(
                val_eps, eps, function))

    if verbose:
        print(
            'Did {} function evaluations, which took {:.4g}s ({:.4g} evals/s)'.
            format(info['nsamples'], info['eval_time'],
                   info['nsamples'] / info['eval_time']))
        print()

    if return_info:
        info['lsets'] = lsets
        info['rsets'] = rsets
        info['Rs'] = Rs
        info['left_locals'] = left_locals
        info['total_time'] = time.time() - start
        info['val_eps'] = val_eps
        return tn.Tensor([
            c if isinstance(c, torch.Tensor) else torch.tensor(c)
            for c in cores
        ],
                         batch=batch), info
    else:
        return tn.Tensor([
            c if isinstance(c, torch.Tensor) else torch.tensor(c)
            for c in cores
        ],
                         batch=batch)
Пример #49
0
def image_correlate(image,
                    reference,
                    real_filter=1,
                    k_filter=1,
                    shift_func='shift',
                    verbose=False):
    """ Align image to reference by cross-correlation. Outputs shifts and shifted images.
    Uses the real FFT for ~2x speed improvement. The k_filter must have
    a shape that matches the np.fft.rfft2() of image and reference.
    Uses scipy.ndimage.shift() or np.roll to move the image. Use 'roll' to avoid losing
    data off the edge when multiple shifting operations are applied, and 'shift' to avoid
    wrap-around problems when there is only one shifting operation.

    Note
    ----
        image, reference and real_filter must all have the same shape (N, M).
        k_filter must have a shape that matches the np.fft.rfft2() of
        the other inputs: (N, M/2+1)

    Parameters
    ----------
        image : ndarray
            An image as a 2D ndarray.

        reference : ndarray
            The reference image to align to.

        real_filter : ndarray, optional, default = 1
            A real space filter applied to image and reference before
            calculating the shift.

        k_filter : ndarray, optional, default = 1
            A Fourier space filter applied to the fourier transform of
            image and reference before calculating the cross-correlation.

        shift_func : str, default is 'shift'
            The function to use to shift the images. 'roll' uses np.roll and 'shift' uses ndimage.shift.

        verbose : bool
            Plots the cross-correlation using matplotlib for debugging purposes.

    Returns
    -------
        : tuple, (ndarray, tuple)
            A tuple containing the shifted image and the shifts applied.
    """
    output = None

    if shift_func != 'shift' and shift_func != 'roll':
        raise KeyError('Shift function has to be either shift or roll')

    image_f = np.fft.rfft2((image - np.mean(image)) * real_filter)
    reference_f = np.fft.rfft2((reference - np.mean(reference)) * real_filter)

    xcor = abs(np.fft.irfft2(np.conj(image_f) * reference_f * k_filter))
    if verbose:
        import matplotlib.pyplot as plt
        plt.imshow(np.fft.fftshift(xcor))
        plt.title('imageCrossCorRealShift xcor')
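    # the peak of the fft-shifted cross-correlation gives the offset; subtracting half the shape below re-centres it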
    shifts = np.unravel_index(np.fft.fftshift(xcor).argmax(), xcor.shape)
    shifts = (shifts[0] - xcor.shape[0] / 2, shifts[1] - xcor.shape[1] / 2)
    shifts = [int(i) for i in shifts]  # convert to integers

    if shift_func == 'shift':
        # shift image using ndimage.shift
        output = ndimage.interpolation.shift(image, shifts, order=0)
    elif shift_func == 'roll':
        # shift image using roll to be reversible
        output = np.roll(image, shifts[0], axis=0)
        output = np.roll(output, shifts[1], axis=1)

    return output, shifts
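A usage sketch (illustrative only, assuming `image_correlate` and numpy are imported as above): aligning an image against a rolled copy of itself should recover the applied offset.

import numpy as np

rng = np.random.default_rng(0)
reference = rng.random((64, 64))
moved = np.roll(np.roll(reference, 5, axis=0), -3, axis=1)   # known offset
aligned, shifts = image_correlate(moved, reference, shift_func='roll')
print(shifts)   # should be [-5, 3]: applying these shifts to `moved` realigns it with `reference`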
Пример #50
0
    def _simulate_frame_range(id, n_processes, orbit_xyz, frame_range, sim_n_samples, local_direction_g, UTMNorthing_g, UTMEasting_g, UTMZone_g, rxwin_ref, input_scr_data_shifts_alignment_t, dt, dem_obj, geom_obj, alt_sim_matrix, act_sim_matrix, gaussian_filter_sigma, further_pars=[]):
        _C = 299792458.

        n_frames_to_be_simulated = len(frame_range)
        sim_image = np.zeros([sim_n_samples, n_frames_to_be_simulated])         #, dtype="int16")   # int16 and uint8 saves memory but considerably slows down the processing as numpy cannot handle ints in a fast way
        uncert_image = np.zeros(sim_image.shape, dtype="uint8")
        ground_distance_min_image = np.nan * np.ones(sim_image.shape)
        ground_distance_max_image = np.nan * np.ones(sim_image.shape)
        first_return_lats = np.zeros(n_frames_to_be_simulated)
        first_return_lons = np.zeros(n_frames_to_be_simulated)

        status_str_pre = '\t'+"|\t\t"*id
        status_str_post = "\t\t|"*(n_processes-id-1)
        last_status = -1
        for count, i_frame_tbs in enumerate(frame_range):
            if count == n_frames_to_be_simulated-1:
                new_status = 100
            else:
                new_status = int(count / n_frames_to_be_simulated * 10) * 10
            if new_status != last_status:
                print(status_str_pre + '{:>2}'.format(new_status) + status_str_post)
                last_status = new_status

            local_direction = local_direction_g[i_frame_tbs]
            UTMNorthing = UTMNorthing_g[i_frame_tbs]
            UTMEasting = UTMEasting_g[i_frame_tbs]
            UTMZone = UTMZone_g[i_frame_tbs]
            sc_x_cart_local = orbit_xyz[0][i_frame_tbs]
            sc_y_cart_local = orbit_xyz[1][i_frame_tbs]
            sc_z_cart_local = orbit_xyz[2][i_frame_tbs]

            UTMNorthing_surface_matrix = UTMNorthing + (alt_sim_matrix * np.sin(local_direction) - act_sim_matrix * np.cos(local_direction))
            UTMEasting_surface_matrix = UTMEasting + (alt_sim_matrix * np.cos(local_direction) + act_sim_matrix * np.sin(local_direction))
            Lat_surface_matrix, Long_surface_matrix = geom_obj.UTM_to_LL(UTMNorthing_surface_matrix, UTMEasting_surface_matrix, UTMZone)
            dem_surface_radius_matrix = dem_obj.get_dem_radius_from_lat_lon(Lat_surface_matrix, Long_surface_matrix)

            ground_distance_from_nadir = np.sqrt((UTMNorthing_surface_matrix-UTMNorthing)**2 + (UTMEasting_surface_matrix-UTMEasting)**2)

            dummy_check_matrix = (dem_surface_radius_matrix == dem_obj.dummy_value)

            # smooth the DEM to avoid "stripes" in the simulation
            dem_surface_radius_matrix = gaussian_filter(dem_surface_radius_matrix, gaussian_filter_sigma)

            if dummy_check_matrix.any():
                uncert_image[:, count] = 1
                dem_surface_radius_matrix[dummy_check_matrix] = 0             # radius too far for being recorded in the simulation

            x_cart_surface_matrix, y_cart_surface_matrix, z_cart_surface_matrix = geom_obj.to_xyz_from_latlon_and_radius(Lat_surface_matrix, Long_surface_matrix, dem_surface_radius_matrix)

            d_surface = geom_obj.euclidean_distance_cart(sc_x_cart_local, sc_y_cart_local, sc_z_cart_local, x_cart_surface_matrix, y_cart_surface_matrix, z_cart_surface_matrix)

            t_surface = 2. * d_surface / _C
            sample_pos_surface = np.round(((t_surface - rxwin_ref + input_scr_data_shifts_alignment_t[i_frame_tbs]) / dt)).astype("int")        # use rxwin_ref instead of input_rxwin_data_t_local

            # set out-of-range sample positions to the farthest position (useful later for detecting the first-return position)
            sample_pos_surface[(sample_pos_surface < 0) | (sample_pos_surface >= sim_n_samples)] = sim_n_samples

            # find first return coordinates
            first_return_ids = np.unravel_index(np.argmin(sample_pos_surface), sample_pos_surface.shape)
            first_return_lats[count] = Lat_surface_matrix[first_return_ids]
            first_return_lons[count] = Long_surface_matrix[first_return_ids]

            ground_distance_from_nadir = ground_distance_from_nadir.flatten()
            sample_pos_surface = sample_pos_surface.flatten()

            # find and keep only the sample_pos_surface items within the simulation range
            sample_pos_surface_ok_ids = np.where(sample_pos_surface < sim_n_samples)[0]
            sample_pos_surface = sample_pos_surface[sample_pos_surface_ok_ids]

            # increment sim_image values by 1 each time they correspond to a valid sample_pos_surface
            np.add.at(sim_image[:, count], sample_pos_surface, 1)

            # keep only ground_distance_from_nadir items corresponding to valid sample_pos_surface items (selected before)
            ground_distance_from_nadir = ground_distance_from_nadir[sample_pos_surface_ok_ids]

            # sort distances and sample positions both by increasing distance
            sorting_indexes = np.argsort(ground_distance_from_nadir)
            ground_distance_from_nadir = ground_distance_from_nadir[sorting_indexes]
            sample_pos_surface = sample_pos_surface[sorting_indexes]

            # for each simulation sample detect the corresponding ground distance
            # note: as the arrays were first ordered by ground distance, and considering that
            # "unique" selects the first instance of the array items, the "shortest" distance
            # associated to a certain sample is kept
            unique_sample_pos_surface, unique_sample_pos_surface_ids = np.unique(sample_pos_surface, return_index=True)
            unique_ground_distance_from_nadir = ground_distance_from_nadir[unique_sample_pos_surface_ids]
            ground_distance_min_image[unique_sample_pos_surface, count] = unique_ground_distance_from_nadir

            # reverse the ordered arrays and use the same principle to save the "longest" distances for every sample
            sample_pos_surface = np.flip(sample_pos_surface)
            ground_distance_from_nadir = np.flip(ground_distance_from_nadir)
            unique_sample_pos_surface, unique_sample_pos_surface_ids = np.unique(sample_pos_surface, return_index=True)
            unique_ground_distance_from_nadir = ground_distance_from_nadir[unique_sample_pos_surface_ids]
            ground_distance_max_image[unique_sample_pos_surface, count] = unique_ground_distance_from_nadir

        return (sim_image, uncert_image, first_return_lats, first_return_lons, ground_distance_min_image, ground_distance_max_image)
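The sort-then-unique trick used above for the per-sample minimum/maximum ground distance can be shown on its own (illustrative values):

import numpy as np

samples = np.array([3, 1, 3, 1, 2])        # sample positions (keys)
dists = np.array([7., 2., 4., 9., 5.])     # ground distances (values)
order = np.argsort(dists)                  # order both arrays by increasing distance
uniq, first = np.unique(samples[order], return_index=True)
# uniq -> [1 2 3]; dists[order][first] -> [2. 5. 4.], the minimum distance per sample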
Пример #51
0
# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
y_predict = model.predict(x_test, verbose=0)
print("PREDICT:", y_predict)

for i in range(y_predict.shape[0]):
    maximum = np.argmax(y_predict[i])
    prediction = np.unravel_index(maximum, y_predict[i].shape)
    maximum2 = np.argmax(y_test[i])
    real = np.unravel_index(maximum2, y_test[i].shape)
    if prediction[0] == 0:
        if real[0] == 0:
            print("REAL ANIMAL: CAT   PREDICTED ANIMAL: CAT")
        else:
            print("REAL ANIMAL: DOG   PREDICTED ANIMAL: CAT")
    else:
        if real[0] == 0:
            print("REAL ANIMAL: CAT   PREDICTED ANIMAL: DOG")
        else:
            print("REAL ANIMAL: DOG   PREDICTED ANIMAL: DOG")
Пример #52
0
    def track(self, img):
        """
        args:
            img(np.ndarray): BGR image
        return:
            bbox(list):[x, y, width, height]
        """
        w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        s_z = np.sqrt(w_z * h_z)
        scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
        s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
        s_x = round(s_x)
        
        x_crop = self.get_subwindow(img, self.center_pos, cfg.TRACK.INSTANCE_SIZE,
                s_x, self.channel_average)
        crop_box = [self.center_pos[0] - s_x / 2, 
                    self.center_pos[1] - s_x / 2,
                    s_x,
                    s_x]
       
        outputs = self.model.track(x_crop)
        score = self._convert_score(outputs['cls'])
        pred_bbox = self._convert_bbox(outputs['loc'], self.anchors)
        
        def change(r):
            return np.maximum(r, 1. / r)

        def sz(w, h):
            pad = (w + h) * 0.5
            return np.sqrt((w + pad) * (h + pad))
            
        # scale penalty
        s_c = change(sz(pred_bbox[2,:], pred_bbox[3,:]) / 
                (sz(self.size[0]*scale_z, self.size[1]*scale_z))) 
        # aspect ratio penalty
        r_c = change((self.size[0]/self.size[1]) /
                     (pred_bbox[2,:]/pred_bbox[3,:]))
        penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)
        pscore = penalty * score

        # window penalty 
        pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
                self.window * cfg.TRACK.WINDOW_INFLUENCE
        best_idx = np.argmax(pscore)

        bbox = pred_bbox[:, best_idx] / scale_z
        lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR
        
        cx = bbox[0] + self.center_pos[0]
        cy = bbox[1] + self.center_pos[1]

        # smooth bbox
        width = self.size[0] * (1 - lr) + bbox[2] * lr
        height = self.size[1] * (1 - lr) + bbox[3] * lr

        # clip boundary
        cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])

        # update state
        self.center_pos = np.array([cx, cy])
        self.size = np.array([width, height])
   
        bbox = [cx - width / 2,
                cy - height / 2,
                width,
                height]
        best_score = score[best_idx]

        # processing mask
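        # decode the flat best_idx back into (anchor, row, col) on the score map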
        pos = np.unravel_index(best_idx, (5, self.score_size, self.score_size))
        delta_x, delta_y = pos[2], pos[1]

        mask = self.model.mask_refine((delta_y, delta_x)).sigmoid().squeeze()
        out_size = cfg.TRACK.MASK_OUTPUT_SIZE
        mask = mask.view(out_size, out_size).cpu().data.numpy()
        
        s = crop_box[2] / cfg.TRACK.INSTANCE_SIZE
        base_size = cfg.TRACK.BASE_SIZE
        stride = cfg.ANCHOR.STRIDE 
        sub_box = [crop_box[0] + (delta_x - base_size/2) * stride * s,
                   crop_box[1] + (delta_y - base_size/2) * stride * s,
                   s * cfg.TRACK.EXEMPLAR_SIZE,
                   s * cfg.TRACK.EXEMPLAR_SIZE]
        s = out_size / sub_box[2]

        im_h, im_w = img.shape[:2]
        back_box = [-sub_box[0] * s, -sub_box[1] * s, im_w*s, im_h*s]
        mask_in_img = self._crop_back(mask, back_box, (im_w, im_h))
        
        polygon = self._mask_post_processing(mask_in_img)
        polygon = polygon.flatten().tolist()

        # calculate predict z
        pz_center_pos = np.array([bbox[0]+(bbox[2]-1)/2, bbox[1]+(bbox[3]-1)/2])
        pz_size = np.array([bbox[2], bbox[3]])
        w_z = pz_size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(pz_size)
        h_z = pz_size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(pz_size)
        s_z = round(np.sqrt(w_z * h_z))
        pz_channel_average = np.mean(img, axis=(0, 1))
        z_crop = self.get_subwindow(img, pz_center_pos, cfg.TRACK.EXEMPLAR_SIZE, s_z, pz_channel_average)
        zf_cur = self.model.backbone(z_crop)
        if cfg.MASK.MASK:
            zf_cur = zf_cur[-1]
        if cfg.ADJUST.ADJUST:
            zf_cur = self.model.neck(zf_cur)
        
        return {
                'bbox': bbox,
                'best_score': best_score,
                'mask': mask_in_img,
                'polygon': polygon,
                'zf_cur': zf_cur
               }
Пример #53
0
def main():
    parser = argparse.ArgumentParser(
        "Visualize a set of output files from grid search")
    parser.add_argument("merged_grid_search_outputs")
    parser.add_argument("--outfile")
    parser.add_argument("--resolution", type=int, default=7)
    parser.add_argument("--best-n", type=int, default=10)
    parser.add_argument("--ignore-known-controllers", action="store_true")
    parser.add_argument("--plot", action="store_true")
    parser.add_argument("--viz", action="store_true")
    parser.add_argument("--save", action="store_true")
    parser.add_argument("--exclude-one-class", action="store_true")

    args = parser.parse_args()

    if args.plot or args.viz:
        style_dir = os.path.dirname(os.path.realpath(__file__))
        style = os.path.join(style_dir, "mpl.style")
        import matplotlib.pyplot as plt
        plt.style.use(style)

    std_costs = np.zeros((args.resolution**6))
    f = np.genfromtxt(args.merged_grid_search_outputs, skip_header=True)

    env_start_idx = 7
    all_costs = f[:, env_start_idx:]
    if args.exclude_one_class:
        # skip the first 8 environments, they are for the 1-class scenarios
        all_costs = all_costs[:, 8:]
    mean_costs = np.mean(all_costs, axis=1)
    std_costs = np.std(all_costs, axis=1)
    params = f[:, 1:7]

    if args.outfile:
        writer = csv.writer(open(args.outfile, 'w'), delimiter=',')
        for i, (p, c, s) in enumerate(zip(params, mean_costs, std_costs)):
            writer.writerow([i, p, c, s])

    if args.viz or args.save:
        axes_titles = [
            r'$v_{l_0}$', r'$v_{r_0}$', r'$v_{l_1}$', r'$v_{r_1}$',
            r'$v_{l_2}$', r'$v_{r_2}$'
        ]
        for x_param in range(6):
            for y_param in range(x_param + 1, 6):

                plt.figure()
                plt.xlabel(axes_titles[x_param], fontsize=32)
                plt.ylabel(axes_titles[y_param], fontsize=32, rotation=0)
                labels = ['-1.0', '', '', '', '', '', '1.0']
                plt.xticks(np.arange(7), labels, fontsize=24)
                plt.yticks(np.arange(7), labels, fontsize=24)

                shape = tuple([args.resolution] * 6)
                cost_image = np.ones((args.resolution, args.resolution)) * 1e24
                for parameter_idx, cost in enumerate(mean_costs):
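                    # map the flat row index back to a coordinate in the 6-D parameter grid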
                    indeces = np.unravel_index(parameter_idx, shape)
                    row = indeces[x_param]
                    col = indeces[y_param]
                    if cost < cost_image[row, col]:
                        cost_image[row, col] = cost

                plt.imshow(cost_image, cmap='Reds')
                if args.save:
                    plt.savefig("{:d}_{:d}_grid_img.png".format(
                        x_param, y_param))

    # Sorting messes up plotting so we have to do this after
    sorted_cost_indeces = mean_costs.argsort(axis=0)
    mean_costs.sort()
    params = params[sorted_cost_indeces]
    std_costs = std_costs[sorted_cost_indeces]
    all_costs = all_costs[sorted_cost_indeces]
    print("Best Params, Index, Cost")
    print("{} {:0.0f} {:0.0f}".format(params[0], mean_costs[0], std_costs[0]))
    print("=" * 85)

    print("Good params")
    unknown_controllers = 0
    for i, p in enumerate(params[:args.best_n]):
        # check if this matches the "patterns" of known segregating controllers
        # this never prints anything because it turns out they all can be described this way
        if args.ignore_known_controllers:
            if p[0] > p[1]:
                if p[4] > p[5]:
                    if p[3] >= p[2]:
                        # left-hand circles segregating
                        continue
                    else:
                        # slow clustering segregation?
                        continue
            elif p[0] < p[1]:
                if p[5] > p[4]:
                    if p[2] >= p[3]:
                        # right-hand circles segregating
                        continue
                    else:
                        #slow clustering segregation?
                        continue
        unknown_controllers += 1
        print(p, "{:d}th {:0.0f} {:0.0f}".format(i, mean_costs[i],
                                                 std_costs[i]))

    best_costs = all_costs[0]
    inconclusive = True
    found_og_params = False
    for i, costs in enumerate(all_costs):
        t_value, p_value = stats.ttest_ind(best_costs, costs, equal_var=False)
        if np.allclose(params[i], [1, -2 / 3, 1 / 3, 1, 1, 0]):
            print("P value of old params: ", i, p_value, mean_costs[i],
                  std_costs[i], mean_costs[0], std_costs[0], params[i])
            found_og_params = True
            break
        if p_value < 0.05 and inconclusive:
            inconclusive = False
            print(
                "top {} params are not statistically significantly different".
                format(i))
        if found_og_params and not inconclusive:
            break
    if inconclusive:
        print(
            "cannot conclude that the worst params are not identical mean to the best params..."
        )

    if args.plot:
        plt.figure()
        c = mean_costs[::1000]
        N = len(c)
        plt.bar(np.arange(N), c, yerr=std_costs[::1000])
        plt.ylabel("cost")
        plt.title("every 1000th parameter set, sorted from best to worst")
        plt.gca().set_xticklabels([])

    if args.plot or args.viz:
        plt.show()
Пример #54
0
def image_phase_correlate(image,
                          reference,
                          real_filter=1,
                          k_filter=1,
                          shift_func='shift',
                          verbose=False):
    """ Align image to reference by phase-correlation. Outputs shifted images and shift.
    Uses np.fft.rfft2 for ~2x speed improvement.
    Uses scipy.ndimage.shift() to shift the image and remove border pixels.

    NOT WORKING OR TESTED YET

    Note
    ----
        image, reference and real_filter must all have the same shape (N, M).
        k_filter must have a shape that matches the np.fft.rfft2() of
        the other inputs: (N, M/2+1)

    Parameters
    ----------
        image : ndarray
            A image as a 2D ndarray.

        reference : ndarray
            The reference image to align to.

        real_filter : ndarray, optional, default = 1
            A real space filter applied to image and reference before
            calculating the shift.

        k_filter : ndarray, optional, default = 1
            A Fourier space filter applied to the fourier transform of
            image and reference before calculating the cross-correlation.

        shift_func : str, default is 'shift'
            The function to use to shift the images. 'roll' uses np.roll and 'shift' uses ndimage.shift.


        verbose : bool
            Plots the cross-correlation using matplotlib for debugging purposes.

    Returns
    -------
        : tuple, (ndarray, tuple)
            A tuple containing the shifted image and the shifts applied.

    """
    output = None

    image_f = np.fft.rfft2((image - np.mean(image)) * real_filter)
    reference_f = np.fft.rfft2((reference - np.mean(reference)) * real_filter)

    xcor = abs(np.fft.irfft2(np.conj(image_f) * reference_f * k_filter))
    pcor = xcor / (np.abs(xcor) + 0.001)
    if verbose:
        import matplotlib.pyplot as plt
        plt.imshow(np.fft.fftshift(pcor))
        plt.title('imageCrossPhaseRealShift pcor')
    shifts = np.unravel_index(np.fft.fftshift(pcor).argmax(), pcor.shape)
    shifts = (shifts[0] - pcor.shape[0] / 2, shifts[1] - pcor.shape[1] / 2)
    shifts = [int(i) for i in shifts]  # convert to integers

    if shift_func == 'shift':
        # shift image using ndimage.shift
        output = ndimage.shift(image, shifts, order=0)
    elif shift_func == 'roll':
        # shift image using roll to be reversible
        output = np.roll(image, shifts[0], axis=0)
        output = np.roll(output, shifts[1], axis=1)

    return output, shifts
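A hedged usage sketch (not part of the original example, and the docstring above flags the routine as untested): re-align a copy of a reference image that has been rolled by a known offset, using the default filters.

# Usage sketch only; assumes numpy as np and scipy.ndimage as ndimage are already in scope,
# as the function above requires. The test image and the (5, -3) offset are made up.
rng = np.random.default_rng(0)
reference = ndimage.gaussian_filter(rng.random((128, 128)), sigma=3)
image = np.roll(reference, (5, -3), axis=(0, 1))   # displace by a known offset
aligned, shifts = image_phase_correlate(image, reference, shift_func='roll')
print("shift applied to re-align:", shifts)        # near (-5, 3) if the routine behaves as intended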
Example #55
0
# divide by two because each edge is counted twice, i.e. entries i,j and j,i both count the
# same edge
start_weight = np.sum(network[network != 100000000]) / 2 
print("start total weight: ", start_weight)

ds = DisjointSet()
for i in range(0,40):
    ds.make_set(i)


new_weight = 0

while ds.get_parts() != 1:
    # find the minimum element in our network remaining.

    ind = np.unravel_index(np.argmin(network, axis=None), network.shape)
    
    # try to union the sets; if it fails, the two vertices are already connected via some
    # path, so there is no need to add this edge. If the union succeeds, add this edge and
    # count its weight. Either way, set the edge weight to 10000000 so it isn't chosen again.
    try:
        ds.union(ind[0], ind[1])
        new_weight += network[ind]
    except ValueError:
        pass  # already connected; skip this edge

    # mark both symmetric entries so this edge is not picked again
    network[ind[0], ind[1]] = 10000000
    network[ind[1], ind[0]] = 10000000

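The snippet relies on a DisjointSet class that is not shown; a minimal sketch of the interface it assumes (make_set registers an element, union raises ValueError when both elements already share a set, get_parts returns the number of remaining sets) could look like this:

class DisjointSet:
    """Minimal union-find sketch matching the calls above (an assumption, not the original class)."""

    def __init__(self):
        self.parent = {}

    def make_set(self, x):
        self.parent[x] = x

    def find(self, x):
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]  # path halving
            x = self.parent[x]
        return x

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            raise ValueError("both elements are already in the same set")
        self.parent[ra] = rb

    def get_parts(self):
        # number of disjoint sets still remaining
        return len({self.find(x) for x in self.parent})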
Example #56
0
def get_next_insert_idx(cur_idx, prev_move_order, total):
    step_idx = 0
    cumsum = np.cumsum(steps)
    cur_dir = next(prev_move_order)
    while True:
        #print "step idx {}".format(step_idx)
        #print "total: {}".format(total)
        if total < cumsum[step_idx]:
            index_offset = dir[cur_dir]
        # get next insert idxs
        else:
            cur_dir = next(prev_move_order)
            index_offset = dir[cur_dir]
            # print index_offset
            step_idx += 1
        total += 1
        cur_idx = (cur_idx[0] + index_offset[0], cur_idx[1] + index_offset[1])
        #print "new_index {}".format(cur_idx)
        yield cur_idx


idx_gen = get_next_insert_idx(mid_idx, cycle(move_order), 0)

for i in np.arange(SIDE_LEN):
    in_id = next(idx_gen)
    window = field[in_id[0] - 1:in_id[0] + 2, in_id[1] - 1:in_id[1] + 2]
    field[in_id] = np.sum(window)

print(field[np.unravel_index(np.argmin(np.abs(field - SEARCH_VAL)),
                             field.shape)])
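The spiral example depends on several globals that are never defined in the snippet (field, mid_idx, steps, dir, move_order, SIDE_LEN, SEARCH_VAL); a plausible set-up, given how the generator walks the grid, might be:

# Assumed set-up for the spiral walk above; these definitions are a guess at the missing
# globals, not the original code. The name `dir` shadows the built-in but matches the snippet.
from itertools import cycle
import numpy as np

SIDE_LEN = 11                                   # hypothetical odd grid size, so there is one centre cell
SEARCH_VAL = 747                                # hypothetical target value
field = np.zeros((SIDE_LEN, SIDE_LEN), dtype=np.int64)
mid_idx = (SIDE_LEN // 2, SIDE_LEN // 2)
field[mid_idx] = 1                              # the spiral starts with 1 at the centre

steps = [n for n in range(1, SIDE_LEN + 1) for _ in range(2)]   # run lengths 1, 1, 2, 2, 3, 3, ...
dir = {'right': (0, 1), 'up': (-1, 0), 'left': (0, -1), 'down': (1, 0)}
move_order = ['right', 'up', 'left', 'down']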
Example #57
0
def sourcery(ops):
    change_codes = True
    i0 = tic()
    U, sdmov, u = getSVDdata(ops)  # get SVD components
    S, StU, StS = getStU(ops, U)
    Lyc, Lxc, nsvd = U.shape
    ops['Lyc'] = Lyc
    ops['Lxc'] = Lxc
    d0 = ops['diameter']
    sig = np.ceil(d0 / 4)  # smoothing constant
    # make array of radii values of size (2*d0+1,2*d0+1)
    rs, dy, dx = circleMask(d0)
    nsvd = U.shape[-1]
    nbasis = S.shape[-1]
    codes = np.zeros((0, nsvd), np.float32)
    LtU = np.zeros((0, nsvd), np.float32)
    LtS = np.zeros((0, nbasis), np.float32)
    L = np.zeros((Lyc, Lxc, 0), np.float32)
    # regress maps onto basis functions and subtract neuropil contribution
    neu = np.linalg.solve(StS, StU).astype('float32')
    Ucell = U - (S.reshape((-1, nbasis)) @ neu).reshape(U.shape)

    it = 0
    ncells = 0
    refine = -1

    ypix, xpix, lam = [], [], []

    while 1:
        if refine < 0:
            V, us = getVmap(Ucell, sig)
            if it == 0:
                vrem = morphOpen(V, rs <= 1.)
            V = V - vrem  # make V more uniform
            if it == 0:
                V = V.astype('float64')
                # find indices of all maxima in +/- 1 range
                maxV = filters.maximum_filter(V,
                                              footprint=np.ones((3, 3)),
                                              mode='reflect')
                imax = V > (maxV - 1e-10)
                peaks = V[imax]
                # use the median of these peaks to decide if ROI is accepted
                thres = ops['threshold_scaling'] * np.median(
                    peaks[peaks > 1e-4])
                ops['Vcorr'] = V
            V = np.minimum(V, ops['Vcorr'])

            # add extra ROIs here
            n = ncells
            while n < ncells + 200:
                ind = np.argmax(V)
                i, j = np.unravel_index(ind, V.shape)
                if V[i, j] < thres:
                    break
                yp, xp, la, ix, code = iter_extend(i,
                                                   j,
                                                   Ucell,
                                                   us[i, j, :],
                                                   change_codes=change_codes)
                codes = np.append(codes, np.expand_dims(code, axis=0), axis=0)
                ypix.append(yp)
                xpix.append(xp)
                lam.append(la)
                Ucell[ypix[n], xpix[n], :] -= np.outer(lam[n], codes[n, :])

                yp, xp = extendROI(yp, xp, Lyc, Lxc, int(np.mean(d0)))
                V[yp, xp] = 0
                n += 1
            newcells = len(ypix) - ncells
            if it == 0:
                Nfirst = newcells
            L = np.append(L,
                          np.zeros((Lyc, Lxc, newcells), 'float32'),
                          axis=-1)
            LtU = np.append(LtU, np.zeros((newcells, nsvd), 'float32'), axis=0)
            LtS = np.append(LtS,
                            np.zeros((newcells, nbasis), 'float32'),
                            axis=0)
            for n in range(ncells, len(ypix)):
                L[ypix[n], xpix[n], n] = lam[n]
                LtU[n, :] = lam[n] @ U[ypix[n], xpix[n], :]
                LtS[n, :] = lam[n] @ S[ypix[n], xpix[n], :]
            ncells += newcells

            # regression with neuropil
            LtL = L.reshape((-1, ncells)).transpose() @ L.reshape((-1, ncells))
            cellcode = np.concatenate((LtL, LtS), axis=1)
            neucode = np.concatenate((LtS.transpose(), StS), axis=1)
            codes = np.concatenate((cellcode, neucode), axis=0)
            Ucode = np.concatenate((LtU, StU), axis=0)
            codes = np.linalg.solve(codes + 1e-3 * np.eye((codes.shape[0])),
                                    Ucode).astype('float32')
            neu = codes[ncells:, :]
            codes = codes[:ncells, :]

        Ucell = U - (S.reshape((-1, nbasis)) @ neu + L.reshape(
            (-1, ncells)) @ codes).reshape(U.shape)
        # reestimate masks
        n, k = 0, 0
        while n < len(ypix):
            Ucell[ypix[n], xpix[n], :] += np.outer(lam[n], codes[k, :])
            ypix[n], xpix[n], lam[n], ix, codes[n, :] = iter_extend(
                ypix[n],
                xpix[n],
                Ucell,
                codes[k, :],
                refine,
                change_codes=change_codes)
            k += 1
            if ix.sum() == 0:
                print('dropped ROI with no pixels')
                del ypix[n], xpix[n], lam[n]
                continue
            Ucell[ypix[n], xpix[n], :] -= np.outer(lam[n], codes[n, :])
            n += 1
        codes = codes[:n, :]
        ncells = len(ypix)
        L = np.zeros((Lyc, Lxc, ncells), 'float32')
        LtU = np.zeros((ncells, nsvd), 'float32')
        LtS = np.zeros((ncells, nbasis), 'float32')
        for n in range(ncells):
            L[ypix[n], xpix[n], n] = lam[n]
            if refine < 0:
                LtU[n, :] = lam[n] @ U[ypix[n], xpix[n], :]
                LtS[n, :] = lam[n] @ S[ypix[n], xpix[n], :]
        err = (Ucell**2).mean()
        print('ROIs: %d, cost: %2.4f, time: %2.4f' % (ncells, err, toc(i0)))

        it += 1
        if refine == 0:
            break
        if refine == 2:
            # good place to get connected regions
            stat = [{
                'ypix': ypix[n],
                'lam': lam[n],
                'xpix': xpix[n]
            } for n in range(ncells)]
            stat = connected_region(stat, ops)
            # good place to remove ROIs that overlap, change ncells, codes, ypix, xpix, lam, L
            stat, ix = remove_overlaps(stat, ops, Lyc, Lxc)
            print('removed %d overlapping ROIs' % (len(ypix) - len(ix)))
            ypix = [stat[n]['ypix'] for n in range(len(stat))]
            xpix = [stat[n]['xpix'] for n in range(len(stat))]
            lam = [stat[n]['lam'] for n in range(len(stat))]
            L = L[:, :, ix]
            codes = codes[ix, :]
            ncells = len(ypix)
        if refine > 0:
            Ucell = Ucell + (S.reshape((-1, nbasis)) @ neu).reshape(U.shape)
        if refine < 0 and (newcells < Nfirst / 10
                           or it == ops['max_iterations']):
            refine = 3
            U, sdmov = getSVDproj(ops, u)
            Ucell = U
        if refine >= 0:
            StU = S.reshape((Lyc * Lxc, -1)).transpose() @ Ucell.reshape(
                (Lyc * Lxc, -1))
            #StU = np.reshape(S, (Lyc*Lxc,-1)).transpose() @ np.reshape(Ucell, (Lyc*Lxc, -1))
            neu = np.linalg.solve(StS, StU).astype('float32')
        refine -= 1
    Ucell = U - (S.reshape((-1, nbasis)) @ neu).reshape(U.shape)

    sdmov = np.reshape(sdmov, (Lyc, Lxc))
    ops['sdmov'] = sdmov
    stat = [{
        'ypix': ypix[n],
        'lam': lam[n] * sdmov[ypix[n], xpix[n]],
        'xpix': xpix[n]
    } for n in range(ncells)]

    stat = postprocess(ops, stat, Ucell, codes)
    return ops, stat
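The peak-finding step above (a maximum_filter compared back against V) is a generic local-maximum trick; a small standalone sketch of the same idea, outside the suite2p context:

# Standalone illustration: a pixel is a local maximum if it equals the max of its 3x3 neighbourhood.
import numpy as np
from scipy.ndimage import maximum_filter

rng = np.random.default_rng(0)
V = rng.random((64, 64))
maxV = maximum_filter(V, footprint=np.ones((3, 3)), mode='reflect')
imax = V > (maxV - 1e-10)              # boolean mask of local maxima
peaks = V[imax]
print("local maxima:", int(imax.sum()), "median peak value:", float(np.median(peaks)))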
Example #58
0

def allShipSunk(gameBoard, hits, boardSize):
	sunk = True
	for y in range(boardSize):
		for x in range(boardSize):
			if shipsOnGameBoard[x][y] == 1 and hits[x][y] != 1:
				sunk = False
	return sunk

while allShipSunk(gameBoard, hits, boardSize) is False:
	algorithm = hunt(gameBoard, boardSize)
	nextTest = unravel_index(algorithm.argmax(), algorithm.shape)
	print("next shot:", nextTest)
	if any(nextTest in y for y in shipList):
		print("hit!!!!")
		hits[nextTest] = 1
		numberOfTurns += 1
		print("turn:",  numberOfTurns)

		targetHits = np.array([[0 for i in range(boardSize)] for j in range(boardSize)])
		targetMisses = np.array([[0 for i in range(boardSize)] for j in range(boardSize)])
		targetHits[nextTest] = 1

		hit = target(targetHits, algorithm, targetMisses)
		gameBoard = hit + gameBoard
	else:
		print("miss!!")
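The loop above assumes a hunt(gameBoard, boardSize) helper that returns a 2D score map whose argmax is the next cell to fire at; one common choice (an assumption here, not the original helper) is a placement-density map that counts how many ship placements could still cover each untried cell:

# Hypothetical hunt(): for every straight run of `ship_len` untried cells, credit each cell in the run.
import numpy as np

def hunt(gameBoard, boardSize, ship_len=3):
    density = np.zeros((boardSize, boardSize))
    board = np.asarray(gameBoard)
    for a in range(boardSize):
        for b in range(boardSize - ship_len + 1):
            run_rows = [(a, b + k) for k in range(ship_len)]      # run along the second axis
            run_cols = [(b + k, a) for k in range(ship_len)]      # run along the first axis
            for run in (run_rows, run_cols):
                if all(board[i, j] == 0 for i, j in run):         # 0 = not shot at yet (assumption)
                    for i, j in run:
                        density[i, j] += 1
    return density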
Example #59
0
    def expand_and_prune_with_thresholds(self, expand_thr, prune_thr):
        for name, module in self.model.named_modules():
            if isinstance(module, MutatingModule):
                ncc = module.normalized_cross_correlation()

        splitted = False

        for name, module in self.model.named_modules():
            if not isinstance(module, MutatingModule):
                continue

            if module.fixed_feature_count:
                continue

            # print large weights:
            (max_val, max_idx) = torch.max(torch.abs(module.weight.view(-1)), dim=0)
            if torch.max(max_val.data) > 2:
                idx = np.unravel_index(max_idx.data[0], module.weight.size())
                print("maximum weight at index ", idx, " with value ", max_val.data[0])

            if len(module.output_tied_modules) > 0:
                all_nccs = [module.current_ncc] + [m.current_ncc for m in module.output_tied_modules]
                ncc_tensor = torch.abs(torch.stack(all_nccs))
                ncc = torch.mean(ncc_tensor, dim=0)
            else:
                ncc = module.current_ncc

            offset = 0

            feature_number = 0
            while feature_number < ncc.size(0):
                # weighted_value = ncc[feature_number] / (math.sqrt(module.in_channels))
                # weighted_value = ncc[feature_number] / (math.log(module.in_channels)+1)
                weighted_value = ncc[feature_number]
                if abs(weighted_value) > expand_thr:
                    print("in ", name, ", split feature number ", feature_number + offset, ", ncc: ", weighted_value)
                    ncc = module.split_feature(feature_number=feature_number + offset)
                    all_modules = [module] + module.output_tied_modules
                    [m.normalized_cross_correlation() for m in all_modules]
                    splitted = True
                    self.allow_pruning = True
                    feature_number = 0
                    continue
                if abs(weighted_value) < prune_thr and weighted_value != 0 and self.allow_pruning:
                    if feature_number >= module.out_channels:
                        break  # avoid looping forever on an index past the current out_channels
                    print("in ", name, ", prune feature number ", feature_number)
                    ncc = module.prune_feature(feature_number=feature_number + offset)
                    all_modules = [module] + module.output_tied_modules
                    [m.normalized_cross_correlation() for m in all_modules]
                    splitted = True
                    feature_number = 0
                    continue
                feature_number += 1

        if splitted:
            # self.optimizer = optim.SGD(self.model.parameters(),
            #                            lr=self.lr,
            #                            momentum=self.momentum,
            #                            weight_decay=self.weight_decay)
            #self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
            print("new parameter count: ", self.parameter_count())
            # print("test result after expanding: ")
            # self.test(epoch)
        else:
            print("No feature changed enough to split")

        return splitted
Example #60
0
def make_parcellation(subject, dest_dir='parcellation', roi_mask_file=None):
    """
    Perform a functional parcellation from input fmri data
    
    Return: parcellation file name (str)
    """
    # Loading names for folders and files
    # - T maps (input)
    #func_files = glob(op.join(op.join(op.join('./', subject), \
    #                    't_maps'), 'BOLD*nii'))
    #func_files = glob(op.join('./', subject, 'ASLf', 'spm_analysis', \
    #                            'Tmaps*img'))
    func_files = glob(op.join('./', subject, 'ASLf', 'spm_analysis', \
                                'spmT*img'))
    print('Tmap files: ', func_files)

    # - Mask (input)
    #spm_mask_file = op.join(spm_maps_dir, 'mask.img')
    mask_dir = op.join('./', subject, 'preprocessed_data')
    if not op.exists(mask_dir): os.makedirs(mask_dir)
    mask_file = op.join(mask_dir, 'mask.nii')
    mask = op.join(mask_dir, 'rcut_tissue_mask.nii')
    volume = op.join('./', subject, 'ASLf', 'funct', 'coregister', \
                     'mean' + subject + '_ASLf_correctionT1_0001.nii')
    make_mask(mask, volume, mask_file)

    # - parcellation (output)
    parcellation_dir = op.join('./', subject, dest_dir)
    if not op.exists(parcellation_dir): os.makedirs(parcellation_dir)
    pfile = op.join(parcellation_dir, 'parcellation_func.nii')

    # Parcellation
    from pyhrf.parcellation import make_parcellation_from_files 
    #make_parcellation_from_files(func_files, mask_file, pfile, 
    #                             nparcels=200, method='ward_and_gkm')

    # Masking with a ROI so we just consider parcels inside 
    # a certain area of the brain
    #if roi_mask_file is not None:   
    if 0: #for ip in np.array([11, 51, 131, 194]):
        #ip = 200

        #print 'Masking parcellation with roi_mask_file: ', roi_mask_file
        print('Masking ROI: ', ip)
        pfile_masked = op.join(parcellation_dir, 'parcellation_func_masked_roi' + str(ip) + '.nii')

        from pyhrf.ndarray import xndarray
        parcellation = xndarray.load(pfile)
        #m = xndarray.load(roi_mask_file)
        #parcels_to_keep = np.unique(parcellation.data * m.data)
        masked_parcellation = xndarray.xndarray_like(parcellation)
        #for ip in parcels_to_keep:
        #    masked_parcellation.data[np.where(parcellation.data==ip)] = ip
        
        masked_parcellation.data[np.where(parcellation.data==ip)] = ip
        masked_parcellation.save(pfile_masked)

    from pyhrf.ndarray import xndarray
    for tmap in func_files:
        func_file_i = xndarray.load(tmap)
        func_data = func_file_i.data
        #func_file_i.data[np.where(func_data<0.1*func_data.max())] = 0

        parcellation = xndarray.load(pfile)
        print(func_data.max())
        print(func_data.argmax())
        #ip = parcellation.data[func_data.argmax()]
        ip = parcellation.data[np.unravel_index(func_data.argmax(), parcellation.data.shape)]
        print(ip.shape)

        masked_parcellation = xndarray.xndarray_like(parcellation)
        print(masked_parcellation.data.shape)
        print(parcellation.data.shape)
        masked_parcellation.data[np.where(parcellation.data==ip)] = ip
        masked_parcellation.save(tmap[:-4] + '_parcelmax.nii')

    return pfile
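A hypothetical call (the subject name and on-disk layout are assumptions; the function expects the ./<subject>/ASLf/... folders described in the code above to exist):

# Hypothetical usage; 'subject01' and its folder structure are not part of the original example.
pfile = make_parcellation('subject01', dest_dir='parcellation')
print('Parcellation written to:', pfile)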