Example #1
0
def getpbasis(u,deg,ktype):
	N0,knots = getzerobasis(u,deg,ktype)
	nc = len(u)
	n = nc - 1
	p = deg
	m = p+n+1
	nk = m+1

	#Get the p level basis functions
	Np = np.empty_like(N0)
	Np[:,:] = N0
	for j in range(1,p+1):
		blength = Np.shape[1]
		Nnew = np.zeros((nc,blength-1))
		for i in range(blength-1):
			denom1 = (knots[i+j]-knots[i])
			if (denom1 == 0):
				term1 = 0.0
			else:
				term1 = (u-knots[i])/denom1
			denom2 = (knots[i+j+1]-knots[i+1])
			if (denom2 == 0):
				term2 = 0.0
			else:
				term2 = (knots[i+j+1]-u)/denom2
			Nnew[:,i] = term1*Np[:,i] + term2*Np[:,i+1]
		Np = np.empty_like(Nnew)
		Np[:,:] = Nnew

	return Np,knots
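For reference, the inner loop above implements the Cox-de Boor recurrence (with any 0/0 term taken as zero), where j is the current basis degree:

N_{i,j}(u) = ((u - u_i) / (u_{i+j} - u_i)) * N_{i,j-1}(u) + ((u_{i+j+1} - u) / (u_{i+j+1} - u_{i+1})) * N_{i+1,j-1}(u)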
Example #2
0
    def test_2d_float32(self):
        arr = np.random.randn(4, 3).astype(np.float32)
        indexer = [0, 2, -1, 1, -1]

        # axis=0
        result = com.take_nd(arr, indexer, axis=0)
        result2 = np.empty_like(result)
        com.take_nd(arr, indexer, axis=0, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=0)
        expected[[2, 4], :] = np.nan
        tm.assert_almost_equal(result, expected)

        # an out buffer of matching float32 dtype is now accepted as well
        out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
        com.take_nd(arr, indexer, out=out)  # it works!

        # axis=1
        result = com.take_nd(arr, indexer, axis=1)
        result2 = np.empty_like(result)
        com.take_nd(arr, indexer, axis=1, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=1)
        expected[:, [2, 4]] = np.nan
        tm.assert_almost_equal(result, expected)
Example #3
0
    def test_no_bounds(self):
        x0 = np.zeros(3)
        h = np.ones(3) * 1e-2
        inf_lower = np.empty_like(x0)
        inf_upper = np.empty_like(x0)
        inf_lower.fill(-np.inf)
        inf_upper.fill(np.inf)

        h_adjusted, one_sided = _adjust_scheme_to_bounds(
            x0, h, 1, '1-sided', inf_lower, inf_upper)
        assert_allclose(h_adjusted, h)
        assert_(np.all(one_sided))

        h_adjusted, one_sided = _adjust_scheme_to_bounds(
            x0, h, 2, '1-sided', inf_lower, inf_upper)
        assert_allclose(h_adjusted, h)
        assert_(np.all(one_sided))

        h_adjusted, one_sided = _adjust_scheme_to_bounds(
            x0, h, 1, '2-sided', inf_lower, inf_upper)
        assert_allclose(h_adjusted, h)
        assert_(np.all(~one_sided))

        h_adjusted, one_sided = _adjust_scheme_to_bounds(
            x0, h, 2, '2-sided', inf_lower, inf_upper)
        assert_allclose(h_adjusted, h)
        assert_(np.all(~one_sided))
Example #4
0
  def __init__(self, in_size, out_size, encode_size,
               Wscale=1.0, Vscale=1.0, Uscale=1.0,
               nobias=False, bias=0.0, forget_bias=1.0):
 
      self.bias = np.float32(bias)
      self.nobias = nobias
      self.in_size = in_size
      self.out_size = out_size
      self.encode_size = encode_size
      self.forget_bias = np.float32(forget_bias)
          
      #initialize weight matrices 
      self.W = cpu.utils.weight_initialization(in_size, out_size*4, Wscale)
      self.gW = np.empty_like(self.W)
      
      self.V = cpu.utils.weight_initialization(out_size, out_size*4, Vscale)
      self.gV = np.empty_like(self.V)
      
      self.U = cpu.utils.weight_initialization(encode_size, out_size*4, Uscale)
      self.gU = np.empty_like(self.U)
      
      if not self.nobias:
          self.b = np.empty((1, out_size*4), dtype=np.float32)
          self.b.fill(self.bias)
          self.b[0,out_size:out_size*2] = self.forget_bias
          self.gb = np.empty_like(self.b)
      
      self.z = None
Example #5
0
def search_for_channel(source_area, routys, routxs, search=2, tol=10):
    """Search neighboring grid cells for channel"""

    log.debug('searching for channel')

    new_ys = np.empty_like(routys)
    new_xs = np.empty_like(routxs)

    for i, (y, x) in enumerate(zip(routys, routxs)):
        area0 = source_area[y, x]

        search_area = source_area[y-search:y+search+1, x-search:x+search+1]

        if np.any(search_area > area0*tol):
            sy, sx = np.unravel_index(search_area.argmax(), search_area.shape)

            new_ys[i] = y + sy - search
            new_xs[i] = x + sx - search

            log.debug('Moving pour point to channel y: '
                      '{0}->{1}, x: {2}->{3}'.format(y, new_ys[i],
                                                     x, new_xs[i]))
            log.debug('Source Area has increased from {0}'
                      ' to {1}'.format(area0, source_area[new_ys[i], new_xs[i]]))
        else:
            new_ys[i] = y
            new_xs[i] = x
    return new_ys, new_xs
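A hedged usage sketch on a toy grid; the module-level `log` is assumed to be a configured logging.Logger:

import logging
import numpy as np
log = logging.getLogger(__name__)  # stand-in for the module's logger

source_area = np.ones((5, 5))
source_area[2, 3] = 100.0  # one strong channel cell
ys, xs = search_for_channel(source_area, np.array([2]), np.array([2]))
print(ys, xs)  # pour point moves from (2, 2) to the channel at (2, 3)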
Example #6
0
def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X

    Most significant bits are considered as the prefix.
    """
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)

    left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size

    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2

        left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid])
        found = right_idx > left_idx
        mid_found = mid[found]
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        hi[kept[~found]] = mid[~found]

        kept = r[lo < hi]

    return res
Example #7
0
def hsv_to_rgb(hsv):
    """
    convert hsv values in a numpy array to rgb values
    both input and output arrays have shape (M,N,3)
    """
    h = hsv[:,:,0]; s = hsv[:,:,1]; v = hsv[:,:,2]
    r = np.empty_like(h); g = np.empty_like(h); b = np.empty_like(h)
    i = (h*6.0).astype(int)
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    idx = i%6 == 0
    r[idx] = v[idx]; g[idx] = t[idx]; b[idx] = p[idx]
    idx = i == 1
    r[idx] = q[idx]; g[idx] = v[idx]; b[idx] = p[idx]
    idx = i == 2
    r[idx] = p[idx]; g[idx] = v[idx]; b[idx] = t[idx]
    idx = i == 3
    r[idx] = p[idx]; g[idx] = q[idx]; b[idx] = v[idx]
    idx = i == 4
    r[idx] = t[idx]; g[idx] = p[idx]; b[idx] = v[idx]
    idx = i == 5
    r[idx] = v[idx]; g[idx] = p[idx]; b[idx] = q[idx]
    idx = s == 0
    r[idx] = v[idx]; g[idx] = v[idx]; b[idx] = v[idx]
    rgb = np.empty_like(hsv)
    rgb[:,:,0]=r; rgb[:,:,1]=g; rgb[:,:,2]=b
    return rgb
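This is the classic sextant algorithm; as a quick sanity check (assuming matplotlib is installed, whose colors.hsv_to_rgb uses the same formulas):

import numpy as np
import matplotlib.colors as mcolors

hsv = np.random.rand(4, 4, 3)  # random HSV image, values in [0, 1)
assert np.allclose(hsv_to_rgb(hsv), mcolors.hsv_to_rgb(hsv))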
Example #8
0
def bytes_to_yuv(data, resolution):
    """
    Converts a bytes object containing YUV data to a `numpy`_ array.
    """
    width, height = resolution
    fwidth, fheight = raw_resolution(resolution)
    y_len = fwidth * fheight
    uv_len = (fwidth // 2) * (fheight // 2)
    if len(data) != (y_len + 2 * uv_len):
        raise PiCameraValueError(
            'Incorrect buffer length for resolution %dx%d' % (width, height))
    # Separate out the Y, U, and V values from the array
    a = np.frombuffer(data, dtype=np.uint8)
    Y = a[:y_len].reshape((fheight, fwidth))
    Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))
    Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))
    # Reshape the values into two dimensions, and double the size of the
    # U and V values (which only have quarter resolution in YUV4:2:0)
    U = np.empty_like(Y)
    V = np.empty_like(Y)
    U[0::2, 0::2] = Uq
    U[0::2, 1::2] = Uq
    U[1::2, 0::2] = Uq
    U[1::2, 1::2] = Uq
    V[0::2, 0::2] = Vq
    V[0::2, 1::2] = Vq
    V[1::2, 0::2] = Vq
    V[1::2, 1::2] = Vq
    # Stack the channels together and crop to the actual resolution
    return np.dstack((Y, U, V))[:height, :width]
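A minimal, self-contained sketch of the chroma upsampling step above: each quarter-resolution U/V sample is replicated into a 2x2 block of the full-resolution plane.

import numpy as np

Uq = np.arange(4, dtype=np.uint8).reshape(2, 2)  # quarter-resolution plane
U = np.empty((4, 4), dtype=np.uint8)             # full-resolution plane
U[0::2, 0::2] = Uq
U[0::2, 1::2] = Uq
U[1::2, 0::2] = Uq
U[1::2, 1::2] = Uq
print(U)  # each Uq value now fills a 2x2 block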
Example #9
0
 def getDataStructures(self):
     """Initializes and returns data structures with proper shapes for:
     - activation of the layer (accessible later as ass[l])
     - dError of the layer (accessible later as dErrors[l])"""
     dError = np.empty_like(self.b)
     a = np.empty_like(self.b)
     return a, dError
Example #10
0
def parse_params(param_file):
    f = open(param_file)
    archive_name = f.readline()[15:-1] + '.bin'
    mass_wd = float(f.readline()[10:-1])
    n_piles = int(f.readline()[10:-1])
    seed = int(float(f.readline()[7:-1]))
    pile_masses = np.empty((n_piles))
    pile_rho_bulks = np.empty_like(pile_masses)
    pile_orbits = np.empty((n_piles, 6))
    pile_layer_fracs = np.empty(n_piles, dtype=list)
    pile_layer_k_rhos = np.empty_like(pile_layer_fracs)
    for i in range(0, n_piles):
        pile_masses[i] = float(f.readline()[14:-1])
        pile_rho_bulks[i] = float(f.readline()[18:-1])
        for j in range(0, 6):
            pile_orbits[i, j] = float(f.readline()[17:-1])
        pile_layer_num = int(f.readline()[16:-1])
        pile_layer_fracs[i] = [float(f.readline()[22:-1])
                               for k in range(0, pile_layer_num)]
        pile_layer_k_rhos[i] = [float(f.readline()[23:-1])
                                for k in range(0, pile_layer_num)]
    pile_layer_fracs = pile_layer_fracs[0:n_piles]
    pile_layer_k_rhos = pile_layer_k_rhos[0:n_piles]
    n_frags = int(f.readline()[10:-1])
    frag_mass_high = float(f.readline()[16:-1])
    frag_mass_low = float(f.readline()[15:-1])
    frag_rho_high = float(f.readline()[15:-1])
    frag_rho_low = float(f.readline()[14:-1])
    f.close()
    return [archive_name, mass_wd,
            seed, n_piles,
            pile_masses, pile_rho_bulks,
            pile_orbits, pile_layer_fracs,
            pile_layer_k_rhos, n_frags,
            [frag_mass_high, frag_mass_low],
            [frag_rho_high, frag_rho_low]]
Example #11
0
def mso_r_lat_lon_position(time, mso=False, sza=False, **kwargs):
    """Returns position in MSO spherical polar coordinates.
    With `mso' set, return [r/lat/lon], [mso x/y/z [km]].
    With `sza' set, return [r/lat/lon], [sza [deg]].
    With both, return return [r/lat/lon], [mso x/y/z [km]], [sza [deg]]."""

    if sza:
        pos = position(time, frame = 'MAVEN_MSO', **kwargs)
        sza = np.rad2deg(np.arctan2(np.sqrt(pos[1]**2 + pos[2]**2), pos[0]))
        if isinstance(sza, np.ndarray):
            inx = sza < 0.
            if np.any(inx):
                sza[inx] = 180. + sza[inx]
        elif sza < 0.0:
            sza = 180. + sza

        tmp = reclat(pos)
        tmp_out = np.empty_like(tmp)
        tmp_out[0] = tmp[0]
        tmp_out[1] = np.rad2deg(tmp[2])
        tmp_out[2] = np.rad2deg(tmp[1])
        if mso:
            return tmp_out, pos, sza
        return tmp_out, sza

    else:
        pos = position(time, frame = 'MAVEN_MSO', **kwargs)
        tmp = reclat(pos)
        tmp_out = np.empty_like(tmp)
        tmp_out[0] = tmp[0]
        tmp_out[1] = np.rad2deg(tmp[2])
        tmp_out[2] = np.rad2deg(tmp[1])
        if mso:
            return tmp_out, pos
        return tmp_out
Example #12
0
def M_limit_sphere(M_tot, sphere_rad, origin=(0, 0, 0),
                   m_func=m_const, r_func=r_const,
                   m_kwargs=[1.0], r_kwargs=[1.0], N_est=10000):
    M_count = 0.0
    N_count = 0
    x_coords = np.empty((N_est))
    y_coords = np.empty_like(x_coords)
    z_coords = np.empty_like(x_coords)
    m_list = np.empty_like(x_coords)
    r_list = np.empty_like(x_coords)
    while M_count < M_tot:
        if N_count < N_est:
            p_coords = random_sphere_coords(sphere_rad, origin=origin)
            x_coords[N_count] = p_coords[0]
            y_coords[N_count] = p_coords[1]
            z_coords[N_count] = p_coords[2]
            r_list[N_count] = r_func(r_kwargs)
            m_list[N_count] = m_func(m_kwargs, r_list[N_count])
            M_count += m_list[N_count]
            N_count += 1
        else:
            x_coords = np.append(x_coords, np.empty(int(0.5*N_est)))
            y_coords = np.append(y_coords, np.empty(int(0.5*N_est)))
            z_coords = np.append(z_coords, np.empty(int(0.5*N_est)))
            m_list = np.append(m_list, np.empty(int(0.5*N_est)))
            r_list = np.append(r_list, np.empty(int(0.5*N_est)))
            N_est += int(0.5*N_est)
    cart_coords = np.asarray([x_coords[:N_count],
                              y_coords[:N_count], z_coords[:N_count]])
    return cart_coords, r_list[:N_count], m_list[:N_count]
Example #13
0
def psup_O(exits, P, R, O_shape, P_heatmap = None, alpha = 1.0e-10):
    OT = np.zeros(O_shape, P.dtype)
    
    # Calculate denominator
    #----------------------
    # but only do this if it hasn't been done already
    # (we must set P_heatmap = None when the probe/coords has changed)
    if P_heatmap is None : 
        P_heatmapT = era.make_P_heatmap(P, R, O_shape)
        P_heatmap  = np.empty_like(P_heatmapT)
        comm.Allreduce([P_heatmapT, MPI.__TypeDict__[P_heatmapT.dtype.char]], \
                       [P_heatmap,  MPI.__TypeDict__[P_heatmap.dtype.char]], \
                       op=MPI.SUM)

    # Calculate numerator
    #--------------------
    for r, exit in zip(R, exits):
        OT[-r[0]:P.shape[0]-r[0], -r[1]:P.shape[1]-r[1]] += exit * P.conj()
         
    # divide
    # here we need to do an all reduce
    #---------------------------------
    O = np.empty_like(OT)
    comm.Allreduce([OT, MPI.__TypeDict__[OT.dtype.char]], \
                   [O, MPI.__TypeDict__[O.dtype.char]],   \
                    op=MPI.SUM)
    O  = O / (P_heatmap + alpha)
    return O, P_heatmap
Example #14
0
def mpi_meanstd(data):
    """An mpi implementation of the std over different nodes along axis 0
    """
    m = mpi_mean(data)
    # since we need to compute the square, we cannot do in-place subtraction
    # and addition.
    try:
        data_centered = data - m
        data_centered **= 2
        std_local = data_centered.sum(0)
        std_local_computed = 1
    except MemoryError:
        std_local_computed = 0
    # let's check if some nodes did not have enough memory
    if mpi.COMM.allreduce(std_local_computed) < mpi.SIZE:
        # we need to compute the std_local in a batch-based way
        std_local = np.zeros_like(data[0])
        # we try to get a reasonable minibatch size
        minibatch = max(int(data.shape[0] / 10), 1)
        data_batch = np.empty_like(data[:minibatch])
        for start in range(0, data.shape[0], minibatch):
            end = min(data.shape[0], start + minibatch)
            data_batch[:end-start] = data[start:end] - m
            data_batch[:end-start] **= 2
            std_local += data_batch[:end-start].sum(axis=0)
    std = np.empty_like(std_local)
    mpi.COMM.Allreduce(std_local, std)
    num_data = mpi.COMM.allreduce(data.shape[0])
    std /= float(num_data)
    np.sqrt(std, out=std)
    return m, std
Example #15
0
def ibarrier(timeout=None, root=0, tag=123, comm=world):
    """Non-blocking barrier returning a list of requests to wait for.
    An optional time-out may be given, turning the call into a blocking
    barrier with an upper time limit, beyond which an exception is raised."""
    requests = []
    byte = np.ones(1, dtype=np.int8)
    if comm.rank == root:
        for rank in list(range(0, root)) + list(range(root+1, comm.size)):  # everybody else
            rbuf, sbuf = np.empty_like(byte), byte.copy()
            requests.append(comm.send(sbuf, rank, tag=2 * tag + 0, 
                                      block=False))
            requests.append(comm.receive(rbuf, rank, tag=2 * tag + 1,
                                         block=False))
    else:
        rbuf, sbuf = np.empty_like(byte), byte
        requests.append(comm.receive(rbuf, root, tag=2 * tag + 0, block=False))
        requests.append(comm.send(sbuf, root, tag=2 * tag + 1, block=False))

    if comm.size == 1 or timeout is None:
        return requests

    t0 = time.time()
    while not comm.testall(requests): # automatic clean-up upon success
        if time.time() - t0 > timeout:
            raise RuntimeError('MPI barrier timeout.')
    return []
Example #16
0
def _accR(R, z, dens, sigma, qintr, bhMass, soft):

    mgepot = np.empty_like(R)
    pot = np.empty_like(dens)
    e2 = 1. - qintr**2
    s2 = sigma**2
    r2 = R**2
    z2 = z**2
    d2 = r2 + z2
    
    for k in range(R.size):
        for j in range(dens.size):
            if (d2[k] < s2[j]/240.**2):
                e = np.sqrt(e2[j]) # pot is Integral in {u,0,1} of -D[H[R,z,u],R]/R at (R,z)=0
                pot[j] = (np.arcsin(e)/e - qintr[j])/(2*e2[j]*s2[j]) # Cfr. equation (A5)
            elif (d2[k] < s2[j]*245**2):
                pot[j] = quadva(_accelerationR_dRRcapitalh, [0.,1.], 
                                args=(r2[k], z2[k], e2[j], s2[j]))[0]
            else: # R acceleration in Keplerian limit (Cappellari et al. 2002)
                pot[j] = np.sqrt(np.pi/2)*sigma[j]/d2[k]**1.5 # Cfr. equation (A4)
        mgepot[k] = np.sum(s2*qintr*dens*pot)
    
    G = 0.00430237    # (km/s)**2 pc/Msun [6.674e-11 SI units (CODATA-10)]
    
    return -R*(4*np.pi*G*mgepot + G*bhMass/(d2 + soft**2)**1.5)
Example #17
0
def test_structuring_element8():
    # check the output for a custom structuring element

    r = np.array([[0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 255, 0, 0, 0],
                  [0, 0, 255, 255, 255, 0],
                  [0, 0, 0, 255, 255, 0],
                  [0, 0, 0, 0, 0, 0]])

    # 8-bit
    image = np.zeros((6, 6), dtype=np.uint8)
    image[2, 2] = 255
    elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)

    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)

    # 16-bit
    image = np.zeros((6, 6), dtype=np.uint16)
    image[2, 2] = 255
    out = np.empty_like(image)

    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)
Example #18
0
def _compute_useful_bin_quantities(x, y, signal, noise, xnode, ynode, scale):
    """
    Recomputes (Weighted) Voronoi Tessellation of the pixels grid to make sure
    that the class number corresponds to the proper Voronoi generator.
    This is done to take into account possible zero-size Voronoi bins
    in output from the previous CVT (or WVT).

    """
    # classe will contain the bin number of each given pixel
    #
    classe = np.argmin(((x[:, np.newaxis] - xnode)**2 + (y[:, np.newaxis] - ynode)**2)/scale**2, axis=1)

    # At the end of the computation evaluate the bin luminosity-weighted
    # centroids (xbar, ybar) and the corresponding final S/N of each bin.
    #
    xbar = np.empty_like(xnode)
    ybar = np.empty_like(xnode)
    sn = np.empty_like(xnode)
    area = np.empty_like(xnode)
    good = np.unique(classe)
    for k in good:
        index = classe == k   # Find subscripts of pixels in bin k.
        xbar[k], ybar[k] = _weighted_centroid(x[index], y[index], signal[index])
        sn[k] = _sn_func(signal, noise, index)
        area[k] = index.sum()

    return classe, xbar, ybar, sn, area
Example #19
0
def cutratio_all(ratio):
    import pickle
    with open('sparsepcadirect', 'rb') as f:
        sparsedirect = pickle.load(f)
    sparsedirect.set_params(transform_algorithm='lasso_lars')
    glassmodel = np.load('glassline.npy').astype('f')
    glassmodel = glassmodel[:200]

    recall = np.empty_like(glassmodel)
    diffall = np.empty_like(glassmodel)
    maskall = np.empty_like(glassmodel)

    for idx in range(glassmodel.shape[0]):
        print(idx, glassmodel.shape[0])
        recall[idx],diffall[idx],maskall[idx] = cutratio(ratio, idx, sparsedirect, glassmodel)

    np.save('cut_rec.npy',recall)
    np.save('cut_diff.npy',diffall)
    np.save('cut_mask.npy',maskall)
    from layerbase import DrawPatch
    drec = recall.reshape((-1,1,70-25,90-0))
    misc.toimage(DrawPatch(drec)).save('irec.jpg')
    drec = diffall.reshape((-1,1,70-25,90-0))
    misc.toimage(DrawPatch(drec)).save('idiff.jpg')
    drec = maskall.reshape((-1,1,70-25,90-0))
    misc.toimage(DrawPatch(drec)).save('imask.jpg')
Example #20
0
def get_data_frames(llh_file):
    """
    Loads data from stored hdf5 file into a data frame for each
    combination of 'pseudo_data | hypo'
    """

    fh = h5py.File(llh_file,'r')
    data_frames = []
    for dFlag in ['data_NMH','data_IMH']:
        for hFlag in ['hypo_NMH','hypo_IMH']:

            keys = list(fh['trials'][dFlag][hFlag].keys())
            entries = len(fh['trials'][dFlag][hFlag][keys[0]])

            data = {key: np.array(fh['trials'][dFlag][hFlag][key]) for key in keys }
            data['seed'] = np.array(fh['trials'][dFlag]['seed'])
            data['pseudo_data'] = np.empty_like(data[keys[0]],dtype='|S16')
            data['pseudo_data'][:] = dFlag
            data['hypo'] = np.empty_like(data[keys[0]],dtype='|S16')
            data['hypo'][:] = hFlag

            data_frames.append(data)

    fh.close()

    return data_frames
Example #21
0
def crab_integral_flux(energy_min=1, energy_max=1e4, reference=CRAB_DEFAULT_REFERENCE):
    """Integral Crab flux.
    
    See the `gammapy.spectrum.crab` module docstring for a description
    of the available reference spectra.
    
    Parameters
    ----------
    energy_min, energy_max : array_like
        Energy band (TeV)
    reference : {{'hegra', 'hess_pl', 'hess_ecpl', 'meyer'}}
        Published Crab reference spectrum

    Returns
    -------
    flux : array
        Integral flux (cm^-2 s^-1) in energy band ``energy_min`` to ``energy_max``
    """
    from scipy.integrate import quad
    # @todo How does one usually handle 0-dim and 1-dim
    # arrays at the same time?
    energy_min, energy_max = np.asarray(energy_min, dtype=float), np.asarray(energy_max, dtype=float)
    npoints = energy_min.size
    energy_min, energy_max = energy_min.reshape(npoints), energy_max.reshape(npoints)
    I, I_err = np.empty_like(energy_min), np.empty_like(energy_max)
    for ii in range(npoints):
        I[ii], I_err[ii] = quad(crab_flux, energy_min[ii], energy_max[ii],
                                args=(reference,), epsabs=1e-20)
    return I
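A hedged usage sketch; crab_flux and CRAB_DEFAULT_REFERENCE come from the surrounding gammapy.spectrum.crab module and are assumed importable:

flux = crab_integral_flux(energy_min=1, energy_max=10, reference='hegra')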
Example #22
0
def make_mask(shape, width):
    """
    Create a 2D mask with 1 in the center that fades out to 0 at the border.
    """
    assert len(shape) == 2
    s0, s1 = shape
    try:
        if len(width) == 2:
            w0, w1 = width
        else:
            w0 = w1 = width
    except TypeError:
        w0 = w1 = width
    key = ((s0, s1), (w0, w1))
    if key not in masks:
        g0 = gaussian(s0, w0)
        g1 = gaussian(s1, w1)
        h0 = numpy.empty_like(g0)
        h1 = numpy.empty_like(g1)
        h0[:s0 // 2] = g0[s0 - s0 // 2:]
        h0[s0 // 2:] = g0[:s0 - s0 // 2]
        h1[:s1 // 2] = g1[s1 - s1 // 2:]
        h1[s1 // 2:] = g1[:s1 - s1 // 2]
        mask = numpy.outer(1 - h0, 1 - h1)
        masks[key] = mask
    return masks[key]
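The snippet relies on a module-level masks cache and a gaussian window helper; a minimal sketch of both (the originals may differ):

import numpy
masks = {}

def gaussian(n, sigma):
    # symmetric Gaussian window of length n
    x = numpy.arange(n) - (n - 1) / 2.0
    return numpy.exp(-x ** 2 / (2.0 * sigma ** 2))

mask = make_mask((64, 64), 8)  # 64x64 mask with an 8-pixel fade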
Example #23
0
 def load(self, path2file, dtype='float32', iter_count=None, thresholds=None):
     fmt = os.path.splitext(path2file)[-1].lstrip('.')
     print('Loading segmentation from file %s ...' % path2file)
     if fmt == 'npz':
         a = np.load(path2file)
         self.thresholds = a['thresholds']
         self.iter_count = a['iter_count']
         self.sdf = a['sdf']
     elif fmt == 'mat':
         a = sio.loadmat(path2file)
         self.thresholds = a['thresholds']
         self.iter_count = a['iter_count']
         self.sdf = a['sdf']
     elif fmt == 'bin':
         self.thresholds = thresholds
         self.sdf = np.fromfile(path2file, dtype=dtype).reshape(thresholds.shape+self.im.shape)
         self.iter_count = iter_count
     else:
         raise KeyError('File format not understood!')
     # Reinitialize variables
     self.nthresh = np.ndim(self.thresholds)
     self.im_ave = np.empty_like(self.im, dtype=self.dtype)
     self.im_error = np.empty_like(self.im, dtype=self.dtype)
     print('Calculating means and error with the loaded SDF ...')
     update_regions(self.im, self.sdf, self.im_ave, self.im_error)
     print('Done!')
Example #24
0
    def get_harmonic_power1(self, interval_range=(15.,50.), harmonics=None, sample_interval=3):
        """
        return a 1D array of strength of harmonic peaks for each time
        in spectrum.times
        interval_range (2-tuple) .. harmonic interval range to search for best match
        harmonics (None, int or iterable of ints) .. the harmonics to match.  ex: [2,3] will ignore 
            the influence of the first harmonic.  ex: 3 will try to match [1,2,3].  None will
            use the default
        """
        print('hpower1')
        if harmonics is None: harmonics=self.harmonics
        
        sample_times = self.spectrum.times[::sample_interval]
        fpnt_shape = len(harmonics), sample_times.shape[0]
        fpnt = np.empty(fpnt_shape)
        pwrs = np.empty_like(sample_times)
        ints = np.empty_like(sample_times)
        for i,t in enumerate(sample_times):
            res = self.get_peaks(t, harmonics=harmonics, interval_range=interval_range)
            fpnt[:,i] = res[1]
            pwrs[i] = res[2]
            ints[i] = res[3]

        tlen = self.spectrum.times.shape[0]
        self.fingerprint = np.zeros((self.harmonics.shape[0], tlen))
        for i in range(len(harmonics)):
            self.fingerprint[i,:] = fast_resample(fpnt[i,:], tlen)
        self.harmonic_power = fast_resample(pwrs, tlen)
        self.harmonic_intvl = fast_resample(ints, tlen)
        return self.harmonic_power, self.harmonic_intvl
Example #25
0
def average_passive_aggressive(feature_matrix, labels, T, L):
    theta = np.empty_like(feature_matrix[0])
    theta.fill(0.)
    theta_empty = np.empty_like(feature_matrix[0])
    theta_empty.fill(0.)
    theta_sum = theta  
    theta_0 = 0.0
    theta_0_sum = theta_0
    ticker = 0
    update_track = 0
    
    while ticker < T:
        
        for i in range(len(feature_matrix)):
  
            (theta_new, theta_0_new) = passive_aggressive_single_step_update(feature_matrix[i], labels[i], L, theta, theta_0)
                      

            # select only the instances where theta actually gets updated
            if np.any(np.subtract(theta_new, theta)) or theta_0_new - theta_0 != 0:
                theta_sum = np.add(theta_new, theta_sum)
                theta_0_sum += theta_0_new                
                update_track += 1
                theta = theta_new
                theta_0 = theta_0_new
            

        ticker += 1
        
    theta_average = np.divide(theta_sum, update_track)
    theta_0_average = theta_0_sum/update_track

    return (theta_average, theta_0_average)
Example #26
0
def feature_meanstd(mat):
    """
    Utility function that does in-place normalization of features.
    Input:
        mat: the local data matrix, each row is a feature vector and each 
             column is a feature dim
    Output:
        m:      the mean for each dimension
        std:    the standard deviation for each dimension
    """
    # subtract mean
    N = mpi.COMM.allreduce(mat.shape[0])
    m = np.empty_like(mat[0])
    mpi.COMM.Allreduce(np.sum(mat, axis=0), m)
    m /= N
    # we perform in-place modifications
    mat -= m
    # normalize variance
    std = np.empty_like(mat[0])
    mpi.COMM.Allreduce(np.sum(mat ** 2, axis=0), std)
    std /= N
    # we also add a regularization term
    std = np.sqrt(std) + np.finfo(np.float64).eps
    # recover the original mat
    mat += m
    return m, std
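For intuition, the single-process equivalent of these statistics (assuming mpi.SIZE == 1, so each Allreduce reduces over one node):

import numpy as np
mat = np.random.randn(100, 8)
m_ref = mat.mean(axis=0)
std_ref = np.sqrt(((mat - m_ref) ** 2).mean(axis=0)) + np.finfo(np.float64).eps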
Example #27
0
    def test_prepared_invocation(self):
        a = np.random.randn(4,4).astype(np.float32)
        a_gpu = drv.mem_alloc(a.size * a.dtype.itemsize)

        drv.memcpy_htod(a_gpu, a)

        mod = SourceModule("""
            __global__ void doublify(float *a)
            {
              int idx = threadIdx.x + threadIdx.y*blockDim.x;
              a[idx] *= 2;
            }
            """)

        func = mod.get_function("doublify")
        func.prepare("P")
        func.prepared_call((1, 1), (4,4,1), a_gpu, shared_size=20)
        a_doubled = np.empty_like(a)
        drv.memcpy_dtoh(a_doubled, a_gpu)
        print(a)
        print(a_doubled)
        assert la.norm(a_doubled-2*a) == 0

        # now with offsets
        func.prepare("P")
        a_quadrupled = np.empty_like(a)
        func.prepared_call((1, 1), (15,1,1), int(a_gpu)+a.dtype.itemsize)
        drv.memcpy_dtoh(a_quadrupled, a_gpu)
        assert la.norm(a_quadrupled[1:]-4*a[1:]) == 0
Example #28
0
    def initialize(self):
        """
        Initialize the segmentation.
        :param thresholds:
        :return:
        """
        # Initialize variables
        self.thresholds = require_array(self.kwargs.get('thresholds'), dtype=self.dtype)
        self.nthresh = np.ndim(self.thresholds)
        self.sdf = np.empty((self.nthresh, ) + self.im.shape, dtype=self.dtype)
        self.im_ave = np.empty_like(self.im, dtype=self.dtype)
        self.im_error = np.empty_like(self.im, dtype=self.dtype)
        self.iter_count = 0

        # Initialize regions
        print('Initializing SDF and calculating im_ave & im_error ...')
        init_regions(self.im, self.thresholds, self.im_ave, self.im_error, self.sdf)
        
        # Reinitialize SDF
        print('Reinitializing SDF ...')
        for i in range(self.nthresh):
            im3D.sdf.inplace.reinit(self.sdf[i], self.sdf[i], 
                                    dt=self.kwargs['init_reinit_dt'],
                                    tol=self.kwargs['init_reinit_tol'],
                                    band=self.kwargs['init_reinit_band'],
                                    max_it=self.kwargs['init_reinit_max_it'],
                                    subcell=self.kwargs['init_reinit_subcell'],
                                    WENO=self.kwargs['init_reinit_weno'],
                                    verbose=True)
Example #29
0
def get_data_frames(llh_file):
    """
    Loads data from stored hdf5 file into a data frame for each
    combination of 'pseudo_data | hypo'
    """

    fh = h5py.File(llh_file, "r")
    data_frames = []
    for dFlag in ["data_NMH", "data_IMH"]:
        for hFlag in ["hypo_NMH", "hypo_IMH"]:

            keys = fh["trials"][dFlag][hFlag].keys()
            entries = len(fh["trials"][dFlag][hFlag][keys[0]])

            data = {key: np.array(fh["trials"][dFlag][hFlag][key]) for key in keys}
            data["pseudo_data"] = np.empty_like(data[keys[0]], dtype="|S16")
            data["pseudo_data"][:] = dFlag
            data["hypo"] = np.empty_like(data[keys[0]], dtype="|S16")
            data["hypo"][:] = hFlag

            df = DataFrame(data)
            data_frames.append(df)

    fh.close()

    return data_frames
Example #30
0
    def run(self, *args, **kwargs):
        layer = pyrat.data.active

        # STEP1: Estimate profiles
        azprof, rgprof = self.layer_accumulate(self.estimate_profiles, combine=self.combine_profiles)

        # STEP2: Fit correction
        rgprof /= np.mean(rgprof, axis=-1, keepdims=True)
        azprof /= np.mean(azprof, axis=-1, keepdims=True)

        # todo: from here on adapt to nd-data sets
        rgaxis = np.arange(rgprof.shape[-1])
        azaxis = np.arange(azprof.shape[-1])
        rgcorr = np.empty_like(rgprof)
        azcorr = np.empty_like(azprof)
        if rgprof.ndim == 1:
            rgcorr = np.polyval(np.polyfit(rgaxis, rgprof, self.order), rgaxis)
            azcorr = np.polyval(np.polyfit(azaxis, azprof, self.order), azaxis)
        elif rgprof.ndim == 2:
            for k in range(rgprof.shape[0]):
                rgcorr[k, :] = np.polyval(np.polyfit(rgaxis, rgprof[k, :], self.order), rgaxis)
                azcorr[k, :] = np.polyval(np.polyfit(azaxis, azprof[k, :], self.order), azaxis)
        elif rgprof.ndim == 3:
            for k in range(rgprof.shape[0]):
                for l in range(rgprof.shape[1]):
                    rgcorr[k, l, :] = np.polyval(np.polyfit(rgaxis, rgprof[k, l, :], self.order), rgaxis)
                    azcorr[k, l, :] = np.polyval(np.polyfit(azaxis, azprof[k, l, :], self.order), azaxis)

        # STEP3: Apply correction
        outlayer = self.layer_process(self.applyfix, axis=self.axis, correction=(azcorr, rgcorr), siltent=False,
                                      **kwargs)
        pyrat.activate(outlayer)
        return outlayer
Example #31
0
def _assert_apply_unitary_works_when_axes_transposed(val: Any,
                                                     *,
                                                     atol: float = 1e-8
                                                     ) -> None:
    """Tests whether a value's _apply_unitary_ handles out-of-order axes.

    A common mistake to make when implementing `_apply_unitary_` is to assume
    that the incoming axes will be contiguous, or ascending, or that they can be
    flattened, or that other axes have a length of two, etc, etc ,etc. This
    method checks that `_apply_unitary_` does the same thing to out-of-order
    axes that it does to contiguous in-order axes.

    Args:
        val: The operation, gate, or other unitary object to test.
        atol: Absolute error tolerance.
    """

    # Only test custom apply unitary methods.
    if not hasattr(val, '_apply_unitary_') or not protocols.has_unitary(val):
        return

    # Pick sizes and shapes.
    shape = protocols.qid_shape(val)
    n = len(shape)
    padded_shape = shape + (1, 2, 2, 3)
    padded_n = len(padded_shape)
    size = np.prod(padded_shape).item()

    # Shuffle the axes.
    permutation = list(range(padded_n))
    random.shuffle(permutation)
    transposed_shape = [0] * padded_n
    for i in range(padded_n):
        transposed_shape[permutation[i]] = padded_shape[i]

    # Prepare input states.
    in_order_input = lin_alg_utils.random_superposition(size).reshape(
        padded_shape)
    out_of_order_input = np.empty(shape=transposed_shape, dtype=np.complex128)
    out_of_order_input.transpose(permutation)[...] = in_order_input

    # Apply to in-order and out-of-order axes.
    in_order_output = protocols.apply_unitary(
        val,
        protocols.ApplyUnitaryArgs(
            target_tensor=in_order_input,
            available_buffer=np.empty_like(in_order_input),
            axes=range(n),
        ),
    )
    out_of_order_output = protocols.apply_unitary(
        val,
        protocols.ApplyUnitaryArgs(
            target_tensor=out_of_order_input,
            available_buffer=np.empty_like(out_of_order_input),
            axes=permutation[:n],
        ),
    )

    # Put the out of order output back into order, to enable comparison.
    reordered_output = out_of_order_output.transpose(permutation)

    # The results should be identical.
    if not np.allclose(in_order_output, reordered_output, atol=atol):
        raise AssertionError(
            f'The _apply_unitary_ method of {repr(val)} acted differently on '
            f'out-of-order axes than on in-order axes.\n'
            f'\n'
            f'The failing axis order: {repr(permutation[:n])}')
Example #32
0
print('test len:', len(testY))
print(testY)
print(len(testY))
print(len(testPredict))
#print(testX[len(testX)-1])
#print(scaler.inverse_transform([[0.04405421]]))
#print(scaler.inverse_transform([[0.044367921]]))

# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))

# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict

# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
#testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1-(forecastCandle*2), :] = testPredict

# plot baseline and predictions
#plt.plot(scaler.inverse_transform(dataset))
#plt.plot(trainPredictPlot)
#print('testPrices:')
arr2 = testYArr
print('arr2', arr2)
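The nan-fill pattern above is a common trick for plotting a series at its true offset; a tiny self-contained sketch:

import numpy as np
dataset = np.arange(10, dtype=float).reshape(-1, 1)
segment = dataset[3:7] * 2          # stand-in for a prediction
plot = np.empty_like(dataset)
plot[:, :] = np.nan                 # nan rows are skipped when plotted
plot[3:3 + len(segment), :] = segment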
Example #33
0
def numpy_rw_11():

    if gdaltest.numpy_drv is None:
        return 'skip'

    import numpy
    from osgeo import gdal_array

    type_tuples = [
        ('uint8', gdal.GDT_Byte, numpy.uint8, 255),
        ('uint16', gdal.GDT_UInt16, numpy.uint16, 65535),
        ('int16', gdal.GDT_Int16, numpy.int16, -32767),
        ('uint32', gdal.GDT_UInt32, numpy.uint32, 4294967295),
        ('int32', gdal.GDT_Int32, numpy.int32, -2147483648),
        ('float32', gdal.GDT_Float32, numpy.float32, 1.23),
        ('float64', gdal.GDT_Float64, numpy.float64, 1.23456789),
        ('cint16', gdal.GDT_CInt16, numpy.complex64, -32768 + 32767j),
        ('cint32', gdal.GDT_CInt32, numpy.complex64, -32769 + 32768j),
        ('cfloat32', gdal.GDT_CFloat32, numpy.complex64, -32768.5 + 32767.5j),
        ('cfloat64', gdal.GDT_CFloat64, numpy.complex128,
         -32768.123456 + 32767.123456j)
    ]

    for type_tuple in type_tuples:
        ds = gdal.GetDriverByName('GTiff').Create('/vsimem/' + type_tuple[0],
                                                  1, 1, 1, type_tuple[1])
        tmp = ds.ReadAsArray()
        if tmp.dtype != type_tuple[2]:
            gdaltest.post_reason('did not get expected numpy type')
            print(type_tuple)
            return 'fail'

        ar = numpy.empty([1, 1], dtype=type_tuple[2])

        ar_ds = gdal_array.OpenArray(ar)
        got_dt = ar_ds.GetRasterBand(1).DataType
        ar_ds = None
        expected_dt = type_tuple[1]
        if expected_dt == gdal.GDT_CInt16 or expected_dt == gdal.GDT_CInt32:
            expected_dt = gdal.GDT_CFloat32
        if got_dt != expected_dt:
            gdaltest.post_reason('did not get expected result (0)')
            print(type_tuple[1])
            print(got_dt)
            print(expected_dt)
            return 'fail'

        ar[0][0] = type_tuple[3]
        ds.GetRasterBand(1).WriteArray(ar)
        ds = None

        ds = gdal.Open('/vsimem/' + type_tuple[0])
        ar2 = ds.ReadAsArray()
        ar3 = numpy.empty_like(ar2)
        ds.GetRasterBand(1).ReadAsArray(buf_obj=ar3)
        ds = None

        gdal.Unlink('/vsimem/' + type_tuple[0])

        if (type_tuple[0] == 'float32' and abs(ar2[0][0] - type_tuple[3]) > 1e-6) or \
           (type_tuple[0] != 'float32' and ar2[0][0] != type_tuple[3]):
            gdaltest.post_reason('did not get expected result (1)')
            print(ar2)
            print(type_tuple)
            return 'fail'

        if (type_tuple[0] == 'float32' and abs(ar3[0][0] - type_tuple[3]) > 1e-6) or \
           (type_tuple[0] != 'float32' and ar3[0][0] != type_tuple[3]):
            gdaltest.post_reason('did not get expected result (2)')
            print(ar3)
            print(type_tuple)
            return 'fail'

    return 'success'
Example #34
0
def LUP(A):
    global ctx
    global queue
    global prg

    ctx, queue = cl_init()
    kernel_params = {"block_size": block_size}

    prg = cl.Program(
        ctx,
        open("my_LUP.cl").read() % kernel_params,
    ).build()
    #	prg = cl.Program(ctx, open("my_LUP.cl").read() ).build()

    n = len(A)
    m = len(A[0])

    # correct size
    rem = m % block_size
    if rem:
        A = [row + [0.0] * (block_size - rem) for row in A]
        m = len(A[0])

    a_buf = np.array(A).astype(np.float32)
    L_buf = np.empty_like(a_buf)

    print("a_buf input")
    print(a_buf)

    print(L_buf.shape)

    mf = cl.mem_flags
    d_a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a_buf)
    d_L_buf = cl.Buffer(ctx,
                        mf.READ_WRITE | mf.COPY_HOST_PTR,
                        size=L_buf.nbytes,
                        hostbuf=L_buf)

    #	s = max_col(d_a_buf, m, 3)
    #	print("s =",s)

    #	LU_one_step(d_a_buf, d_L_buf, a_buf.shape, n, 0)
    #	LU_one_step(d_a_buf, d_L_buf, a_buf.shape, n, 1)
    #	LU_one_step(d_a_buf, d_L_buf, a_buf.shape, n, 2)
    #	LU_one_step(d_a_buf, d_L_buf, a_buf.shape, n, 3)

    #	cl.enqueue_copy(queue, a_buf, d_a_buf)
    #	cl.enqueue_copy(queue, L_buf, d_L_buf)

    #	print("a_buf")
    #	print(a_buf)
    #	print("L_buf")
    #	print(L_buf)

    #	return

    # actual benchmark ------------------------------------------------------------
    t1 = time()

    p_act = []
    for k in range(n - 1):
        s = max_col(d_a_buf, m, k)
        if s != k:
            p_act.append((s, k))
            swap_row(d_a_buf, m, s, k)
            swap_row(d_L_buf, m, s, k)
            swap_col(d_L_buf, m, s, k)
            print('swap', k, s)

#		print("a_buf_tmp")
#		print(a_buf)

# add some spike
#		cl.enqueue_copy(queue, a_buf, d_a_buf)
#		cl.enqueue_copy(queue, L_buf, d_L_buf)
#		d_a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a_buf)
#		d_L_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, size=L_buf.nbytes, hostbuf=L_buf)
# end of spike

        print("one_step", k)
        LU_one_step(d_a_buf, d_L_buf, a_buf.shape, n, k)

        cl.enqueue_copy(queue, a_buf, d_a_buf)
        cl.enqueue_copy(queue, L_buf, d_L_buf)

#		if k==1: break

# construct permute vector
    p = list(range(n))
    for (s, k) in p_act:
        p[s], p[k] = p[k], p[s]

    gpu_time = (time() - t1)

    cl.enqueue_copy(queue, a_buf, d_a_buf)
    cl.enqueue_copy(queue, L_buf, d_L_buf)

    print("a_buf")
    print(a_buf)
    print("L_buf")
    print(L_buf)

    print("p = " + str(p))

    if rem:
        m -= block_size - rem

    res = [[round(a + l, 2) for (a, l) in zip(row_a[:m], row_l[:m])]
           for (row_a, row_l) in zip(a_buf, L_buf)]

    #	res = [[0.0]*m for i in range(n)]
    #	for i in range(n):
    #		for j in range(m):
    #			if i > j: #low sub-matrix
    #				res[i][j] = L[i][j]
    #			else:
    #				res[i][j] = A[i][j]

    #	for i in range(n): res[i][i] = 1.0

    return res
Example #35
0
t = np.arange(0, 4800, 0.1)
tref = t[:16000]



##############################################################################
##############################################################################
##############################################################################
### bifurcation analysis

### initialize parameter array
bif_array = np.linspace(0,1,100)


#### dummy arrays to be filled after simulation steps
max_array = np.empty_like(bif_array)
min_array = np.empty_like(bif_array)

period_array = np.empty_like(bif_array)

params = rate.copy()

for idx, valx in enumerate(bif_array):
    params['k3'] = valx
    state = odeint(clock,state0,t,args=(params,))
    state_notrans = remove_trans(state)
    
    ### store maxima and minima after each simulation step
    frq_tot = state_notrans[:16000,1]
    max_array[idx] = np.mean(pb.get_max(frq_tot, tref))
    min_array[idx] = np.mean(pb.get_min(frq_tot, tref))
Example #36
0
import numpy as np

x = np.array([[1, 2], [3, 4]])
print(x)    # Prints "[[1 2]
            #          [3 4]]"
print(x.T)  # Prints "[[1 3]
            #          [2 4]]"

# Note that taking the transpose of a rank 1 array does nothing:
v = np.array([1,2,3])
print(v)    # Prints "[1 2 3]"
print(v.T)  # Prints "[1 2 3]"

print()
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x)   # Create an empty matrix with the same shape as x

# Add the vector v to each row of the matrix x with an explicit loop
for i in range(4):
    y[i, :] = x[i, :] + v

# Now y is the following
# [[ 2  2  4]
#  [ 5  5  7]
#  [ 8  8 10]
#  [11 11 13]]
print(y)

print()
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
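Broadcasting makes the explicit loop unnecessary: numpy stretches v across the rows of x automatically.

y = x + v  # broadcasting adds v to each row of x
print(y)   # same matrix as the loop above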
Example #37
0
def trigger_onset(charfct, thres1, thres2, max_len=9e99, max_len_delete=False):
    """
    Calculate trigger on and off times.

    Given thres1 and thres2 calculate trigger on and off times from
    characteristic function.

    This method is written in pure Python and gets slow as soon as there
    are more than 1e6 triggerings ("on" AND "off") in charfct --- normally
    this does not happen.

    :type charfct: NumPy :class:`~numpy.ndarray`
    :param charfct: Characteristic function of e.g. STA/LTA trigger
    :type thres1: float
    :param thres1: Value above which trigger (of characteristic function)
                   is activated (higher threshold)
    :type thres2: float
    :param thres2: Value below which trigger (of characteristic function)
        is deactivated (lower threshold)
    :type max_len: int
    :param max_len: Maximum length of triggered event in samples. A new
                    event will be triggered as soon as the signal reaches
                    again above thres1.
    :type max_len_delete: bool
    :param max_len_delete: Do not write events longer than max_len into
                           report file.
    :rtype: List
    :return: Nested list of trigger-on and trigger-off times in samples
    """
    # 1) find indices of samples greater than threshold
    # 2) calculate trigger "off" times from the gap in trigger indices
    #    above the threshold, i.e. the difference of two following indices
    #    in ind is greater than 1
    # 3) in principle the same as for "off", just add one to the index to get
    #    start times; this operation is not supported on the compact
    #    syntax
    # 4) as long as there is an on time greater than the actual off time, find
    #    trigger-on states which are greater than the last off state and the
    #    corresponding off state which is greater than the current on state
    # 5) if the signal stays above thres2 longer than max_len an event
    #    is triggered and following a new event can be triggered as soon as
    #    the signal is above thres1
    ind1 = np.where(charfct > thres1)[0]
    if len(ind1) == 0:
        return []
    ind2 = np.where(charfct > thres2)[0]
    #
    on = deque([ind1[0]])
    of = deque([-1])
    # determine the indices where charfct falls below off-threshold
    ind2_ = np.empty_like(ind2, dtype=bool)
    ind2_[:-1] = np.diff(ind2) > 1
    # last occurrence is missed by the diff, add it manually
    ind2_[-1] = True
    of.extend(ind2[ind2_].tolist())
    on.extend(ind1[np.where(np.diff(ind1) > 1)[0] + 1].tolist())
    # include last pick if trigger is on or drop it
    if max_len_delete:
        # drop it
        of.extend([1e99])
        on.extend([on[-1]])
    else:
        # include it
        of.extend([ind2[-1]])
    #
    pick = []
    while on[-1] > of[0]:
        while on[0] <= of[0]:
            on.popleft()
        while of[0] < on[0]:
            of.popleft()
        if of[0] - on[0] > max_len:
            if max_len_delete:
                on.popleft()
                continue
            of.appendleft(on[0] + max_len)
        pick.append([on[0], of[0]])
    return np.array(pick, dtype=np.int64)
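A hedged usage sketch on a synthetic characteristic function (the snippet assumes numpy as np and collections.deque are imported; the values below are made up):

import numpy as np
charfct = np.array([0., 1., 4., 3.8, 2., 0.5, 0.2, 3.6, 1., 0.1])
print(trigger_onset(charfct, 3.5, 1.0))
# -> [[2 4]
#     [7 7]]  (sample indices where the trigger switches on and off)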
Example #38
0
    def __init__(self, surface, dt, t0=0., root=0):

        # MPI
        self.comm = MPI.COMM_WORLD
        self.size, self.rank = self.comm.Get_size(), self.comm.Get_rank()
        self.root = root

        # Surface
        if self.rank == self.root:
            if not surface:
                raise ValueError('Surface is needed by root process')

            self.surface = surface

            # Prepare surface properties for broadcasting
            surface_prop = {
                'Lx': self.surface.Lx,
                'Ly': self.surface.Ly,
                'dx': self.surface.dx,
                'dy': self.surface.dy,
                'Nx': self.surface.Nx,
                'Ny': self.surface.Ny,
                'x': self.surface.x,
                'y': self.surface.y,
                'wind_dir': self.surface.wind_dir,
                'wind_dir_eff': self.surface.wind_dir_eff,
                'wind_fetch': self.surface.wind_fetch,
                'wind_U': self.surface.wind_U,
                'wind_U_eff': self.surface.wind_U_eff,
                'current_mag': self.surface.current_mag,
                'current_dir': self.surface.current_dir,
                'compute': self.surface.compute
            }
        else:
            surface_prop = None

        # Broadcast & save properties
        surface_prop = self.comm.bcast(surface_prop, root=self.root)
        self.Lx = surface_prop['Lx']
        self.Ly = surface_prop['Ly']
        self.dx = surface_prop['dx']
        self.dy = surface_prop['dy']
        self.Nx = surface_prop['Nx']
        self.Ny_full = surface_prop['Ny']
        self.x = surface_prop['x']
        self.y_full = surface_prop['y']
        self.wind_dir = surface_prop['wind_dir']
        self.wind_dir_eff = surface_prop['wind_dir_eff']
        self.wind_fetch = surface_prop['wind_fetch']
        self.wind_U = surface_prop['wind_U']
        self.wind_U_eff = surface_prop['wind_U_eff']
        self.current_mag = surface_prop['current_mag']
        self.current_dir = surface_prop['current_dir']
        self.compute = surface_prop['compute']

        # Setup balancing (counts, displacements) for 2-D matrixes [Ny,Nx]
        self.counts, self.displ = utils.balance_elements(
            self.Ny_full, self.size)
        self.counts *= self.Nx
        self.displ *= self.Nx

        # Process-dependent properties
        self.Ny = int(self.counts[self.rank] // self.Nx)
        self.y = np.empty(self.Ny)
        if self.rank == self.root:
            y = (np.ascontiguousarray(surface.y),
                 (self.counts // self.Nx, self.displ // self.Nx), MPI.DOUBLE)
        else:
            y = None
        self.comm.Scatterv(y, (self.y, MPI.DOUBLE), root=self.root)

        # INITIALIZE SURFACE
        # Memory allocation (LOW (0) / HIGH (1) dt values)
        if 'D' in self.compute:
            self._Dx = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)
            self._Dy = np.empty_like(self._Dx)
            self._Dz = np.empty_like(self._Dx)
        if 'Diff' in self.compute:
            self._Diffx = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)
            self._Diffy = np.empty_like(self._Diffx)
        if 'Diff2' in self.compute:
            self._Diffxx = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)
            self._Diffyy = np.empty_like(self._Diffxx)
            self._Diffxy = np.empty_like(self._Diffxx)
        if 'V' in self.compute:
            self._Vx = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)
            self._Vy = np.empty_like(self._Vx)
            self._Vz = np.empty_like(self._Vx)
        if 'A' in self.compute:
            self._Ax = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)
            self._Ay = np.empty_like(self._Ax)
            self._Az = np.empty_like(self._Ax)
        if 'hMTF' in self.compute:
            self._hMTF = np.empty(2 * self.counts[self.rank]).reshape(
                2, self.counts[self.rank] // self.Nx, self.Nx)

        self.dt = dt
        self.t_l_last = -1.
        self.t_h_last = -1.
        self.t = t0
Example #39
0
                  [0, 0, 1, 0, 0, 0]])

label_img = skimage.measure.label(image)
regions = skimage.measure.regionprops(label_img)

exact_areas = {5: 2, 3: 0.5, 13: 8}

print('area', 'picks_area', 'bw_area', 'exact_areas')
for props in regions:
    print(props.area, picks_area(props.convex_image), bwarea(props.convex_image), exact_areas[props.area])



rs = np.logspace(start=0.42, stop=1.7, base=10)

ps = np.empty_like(rs)
ar_picks = np.empty_like(rs)
ar0 = np.empty_like(rs)
ar_bw = np.empty_like(rs)

for i in range(rs.size):
    r = rs[i]
    region = (np.arange(-r - 1, r + 2) ** 2 + np.arange(-r - 1, r + 2)[:, np.newaxis] ** 2 <= r ** 2).astype('int')
    rr = skimage.measure.regionprops(region)
    ps[i] = rr[0].perimeter
    ar_picks[i] = picks_area(rr[0].convex_image)
    ar0[i] = rr[0].area
    ar_bw[i] = bwarea(rr[0].convex_image)


plt.plot(rs, ar_picks, label='Picks')
Example #40
0
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
    """Binarize labels in a one-vs-all fashion

    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.

    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.

    Parameters
    ----------
    y : array-like
        Sequence of integer labels or multilabel data to encode.

    classes : array-like of shape [n_classes]
        Uniquely holds the label for each class.

    neg_label : int (default: 0)
        Value with which negative labels must be encoded.

    pos_label : int (default: 1)
        Value with which positive labels must be encoded.

    sparse_output : boolean (default: False),
        Set to true if output binary array is desired in CSR sparse format

    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes]
        Shape will be [n_samples, 1] for binary problems.

    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    The class ordering is preserved:

    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])

    Binary targets transform to a column vector

    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])

    See also
    --------
    LabelBinarizer : class used to wrap the functionality of label_binarize and
        allow for fitting to classes independently of the transform operation
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
    else:
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
    if neg_label >= pos_label:
        raise ValueError("neg_label={0} must be strictly less than "
                         "pos_label={1}.".format(neg_label, pos_label))

    if (sparse_output and (pos_label == 0 or neg_label != 0)):
        raise ValueError("Sparse binarization is only supported with non "
                         "zero pos_label and zero neg_label, got "
                         "pos_label={0} and neg_label={1}"
                         "".format(pos_label, neg_label))

    # To account for pos_label == 0 in the dense case
    pos_switch = pos_label == 0
    if pos_switch:
        pos_label = -neg_label

    y_type = type_of_target(y)
    if 'multioutput' in y_type:
        raise ValueError("Multioutput target data is not supported with label "
                         "binarization")
    if y_type == 'unknown':
        raise ValueError("The type of target data is not known")

    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)

    if y_type == "binary":
        if n_classes == 1:
            if sparse_output:
                return sp.csr_matrix((n_samples, 1), dtype=int)
            else:
                Y = np.zeros((len(y), 1), dtype=int)
                Y += neg_label
                return Y
        elif len(classes) >= 3:
            y_type = "multiclass"

    sorted_class = np.sort(classes)
    if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} missmatch with the labels {1}"
                         "found in the data".format(classes, unique_labels(y)))

    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)

        # pick out the known labels from y
        y_in_classes = np.in1d(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        indptr = np.hstack((0, np.cumsum(y_in_classes)))

        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr),
                          shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError("%s target data is not supported with label "
                         "binarization" % y_type)

    if not sparse_output:
        Y = Y.toarray()
        Y = Y.astype(int, copy=False)

        if neg_label != 0:
            Y[Y == 0] = neg_label

        if pos_switch:
            Y[Y == pos_label] = 0
    else:
        Y.data = Y.data.astype(int, copy=False)

    # preserve label ordering
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]

    if y_type == "binary":
        if sparse_output:
            Y = Y.getcol(-1)
        else:
            Y = Y[:, -1].reshape((-1, 1))

    return Y
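A short usage sketch for the code paths above, assuming scikit-learn is installed (expected outputs shown as comments):

from sklearn.preprocessing import label_binarize

# custom labels: the zeros become neg_label, the ones become pos_label
Y = label_binarize([1, 6], classes=[1, 2, 4, 6], neg_label=-1, pos_label=1)
# array([[ 1, -1, -1, -1],
#        [-1, -1, -1,  1]])

# sparse output requires a nonzero pos_label and a zero neg_label
Y_sp = label_binarize([1, 6], classes=[1, 2, 4, 6], sparse_output=True)
# 2x4 CSR matrix with two stored elements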
Example #41
0
import numpy as np

a = np.arange(25).reshape(5, 5)
print(a)

blue = a[[0, 1, 2, 3], [1, 2, 3, 4]]
print("blue is \n", str(blue))

divisby3 = a % 3 == 0
print("divisible by 3 \n", str(a[divisby3]))

output = np.empty_like(a, dtype='float')
output.fill(np.nan)  # fill() works in place and returns None, so don't print it

output[divisby3] = a[divisby3]

print(output)
print("test")
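As a side note, the NaN-masking in Example #41 can be written without the preallocate-and-fill pattern; a minimal equivalent using np.where:

import numpy as np

a = np.arange(25).reshape(5, 5)
# keep multiples of 3, NaN elsewhere (the scalar NaN broadcasts)
output = np.where(a % 3 == 0, a.astype(float), np.nan)
print(output)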
Example #42
0
net = tflearn.input_data(shape=[None, steps_of_history, 1])
net = tflearn.lstm(net, n_units=6)
net = tflearn.fully_connected(net, 1, activation='linear')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
        loss='mean_square')

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, batch_size=1, n_epoch=150)

train_predict = model.predict(trainX)
test_predict = model.predict(testX)

train_predict_plot = np.empty_like(dataset)
train_predict_plot[:, :] = np.nan
train_predict_plot[steps_of_history:len(train_predict)+steps_of_history, :] = \
        train_predict

test_predict_plot = np.empty_like(dataset)
test_predict_plot[:, :] = np.nan
test_predict_plot[len(train_predict)+steps_of_history:len(dataset), :] = \
        test_predict

plt.figure(figsize=(8, 8))
plt.title('History={} Future={}'.format(steps_of_history, steps_in_future))
plt.plot(dataset)
plt.plot(train_predict_plot)
plt.plot(test_predict_plot)
plt.savefig('demand.png')
Example #43
0
def _tv2(data, weight, dev=None, Ncut=1, Niter=50):
    """
    Chambolle's TV-regularized denoising

    weight should be around 2 + 1.5*noise_sigma
    """

    if dev is None:
        dev = imgtools.__DEFAULT_OPENCL_DEVICE__

    if dev is None:
        raise ValueError("no OpenCLDevice found...")

    proc = OCLProcessor(dev, utils.absPath("kernels/tv_chambolle.cl"))

    if Ncut == 1:
        inImg = dev.createImage(data.shape[::-1], dtype=np.float32)

        pImgs = [
            dev.createImage(data.shape[::-1],
                            mem_flags=cl.mem_flags.READ_WRITE,
                            dtype=np.float32,
                            channel_order=cl.channel_order.RGBA)
            for i in range(2)
        ]

        outImg = dev.createImage(data.shape[::-1],
                                 dtype=np.float32,
                                 mem_flags=cl.mem_flags.READ_WRITE)

        dev.writeImage(inImg, data.astype(np.float32))
        dev.writeImage(pImgs[0], np.zeros((4, ) + data.shape,
                                          dtype=np.float32))
        dev.writeImage(pImgs[1], np.zeros((4, ) + data.shape,
                                          dtype=np.float32))

        for i in range(Niter):
            proc.runKernel("div_step", inImg.shape, None, inImg, pImgs[i % 2],
                           outImg)
            proc.runKernel("grad_step", inImg.shape, None, outImg,
                           pImgs[i % 2], pImgs[1 - i % 2], np.float32(weight))
        return dev.readImage(outImg, dtype=np.float32)

    else:
        res = np.empty_like(data, dtype=np.float32)
        Nz, Ny, Nx = data.shape
        # a heuristic guess: Npad = Niter means perfect
        Npad = 1 + Niter // 2
        for i0, (i, j, k) in enumerate(product(list(range(Ncut)), repeat=3)):
            logger.info("calculating box  %i/%i" % (i0 + 1, Ncut**3))
            sx = slice(i * Nx // Ncut, (i + 1) * Nx // Ncut)
            sy = slice(j * Ny // Ncut, (j + 1) * Ny // Ncut)
            sz = slice(k * Nz // Ncut, (k + 1) * Nz // Ncut)
            sx1, sx2 = utils._extended_slice(sx, Nx, Npad)
            sy1, sy2 = utils._extended_slice(sy, Ny, Npad)
            sz1, sz2 = utils._extended_slice(sz, Nz, Npad)

            data_sliced = data[sz1, sy1, sx1].copy()
            _res = tv3_gpu(dev, data_sliced, weight, Niter, Ncut=1)
            res[sz, sy, sx] = _res[sz2, sy2, sx2]

        return res
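The Ncut > 1 branch above relies on utils._extended_slice to pad each tile so the Chambolle iterations near tile borders see enough surrounding context. A minimal sketch of that padded-tile idea in one dimension, with a hypothetical extended_slice helper standing in for the real one:

import numpy as np

def extended_slice(s, n, npad):
    # widen slice s (within [0, n)) by npad on each side, and return the
    # inner slice that recovers the original region from the widened block
    start = max(s.start - npad, 0)
    stop = min(s.stop + npad, n)
    return slice(start, stop), slice(s.start - start, s.stop - start)

x = np.arange(20, dtype=float)
outer, inner = extended_slice(slice(8, 12), len(x), 3)
block = x[outer]                        # padded block, processed independently
assert np.all(block[inner] == x[8:12])  # inner region maps back exactly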
def marginal_spatial_pdf(lat,
                         lon,
                         det_list,
                         path_geo_model=None,
                         prog_step=0,
                         resol=100):
    # define separate lists of infrasound and seismic only detections
    infr_det_list = [
        det for det in det_list if type(det) == InfrasoundDetection
    ]
    seis_det_list = [det for det in det_list if type(det) == SeismicDetection]

    infr_cnt = len(infr_det_list)
    seis_cnt = len(seis_det_list)

    if 3**infr_cnt > resol:

        def temp(la, lo):
            infr_rngs = np.array([
                sph_proj.inv(det.longitude, det.latitude, lo, la)[2] / 1000.0
                for det in infr_det_list
            ])
            infr_t1 = max(
                np.array([
                    det.peakF_UTCtime -
                    np.timedelta64(int(infr_rngs[n] / 0.2 * 1e3), 'ms')
                    for n, det in enumerate(infr_det_list)
                ]))
            infr_t2 = min(
                np.array([
                    det.peakF_UTCtime -
                    np.timedelta64(int(infr_rngs[n] / 0.4 * 1e3), 'ms')
                    for n, det in enumerate(infr_det_list)
                ]))

            if seis_cnt > 0:
                seis_rngs = np.array([
                    sph_proj.inv(det.longitude, det.latitude, lo, la)[2] /
                    1000.0 for det in seis_det_list
                ])
                seis_t1 = max(
                    np.array([
                        det.peakF_UTCtime -
                        np.timedelta64(int(seis_rngs[n] / 2.0 * 1e3), 'ms')
                        for n, det in enumerate(seis_det_list)
                    ]))
                seis_t2 = min(
                    np.array([
                        det.peakF_UTCtime -
                        np.timedelta64(int(seis_rngs[n] / 8.0 * 1e3), 'ms')
                        for n, det in enumerate(seis_det_list)
                    ]))

                t1 = max(infr_t1, seis_t1)
                t2 = min(infr_t2, seis_t2)
            else:
                t1, t2 = infr_t1, infr_t2

            t_vals = np.array(
                [t1 + (t2 - t1) / (resol - 1) * m for m in range(resol)])
            pdf_vals = joint_pdf(np.array([la] * resol),
                                 np.array([lo] * resol),
                                 t_vals,
                                 det_list,
                                 path_geo_model=path_geo_model)

            return simps(pdf_vals, (t2 - t1).astype('m8[ms]').astype(float) *
                         1.0e-3 / (resol - 1) * np.arange(resol))

        if len(np.atleast_1d(lat)) == 1:
            return temp(lat, lon)
        else:
            return np.array([temp(la, lon[n]) for n, la in enumerate(lat)])

    else:
        # pull out the latitudes and longitudes for infrasonic and seismic detections
        infr_lats = np.array([det.latitude for det in infr_det_list])
        infr_lons = np.array([det.longitude for det in infr_det_list])

        seis_lats = np.array([det.latitude for det in seis_det_list])
        seis_lons = np.array([det.longitude for det in seis_det_list])

        # compute the relative travel times and source-receiver ranges for infrasonic and seismic detections
        infr_tms = np.array([
            (det.peakF_UTCtime -
             det_list[0].peakF_UTCtime).astype('m8[ms]').astype(float) * 1.0e-3
            for det in infr_det_list
        ])
        seis_tms = np.array([
            (det.peakF_UTCtime -
             det_list[0].peakF_UTCtime).astype('m8[ms]').astype(float) * 1.0e-3
            for det in seis_det_list
        ])

        # compute azimuthal distribution for all infrasound detections
        az_pdf = np.array([
            det.az_pdf(lat, lon, path_geo_model) for det in infr_det_list
        ]).prod(axis=0)

        # Compute the index sequences for infrasound likelihoods
        sequences = []
        for seq in itertools.product(list(range(3)), repeat=infr_cnt):
            sequences = sequences + [list(seq)]
        sequences = np.array(sequences, dtype=int)

        if len(np.atleast_1d(lat)) == 1:
            infr_rngs = sph_proj.inv(infr_lons,
                                     infr_lats,
                                     np.array([lon] * infr_cnt),
                                     np.array([lat] * infr_cnt),
                                     radians=False)[2] / 1000.0
            seis_rngs = sph_proj.inv(seis_lons,
                                     seis_lats,
                                     np.array([lon] * seis_cnt),
                                     np.array([lat] * seis_cnt),
                                     radians=False)[2] / 1000.0
            if path_geo_model:
                mns = np.empty((infr_cnt, 3))
                vrs = np.empty((infr_cnt, 3))
                wts = np.empty((infr_cnt, 3))

                for n, (rng_n, az_n) in enumerate(
                        zip(infr_rngs,
                            np.array([det.back_azimuth
                                      for det in infr_det_list]))):
                    az_bin = infrasound.find_azimuth_bin(az_n - 180.0)

                    mns[n] = np.array([
                        path_geo_model.rcel_mns[az_bin][m](min(
                            rng_n, path_geo_model.rng_max)) for m in range(3)
                    ])
                    vrs[n] = np.array([
                        path_geo_model.rcel_vrs[az_bin][m](min(
                            rng_n, path_geo_model.rng_max)) for m in range(3)
                    ])
                    wts[n] = np.array([
                        path_geo_model.rcel_wts[az_bin][m](min(
                            rng_n, path_geo_model.rng_max)) for m in range(3)
                    ])
            else:
                mns = np.array([infrasound.canon_rcel_mns] * infr_cnt)
                vrs = np.array([infrasound.canon_rcel_vrs] * infr_cnt)
                wts = np.array([infrasound.canon_rcel_wts] * infr_cnt)
            temp1 = np.array([1.0 / det.sigma()**2 for det in seis_det_list])
            temp2 = np.array([
                seis_tms[n] - det.trvl_tm(seis_rngs[n])
                for n, det in enumerate(seis_det_list)
            ])

            a_seis = temp1.sum()
            b_seis = (temp2 * temp1).sum()
            c_seis = (temp2**2 * temp1).sum()

            rng_cel_pdf = 0.0
            for seq in sequences:
                a, b, c, N = a_seis, b_seis, c_seis, 1.0

                for n, ni in enumerate(seq):
                    dt = infr_tms[n] - infr_rngs[n] * mns[n][ni]
                    sig = infr_rngs[n] * vrs[n][ni]

                    a += 1.0 / sig**2
                    b += dt / sig**2
                    c += (dt / sig)**2
                    N *= wts[n][ni] / sig

                rng_cel_pdf += N / np.sqrt(a) * np.exp(-1.0 / 2.0 *
                                                       (c - b**2 / a))

            rng_cel_pdf /= np.power(2.0 * np.pi,
                                    (seis_cnt + infr_cnt - 1.0) / 2.0)
            prog_bar.increment(n=prog_step)
        else:
            infr_rngs = sph_proj.inv(np.array([infr_lons] * len(lon)),
                                     np.array([infr_lats] * len(lat)),
                                     np.array([lon] * infr_cnt).T,
                                     np.array([lat] * infr_cnt).T,
                                     radians=False)[2].reshape(
                                         len(lat), infr_cnt) / 1000.0
            seis_rngs = sph_proj.inv(np.array([seis_lons] * len(lon)),
                                     np.array([seis_lats] * len(lat)),
                                     np.array([lon] * seis_cnt).T,
                                     np.array([lat] * seis_cnt).T,
                                     radians=False)[2].reshape(
                                         len(lat), seis_cnt) / 1000.0

            if path_geo_model:
                mns = np.empty((len(lat), infr_cnt, 3))
                vrs = np.empty((len(lat), infr_cnt, 3))
                wts = np.empty((len(lat), infr_cnt, 3))

                infr_azs = np.array([
                    np.array([
                        det.back_azimuth
                        for det in det_list if type(det) == InfrasoundDetection
                    ])
                ] * len(lat))
                az_bins = infrasound.find_azimuth_bin(
                    infr_azs.flatten() - 180.0, path_geo_model.az_bin_cnt)
                rngs_eval = infr_rngs.flatten()
                rngs_eval[rngs_eval >
                          path_geo_model.rng_max] = path_geo_model.rng_max

                for k in range(3):
                    mns_temp = np.empty_like(az_bins, dtype=float)
                    vrs_temp = np.empty_like(az_bins, dtype=float)
                    wts_temp = np.empty_like(az_bins, dtype=float)

                    for n_az in range(path_geo_model.az_bin_cnt):
                        if np.any(az_bins == n_az):
                            mns_temp[az_bins ==
                                     n_az] = path_geo_model.rcel_mns[n_az][k](
                                         rngs_eval[az_bins == n_az])
                            vrs_temp[az_bins ==
                                     n_az] = path_geo_model.rcel_vrs[n_az][k](
                                         rngs_eval[az_bins == n_az])
                            wts_temp[az_bins ==
                                     n_az] = path_geo_model.rcel_wts[n_az][k](
                                         rngs_eval[az_bins == n_az])

                    mns[:, :, k] = mns_temp.reshape(len(lat), infr_cnt)
                    vrs[:, :, k] = vrs_temp.reshape(len(lat), infr_cnt)
                    wts[:, :, k] = wts_temp.reshape(len(lat), infr_cnt)

            else:
                mns = np.array(
                    [np.array([infrasound.canon_rcel_mns] * infr_cnt)] *
                    len(lat))
                vrs = np.array(
                    [np.array([infrasound.canon_rcel_vrs] * infr_cnt)] *
                    len(lat))
                wts = np.array(
                    [np.array([infrasound.canon_rcel_wts] * infr_cnt)] *
                    len(lat))

            temp1 = np.array([[1.0 / det.sigma()**2] * len(lat)
                              for det in seis_det_list])
            temp2 = np.array([
                seis_tms[n] - det.trvl_tm(seis_rngs[:, n])
                for n, det in enumerate(seis_det_list)
            ])

            a_seis = temp1.sum(axis=0)
            b_seis = (temp2 * temp1).sum(axis=0)
            c_seis = (temp2**2 * temp1).sum(axis=0)

            rng_cel_pdf = 0.0
            for seq in sequences:
                a, b, c, N = a_seis, b_seis, c_seis, 1.0

                for n in range(infr_cnt):
                    dt = infr_tms[n] - infr_rngs[:, n] * mns[:, n, seq[n]]
                    sig = infr_rngs[:, n] * vrs[:, n, seq[n]]

                    a = a + 1.0 / sig**2
                    b = b + dt / sig**2
                    c = c + (dt / sig)**2
                    N = N * wts[:, n, seq[n]] / sig
                rng_cel_pdf += N / np.sqrt(a) * np.exp(-1.0 / 2.0 *
                                                       (c - b**2 / a))
            rng_cel_pdf /= np.power(2.0 * np.pi,
                                    (seis_cnt + infr_cnt - 1.0) / 2.0)
            prog_bar.increment(n=prog_step)

        return az_pdf * rng_cel_pdf
Example #45
0
def _crystal_to_lab(gvecs,
                    rmat_s, rmat_c,
                    bmat=None, vmat_inv=None):
    """
    gvecs is (n, 3)

    rmat_s are either (3, 3) or (n, 3, 3)

    if bmat is not None, gvecs are assumed to be hkls
    Takes a list of reciprocal lattice vectors components in crystal frame to
    the specified detector-relative frame, subject to the conditions:

    1) the reciprocal lattice vector must be able to satisfy a bragg condition
    2) the associated diffracted beam must intersect the detector plane

    Parameters
    ----------
    gvecs : array_like
        Concatenated triplets of G-vector components in either the
        CRYSTAL FRAME or RECIPROCAL FRAME (see optional kwarg `bmat` below).
        The shape when cast as an ndarray is (n, 3), representing n vectors.
    rmat_s : array_like
        The COB matrix taking components in the SAMPLE FRAME to the LAB FRAME.
        This can be either (3, 3) or (n, 3, 3). In the latter case, each of the
        n input G-vectors is transformed using the associated entry in
        `rmat_s`.
    rmat_c : array_like
        The (3, 3) COB matrix taking components in the
        CRYSTAL FRAME to the SAMPLE FRAME.
    bmat : array_like, optional
        The (3, 3) COB matrix taking components in the
        RECIPROCAL LATTICE FRAME to the CRYSTAL FRAME; if supplied, it is
        assumed that the input `gvecs` are G-vector components in the
        RECIPROCAL LATTICE FRAME (the default is None, which implies
        components in the CRYSTAL FRAME)
    vmat_inv : array_like, optional
        The (3, 3) matrix of inverse stretch tensor components in the
        SAMPLE FRAME.  The default is None, which implies a strain-free state
        (i.e. V = I).

    Returns
    -------
    array_like
        The (n, 3) array of G-vectors components in the LAB FRAME as specified
        by `rmat_s` and `rmat_c`.  Note that resulting vector components are
        not normalized.

    Raises
    ------
    AssertionError
        If `rmat_s` has dimension 3, but the first is != n.

    Notes
    -----

    To go to the LAB FRAME from the CRYSTAL FRAME in column vec order (hstacked
    gvec_c):

        gvec_l = np.dot(np.dot(rmat_c.T, np.dot(rmat_s.T, rmat_b)), gvec_b)

     rmat_s = np.dot(rchi, rome)

     --> in row vec order (vstacked gvec_l):

     gvec_l = np.dot(gvec_b, np.dot(rmat_b.T, np.dot(rmat_s, rmat_c)))

    """

    # catch 1-d input and grab number of input vectors
    gvec_c = np.atleast_2d(gvecs)
    nvecs = len(gvec_c)

    # initialize transformed gvec array (lab frame); sized from the 2-d view
    # so 1-d input works too
    gvec_l = np.empty_like(gvec_c)

    # squash out the case where rmat_s.shape is (1, 3, 3)
    rmat_s = np.squeeze(rmat_s)

    # if bmat is specified, inputs are components in the reciprocal lattice (h, k, l)
    if bmat is not None:
        gvec_c = np.dot(gvec_c, bmat.T)

    # CRYSTAL FRAME --> SAMPLE FRAME
    gvec_s = np.dot(gvec_c, rmat_c.T)
    if vmat_inv is not None:
        gvec_s = np.dot(gvec_s, vmat_inv.T)

    # SAMPLE FRAME --> LAB FRAME
    if rmat_s.ndim > 2:
        # individual rmat_s for each vector
        assert len(rmat_s) == nvecs, \
            "len(rmat_s) must be %d for 3-d arg; you gave %d" \
            % (nvecs, len(rmat_s))
        for i in range(nvecs):
            gvec_l[i] = np.dot(gvec_s[i], rmat_s[i].T)
    else:
        # single rmat_s
        gvec_l = np.dot(gvec_s, rmat_s.T)
    return gvec_l
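A quick sanity check for _crystal_to_lab, assuming identity rotations so the lab-frame components simply equal the inputs:

import numpy as np

gvecs = np.array([[1., 0., 0.],
                  [0., 1., 0.]])
eye = np.eye(3)
# rmat_s = rmat_c = I and no bmat/vmat_inv: a pure pass-through
gvec_l = _crystal_to_lab(gvecs, eye, eye)
assert np.allclose(gvec_l, gvecs)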
Example #46
0
def _make_eris(mp, mo_coeff=None, verbose=None):
    log = logger.new_logger(mp, verbose)
    time0 = (logger.process_clock(), logger.perf_counter())

    log.debug('transform (ia|jb) outcore')
    mol = mp.mol
    nocc = mp.nocc
    nmo = mp.nmo
    nvir = nmo - nocc

    eris = mp2._ChemistsERIs(mp, mo_coeff)
    nao = eris.mo_coeff.shape[0]
    assert(nvir <= nao)
    orbo = eris.mo_coeff[:,:nocc]
    orbv = numpy.asarray(eris.mo_coeff[:,nocc:], order='F')
    eris.feri = lib.H5TmpFile()

    int2e = mol._add_suffix('int2e')
    ao2mopt = _ao2mo.AO2MOpt(mol, int2e, 'CVHFnr_schwarz_cond',
                             'CVHFsetnr_direct_scf')
    fint = gto.moleintor.getints4c

    ntasks = mpi.pool.size
    olocs = [_task_location(nocc, task_id) for task_id in range(ntasks)]
    oloc0, oloc1 = olocs[rank]
    nocc_seg = oloc1 - oloc0
    log.debug2('olocs %s', olocs)

    ao_loc = mol.ao_loc_nr()
    task_sh_locs = lib.misc._balanced_partition(ao_loc, ntasks)
    log.debug2('task_sh_locs %s', task_sh_locs)
    ao_sh0 = task_sh_locs[rank]
    ao_sh1 = task_sh_locs[rank+1]
    ao_loc0 = ao_loc[ao_sh0]
    ao_loc1 = ao_loc[ao_sh1]
    nao_seg = ao_loc1 - ao_loc0
    orbo_seg = orbo[ao_loc0:ao_loc1]

    mem_now = lib.current_memory()[0]
    max_memory = max(0, mp.max_memory - mem_now)
    dmax = numpy.sqrt(max_memory*.9e6/8/((nao+nocc)*(nao_seg+nocc)))
    dmax = min(nao//4+2, max(BLKMIN, min(comm.allgather(dmax))))
    sh_ranges = ao2mo.outcore.balance_partition(ao_loc, dmax)
    sh_ranges = comm.bcast(sh_ranges)
    dmax = max(x[2] for x in sh_ranges)
    eribuf = numpy.empty((nao,dmax,dmax,nao_seg))
    ftmp = lib.H5TmpFile()
    log.debug('max_memory %s MB (dmax = %s) required disk space %g MB',
              max_memory, dmax, nocc*nocc_seg*(nao*(nao+dmax)/2+nvir**2)*8/1e6)

    def save(count, tmp_xo):
        di, dj = tmp_xo.shape[2:4]
        tmp_xo = [tmp_xo[p0:p1] for p0, p1 in olocs]
        tmp_xo = mpi.alltoall(tmp_xo, split_recvbuf=True)
        tmp_xo = sum(tmp_xo).reshape(nocc_seg,nocc,di,dj)
        ftmp[str(count)+'b'] = tmp_xo

        tmp_ox = mpi.alltoall([tmp_xo[:,p0:p1] for p0, p1 in olocs],
                              split_recvbuf=True)
        tmp_ox = [tmp_ox[i].reshape(p1-p0,nocc_seg,di,dj)
                  for i, (p0,p1) in enumerate(olocs)]
        ftmp[str(count)+'a'] = numpy.vstack(tmp_ox)

    jk_blk_slices = []
    count = 0
    time1 = time0
    with lib.call_in_background(save) as bg_save:
        for ip, (ish0, ish1, ni) in enumerate(sh_ranges):
            for jsh0, jsh1, nj in sh_ranges[:ip+1]:
                i0, i1 = ao_loc[ish0], ao_loc[ish1]
                j0, j1 = ao_loc[jsh0], ao_loc[jsh1]
                jk_blk_slices.append((i0,i1,j0,j1))

                shls_slice = (0,mol.nbas,ish0,ish1, jsh0,jsh1,ao_sh0,ao_sh1)
                eri = fint(int2e, mol._atm, mol._bas, mol._env,
                           shls_slice=shls_slice, aosym='s1', ao_loc=ao_loc,
                           cintopt=ao2mopt._cintopt, out=eribuf)
                tmp_xo = lib.einsum('pi,pqrs->iqrs', orbo, eri)
                tmp_xo = lib.einsum('iqrs,sl->ilqr', tmp_xo, orbo_seg)
                bg_save(count, tmp_xo)
                tmp_xo = None
                count += 1
                time1 = log.timer_debug1('partial ao2mo [%d:%d,%d:%d]' %
                                         (ish0,ish1,jsh0,jsh1), *time1)
    eri = eribuf = None
    time1 = time0 = log.timer('mp2 ao2mo_ovov pass1', *time0)

    eris.ovov = eris.feri.create_dataset('ovov', (nocc,nvir,nocc_seg,nvir), 'f8')
    occblk = int(min(nocc, max(BLKMIN, max_memory*.9e6/8/(nao**2*nocc_seg+1)/5)))
    def load(i0, eri):
        if i0 < nocc:
            i1 = min(i0+occblk, nocc)
            for k, (p0,p1,q0,q1) in enumerate(jk_blk_slices):
                eri[:i1-i0,:,p0:p1,q0:q1] = ftmp[str(k)+'a'][i0:i1]
                if p0 != q0:
                    dat = numpy.asarray(ftmp[str(k)+'b'][:,i0:i1])
                    eri[:i1-i0,:,q0:q1,p0:p1] = dat.transpose(1,0,3,2)

    def save(i0, i1, dat):
        eris.ovov[i0:i1] = dat

    buf_prefetch = numpy.empty((occblk,nocc_seg,nao,nao))
    buf = numpy.empty_like(buf_prefetch)
    bufw = numpy.empty((occblk*nocc_seg,nvir**2))
    bufw1 = numpy.empty_like(bufw)
    with lib.call_in_background(load) as prefetch:
        with lib.call_in_background(save) as bsave:
            load(0, buf_prefetch)
            for i0, i1 in lib.prange(0, nocc, occblk):
                buf, buf_prefetch = buf_prefetch, buf
                prefetch(i1, buf_prefetch)
                eri = buf[:i1-i0].reshape((i1-i0)*nocc_seg,nao,nao)

                dat = _ao2mo.nr_e2(eri, orbv, (0,nvir,0,nvir), 's1', 's1', out=bufw)
                bsave(i0, i1, dat.reshape(i1-i0,nocc_seg,nvir,nvir).transpose(0,2,1,3))
                bufw, bufw1 = bufw1, bufw
                time1 = log.timer_debug1('pass2 ao2mo [%d:%d]' % (i0,i1), *time1)

    time0 = log.timer('mp2 ao2mo_ovov pass2', *time0)
    mp._eris = eris
    return eris
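The second pass above overlaps disk reads with the MO transformation by swapping two preallocated buffers. A minimal sketch of that double-buffering pattern with a generic executor, where load and compute are hypothetical callables:

import numpy as np
from concurrent.futures import ThreadPoolExecutor

def pipeline(nblocks, shape, load, compute):
    # load(i, buf) fills buf with block i; compute(i, buf) consumes it
    buf = np.empty(shape)
    buf_prefetch = np.empty_like(buf)
    with ThreadPoolExecutor(max_workers=1) as pool:
        load(0, buf_prefetch)
        for i in range(nblocks):
            buf, buf_prefetch = buf_prefetch, buf
            fut = None
            if i + 1 < nblocks:
                # start fetching the next block in the background
                fut = pool.submit(load, i + 1, buf_prefetch)
            compute(i, buf)  # overlaps with the background load
            if fut is not None:
                fut.result()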
Example #47
0
def rot90(xy, zdir=1.0):
    temp = empty_like(xy)
    temp[:, 0] = - zdir * xy[:, 1]
    temp[:, 1] = + zdir * xy[:, 0]
    return temp
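A small usage sketch for rot90, assuming numpy's empty_like was imported as a bare name (e.g. from numpy import empty_like):

import numpy as np

xy = np.array([[1.0, 0.0],
               [0.0, 2.0]])
print(rot90(xy))             # CCW about z: (x, y) -> (-y, x), giving [[0, 1], [-2, 0]]
print(rot90(xy, zdir=-1.0))  # clockwise rotation instead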
Example #48
0
    def __init__(self):
        n_injection_turns = 1
        n_trajectory_turns = 50
        injection_amplitude = 1.3
        n_turns = n_injection_turns + n_trajectory_turns
        #Run 101 with args [0.2116535191136668, -1.1802958558145478, 1.5605421957991457]Y MAX 10.0
        # injection setup
        self.momentum = 75.
        self.max_turn = n_turns
        self.number_pulses = n_injection_turns
        self.number_per_pulse = 1000
        self.turn_bumper_index = [
            range(n_turns), range(n_turns)
        ]  # first list is the turn number, second list is the index of the bumper magnet setting
        # default bumps = position of the proton orbit
        angle_u = -0.11628784278921322
        angle_v = 1.4012763546351992
        if n_injection_turns == 1:
            self.default_bumps = [["action_angle", angle_u, 0.0, angle_v,
                                   0.0]]  #"action_angle" or "coupled"
        else:
            self.default_bumps = [
                [
                    "action_angle",
                    angle_u,
                    i * injection_amplitude / (
                        n_injection_turns - 1
                    ),  # (n_injection_turns-1.-i)*injection_amplitude/(n_injection_turns-1.),
                    angle_v,
                    i * injection_amplitude / (n_injection_turns - 1)
                ] for i in range(n_injection_turns)
            ]
            inj_end = self.default_bumps[-1]
            traj_end = [angle_u, 5., angle_v, 5.]
            self.default_bumps += [[(traj_end[j] - inj_end[j]) * i /
                                    (n_trajectory_turns) + inj_end[j]
                                    for j in range(4)]
                                   for i in range(1, n_trajectory_turns + 1)]
            #self.default_bumps = [[angle_u, i*1.3/(n_pulses-1.), angle_v, i*1.3/(n_pulses-1)] for i in range(n_pulses)]
        self.default_bumps = [[
            "coupled", 0.0, 0.0, -40.0 * i / (n_turns - 1), 0.0
        ] for i in range(n_turns)]
        self.default_injection = [
            math.pi / 2, 0., 0.0, 0.0
        ]  # position of the injection orbit in aa coordinates
        self.foil_edge = -1
        self.foil_angle = math.degrees(math.pi)

        self.injection_ellipse_algorithm = "transfer_matrix"  # transfer_matrix from_twiss or user_defined
        self.beta_x = 1.0  # beta [m]
        self.alpha_x = 0.0
        self.beta_y = 1.0  # beta [m]
        self.alpha_y = 0.0
        self.injection_ellipse = None

        self.m_index = -1.28  # m^-1
        self.pulse_emittance = 0.026
        self.dp_model = "gauss"  # none gauss or fixed
        self.dp_over_p = 0.0013  # 0.0029 # sigma
        self.max_dp_over_p = self.dp_over_p * 3
        self.foil_material = "carbon"
        self.foil_column_density = 20e-6  # g/cm^2
        self.pid = 2212
        self.n_foil_sigma = 3  # number of sigma; set to < 0 to disable foil adjustment
        self.amplitude_acceptance = 20.0  # microns
        self.dp_over_p_acceptance = 0.004  # +/- dp/p

        # execution parameters
        self.verbose = 8
        self.n_cells = 10
        self.accumulate = False
        self.do_plots = True
        self.plot_frequency = 1  #self.max_turn # controls whether to plot frames
        self.do_stats = True
        self.stats_frequency = self.max_turn  # controls whether to plot frames
        self.do_movie = False
        self.sleep_time = 1  #0.1
        self.f_size = 20
        self.l_size = 14
        self.momentum_offset_injection = False
        self.do_scattering = True
        self.do_energy_loss = True

        self.real_range = [
            25.0 * self.amp_scale, 0.05 * self.amp_scale,
            25.0 * self.amp_scale, 0.025 * self.amp_scale
        ]
        self.real_centre = [
            0.0 * self.amp_scale, 0.0 * self.amp_scale, -20.0 * self.amp_scale,
            0.0 * self.amp_scale
        ]

        self.dec_range = [
            20.0 * self.amp_scale, 0.05 * self.amp_scale,
            50.0 * self.amp_scale, 0.025 * self.amp_scale
        ]
        self.dec_centre = [
            0.0 * self.amp_scale, 0.0 * self.amp_scale, 0.0 * self.amp_scale,
            0.0 * self.amp_scale
        ]

        self.aa_range = [
            200, 1.5 * self.amp_scale**2, 200, 1.5 * self.amp_scale**2
        ]
        self.aa_centre = [
            0., self.aa_range[1] - 0.1 * self.amp_scale**2, 0.,
            self.aa_range[3] - 0.1 * self.amp_scale**2
        ]

        # internal data
        self.bump_fields = []
        self.bump_orbits = []
        self.bump_tms = []
        self.beam_data = numpy.empty_like([], shape=(0, 4))
        self.injection_orbit = None
        self.dp_over_p_data = []
        self.beam_injection_turn = []
        self.turn = 0
        self.foil_hits = []  # number of foil hits per particle
        self.foil_hit_positions = []  # positions of foil hits
        self.first_turn_positions = []  # positions of foil hits
        self.output_dir = ""
        self.reset_output()
        self.setup_material()
        self.setup_subplots()
Example #49
0
def normalize_nf(tomo, flats, dark, flat_loc,
                 cutoff=None, ncore=None, out=None):
    """
    Normalize raw 3D projection data with flats taken more than once during
    tomography. Normalization for each projection is done with the mean of the
    nearest set of flat fields (nearest flat fields).

    Parameters
    ----------
    tomo : ndarray
        3D tomographic data.
    flats : ndarray
        3D flat field data.
    dark : ndarray
        3D dark field data.
    flat_loc : list of int
        Indices of flat field data within tomography.
    cutoff : float, optional
        Permitted maximum value for the normalized data.
    ncore : int, optional
        Number of cores that will be assigned to jobs.
    out : ndarray, optional
        Output array for result. If same as arr, process
        will be done in-place.

    Returns
    -------
    ndarray
        Normalized 3D tomographic data.
    """

    tomo = dtype.as_float32(tomo)
    flats = dtype.as_float32(flats)
    dark = dtype.as_float32(dark)
    l = np.float32(1e-6)
    if cutoff is not None:
        cutoff = np.float32(cutoff)
    if out is None:
        out = np.empty_like(tomo)

    dark = np.median(dark, axis=0)
    denom = np.empty_like(dark)

    num_flats = len(flat_loc)
    total_flats = flats.shape[0]
    total_tomo = tomo.shape[0]

    num_per_flat = total_flats//num_flats
    tend = 0

    for m, loc in enumerate(flat_loc):
        fstart = m*num_per_flat
        fend = (m + 1)*num_per_flat
        flat = np.median(flats[fstart:fend], axis=0)

        # Normalization can be parallelized much more efficiently outside this
        # for loop accounting for the nested parallelism arising from
        # chunking the total normalization and each chunked normalization
        tstart = 0 if m == 0 else tend
        tend = total_tomo if m >= num_flats-1 \
            else int(np.round((flat_loc[m+1]-loc)/2)) + loc
        tomo_l = tomo[tstart:tend]
        out_l = out[tstart:tend]
        with mproc.set_numexpr_threads(ncore):
            ne.evaluate('flat-dark', out=denom)
            ne.evaluate('where(denom<l,l,denom)', out=denom)
            ne.evaluate('(tomo_l-dark)/denom', out=out_l, truediv=True)
            if cutoff is not None:
                ne.evaluate('where(out_l>cutoff,cutoff,out_l)', out=out_l)

    return out
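A minimal synthetic usage sketch for normalize_nf; the shapes and flat positions here are illustrative only:

import numpy as np

nproj, nrow, ncol = 6, 4, 5
tomo = np.random.rand(nproj, nrow, ncol).astype(np.float32) + 1.0
flats = np.full((4, nrow, ncol), 2.0, dtype=np.float32)
dark = np.zeros((2, nrow, ncol), dtype=np.float32)
# two groups of flats were taken, near projections 0 and 3
norm = normalize_nf(tomo, flats, dark, flat_loc=[0, 3])
# each projection is divided by (nearest median flat - median dark)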
Example #50
0
def gvec_to_xy(gvec_c,
               rmat_d, rmat_s, rmat_c,
               tvec_d, tvec_s, tvec_c,
               beam_vec=None,
               vmat_inv=None,
               bmat=None):

    beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec

    ztol = cnst.epsf

    # catch 1-d input case and initialize return array with NaNs
    gvec_c = np.atleast_2d(gvec_c)
    retval = np.nan * np.ones_like(gvec_c)

    nvec_l = rmat_d[:, 2]  # detector normal (LAB FRAME)
    bhat_l = unit_vector(beam_vec.flatten())  # unit beam vector

    # need CRYSTAL frame origin.  If rmat_s is 3-d, this will be a list
    # !!!: use _crystal_to_lab helper with trivial rmat_c
    P0_l = _crystal_to_lab(tvec_c, rmat_s, np.eye(3))  # CRYSTAL FRAME origin
    P3_l = tvec_d  # DETECTOR FRAME origin

    # form unit reciprocal lattice vectors in lab frame (w/o translation)
    if bmat is None:
        # got hkls as input
        ghat_l = _crystal_to_lab(
            unit_vector(gvec_c), rmat_s, rmat_c,
            bmat=None, vmat_inv=vmat_inv
            )
    else:
        # got G-vectors in CRYSTAL FRAME as input
        ghat_l = unit_vector(
            _crystal_to_lab(
                gvec_c, rmat_s, rmat_c, bmat=bmat, vmat_inv=vmat_inv
                )
            )

    # dot with beam vector (upstream, cone axis)
    bdot = np.dot(ghat_l, -bhat_l)

    # see who can diffract; initialize output array with NaNs
    can_diffract = np.logical_and(bdot >= ztol, bdot <= 1. - ztol)
    if np.any(can_diffract):
        # subset of feasible reciprocal lattice vectors
        adm_ghat_l = np.atleast_2d(ghat_l[can_diffract, :])

        # initialize diffracted beam vector array
        dvec_l = np.empty_like(adm_ghat_l)
        for i, v in enumerate(adm_ghat_l):
            dvec_l[i] = np.dot(make_binary_rmat(v), -bhat_l)

        # displacement vector calculation below

        # first check for non-intersections and mitigate divide-by-zero
        # ???: better to use np.divide and feed NaNs through?
        denom = np.dot(dvec_l, nvec_l)
        dzero = abs(denom) < ztol
        denom[dzero] = 1.
        cant_intersect = denom > 0.  # index to dvec_l that can't hit det

        # displacement scaling (along dvec_l)
        u = np.dot(P3_l - P0_l, nvec_l) / denom

        # filter out non-intersections, fill with NaNs
        u[np.logical_or(dzero, cant_intersect)] = np.nan

        # diffracted beam points IN DETECTOR FRAME
        P2_l = P0_l + np.tile(u, (3, 1)).T * dvec_l
        P2_d = np.dot(P2_l - tvec_d, rmat_d)

        # put feasible transformed gvec intersections into return array
        retval[can_diffract, :] = P2_d
    return retval[:, :2]
Example #51
0
def transform(quantile):
    # rescale unit-cube samples into parameter space; width and pmin are
    # module-level bounds.  Fill the preallocated array in place.
    tparams = np.empty_like(quantile)
    tparams[:] = quantile * width + pmin
    return tparams
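transform depends on the module-level bounds width and pmin; a self-contained sketch of the same unit-cube rescaling with example bounds:

import numpy as np

pmin = np.array([0.0, -1.0])         # lower parameter bounds
width = np.array([2.0, 2.0])         # pmax - pmin
quantile = np.array([0.5, 0.25])     # samples on the unit cube
print(quantile * width + pmin)       # -> [ 1.  -0.5]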
Example #52
0
def main():
    xl, xh, nx = -1.0, 1.0, 41
    yl, yh, ny = -1.5, 1.5, 41
    zl, zh, nz = -2.0, 2.0, 41
    x = np.linspace(xl, xh, nx)
    y = np.linspace(yl, yh, ny)
    z = np.linspace(zl, zh, nz)
    crds = coordinate.wrap_crds("nonuniform_cartesian", [('z', z), ('y', y),
                                                         ('x', x)])
    bx = field.empty(crds, name="$B_x$", center="Node")
    by = field.empty(crds, name="$B_y$", center="Node")
    bz = field.empty(crds, name="$B_z$", center="Node")
    fld = field.empty(crds,
                      name="B",
                      nr_comps=3,
                      center="Node",
                      layout="interlaced")
    X, Y, Z = crds.get_crds(shaped=True)

    x01, y01, z01 = 0.5, 0.5, 0.5
    x02, y02, z02 = 0.5, 0.5, 0.5
    x03, y03, z03 = 0.5, 0.5, 0.5

    bx[:] = 0.0 + 1.0 * (X - x01) + 1.0 * (Y - y01) + 1.0 * (Z - z01) + \
              1.0 * (X - x01) * (Y - y01) + 1.0 * (Y - y01) * (Z - z01) + \
              1.0 * (X - x01) * (Y - y01) * (Z - z01)
    by[:] = 0.0 + 1.0 * (X - x02) - 1.0 * (Y - y02) + 1.0 * (Z - z02) + \
              1.0 * (X - x02) * (Y - y02) + 1.0 * (Y - y02) * (Z - z02) - \
              1.0 * (X - x02) * (Y - y02) * (Z - z02)
    bz[:] = 0.0 + 1.0 * (X - x03) + 1.0 * (Y - y03) - 1.0 * (Z - z03) + \
              1.0 * (X - x03) * (Y - y03) + 1.0 * (Y - y03) * (Z - z03) + \
              1.0 * (X - x03) * (Y - y03) * (Z - z03)
    fld[..., 0] = bx
    fld[..., 1] = by
    fld[..., 2] = bz

    fig = mlab.figure(size=(1150, 850),
                      bgcolor=(1.0, 1.0, 1.0),
                      fgcolor=(0.0, 0.0, 0.0))
    f1_src = vlab.add_field(bx)
    f2_src = vlab.add_field(by)
    f3_src = vlab.add_field(bz)
    mlab.pipeline.iso_surface(f1_src,
                              contours=[0.0],
                              opacity=1.0,
                              color=(1.0, 0.0, 0.0))
    mlab.pipeline.iso_surface(f2_src,
                              contours=[0.0],
                              opacity=1.0,
                              color=(0.0, 1.0, 0.0))
    mlab.pipeline.iso_surface(f3_src,
                              contours=[0.0],
                              opacity=1.0,
                              color=(0.0, 0.0, 1.0))
    mlab.axes()
    mlab.show()

    nullpt = cycalc.interp_trilin(fld, [(0.5, 0.5, 0.5)])
    print("f(0.5, 0.5, 0.5):", nullpt)

    _, axes = plt.subplots(4, 3, sharex=True, sharey=True)
    all_roots = []
    positive_roots = []
    ix = iy = iz = 0

    for di, d in enumerate([0, -1]):
        #### XY face
        a1 = bx[iz + d, iy, ix]
        b1 = bx[iz + d, iy, ix - 1] - a1
        c1 = bx[iz + d, iy - 1, ix] - a1
        d1 = bx[iz + d, iy - 1, ix - 1] - c1 - b1 - a1

        a2 = by[iz + d, iy, ix]
        b2 = by[iz + d, iy, ix - 1] - a2
        c2 = by[iz + d, iy - 1, ix] - a2
        d2 = by[iz + d, iy - 1, ix - 1] - c2 - b2 - a2

        a3 = bz[iz + d, iy, ix]
        b3 = bz[iz + d, iy, ix - 1] - a3
        c3 = bz[iz + d, iy - 1, ix] - a3
        d3 = bz[iz + d, iy - 1, ix - 1] - c3 - b3 - a3

        roots1, roots2 = find_roots_face(a1, b1, c1, d1, a2, b2, c2, d2)

        # for rt1, rt2 in zip(roots1, roots2):
        #     print("=")
        #     print("fx", a1 + b1 * rt1 + c1 * rt2 + d1 * rt1 * rt2)
        #     print("fy", a2 + b2 * rt1 + c2 * rt2 + d2 * rt1 * rt2)
        #     print("=")

        # find f3 at the root points
        f3 = np.empty_like(roots1)
        markers = [None] * len(f3)
        for i, rt1, rt2 in zip(count(), roots1, roots2):
            f3[i] = a3 + b3 * rt1 + c3 * rt2 + d3 * rt1 * rt2
            all_roots.append((rt1, rt2, d))  # switch order here
            if f3[i] >= 0.0:
                markers[i] = 'k^'
                positive_roots.append((rt1, rt2, d))  # switch order here
            else:
                markers[i] = 'w^'

        # rescale the roots to the original domain
        roots1 = (xh - xl) * roots1 + xl
        roots2 = (yh - yl) * roots2 + yl

        xp = np.linspace(0.0, 1.0, nx)

        vlt.plot(fld['x'],
                 "z={0}i".format(d),
                 ax=axes[0 + 2 * di, 0],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     xl, xh, yl, yh))
        y1 = -(a1 + b1 * xp) / (c1 + d1 * xp)
        plt.plot(x, (yh - yl) * y1 + yl, 'k')
        for i, xrt, yrt in zip(count(), roots1, roots2):
            plt.plot(xrt, yrt, markers[i])

        vlt.plot(fld['y'],
                 "z={0}i".format(d),
                 ax=axes[1 + 2 * di, 0],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     xl, xh, yl, yh))
        y2 = -(a2 + b2 * xp) / (c2 + d2 * xp)
        plt.plot(x, (yh - yl) * y2 + yl, 'k')
        for i, xrt, yrt in zip(count(), roots1, roots2):
            plt.plot(xrt, yrt, markers[i])

        #### YZ face
        a1 = bx[iz, iy, ix + d]
        b1 = bx[iz, iy - 1, ix + d] - a1
        c1 = bx[iz - 1, iy, ix + d] - a1
        d1 = bx[iz - 1, iy - 1, ix + d] - c1 - b1 - a1

        a2 = by[iz, iy, ix + d]
        b2 = by[iz, iy - 1, ix + d] - a2
        c2 = by[iz - 1, iy, ix + d] - a2
        d2 = by[iz - 1, iy - 1, ix + d] - c2 - b2 - a2

        a3 = bz[iz, iy, ix + d]
        b3 = bz[iz, iy - 1, ix + d] - a3
        c3 = bz[iz - 1, iy, ix + d] - a3
        d3 = bz[iz - 1, iy - 1, ix + d] - c3 - b3 - a3

        roots1, roots2 = find_roots_face(a1, b1, c1, d1, a2, b2, c2, d2)

        # for rt1, rt2 in zip(roots1, roots2):
        #     print("=")
        #     print("fx", a1 + b1 * rt1 + c1 * rt2 + d1 * rt1 * rt2)
        #     print("fy", a2 + b2 * rt1 + c2 * rt2 + d2 * rt1 * rt2)
        #     print("=")

        # find f3 at the root points
        f3 = np.empty_like(roots1)
        markers = [None] * len(f3)
        for i, rt1, rt2 in zip(count(), roots1, roots2):
            f3[i] = a3 + b3 * rt1 + c3 * rt2 + d3 * rt1 * rt2
            all_roots.append((d, rt1, rt2))  # switch order here
            if f3[i] >= 0.0:
                markers[i] = 'k^'
                positive_roots.append((d, rt1, rt2))  # switch order here
            else:
                markers[i] = 'w^'

        # rescale the roots to the original domain
        roots1 = (yh - yl) * roots1 + yl
        roots2 = (zh - zl) * roots2 + zl

        yp = np.linspace(0.0, 1.0, ny)

        # plt.subplot(121)
        vlt.plot(fld['x'],
                 "x={0}i".format(d),
                 ax=axes[0 + 2 * di, 1],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     yl, yh, zl, zh))
        z1 = -(a1 + b1 * yp) / (c1 + d1 * yp)
        plt.plot(y, (zh - zl) * z1 + zl, 'k')
        for i, yrt, zrt in zip(count(), roots1, roots2):
            plt.plot(yrt, zrt, markers[i])

        # plt.subplot(122)
        vlt.plot(fld['y'],
                 "x={0}i".format(d),
                 ax=axes[1 + 2 * di, 1],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     yl, yh, zl, zh))
        z1 = -(a2 + b2 * yp) / (c2 + d2 * yp)
        plt.plot(y, (zh - zl) * z1 + zl, 'k')
        for i, yrt, zrt in zip(count(), roots1, roots2):
            plt.plot(yrt, zrt, markers[i])

        #### ZX face
        a1 = bx[iz, iy + d, ix]
        b1 = bx[iz - 1, iy + d, ix] - a1
        c1 = bx[iz, iy + d, ix - 1] - a1
        d1 = bx[iz - 1, iy + d, ix - 1] - c1 - b1 - a1

        a2 = by[iz, iy + d, ix]
        b2 = by[iz - 1, iy + d, ix] - a2
        c2 = by[iz, iy + d, ix - 1] - a2
        d2 = by[iz - 1, iy + d, ix - 1] - c2 - b2 - a2

        a3 = bz[iz, iy + d, ix]
        b3 = bz[iz - 1, iy + d, ix] - a3
        c3 = bz[iz, iy + d, ix - 1] - a3
        d3 = bz[iz - 1, iy + d, ix - 1] - c3 - b3 - a3

        roots1, roots2 = find_roots_face(a1, b1, c1, d1, a2, b2, c2, d2)

        # for rt1, rt2 in zip(roots1, roots2):
        #     print("=")
        #     print("fx", a1 + b1 * rt1 + c1 * rt2 + d1 * rt1 * rt2)
        #     print("fy", a2 + b2 * rt1 + c2 * rt2 + d2 * rt1 * rt2)
        #     print("=")

        # find f3 at the root points
        f3 = np.empty_like(roots1)
        markers = [None] * len(f3)
        for i, rt1, rt2 in zip(count(), roots1, roots2):
            f3[i] = a3 + b3 * rt1 + c3 * rt2 + d3 * rt1 * rt2
            all_roots.append((rt2, d, rt1))  # switch order here
            if f3[i] >= 0.0:
                markers[i] = 'k^'
                positive_roots.append((rt2, d, rt1))  # switch order here
            else:
                markers[i] = 'w^'

        # rescale the roots to the original domain
        roots1 = (zh - zl) * roots1 + zl
        roots2 = (xh - xl) * roots2 + xl

        zp = np.linspace(0.0, 1.0, nz)

        # plt.subplot(121)
        vlt.plot(fld['x'],
                 "y={0}i".format(d),
                 ax=axes[0 + 2 * di, 2],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     xl, xh, zl, zh))
        x1 = -(a1 + b1 * zp) / (c1 + d1 * zp)
        plt.plot(z, (xh - xl) * x1 + xl, 'k')
        for i, zrt, xrt in zip(count(), roots1, roots2):
            plt.plot(xrt, zrt, markers[i])

        # plt.subplot(121)
        vlt.plot(fld['y'],
                 "y={0}i".format(d),
                 ax=axes[1 + 2 * di, 2],
                 plot_opts="x={0}_{1},y={2}_{3},lin_-10_10".format(
                     xl, xh, zl, zh))
        x1 = -(a2 + b2 * zp) / (c2 + d2 * zp)
        plt.plot(z, (xh - xl) * x1 + xl, 'k')
        for i, zrt, xrt in zip(count(), roots1, roots2):
            plt.plot(xrt, zrt, markers[i])

    print("all:", len(all_roots), "positive:", len(positive_roots))
    if len(all_roots) % 2 == 1:
        print("something is fishy, there are an odd number of root points "
              "on the surface of your cube, there is probably a degenerate "
              "line or surface of nulls")
    print("Null Point?", (len(positive_roots) % 2 == 1))

    plt.show()
Example #53
0
    async def process_data(self, data):

        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length), order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value != "":
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset) // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, np.ascontiguousarray(reshaped_data.real.astype(np.float32)), self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, np.ascontiguousarray(reshaped_data.imag.astype(np.float32)), self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j*filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size-1, reshaped_data, self.record_length, num_records, filtered)

                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1,2]:
                if self.filters[ct] is None:
                    continue

                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r)
                out_i = np.empty_like(filtered_i)
                libipp.filter_records_iir(stacked_coeffs, self.filters[ct][0].size-1, np.ascontiguousarray(filtered_r.astype(np.float32)), filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(stacked_coeffs, self.filters[ct][0].size-1, np.ascontiguousarray(filtered_i.astype(np.float32)), filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]], order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]], order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j*filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
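The carry-over bookkeeping at the top of process_data is self-contained; a minimal sketch of the same record chunking as a pure function:

import numpy as np

def chunk_records(carry, data, record_length):
    # prepend leftover samples, emit whole records, keep the remainder
    data = np.concatenate((carry, data))
    n = data.size // record_length
    rem = data.size % record_length
    carry = data[data.size - rem:] if rem else data[:0]
    records = data[:n * record_length].reshape(n, record_length)
    return records, carry

records, carry = chunk_records(np.array([1., 2.]), np.arange(8.), 3)
# records.shape == (3, 3); carry == [7.] waits for the next chunk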
Example #54
0
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--path_run', help='path to the run folder.')
    parser.add_argument('--v_min',
                        default=10,
                        type=int,
                        help='min spine volume')
    parser.add_argument('--v_max',
                        default=99999,
                        type=int,
                        help='max spine volume')
    parser.add_argument('--coinc_thr',
                        default=0.5,
                        type=float,
                        help='coincidence threshold')
    parser.add_argument(
        '--print_opt',
        default=0,
        type=int,
        help=
        'print options, 0: no print, 1: print all, 2: print together, 3: print separated'
    )
    parser.add_argument(
        '--v_min_sweep',
        default=0,
        type=int,
        help='v_min_sweep options, 0: no sweep, x: sweep from 0 to x')
    parser.add_argument(
        '--source',
        type=str,
        help='source training classes, options: spine or spden')
    parser.add_argument('--eval',
                        default="iou",
                        type=str,
                        help='eval options, nomean or iou')

    parsed_args = parser.parse_args(sys.argv[1:])

    path_run = parsed_args.path_run  # get prediction folder
    v_min = parsed_args.v_min  # get min size
    v_max = parsed_args.v_max  # get max size
    coinc_thr = parsed_args.coinc_thr  # get coincidence threshold
    print_opt = parsed_args.print_opt
    v_min_sweep = parsed_args.v_min_sweep
    source = parsed_args.source
    eval = parsed_args.eval

    path_pred = os.path.join(path_run, "prediction")
    out = str(v_min) + '_' + str(v_max) + '_' + str(coinc_thr) + '_' + str(
        v_min_sweep) + '_' + eval
    path_out = os.path.join(path_run, "results/", out)
    if not os.path.exists(path_out):
        os.makedirs(path_out)

    # initialization
    TP_v_all = list()
    FP_v_all = list()
    FP = list()
    FN = list()
    TP = list()
    SENS = list()
    PREC = list()
    F1 = list()
    validation_cases = list()
    SENS_v_all = list()
    PREC_v_all = list()
    F1_v_all = list()

    case_folders = listdir(path_pred)

    for idx, case_folder in enumerate(case_folders):  # for each case

        # lists to store volume of TP and FP pred spines
        SCORES = list()
        TP_v = list()
        FP_v = list()

        # lists to store sens and prec calculated from tpv and fpv for each thr sweep
        SENS_v = list()
        PREC_v = list()
        F1_v = list()

        path, val_case = os.path.split(case_folder)

        # load gt and prediction files
        truth_file = os.path.join(path_pred, case_folder, "truth.nii.gz")
        truth_image = nib.load(truth_file)
        truth = truth_image.get_data()

        prediction_file = os.path.join(path_pred, case_folder,
                                       "prediction.nii.gz")
        prediction_image = nib.load(prediction_file)
        prediction = prediction_image.get_data()

        if source == "spine":
            # adapt gt and prediction
            truth = truth.astype(int)  # astype returns a copy; rebind it
            sp_pred = np.where(prediction == [0])
            bg_pred = np.where(prediction == [-1])
            prediction = np.empty_like(truth)
            prediction[sp_pred] = 255
            prediction[bg_pred] = 0

        if source == "spden":
            den_pred = np.where(prediction == [150])
            den_gt = np.where(truth == [150])
            prediction[den_pred[0], den_pred[1], den_pred[2]] = 0
            truth[den_gt[0], den_gt[1], den_gt[2]] = 0

        # get gt and prediction labels
        label_prediction, num_labels_prediction = label(prediction)
        label_truth, num_labels_truth = label(truth)

        # get gt and predictions spines
        props_pred = regionprops(label_prediction)
        props_truth = regionprops(label_truth)

        # preprocess prediction spines
        for spinePred in range(num_labels_prediction):  # for each spine
            size = props_pred[spinePred].area  # get size
            if size <= v_min or size >= v_max:  # if not in between thresholds
                prediction[props_pred[spinePred].coords[:, 0],
                           props_pred[spinePred].coords[:, 1],
                           props_pred[spinePred].coords[:,
                                                        2]] = 0  # delete spine

        # get new prediction labels and spines
        label_prediction, num_labels_prediction = label(prediction)
        props_pred = regionprops(label_prediction)

        for spineGT in range(
                num_labels_truth):  # for each spine in gt (spineGT)

            # print progression
            prog = (spineGT / num_labels_truth) * 100
            print("case - " + str(idx + 1) + "/" + str(len(dir)) + " - " +
                  case_folder + '-' + str(round(prog, 1)) + "%")

            # init
            coincide_list_GT = list()
            coincide_list_Pred = list()
            IoU_list = list()

            coordsGT = props_truth[spineGT].coords  # get spineGT coords

            for spinePred in range(
                    num_labels_prediction
            ):  # for each spine in prediction (spinePred)

                # init
                counter_sp_coord = 0

                coordsPred = props_pred[
                    spinePred].coords  # get spinePred coords

                for pos in coordsGT:  # for each pixel in SpineGT
                    find = np.where((coordsPred == pos).all(
                        axis=1))  # look if it is in spinePred
                    if find[0].size == 1:  # if it is, count 1
                        counter_sp_coord += 1

                if eval == "nomean":
                    # calculate % of pixels found, respect gt and pred size
                    percentageGT = counter_sp_coord / props_truth[spineGT].area
                    percentagePred = counter_sp_coord / props_pred[
                        spinePred].area
                    # save %
                    coincide_list_GT.append(percentageGT)
                    coincide_list_Pred.append(percentagePred)

                if eval == "iou":
                    # calculate % of pixels found, respect gt and pred size
                    IoU = counter_sp_coord / (props_truth[spineGT].area +
                                              props_pred[spinePred].area -
                                              counter_sp_coord)
                    # save %
                    IoU_list.append(IoU)

            if eval == "nomean":

                for i in range(len(coincide_list_GT)):
                    if coincide_list_GT[i] < coinc_thr or coincide_list_Pred[
                            i] < coinc_thr:
                        coincide_list_GT[i] = 0
                        coincide_list_Pred[i] = 0
                # get maximum mean score
                coincide_list_mean = [
                    (x + y) / 2
                    for x, y in zip(coincide_list_GT, coincide_list_Pred)
                ]  # scores mean
                SCORES.append(coincide_list_mean)

            if eval == "iou":
                SCORES.append(IoU_list)

        tp_case, fp_case, fn_case, used_list = get_cmatrix(SCORES, coinc_thr)

        # calculate evaluation metrics
        sens_case = tp_case / (tp_case + fn_case)
        prec_case = tp_case / (tp_case + fp_case)
        f1_case = (2 * (sens_case * prec_case) / (sens_case + prec_case))

        # save case metrics
        FP.append(fp_case)
        FN.append(fn_case)
        TP.append(tp_case)
        SENS.append(sens_case)
        PREC.append(prec_case)
        F1.append(f1_case)
        validation_cases.append(val_case)

        for i in range(num_labels_prediction):
            if i in used_list:
                TP_v.append(props_pred[i].area)
                TP_v_all.append(props_pred[i].area)
            else:
                FP_v.append(props_pred[i].area)
                FP_v_all.append(props_pred[i].area)

        if v_min_sweep != 0:
            for i in range(v_min_sweep):
                sens, prec, f1 = get_metrics_from_volumes(
                    TP_v, FP_v, i, num_labels_truth)

                SENS_v.append(sens)  # stack sens for each thr of the case
                PREC_v.append(prec)  # stack prec for each thr of the case
                F1_v.append(f1)

            SENS_v_all.append(SENS_v)  # stack sens of all cases for each thr
            PREC_v_all.append(PREC_v)  # stack prec of all cases for each thr
            F1_v_all.append(F1_v)  # stack f1 of all cases for each thr

    # save spine results on csv
    header = ['FP', 'FN', 'TP', 'Sens.', 'Precision', 'F1']
    spine_csv = ({
        header[0]: FP,
        header[1]: FN,
        header[2]: TP,
        header[3]: SENS,
        header[4]: PREC,
        header[5]: F1
    })
    df = pd.DataFrame.from_records(spine_csv, index=validation_cases)
    df.to_csv(path_out + "/spine_scores.ods")

    if v_min_sweep != 0:

        sens_avg = [sum(i) / len(SENS_v_all) for i in zip(*SENS_v_all)]
        prec_avg = [sum(i) / len(PREC_v_all) for i in zip(*PREC_v_all)]
        f1_avg = [sum(i) / len(F1_v_all) for i in zip(*F1_v_all)]

        best_thr = f1_avg.index(max(f1_avg))  # best f1_sweep index = best thr
        best_sens = sens_avg[best_thr]  # sens of best thr
        best_prec = prec_avg[best_thr]  # prec of best thr

        print('best confidence threshold: ' + str(best_thr) + ' with:')
        print('sensitivity: ' + str(best_sens))
        print('precision: ' + str(best_prec))

        plt.plot(sens_avg, 'g', label='sens')
        plt.plot(prec_avg, 'r', label='prec')
        plt.plot(f1_avg, 'b', label='f1')
        plt.legend()
        plt.title('sens, prec and f1 vs volume threshold')
        plt.xlabel('V_THR')
        plt.savefig(path_out + "/sweep.pdf")

        # save spine results on csv
        header = ['SENS', 'PREC', 'F1']
        sweep_csv = ({
            header[0]: sens_avg,
            header[1]: prec_avg,
            header[2]: f1_avg
        })
        df = pd.DataFrame.from_records(sweep_csv, index=range(v_min_sweep))
        df.to_csv(path_out + "/sweep.ods")

    # PLOT
    if print_opt == 1 or print_opt == 2:

        for rng in (5000, 2000, 200):
            fig = plt.figure()
            plt.hist(TP_v_all, 300, [0, rng], color="blue")  # hist(data,bins,range)
            plt.hist(FP_v_all, 300, [0, rng], color="orange")
            plt.minorticks_on()
            plt.xlabel('spine area')
            plt.ylabel('Number of spines')
            fig.savefig(path_out + '/Histogram_' + str(rng) + '.pdf')

    if print_opt == 1 or print_opt == 3:

        for rng in (5000, 2000, 200):
            for name, data, color in (('tp', TP_v_all, "blue"),
                                      ('fp', FP_v_all, "orange")):
                fig = plt.figure()
                plt.hist(data, 300, [0, rng], color=color)  # hist(data,bins,range)
                plt.minorticks_on()
                plt.xlabel('spine area')
                plt.ylabel('Number of spines')
                fig.savefig(path_out + '/Histogram_' + name + '_' +
                            str(rng) + '.pdf')
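
The overlap bookkeeping above matches GT voxels against predicted voxel coordinate lists row by row. As a hedged illustration only (not the script's method), the same IoU can be computed from boolean masks in a vectorized way; gt_mask and pred_mask below are hypothetical stand-ins for one spine from each labeling:

import numpy as np

# minimal sketch: IoU of two hypothetical single-spine masks
gt_mask = np.zeros((4, 4, 4), dtype=bool)
pred_mask = np.zeros((4, 4, 4), dtype=bool)
gt_mask[1:3, 1:3, 1:3] = True    # hypothetical GT spine (8 voxels)
pred_mask[2:4, 1:3, 1:3] = True  # hypothetical predicted spine (8 voxels)

inter = np.logical_and(gt_mask, pred_mask).sum()  # voxels found in both
union = gt_mask.sum() + pred_mask.sum() - inter   # |GT| + |Pred| - overlap
print(inter / union)                              # 4 / 12 = 0.333...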
Example #55
0
def classical_strength_of_connection(A, theta=0.0):
    """
    Return a strength of connection matrix using the classical AMG measure.

    An off-diagonal entry A[i,j] is a strong connection iff::

            | A[i,j] | >= theta * max(| A[i,k] |), where k != i

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    theta : float
        Threshold parameter in [0,1].

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    symmetric_strength_of_connection : symmetric measure used in SA
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - A symmetric A does not necessarily yield a symmetric strength matrix S
    - Calls C++ function classical_strength_of_connection
    - The version as implemented is designed for M-matrices.  Trottenberg et
      al. use max A[i,k] over all negative entries, which is the same.  A
      positive edge weight never indicates a strong connection.

    References
    ----------

    .. [1] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
       tutorial", Second edition. Society for Industrial and Applied
       Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
       ISBN: 0-89871-462-1

    .. [2] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
       Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
       ISBN: 0-12-701070-X

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import classical_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = classical_strength_of_connection(A, 0.0)

    """

    if sparse.isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
    else:
        blocksize = 1

    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)

    if (theta < 0 or theta > 1):
        raise ValueError('expected theta in [0,1]')

    Sp = np.empty_like(A.indptr)
    Sj = np.empty_like(A.indices)
    Sx = np.empty_like(A.data)

    fn = amg_core.classical_strength_of_connection
    fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
    S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    if blocksize > 1:
        S = amalgamate(S, blocksize)

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
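
As a hedged aside, the criterion documented above can be checked on a tiny dense matrix; this sketch is illustrative only and does not reproduce the amg_core C++ kernel's exact sign and tie handling:

import numpy as np

# dense sketch of the classical strength criterion, assuming an M-matrix-like A
A = np.array([[ 4., -1.,  0.],
              [-1.,  4., -1.],
              [ 0., -1.,  4.]])
theta = 0.25
S = np.zeros_like(A)
for i in range(A.shape[0]):
    off = np.abs(np.delete(A[i], i))   # |A[i,k]| for k != i
    thresh = theta * off.max()         # theta * max_k |A[i,k]|
    for j in range(A.shape[1]):
        if j != i and abs(A[i, j]) >= thresh:
            S[i, j] = 1.0              # vertex i strongly influenced by j
print(S)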
Example #56
0
    pool.close()
    pool.join()

    print()
    print("Face Detection Done, now printing dimensions of results:")
    print("Total: {}, Segment1: {}, Segment2: {}, Segment3: {}, Segment4: {}, Total Again:{}".format(len(result),
                                                                                                     len(result[0]),
                                                                                                     len(result[1]),
                                                                                                     len(result[2]),
                                                                                                     len(result[3]),
                                                                                                     (len(result[0]) +
                                                                                                      len(result[1]) +
                                                                                                      len(result[2]) +
                                                                                                      len(result[3])
                                                                                                      )
                                                                                                     )
          )

    end = time.time()
    print()
    print("Time taken by 4 Cores: {}s".format(end - start))
    print("All processing Done")
    print()
    print("Now Saving Video:")

    final_video = np.empty_like(frames)
    save_video(result, final_video, output_path)

    print("Total time taken for processing:", end - start)

Example #57
0
def hitmiss(input, Bc, output=None):
    '''
    output = hitmiss(input, Bc, output=np.empty_like(input))

    Hit & Miss transform

    For a given pixel position, the hit&miss is ``True`` if, when ``Bc`` is
    overlaid on ``input``, centered at that position, the ``1`` values line up
    with ``1``s, while the ``0``s line up with ``0``s (``2``s correspond to
    *don't care*).

    Example
    -------

    ::

    print(hitmiss(np.array([
                [0,0,0,0,0],
                [0,1,1,1,1],
                [0,0,1,1,1]]),
            np.array([
                [0,0,0],
                [2,1,1],
                [2,1,1]])))

    prints::

        [[0 0 0 0 0]
         [0 0 1 1 0]
         [0 0 0 0 0]]



    Parameters
    ----------
    input : input ndarray
        This is interpreted as a binary array.
    Bc : ndarray
        hit & miss template, values must be one of (0, 1, 2)
    output : output array

    Returns
    -------
    output : ndarray
    '''
    _verify_is_integer_type(input, 'hitmiss')
    _verify_is_integer_type(Bc, 'hitmiss')
    if input.dtype != Bc.dtype:
        if input.dtype == np.bool_:
            input = input.view(np.uint8)
            if Bc.dtype == np.bool_:
                Bc = Bc.view(np.uint8)
            else:
                Bc = Bc.astype(np.uint8)
        else:
            Bc = Bc.astype(input.dtype)
    if output is None:
        output = np.empty_like(input)
    else:
        if output.shape != input.shape:
            raise ValueError(
                'mahotas.hitmiss: output must be of same shape as input')
        if output.dtype != input.dtype:
            if output.dtype == np.bool_ and input.dtype == np.uint8:
                output = output.view(np.uint8)
            else:
                raise TypeError(
                    'mahotas.hitmiss: output must be of same type as input')
    return _morph.hitmiss(input, Bc, output)
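
For comparison only (not part of mahotas), the same transform can be expressed with scipy.ndimage.binary_hit_or_miss by splitting the 0/1/2 template into foreground and background structures; a sketch assuming SciPy is available:

import numpy as np
from scipy import ndimage

img = np.array([[0, 0, 0, 0, 0],
                [0, 1, 1, 1, 1],
                [0, 0, 1, 1, 1]], dtype=bool)
Bc = np.array([[0, 0, 0],
               [2, 1, 1],
               [2, 1, 1]])

# structure1 must line up with 1s, structure2 with 0s; 2s fall in neither
hits = ndimage.binary_hit_or_miss(img, structure1=(Bc == 1),
                                  structure2=(Bc == 0))
print(hits.astype(int))  # should match the docstring example above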
Example #58
0
def symmetric_strength_of_connection(A, theta=0):
    """
    Compute strength of connection matrix using the standard symmetric measure

    An off-diagonal connection A[i,j] is strong iff::

        abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )

    Parameters
    ----------
    A : csr_matrix
        Matrix graph defined in sparse format.  Entry A[i,j] describes the
        strength of edge [i,j]
    theta : float
        Threshold parameter (nonnegative).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    classical_strength_of_connection : classical measure used in AMG
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
        - For vector problems, standard strength measures may produce
          undesirable aggregates.  A "block approach" from Vanek et al. is used
          to replace vertex comparisons with block-type comparisons.  A
          connection between nodes i and j in the block case is strong if::

          ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]|| * ||AB[j,j]|| )

          where AB[k,l] is the matrix block (degrees of freedom) associated
          with nodes k and l, and ||.|| is a matrix norm, such as the
          Frobenius norm.

    References
    ----------
    .. [1] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import symmetric_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = symmetric_strength_of_connection(A, 0.0)
    """

    if theta < 0:
        raise ValueError('expected a nonnegative theta')

    if sparse.isspmatrix_csr(A):
        # if theta == 0:
        #     return A

        Sp = np.empty_like(A.indptr)
        Sj = np.empty_like(A.indices)
        Sx = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)

        S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(M // R, N // C))  # blocks per dim
        else:
            # the strength of connection matrix is based on the
            # Frobenius norms of the blocks
            data = (np.conjugate(A.data) * A.data).reshape(-1, R*C).sum(axis=1)
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(M // R, N // C))  # blocks per dim
            return symmetric_strength_of_connection(A, theta)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
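
Again as a hedged illustration (not the amg_core kernel), the symmetric criterion can be evaluated densely in a single vectorized expression:

import numpy as np

# dense sketch of the symmetric strength criterion
A = np.array([[ 4., -1.,  0.],
              [-1.,  4., -1.],
              [ 0., -1.,  4.]])
theta = 0.2
diag = np.abs(np.diag(A))
# strong iff |A[i,j]| >= theta * sqrt(|A[i,i]| * |A[j,j]|); only the
# off-diagonal pattern of S is meaningful here
S = (np.abs(A) >= theta * np.sqrt(np.outer(diag, diag))).astype(float)
print(S)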
Example #59
0
print(x)
print(xNp)
print(xNp.shape)
print()

xReshaped = np.reshape(x, (2, 2))

print(y)
print(xReshaped)
print(xReshaped.shape)
print()

# Creating standard numpy arrays
x = [1, 2, 3]
npEmpty = np.empty((3, 3, 4), dtype=int, order='F')
npEmptyLike = np.empty_like(x, dtype=int)
print(npEmpty)
print()
#print(npEmptyLike)
#print()

npZeros = np.zeros((2, 3, 4), dtype=int)
npZerosLike = np.zeros_like(x)
print(npZeros)
print()
#print(npZerosLike)
#print()

npOnes = np.ones((4, 4, 3), dtype=int)
npOnesLike = np.ones_like(x)
print(npOnes)
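
One caution worth making explicit (a general NumPy fact, and presumably why the prints of the *_like arrays are commented out above): np.empty and np.empty_like allocate without initializing, so their contents are arbitrary until written.

import numpy as np

u = np.empty_like([1, 2, 3])  # shape (3,), dtype int, values arbitrary
u.fill(0)                     # initialize before reading
print(u)                      # [0 0 0]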
Example #60
0
def QuarticSolverVec(a,b,c,d,e):
    """
     function [x1, x2, x3, x4]=QuarticSolverVec(a,b,c,d,e)
     v.0.2 - Python Port
     - Added condition in size sorting to avoid floating point errors.
     - Removed early loop abortion when stuck in a loop (inefficient)
     - Improved numerical stability of the analytical solution
     - Added code for the case of S==0
     ============================================
     v.0.1 - Nearly identical to QuarticSolver v. 0.4, the first successful vectorized implementation
             Changed logic of ChosenSet to accommodate simultaneous convergence of sets 1 & 2
           - Note the periodicity in nearly-convergent solutions can be other
             than four (related to text on step 4 after table 3). Examples:
             period of 5: [a,b,c,d,e]=[0.111964240308252 -0.88497524334712 -0.197876116344933 -1.07336408259262 -0.373248675102065];
             period of 6: [a,b,c,d,e]=[-1.380904438798326 0.904866918945240 -0.280749330818231 0.990034312758900 1.413106456228119];
             period of 22: [a,b,c,d,e]=[0.903755513939902 0.490545114637739 -1.389679906455410 -0.875910689438623 -0.290630547104907];
             Therefore the condition was changed from epsilon1(iiter)==0 to epsilon1(iiter)<8*eps (and similarly for epsilon2)
           - The special-case criterion of the analytical formula was changed to
             ind=abs(4*Delta0**3./Delta1**2)<2*eps;  (instead of exact zero)
           - vectorized
     ============================================
     - Solves for the x1-x4 roots of the quartic equation y(x)=ax^4+bx^3+cx^2+dx+e.
       Multiple equations can be solved simultaneously by entering same-sized column vectors for all inputs.
     - Note the code immediately translates the input parameters ["a","b","c","d","e"] to the reference paper's parameters [1,a,b,c,d] for consistency,
       and the code probably performs best when "a"=1.

    Parameters
    ----------
    a,b,c,d,e : ``1-D arrays``
        Quartic polynomial coefficients

    Returns
    -------
    - x1-x4 : ``2-D array``
        Concatenated array of the polynomial roots. The function always returns four (possibly complex) values. Multiple roots, if they exist, are given multiple times. An error will result in four NaN values.
        No convergence may result in four inf values (still?)
    
    Reference: 
    Peter Strobach (2010), Journal of Computational and Applied Mathematics 234
        http://www.sciencedirect.com/science/article/pii/S0377042710002128
    """
#    MaxIter=16;
    MaxIter=50;
    eps = np.finfo(float).eps
    #INPUT CONTROL
    #Note: not all input control is implemented.
    # all-column vectors only
#    if size(a,1)~=size(b,1) or size(a,1)~=size(c,1) or size(a,1)~=size(d,1) or size(a,1)~=size(e,1) or ...
#       size(a,2)~=1 or size(b,2)~=1 or size(c,2)~=1 or size(d,2)~=1 or size(e,2)~=1:
#        fprintf('ERROR: illegal input parameter sizes.\n');
#        x1=inf; x2=inf; x3=inf; x4=inf;    
#        return
    
    # translate input variables to the paper's
    if np.any(a==0):
       print('ERROR: a==0. Not a quartic equation.\n')
       x1=np.nan; x2=np.nan; x3=np.nan; x4=np.nan;
       return x1,x2,x3,x4
    else:
        input_a=a;
        input_b=b;
        input_c=c;
        input_d=d;
        input_e=e;
        a=input_b/input_a;
        b=input_c/input_a;
        c=input_d/input_a;
        d=input_e/input_a;
    
    # PRE-ALLOCATE MEMORY
    # ChosenSet is used to track which input set already has a solution (=non-zero value)
    ChosenSet=np.zeros_like(a);
    x1 = np.empty_like(a,complex)
    x1[:] = np.nan
    x2=x1.copy(); x3=x1.copy(); x4=x1.copy()
    x11=x1.copy(); x12=x1.copy(); x21=x1.copy(); x22=x1.copy()
    alpha01=x1.copy(); alpha02=x1.copy(); beta01=x1.copy(); beta02=x1.copy()
    gamma01=x1.copy(); gamma02=x1.copy(); delta01=x1.copy(); delta02=x1.copy()
    e11=x1.copy(); e12=x1.copy(); e13=x1.copy(); e14=x1.copy()
    e21=x1.copy(); e22=x1.copy(); e23=x1.copy(); e24=x1.copy()
    alpha1=x1.copy(); alpha2=x1.copy(); beta1=x1.copy(); beta2=x1.copy()
    gamma1=x1.copy(); gamma2=x1.copy(); delta1=x1.copy(); delta2=x1.copy()
    alpha=x1.copy(); beta=x1.copy(); gamma=x1.copy(); delta=x1.copy()
    # check multiple roots -cases 2 & 3. indexed by ChosenSet=-2
    test_alpha=0.5*a;
    test_beta=0.5*(b-test_alpha**2);
    test_epsilon=np.stack((c-2*test_alpha*test_beta, d-test_beta**2)).T;
    ind=np.all(test_epsilon==0,1);
    if np.any(ind):
        x1[ind], x2[ind]=SolveQuadratic(np.ones_like(test_alpha[ind]),test_alpha[ind],test_beta[ind]);
        x3[ind]=x1[ind]; x4[ind]=x2[ind];
        ChosenSet[ind]=-2;
    
    # check multiple roots -case 4. indexed by ChosenSet=-4
    i=ChosenSet==0;
    x11[i], x12[i]=SolveQuadratic(np.ones(np.sum(i)),a[i]/2,b[i]/6);
    x21[i]=-a[i]-3*x11[i];    
    test_epsilon[i,:2]=np.stack((c[i]+x11[i]**2*(x11[i]+3*x21[i]), d[i]-x11[i]**3*x21[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x11[ind[i]]; x2[ind[i]]=x11[ind[i]]; x3[ind[i]]=x11[ind[i]]; x4[ind[i]]=x12[ind[i]];
        ChosenSet[ind[i]]=-4;
    x22[i]=-a[i]-3*x12[i];
    test_epsilon[i,:2]=np.stack((c[i]+x12[i]**2*(x12[i]+3*x22[i]), d[i]-x12[i]**3*x22[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x21[ind[i]]; x2[ind[i]]=x21[ind[i]]; x3[ind[i]]=x21[ind[i]]; x4[ind[i]]=x22[ind[i]];
        ChosenSet[ind[i]]=-4;
    # General solution
    # initialize
    epsilon1=np.empty((np.size(a),MaxIter))
    epsilon1[:]=np.inf
    epsilon2=epsilon1.copy();
    
    i=ChosenSet==0;
    fi=np.nonzero(i)[0];
    x=np.empty((fi.size,4),complex)
    ii = np.arange(fi.size)
    #Calculate analytical root values
    x[:,0], x[:,1], x[:,2], x[:,3]=AnalyticalSolution(np.ones(np.sum(i)),a[i],b[i],c[i],d[i],eps);
    #Sort the roots in order of their size
    ind=np.argsort(abs(x))[:,::-1]; #'descend'
    x1[i]=x.flatten()[4*ii+ind[:,0]];
    x2[i]=x.flatten()[4*ii+ind[:,1]];
    x3[i]=x.flatten()[4*ii+ind[:,2]];
    x4[i]=x.flatten()[4*ii+ind[:,3]];
    #Avoiding floating point errors.
    #The value chosen is somewhat arbitrary. See Appendix C for details.
    ind = abs(x1)-abs(x4)<8*10**-12;
    x2[ind] = np.conj(x1[ind])
    x3[ind] = -x1[ind]
    x4[ind] = -x2[ind]
    #Initializing parameter values
    alpha01[i]=-np.real(x1[i]+x2[i]);
    beta01[i]=np.real(x1[i]*x2[i]);
    alpha02[i]=-np.real(x2[i]+x3[i]);
    beta02[i]=np.real(x2[i]*x3[i]);
    gamma01[i], delta01[i]=FastGammaDelta(alpha01[i],beta01[i],a[i],b[i],c[i],d[i]);
    gamma02[i], delta02[i]=FastGammaDelta(alpha02[i],beta02[i],a[i],b[i],c[i],d[i]);
    
    alpha1[i]=alpha01[i]; beta1[i]=beta01[i]; gamma1[i]=gamma01[i]; delta1[i]=delta01[i];
    alpha2[i]=alpha02[i]; beta2[i]=beta02[i]; gamma2[i]=gamma02[i]; delta2[i]=delta02[i];
    
    #Backward Optimizer Outer Loop
    e11[i]=a[i]-alpha1[i]-gamma1[i];
    e12[i]=b[i]-beta1[i]-alpha1[i]*gamma1[i]-delta1[i];
    e13[i]=c[i]-beta1[i]*gamma1[i]-alpha1[i]*delta1[i];
    e14[i]=d[i]-beta1[i]*delta1[i];
    
    e21[i]=a[i]-alpha2[i]-gamma2[i];
    e22[i]=b[i]-beta2[i]-alpha2[i]*gamma2[i]-delta2[i];
    e23[i]=c[i]-beta2[i]*gamma2[i]-alpha2[i]*delta2[i];
    e24[i]=d[i]-beta2[i]*delta2[i];
    iiter=0;
    while iiter<MaxIter and np.any(ChosenSet[i]==0):
        i=np.nonzero(ChosenSet==0)[0];
        
        alpha1[i], beta1[i], gamma1[i], delta1[i], e11[i], e12[i], e13[i], e14[i], epsilon1[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha1[i],beta1[i],gamma1[i],delta1[i],e11[i],e12[i],e13[i],e14[i]);
        alpha2[i], beta2[i], gamma2[i], delta2[i], e21[i], e22[i], e23[i], e24[i], epsilon2[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha2[i],beta2[i],gamma2[i],delta2[i],e21[i],e22[i],e23[i],e24[i]);
    
        j = np.ones_like(a[i])
        j[(epsilon2[i,iiter]<epsilon1[i,iiter]).flatten()] = 2
        BestEps = np.nanmin(np.stack([epsilon1[i,iiter].flatten(), epsilon2[i,iiter].flatten()]),0);
        ind=BestEps<8*eps;
        ChosenSet[i[ind]]=j[ind];
        ind=np.logical_not(ind);
#        if iiter>0 and np.any(ind):
#            ii=i[ind];
#            LimitCycleReached = np.empty((ii.size,2),bool)
#            LimitCycleReached[:,0] = np.any(epsilon1[ii,:iiter]==epsilon1[ii,iiter],0)
#            LimitCycleReached[:,1] = np.any(epsilon2[ii,:iiter]==epsilon2[ii,iiter],0)
##            LimitCycleReached=[any(bsxfun(@eq,epsilon1(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon1(i(ind),iiter)),2) any(bsxfun(@eq,epsilon2(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon2(i(ind),iiter)),2)];
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,0] , np.logical_not(LimitCycleReached[:,1]))]]=1;
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,1] , np.logical_not(LimitCycleReached[:,0]))]]=2;
##            ChosenSet(ii(~LimitCycleReached(:,1) & LimitCycleReached(:,2)))=2;
##            ind=find(ind);
#            cond = np.logical_and(LimitCycleReached[:,1],LimitCycleReached[:,0])
#            ChosenSet[ii[cond]]=j[ind][cond]
##            ChosenSet(ii(LimitCycleReached(:,1) & LimitCycleReached(:,2)))=j(ind(LimitCycleReached(:,1) & LimitCycleReached(:,2)));
        iiter=iiter+1;
        
    #Checking which of the chains is relevant
    i=np.nonzero(ChosenSet==0)[0];
    ind=epsilon1[i,-1]<epsilon2[i,-1];
#    ind=np.logical_and(epsilon1[i,-1]<epsilon2[i,-1],np.logical_not(np.isnan(epsilon2[i,-1])));
    ChosenSet[i[ind]]=1;
    ChosenSet[i[np.logical_not(ind)]]=2;
    
    # Output
    i=ChosenSet==1;
    alpha[i]=alpha1[i];
    beta[i]=beta1[i];
    gamma[i]=gamma1[i];
    delta[i]=delta1[i];
    
    i=ChosenSet==2;
    alpha[i]=alpha2[i];
    beta[i]=beta2[i];
    gamma[i]=gamma2[i];
    delta[i]=delta2[i];
    
    i=ChosenSet>0;
    x1[i], x2[i]=SolveQuadratic(np.ones(np.sum(i)),alpha[i],beta[i]);
    x3[i], x4[i]=SolveQuadratic(np.ones(np.sum(i)),gamma[i],delta[i]);

    return np.array([x1,x2,x3,x4])
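
A hypothetical cross-check, assuming QuarticSolverVec and the helper functions it calls (SolveQuadratic, AnalyticalSolution, FastGammaDelta, BackwardOptimizer_InnerLoop) are all defined, is to compare one polynomial against np.roots:

import numpy as np

coeffs = [1.0, -10.0, 35.0, -50.0, 24.0]  # (x-1)(x-2)(x-3)(x-4)
args = [np.array([v]) for v in coeffs]    # column-vector style inputs
roots = QuarticSolverVec(*args).ravel()
ref = np.roots(coeffs)                    # NumPy reference roots
print(np.sort_complex(roots))
print(np.sort_complex(ref))               # both should be ~[1, 2, 3, 4]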