Example #1
def VtuMatchLocationsArbitrary(vtu1, vtu2, tolerance=1.0e-6):
    """
    Check that the locations in the supplied vtus match, returning True if they
    match and False otherwise.
    The locations may be in a different order.
    """

    locations1 = vtu1.GetLocations()
    locations2 = vtu2.GetLocations()
    if not locations1.shape == locations2.shape:
        return False

    epsilon = numpy.ones(locations1.shape[1]) * numpy.finfo(float).eps
    for j in range(locations1.shape[1]):
        epsilon[j] = epsilon[j] * (locations1[:, j].max() - locations1[:, j].min())

    for i in range(len(locations1)):
        for j in range(len(locations1[i])):
            if abs(locations1[i][j]) < epsilon[j]:
                locations1[i][j] = 0.0
            if abs(locations2[i][j]) < epsilon[j]:
                locations2[i][j] = 0.0

    # lexical sort on x,y and z coordinates resp. of locations1 and locations2
    sort_index1 = numpy.lexsort(locations1.T)
    sort_index2 = numpy.lexsort(locations2.T)

    # should now be in same order, so we can check for its biggest difference
    return abs(locations1[sort_index1] - locations2[sort_index2]).max() < tolerance
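A quick aside on the idiom used above: np.lexsort over the transposed coordinate array brings both point clouds into a canonical order (the last key, i.e. the last coordinate column, is the most significant), after which they can be compared elementwise. A minimal, self-contained sketch on synthetic points (not the vtu API):

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
shuffled = pts[np.random.permutation(len(pts))]

# canonical ordering of each cloud: lexsort keys are the coordinate columns,
# with the last column acting as the primary sort key
order1 = np.lexsort(pts.T)
order2 = np.lexsort(shuffled.T)

# after reordering, the two clouds should agree point by point
assert np.abs(pts[order1] - shuffled[order2]).max() < 1e-6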
Example #2
    def test_sort_index_multicolumn(self):
        import random
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                              frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
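The equivalence the test above leans on — DataFrame.sort_values(by=['A', 'B']) matching np.lexsort((frame['B'], frame['A'])), with the primary column passed last — can be checked on a small hand-built frame. A rough sketch, assuming a recent pandas:

import numpy as np
import pandas as pd

frame = pd.DataFrame({'A': [2, 1, 2, 1], 'B': [0, 3, 1, 2], 'C': [10.0, 20.0, 30.0, 40.0]})

# lexsort: the last key is the most significant, so 'A' goes last for sort by ['A', 'B']
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
result = frame.sort_values(by=['A', 'B'])

pd.testing.assert_frame_equal(result, expected)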
Example #3
def test_cutting_plane_selector():
    # generate fake data with a number of non-cubical grids
    ds = fake_random_ds(64, nprocs=51)
    assert all(ds.periodicity)

    # test cutting plane against orthogonal plane
    for i, d in enumerate("xyz"):
        norm = np.zeros(3)
        norm[i] = 1.0

        for coord in np.arange(0, 1.0, 0.1):
            center = np.zeros(3)
            center[i] = coord

            data = ds.slice(i, coord)
            data.get_data()
            data2 = ds.cutting(norm, center)
            data2.get_data()

            assert data.shape[0] == data2.shape[0]

            cells1 = np.lexsort((data["x"], data["y"], data["z"]))
            cells2 = np.lexsort((data2["x"], data2["y"], data2["z"]))
            for d2 in "xyz":
                yield assert_equal, data[d2][cells1], data2[d2][cells2]
Example #4
    def negative_gradient(self, y_true, y_pred, sample_group=None, **kargs):
        y_pred = y_pred.ravel()
        # the lambda terms
        grad = np.empty_like(y_true, dtype=np.float64)

        # for updating terminal regions
        self.weights = np.empty_like(y_true, dtype=np.float64)

        if sample_group is None:
            ix = np.lexsort((y_true, -y_pred))
            inv_ix = np.empty_like(ix)
            inv_ix[ix] = np.arange(len(ix))
            tmp_grad, tmp_weights = _lambda(y_true[ix], y_pred[ix],
                                            self.max_rank)
            grad = tmp_grad[inv_ix]
            self.weights = tmp_weights[inv_ix]
        else:
            for start, end in self._groupby(sample_group):
                ix = np.lexsort((y_true[start:end], -y_pred[start:end]))
                inv_ix = np.empty_like(ix)
                inv_ix[ix] = np.arange(len(ix))

                # sort by current score before passing
                # and then remap the return values
                tmp_grad, tmp_weights = _lambda(y_true[ix + start],
                                                y_pred[ix + start],
                                                self.max_rank)
                grad[start:end] = tmp_grad[inv_ix]
                self.weights[start:end] = tmp_weights[inv_ix]

        return grad
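Two idioms from this gradient routine are worth isolating: sorting by descending score with a negated key, and inverting the permutation with inv_ix[ix] = arange(len(ix)) so values computed in sorted order can be scattered back to the original sample order. A small sketch with made-up numbers:

import numpy as np

y_true = np.array([0.0, 2.0, 1.0, 3.0])
y_pred = np.array([0.1, 0.9, 0.4, 0.2])

# primary key: descending y_pred (negated); ties broken by ascending y_true
ix = np.lexsort((y_true, -y_pred))

# inverse permutation: position of each original element in the sorted order
inv_ix = np.empty_like(ix)
inv_ix[ix] = np.arange(len(ix))

sorted_vals = y_true[ix] * 10.0   # stand-in for a per-position result like _lambda's
restored = sorted_vals[inv_ix]    # back in the original sample order
assert np.allclose(restored, y_true * 10.0)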
Example #5
def doubleParetoSorting(x0, x1):
    fronts = [[]]
    left = [[]]
    right = [[]]
    idx = np.lexsort((x1, x0))
    
    idxEdge = np.lexsort((-np.square(x0-0.5), x1))
    
    fronts[-1].append(idxEdge[0])
    left[-1].append(x0[idxEdge[0]])
    right[-1].append(x0[idxEdge[0]])
    for i0 in idxEdge[1:]:
        if x0[i0]>=left[-1] and x0[i0]<=right[-1]:
            #add a new front
            fronts.append([])
            left.append([])
            right.append([])
            fronts[-1].append(i0)
            left[-1].append(x0[i0])
            right[-1].append(x0[i0])
        else:
            # check existing fronts
            for i1 in range(len(fronts)):
                if x0[i0]<left[i1] or x0[i0]>right[i1]:
                    if x0[i0]<left[i1]:
                        left[i1] = x0[i0]
                        fronts[i1].insert(0, i0)
                    else:
                        right[i1] = x0[i0]
                        fronts[i1].append(i0)
                    break    
    return (fronts, idx)
Example #6
def ssea_ranker(ssea_list, q_val_cutoff, peak_type):
    if peak_type == "Amplification":
        sign = 1
    elif peak_type == "Deletion":
        sign = -1
    
    thresh=(ssea_list[:,1]<q_val_cutoff).astype(int)
    sign_adj = ssea_list*sign
    rank_big=np.lexsort((thresh, -sign_adj[:,0]))
    ranks_ranked = []
    for x in enumerate(rank_big):
        ranks_ranked.append(x)
    ranked = np.array(ranks_ranked)
    
    ranked_sorted = ranked[np.lexsort([ranked[:,0], ranked[:,1]])]
    ranks = []
    for x in ranked_sorted:
        ranks.append(x[0])
    
    
    ranks_out = []
    for x in range(len(ssea_list[:,0])):
        if thresh[x]==1: 
            ranks_out.append(ranks[x])
        else:
            ranks_out.append('not significant')
    return ranks_out
Example #7
def pearson_correlation(movie_id_1, movie_id_2):
    rated_movie1 = ratings[ratings[:, 1] == movie_id_1]
    rated_movie2 = ratings[ratings[:, 1] == movie_id_2]
    rated_both = np.intersect1d(rated_movie1[:, 0], rated_movie2[:, 0], True)

    if len(rated_both) < 15:
        return 0

    ratings_movie1 = rated_movie1[np.in1d(rated_movie1[:, 0], rated_both), :]
    ratings_movie2 = rated_movie2[np.in1d(rated_movie2[:, 0], rated_both), :]
    sorted_movie1 = ratings_movie1[np.lexsort((ratings_movie1[:, 0], ))][:, [0, 2]]
    sorted_movie2 = ratings_movie2[np.lexsort((ratings_movie2[:, 0], ))][:, [0, 2]]

    mean1 = np.mean(ratings_movie1[:, 2])
    mean2 = np.mean(ratings_movie2[:, 2])

    numerator = 0
    denomX = 0
    denomY = 0
    for i in range(len(sorted_movie1)):
        x = sorted_movie1[i][1] - mean1
        y = sorted_movie2[i][1] - mean2
        numerator += x * y
        denomX += x * x
        denomY += y * y

    if (denomX == 0 or denomY == 0):
        return 0

    return round(numerator / m.sqrt(denomX * denomY), 3)
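The explicit loop above is just the Pearson correlation of the two aligned rating columns; on synthetic aligned data the same arithmetic should agree with np.corrcoef. A sketch under that assumption:

import numpy as np

ratings1 = np.array([4.0, 3.0, 5.0, 2.0, 4.0])
ratings2 = np.array([3.5, 3.0, 4.5, 2.5, 4.0])

x = ratings1 - ratings1.mean()
y = ratings2 - ratings2.mean()
manual = (x * y).sum() / np.sqrt((x * x).sum() * (y * y).sum())

assert np.isclose(manual, np.corrcoef(ratings1, ratings2)[0, 1])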
Example #8
def sortContours2( contours, direction = "x" ):#TODO
    contourPoints = np.zeros((len(contours),2), dtype = int)
    if direction == "x":
        a = 1
        b = 0
    elif direction == "y":
        a = 0
        b = 1
    
    counter = 0
    for cnt in contours:
        conResh = np.reshape(cnt,(-1,2))
        idx = np.lexsort( (conResh[:,a],conResh[:,b]) )
        sortedContours = conResh[idx,:]
        contourPoints[counter,:] = sortedContours[0,:] # The coordinate of reference point.
        counter = counter + 1
        
    sortedIdx = np.lexsort((contourPoints[:,a], contourPoints[:,b]))
    sortedContours = []
    referencePoints = []
    for idx in sortedIdx:
        sortedContours.append(contours[idx])
        referencePoints.append(contourPoints[idx])
        
    return sortedContours, referencePoints
Example #9
 def set_sort_col(self, col_index, add=False):
     '''Set the column to sort this table by. If add is true, this column
     will be added to the end of the existing sort order (or removed from the
     sort order if it is already present.)
     '''
     if not add:
         if len(self.sortcols)>0 and col_index in self.sortcols:
             # If this column is already sorted, flip it
             self.row_order = self.row_order[::-1]
             self.sortdir = -self.sortdir
         else:
             self.sortdir = 1
             self.sortcols = [col_index]
             # If this column hasn't been sorted yet, sort ascending
             self.row_order = np.lexsort(self.data[:,self.col_order][:,self.sortcols[::-1]].T.tolist())
     else:
         if len(self.sortcols)>0 and col_index in self.sortcols:
             self.sortcols.remove(col_index)
         else:
             self.sortcols += [col_index]
         if self.sortcols==[]:
             # if all sort columns have been toggled off, reset row_order
             self.row_order = np.arange(self.data.shape[0])
         else:
             self.row_order = np.lexsort(self.data[:,self.sortcols[::-1]].T.tolist())
     self.ordered_data = self.data[self.row_order,:][:,self.col_order]
Example #10
def preCompute(rowBased_row_array,rowBased_col_array,S_rowBased_data_array):
    """
    format affinity/similarity matrix
    """
    
    # Get parameters
    data_len=len(S_rowBased_data_array)
    row_indptr=sparseAP_cy.getIndptr(rowBased_row_array)
    if row_indptr[-1]!=data_len: row_indptr=np.concatenate((row_indptr,np.array([data_len])))
    row_to_col_ind_arr=np.lexsort((rowBased_row_array,rowBased_col_array))
    colBased_row_array=sparseAP_cy.npArrRearrange_int_para(rowBased_row_array,row_to_col_ind_arr)
    colBased_col_array=sparseAP_cy.npArrRearrange_int_para(rowBased_col_array,row_to_col_ind_arr)
    col_to_row_ind_arr=np.lexsort((colBased_col_array,colBased_row_array))
    col_indptr=sparseAP_cy.getIndptr(colBased_col_array)
    if col_indptr[-1]!=data_len: col_indptr=np.concatenate((col_indptr,np.array([data_len])))
    kk_col_index=sparseAP_cy.getKKIndex(colBased_row_array,colBased_col_array)
    
    #Initialize matrix A, R
    A_rowbased_data_array=np.array([0.0]*data_len)
    R_rowbased_data_array=np.array([0.0]*data_len)
    
    # Add a small random value to remove degeneracies
    random_state=np.random.RandomState(0)
    S_rowBased_data_array+=1e-12*random_state.randn(data_len)*(np.amax(S_rowBased_data_array)-np.amin(S_rowBased_data_array))
    
    # Convert row_to_col_ind_arr/col_to_row_ind_arr to the int datatype so they are compatible with the cython code
    row_to_col_ind_arr=row_to_col_ind_arr.astype(int)
    col_to_row_ind_arr=col_to_row_ind_arr.astype(int)
    
    return S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array,col_indptr,row_indptr,row_to_col_ind_arr,col_to_row_ind_arr,kk_col_index
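The two lexsort calls above perform the row-major to column-major reordering of COO-style sparse triplets: passing the column array as the last (primary) key and the row array as the tie-breaker yields the permutation from row order to column order. A minimal sketch with a tiny matrix, using plain numpy in place of the sparseAP_cy helpers:

import numpy as np

# COO triplets stored in row-major order (sorted by row, then column)
rows = np.array([0, 0, 1, 2, 2])
cols = np.array([1, 2, 0, 0, 2])
data = np.array([10., 20., 30., 40., 50.])

# reorder to column-major: column is the primary key (passed last), row breaks ties
row_to_col = np.lexsort((rows, cols))
print(rows[row_to_col])   # [1 2 0 0 2]
print(cols[row_to_col])   # [0 0 1 2 2]
print(data[row_to_col])   # [30. 40. 10. 20. 50.]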
Example #11
def loc_vector_labels(x):
    """Identify unique labels from the vector of image labels
    
    x - a vector of one label or dose per image
    
    returns labels, labnum, uniqsortvals
    labels - a vector giving an ordinal per image where that ordinal
             is an index into the vector of unique labels (uniqsortvals)
    labnum - # of unique labels in x
    uniqsortvals - a vector containing the unique labels in x
    """
    #
    # Get the index of each image's label in the sorted array
    #
    order = np.lexsort((x,))
    reverse_order = np.lexsort((order,))
    #
    # Get a sorted view of the labels
    #
    sorted_x = x[order]
    #
    # Find the elements that start a new run of labels in the sorted array
    # ex: 0,0,0,3,3,3,5,5,5
    #     1,0,0,1,0,0,1,0,0
    #
    # Then cumsum - 1 turns into:
    #     0,0,0,1,1,1,2,2,2
    #
    # and sorted_x[first_occurrence] gives the unique labels in order
    first_occurrence = np.ones(len(x), bool)
    first_occurrence[1:] = sorted_x[:-1] != sorted_x[1:]
    sorted_labels = np.cumsum(first_occurrence) - 1
    labels = sorted_labels[reverse_order]
    uniqsortvals = sorted_x[first_occurrence]
    return (labels, len(uniqsortvals), uniqsortvals)
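Two tricks in loc_vector_labels generalize: np.lexsort((order,)) inverts a permutation, and the first_occurrence/cumsum pattern assigns an ordinal code per unique value, which is what np.unique(..., return_inverse=True) computes. A small sketch checking that equivalence:

import numpy as np

x = np.array([5, 3, 3, 9, 5, 3])

order = np.lexsort((x,))              # argsort of x
reverse_order = np.lexsort((order,))  # inverse permutation of order

sorted_x = x[order]
first_occurrence = np.ones(len(x), bool)
first_occurrence[1:] = sorted_x[:-1] != sorted_x[1:]
labels = (np.cumsum(first_occurrence) - 1)[reverse_order]

uniq, inverse = np.unique(x, return_inverse=True)
assert np.array_equal(labels, inverse)
assert np.array_equal(sorted_x[first_occurrence], uniq)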
Example #12
def compute_orderings(path):

    logging.info("Computing orderings of features")
    job = load_job(path)
    original = np.arange(len(job.input.feature_ids))
    stats = job.results.feature_to_score[...]
    rev_stats = 0.0 - stats

    logging.info("  Computing ordering by score for each tuning param")
    by_score_original = np.zeros(np.shape(job.results.raw_stats), int)
    for i in range(len(job.settings.tuning_params)):
        by_score_original[i] = np.lexsort(
            (original, rev_stats[i]))

    order_by_score_original = by_score_original

    logging.info("  Computing ordering by fold change")
    by_foldchange_original = np.zeros(np.shape(job.results.fold_change.table), int)
    foldchange = job.results.fold_change.table[...]
    rev_foldchange = 0.0 - foldchange
    for i in range(len(job.results.fold_change.header)):
        keys = (original, rev_foldchange[..., i])

        by_foldchange_original[..., i] = np.lexsort(keys)

    order_by_foldchange_original = by_foldchange_original

    with h5py.File(path, 'r+') as db:
        orderings = db.create_group('orderings')
        orderings['by_score_original'] = order_by_score_original
        orderings['by_foldchange_original'] = order_by_foldchange_original        
Example #13
def VtuMatchLocationsArbitrary(vtu1, vtu2, tolerance = 1.0e-6):
  """
  Check that the locations in the supplied vtus match, returning True if they
  match and False otherwise.
  The locations may be in a different order.
  """
   
  locations1 = vtu1.GetLocations()
  locations2 = vtu2.GetLocations()
  if not locations1.shape == locations2.shape:
    return False   
    
  for j in range(locations1.shape[1]):
    # compute the smallest possible precision given the range of this coordinate
    epsilon = numpy.finfo(float).eps * numpy.abs(locations1[:,j]).max()
    if tolerance<epsilon:
      # the specified tolerance is smaller than possible machine precision
      # (or something else went wrong)
      raise Exception("ERROR: specified tolerance is smaller than machine precision of given locations")
    # ensure epsilon doesn't get too small (might be for zero for instance)
    epsilon=max(epsilon,tolerance/100.0)

    # round to that many decimal places (-2 to be sure) so that
    # we don't get rounding issues with lexsort
    locations1[:,j]=numpy.around(locations1[:,j], int(-numpy.log10(epsilon))-2)
    locations2[:,j]=numpy.around(locations2[:,j], int(-numpy.log10(epsilon))-2)

  # lexical sort on x,y and z coordinates resp. of locations1 and locations2
  sort_index1=numpy.lexsort(locations1.T)
  sort_index2=numpy.lexsort(locations2.T)
  
  # should now be in same order, so we can check for its biggest difference
  return numpy.allclose(locations1[sort_index1],locations2[sort_index2], atol=tolerance)
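Compared with Example #1, this version rounds each coordinate to a precision well below the tolerance before sorting, so lexsort is not thrown off by sub-tolerance noise. A minimal sketch of the round-then-lexsort-then-allclose pipeline on noisy, reordered points:

import numpy

tol = 1.0e-6
a = numpy.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.5]])
b = a[::-1] + 1.0e-9   # same points, reordered, with sub-tolerance noise

# round to a precision well below the tolerance, then sort canonically
ra, rb = numpy.around(a, 6), numpy.around(b, 6)
ia, ib = numpy.lexsort(ra.T), numpy.lexsort(rb.T)
assert numpy.allclose(ra[ia], rb[ib], atol=tol)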
Example #14
def sortrowsByMultiCol(data, list_sortCol, isAscending=1):
    """
    Sort 2D numpy array by multiple columns
    :param data:
    :param list_sortCol: e.g. [0, 2, 3], 1st element is most important column to sort, 2nd element is 2nd most important, etc.
    :return: sorted data
    """
    # data_sort = data # data_sort has to be number
    # k_col = len(list_sortCol)-1
    # while k_col >= 0:
    #     idx_col = list_sortCol[k_col]
    #     t_data = np.float64(data_sort[:,idx_col])
    #     if isAscending==1:
    #         data_sort = data_sort[t_data.argsort(),:]
    #     elif isAscending==0:
    #         data_sort = data_sort[(-t_data).argsort(),:] # descending order
    #     k_col -= 1

    dataForSort = np.transpose(data[:, list_sortCol[::-1]])  # [start:stop:step] -1 is meant to reverse the order
    if isAscending == 1:
        idx_sort = np.lexsort(dataForSort)  # last row in dataForSort is most important to sort
    elif isAscending == 0:
        idx_sort = np.lexsort(-dataForSort)  # last row in dataForSort is most important to sort

    data_sorted = data[idx_sort, :]

    return data_sorted
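The heart of sortrowsByMultiCol is that np.lexsort takes its most significant key last, so the column list is reversed before transposing, and negating every key flips the order to descending. A minimal sketch of that idiom on a toy array:

import numpy as np

data = np.array([[2, 9, 1],
                 [1, 5, 3],
                 [2, 7, 0],
                 [1, 5, 2]])
sort_cols = [0, 2]   # column 0 is the most important key

keys = data[:, sort_cols[::-1]].T   # reversed so column 0 ends up last (primary)
ascending = data[np.lexsort(keys)]
descending = data[np.lexsort(-keys)]

print(ascending)
print(descending)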
Example #15
    def test_03_01_graph(self):
        '''Make a simple graph'''
        #
        # The skeleton looks something like this:
        #
        #   .   .
        #    . .
        #     .
        #     .
        i,j = np.mgrid[-10:11,-10:11]
        skel = (i < 0) & (np.abs(i) == np.abs(j))
        skel[(i >= 0) & (j == 0)] = True
        #
        # Put a single label at the bottom
        #
        labels = np.zeros(skel.shape, int)
        labels[(i > 8) & (np.abs(j) < 2)] = 1
        np.random.seed(31)
        intensity = np.random.uniform(size = skel.shape)
        workspace, module = self.make_workspace(
            labels, skel, intensity_image = intensity, wants_graph = True)
        module.run(workspace)
        edge_graph = self.read_graph_file(EDGE_FILE)
        vertex_graph = self.read_graph_file(VERTEX_FILE)
        vidx = np.lexsort((vertex_graph["j"], vertex_graph["i"]))
        #
        # There should be two vertices at the bottom of the array - these
        # are bogus artifacts of the object hitting the edge of the image
        #
        for vidxx in vidx[-2:]:
            self.assertEqual(vertex_graph["i"][vidxx], 20)
        vidx = vidx[:-2]
        
        expected_vertices = ((0,0), (0,20), (10,10), (17,10))
        self.assertEqual(len(vidx), len(expected_vertices))
        for idx, v in enumerate(expected_vertices):
            vv = vertex_graph[vidx[idx]]
            self.assertEqual(vv["i"], v[0])
            self.assertEqual(vv["j"], v[1])

        #
        # Get rid of edges to the bogus vertices
        #
        for v in ("v1","v2"):
            edge_graph = edge_graph[vertex_graph["i"][edge_graph[v]-1] != 20]
            
        eidx = np.lexsort((vertex_graph["j"][edge_graph["v1"]-1],
                           vertex_graph["i"][edge_graph["v1"]-1],
                           vertex_graph["j"][edge_graph["v2"]-1],
                           vertex_graph["i"][edge_graph["v2"]-1]))
        expected_edges = (((0,0),(10,10),11, np.sum(intensity[(i <= 0) & (j<=0) & skel])),
                          ((0,20),(10,10),11, np.sum(intensity[(i <= 0) & (j>=0) & skel])),
                          ((10,10),(17,10),8, np.sum(intensity[(i >= 0) & (i <= 7) & skel])))
        for i, (v1, v2, length, total_intensity) in enumerate(expected_edges):
            ee = edge_graph[eidx[i]]
            for ve, v in ((v1, ee["v1"]), (v2, ee["v2"])):
                self.assertEqual(ve[0], vertex_graph["i"][v-1])
                self.assertEqual(ve[1], vertex_graph["j"][v-1])
            self.assertEqual(length, ee["length"])
            self.assertAlmostEqual(total_intensity, ee["total_intensity"], 4)
Example #16
def test_hemisphere_subdivide():

    def flip(vertices):
        x, y, z = vertices.T
        f = (z < 0) | ((z == 0) & (y < 0)) | ((z == 0) & (y == 0) & (x < 0))
        return 1 - 2*f[:, None]

    decimals = 6
    # Test HemiSphere.subdivide
    # Create a hemisphere by dividing a hemi-icosahedron
    hemi1 = HemiSphere.from_sphere(unit_icosahedron).subdivide(4)
    vertices1 = np.round(hemi1.vertices, decimals)
    vertices1 *= flip(vertices1)
    order = np.lexsort(vertices1.T)
    vertices1 = vertices1[order]

    # Create a hemisphere from a subdivided sphere
    sphere = unit_icosahedron.subdivide(4)
    hemi2 = HemiSphere.from_sphere(sphere)
    vertices2 = np.round(hemi2.vertices, decimals)
    vertices2 *= flip(vertices2)
    order = np.lexsort(vertices2.T)
    vertices2 = vertices2[order]

    # The two hemispheres should have the same vertices up to their order
    nt.assert_array_equal(vertices1, vertices2)

    # Create a hemisphere from vertices
    hemi3 = HemiSphere(xyz=hemi1.vertices)
    nt.assert_array_equal(hemi1.faces, hemi3.faces)
    nt.assert_array_equal(hemi1.edges, hemi3.edges)
Example #17
    def fullCheck(self,a):
        # Check that atoms repeats over a
        bp1, bp2, bp3 = self.bp1, self.bp2, self.bp3 
        atomtypes = N.unique(self.snr)
        passed = True
        for atomtype in atomtypes:
            sameatoms = N.argwhere(self.snr==atomtype)
            samexyz = self.xyz[sameatoms]
            samexyz = samexyz.reshape((-1, 3))
            shifted = samexyz+a.reshape((1,3))
            
            # Move inside PBC
            samexyz = moveIntoCell(samexyz,self.pbc[0,:],self.pbc[1,:],self.pbc[2,:],self.accuracy)
            shifted = moveIntoCell(shifted,self.pbc[0,:],self.pbc[1,:],self.pbc[2,:],self.accuracy)

            # Should be the same if sorted!
            ipiv = N.lexsort(N.round(N.transpose(samexyz)/self.accuracy))
            samexyz = samexyz[ipiv,:]
            ipiv = N.lexsort(N.round(N.transpose(shifted)/self.accuracy))
            shifted = shifted[ipiv,:]
            
            if not N.allclose(samexyz,shifted,atol=self.accuracy):
                passed = False
            
        return passed
Example #18
def test_set_synaptic_parameters_fully_connected(sim):
    sim.setup()
    mpi_rank = sim.rank()
    p1 = sim.Population(4, sim.IF_cond_exp())
    p2 = sim.Population(2, sim.IF_cond_exp())
    syn = sim.TsodyksMarkramSynapse(U=0.5, weight=0.123, delay=0.1)
    prj = sim.Projection(p1, p2, sim.AllToAllConnector(), syn)

    expected = numpy.array([
        (0.0, 0.0, 0.123, 0.1, 0.5),
        (0.0, 1.0, 0.123, 0.1, 0.5),
        (1.0, 0.0, 0.123, 0.1, 0.5),
        (1.0, 1.0, 0.123, 0.1, 0.5),
        (2.0, 0.0, 0.123, 0.1, 0.5),
        (2.0, 1.0, 0.123, 0.1, 0.5),
        (3.0, 0.0, 0.123, 0.1, 0.5),
        (3.0, 1.0, 0.123, 0.1, 0.5),
    ])
    actual = numpy.array(prj.get(['weight', 'delay', 'U'], format='list'))
    if mpi_rank == 0:
        ind = numpy.lexsort((actual[:, 1], actual[:, 0]))
        assert_arrays_almost_equal(actual[ind], expected, 1e-16)

    positional_weights = numpy.array([[0, 1], [2, 3], [4, 5], [6, 7]], dtype=float)
    prj.set(weight=positional_weights)
    expected = positional_weights
    actual = prj.get('weight', format='array')
    if mpi_rank == 0:
        assert_arrays_equal(actual, expected)

    u_list = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
    prj.set(U=u_list)
    expected = numpy.array([[0.9, 0.8], [0.7, 0.6], [0.5, 0.4], [0.3, 0.2]])
    actual = prj.get('U', format='array')
    if mpi_rank == 0:
        assert_arrays_equal(actual, expected)

    f_delay = lambda d: 0.5+d
    prj.set(delay=f_delay)
    expected = numpy.array([[0.5, 1.5], [1.5, 0.5], [2.5, 1.5], [3.5, 2.5]])
    actual = prj.get('delay', format='array')
    if mpi_rank == 0:
        assert_arrays_equal(actual, expected)

    # final sanity check
    expected = numpy.array([
        (0.0, 0.0, 0.0, 0.5, 0.9),
        (0.0, 1.0, 1.0, 1.5, 0.8),
        (1.0, 0.0, 2.0, 1.5, 0.7),
        (1.0, 1.0, 3.0, 0.5, 0.6),
        (2.0, 0.0, 4.0, 2.5, 0.5),
        (2.0, 1.0, 5.0, 1.5, 0.4),
        (3.0, 0.0, 6.0, 3.5, 0.3),
        (3.0, 1.0, 7.0, 2.5, 0.2),
    ])
    actual = numpy.array(prj.get(['weight', 'delay', 'U'], format='list'))
    if mpi_rank == 0:
        ind = numpy.lexsort((actual[:, 1], actual[:, 0]))
        assert_arrays_equal(actual[ind], expected)
Example #19
def get_rgrid_nrow_ncol(grid_layer):
    """
    Description
    ----------
    Get number of rows (nrow) and columns (ncol) of a structured grid

    Parameters
    ----------
    grid_layer : the structured grid layer

    Returns
    -------
    (nrow, ncol)


    Examples
    --------
    >>> nrow, ncol = get_rgrid_nrow_ncol(layer)
    """

    # TODO : check if the grid is actually regular 
    
    # Init variables 
    all_features = {feat.id():feat for feat in grid_layer.getFeatures()}
    allCentroids = [feat.geometry().centroid().asPoint()
                    for feat in all_features.values()]
    centroids_ids = list(all_features.keys())
    centroids_x = [centroid.x() for centroid in allCentroids]
    centroids_y = [centroid.y() for centroid in allCentroids]
    centroids = np.array( [centroids_ids , centroids_x, centroids_y] )
    centroids = centroids.T

    # get ncol :
    # sort by decreasing y and increasing x
    idx_row = np.lexsort([centroids[:,1],-centroids[:,2]])
    yy = centroids[idx_row,2]
    # iterate along first row and count number of items with same y
    i = 0
    while is_equal(yy[i], yy[i+1]):
        i += 1
        if i >= (yy.size - 1):
            break  # for one-row grids
    ncol = i + 1

    # get nrow :
    # sort by increasing x and decreasing y
    idx_col = np.lexsort([-centroids[:,2],centroids[:,1]])
    xx=centroids[idx_col,1]
    # iterate over first col and count number of items with same x
    i = 0
    while is_equal(xx[i], xx[i+1]):
        i += 1
        if i >= (xx.size - 1):
            break  # for one-column grids
    nrow = i + 1

    # return nrow, ncol
    return(nrow, ncol)
Example #20
def _cplxreal(z, tol=None):

    import numpy as np
    from numpy import atleast_1d, atleast_2d, array

    z = atleast_1d(z)
    if z.size == 0:
        return z, z
    elif z.ndim != 1:
        raise ValueError('_cplxreal only accepts 1D input')

    if tol is None:
        # Get tolerance from dtype of input
        tol = 100 * np.finfo((1.0 * z).dtype).eps

    # Sort by real part, magnitude of imaginary part (speed up further sorting)
    z = z[np.lexsort((abs(z.imag), z.real))]

    # Split reals from conjugate pairs
    real_indices = abs(z.imag) <= tol * abs(z)
    zr = z[real_indices].real

    if len(zr) == len(z):
        # Input is entirely real
        return array([]), zr

    # Split positive and negative halves of conjugates
    z = z[~real_indices]
    zp = z[z.imag > 0]
    zn = z[z.imag < 0]

    if len(zp) != len(zn):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Find runs of (approximately) the same real part
    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
    diffs = np.diff(np.concatenate(([0], same_real, [0])))
    run_starts = np.where(diffs > 0)[0]
    run_stops = np.where(diffs < 0)[0]

    # Sort each run by their imaginary parts
    for i in range(len(run_starts)):
        start = run_starts[i]
        stop = run_stops[i] + 1
        for chunk in (zp[start:stop], zn[start:stop]):
            chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]

    # Check that negatives match positives
    if any(abs(zp - zn.conj()) > tol * abs(zn)):
        raise ValueError('Array contains complex value with no matching '
                         'conjugate.')

    # Average out numerical inaccuracy in real vs imag parts of pairs
    zc = (zp + zn.conj()) / 2

    return zc, zr
Example #21
def TipDetector(I):
    
    I.flags.writeable = True
    
    # Convert RGB to YUV
    Y=0.3*I[:,:,2]+0.6*I[:,:,1]+0.1*I[:,:,0]
    V=0.4375*I[:,:,2]-0.375*I[:,:,1]-0.0625*I[:,:,0]
    U=-0.15*I[:,:,2]-0.3*I[:,:,1]+0.45*I[:,:,0]

    # Find pink
    M=np.ones((np.shape(I)[0], np.shape(I)[1]), np.uint8)*255
    for i in range(0,np.shape(I)[0]):
        for j in range(0,np.shape(I)[1]):
            if V[i,j]>15 and U[i,j]>-7:
                M[i,j]=0
    kernel = np.ones((5,5),np.uint8)   
    M = cv2.morphologyEx(M, cv2.MORPH_OPEN, kernel)
    M=cv2.GaussianBlur(M,(7,7),8)
    
    # find Harris corners in pink mask
    dst = cv2.cornerHarris(M,5,3,0.04)
    dst = cv2.dilate(dst,None)
    ret, dst = cv2.threshold(dst,0.7*dst.max(),255,0)
    dst = np.uint8(dst)
    E = np.where(dst > 0.01*dst.max())
    
    # find Harris corners in image
    gray1 = cv2.cvtColor(I,cv2.COLOR_BGR2GRAY)
    gray1 = np.float32(gray1)
    dst1 = cv2.cornerHarris(gray1,3,3,0.04)
    dst1 = cv2.dilate(dst1,None)
    ret1, dst1 = cv2.threshold(dst1,0.01*dst1.max(),255,0)
    dst1 = np.uint8(dst1)
    E1 = np.where(dst1 > 0.01*dst1.max())

    # no tip identified (no corners found in either mask)
    if E[0].size == 0 or E1[0].size == 0:
        return [0,0]
    
    # Rearrange the coordinates in more readable format
    ind1 = np.lexsort((E1[1],E1[0]))
    C1=[(E1[1][i],E1[0][i]) for i in ind1]
    ind = np.lexsort((E[1],E[0]))
    C=[(E[1][i],E[0][i]) for i in ind]
    
    # Identify the tip
    D=[]
    for i in range(1,np.shape(C1)[0]):
        for j in range(1,np.shape(C)[0]):
            if abs(C1[i][0]-C[j][0])<5 and abs(C1[i][1]-C[j][1])<5:
                D.append([int(np.uint(C1[i][0]*2)), int(np.uint(C1[i][1]*2))])
    if not D:
        return [0,0]
    else:
        return count(D)
Example #22
def Main( 
    Dataset = None,
    Axis = 0,
    PreserveOrder = False,
    CheckArguments = True,
    ):
    if ( CheckArguments ):
        ArgumentErrorMessage = ""

        DatasetType = str(type(Dataset))
        #Type saving
        if ( DatasetType == "<type 'list'>"):
            Dataset = numpy.array(Dataset)
            DatasetDimension = 0
        if ( Type_NumpyOneDimensionalDataset.Main(Dataset) ):
            DatasetDimension = 1
            Dataset = Library_CastNumpyOneDimensionalDatasetToNumpyTwoDimensionalDataset.Main(Dataset)
        elif ( Type_NumpyTwoDimensionalDataset.Main(Dataset) ):
            DatasetDimension = 2
        else:
            ArgumentErrorMessage += "( Type_NumpyTwoDimensionalDataset.Main(Dataset) == False)\n"
        if ( len(ArgumentErrorMessage) > 0 ):
            raise Exception(ArgumentErrorMessage)

    #if (SortPriorityIndexes == []):
    if (Axis == 1) :
        SortedDatasetIndexes = numpy.lexsort(Dataset)
        SortedDataset = numpy.array( Dataset.T[SortedDatasetIndexes] ).T

    elif (Axis == 0):
        SortedDatasetIndexes = numpy.lexsort(Dataset.T).T
        SortedDataset = numpy.array( Dataset[SortedDatasetIndexes] )
    else:
        raise Exception("Only eligible Axis values are 0 or 1")

    if (CheckArguments):
        if (DatasetDimension == 1):
            #
            #import numpy
            #a=[2.3, 1.23, 3.4, 0.4]
            #a_sorted = numpy.sorted(a)
            #a_order = numpy.argsort(a)
            #
            SortedDataset = SortedDataset.T[0]

        if (DatasetType == "<type 'list'>" ):
            SortedDataset = SortedDataset.tolist()

    Result = None
    if ( PreserveOrder ):   
        Result = ( SortedDataset, SortedDatasetIndexes )
    else:
        Result = SortedDataset
    
    return Result
Example #23
def convert(cst_file_in, scalar_file_out):
    """
    Calculates a scalar element pattern file from a CST element pattern file

    Parameters
    ----------
    cst_file_in : string
        Input CST format element pattern file
    scalar_file_out : string
        Output scalar format element pattern file

    Notes
    -----
    This function is designed to be used to create scalar element input files
    for the oskar_fit_element_data application.
    """

    import numpy as np

    # Load the CST element pattern data for X. (Ignore lines that don't consist
    # of 8 floats)
    X = load_cst_file(cst_file_in)
    # Only require columns for:
    # Theta, Phi, Abs(Theta), Phase(Theta), Abs(Phi), Phase(Phi)
    X = np.copy(X[:, [0, 1, 3, 4, 5, 6]])

    # Generate the rotated data for Y from X by adding 90 degrees to the phi
    # values
    Y = np.copy(X)
    Y[:, 1] += 90.0
    Y[Y[:, 1] >= 360.0, 1] -= 360.0

    # Linked column sort by phi and then theta for both X and Y.
    X = X[np.lexsort((X[:, 1], X[:, 0])), :]
    Y = Y[np.lexsort((Y[:, 1], Y[:, 0])), :]
    assert(np.sum(X[:, 0] == Y[:, 0]) == len(X[:, 0]))
    assert(np.sum(X[:, 1] == Y[:, 1]) == len(X[:, 1]))

    # Generate scalar values from sorted data.
    X_theta = X[:, 2] * np.exp(1j * X[:, 3] * np.pi / 180.0)
    X_phi = X[:, 4] * np.exp(1j * X[:, 5] * np.pi / 180.0)
    Y_theta = Y[:, 2] * np.exp(1j * Y[:, 3] * np.pi / 180.0)
    Y_phi = Y[:, 4] * np.exp(1j * Y[:, 5] * np.pi / 180.0)
    s = X_theta * np.conj(X_theta) + X_phi * np.conj(X_phi) + \
        Y_theta * np.conj(Y_theta) + Y_phi * np.conj(Y_phi)

    # Take the sqrt to convert to a 'voltage'
    s = np.sqrt(0.5 * s)
    s_amp = np.absolute(s)
    s_phase = np.angle(s, deg=True)

    # Write scalar values to file Columns = (theta, phi, amp, phase).
    o = np.column_stack((X[:, 0], X[:, 1], s_amp, s_phase))
    np.savetxt(scalar_file_out, o, fmt=['%12.4f', '%12.4f', '%20.6e',
                                        '%12.4f'])
Example #24
def arrangeToList(A):
    L = np.zeros((1, 2))
    if np.shape(A)[1] == 1:
        L = np.vstack((L, [A[1], A[0]]))
    elif np.shape(A)[1] > 1:
        for i in range(0, np.shape(A)[1]):
            L = np.vstack((L, [A[1][i], A[0][i]]))
    temp = L.view(np.ndarray)
    # sort the rows by the second column and return the sorted result
    L = temp[np.lexsort((temp[:, 1],))]
    return L
Example #25
    def test_basic(self):
        a = [1,2,1,3,1,5]
        b = [0,4,5,6,2,3]
        idx = np.lexsort((b,a))
        expected_idx = np.array([0,4,2,1,3,5])
        assert_array_equal(idx,expected_idx)

        x = np.vstack((b,a))
        idx = np.lexsort(x)
        assert_array_equal(idx,expected_idx)

        assert_array_equal(x[1][idx],np.sort(x[1]))
Example #26
    def build_map(self, data_c):
        """
        """
        made_copy = False
        raw_len = len(data_c)
        current_pos = raw_len % self.length
        print 'map builder buildmap raw_len, length = {}, {}'.format(raw_len, self.length)
        data_x = self._data_x
        data_y = self._data_y
        if current_pos != 0:
            last = data_c[-1]
            data_c = numpy.append(data_c,
                            last*numpy.ones(self.length - current_pos))
            data_x = numpy.append(data_x,
                            max(data_x)*numpy.ones(self.length - current_pos))
            data_y = numpy.append(data_y,
                            max(data_y)*numpy.ones(self.length - current_pos))
            self.made_copy = True

        self.available_length = len(data_c) - raw_len
        print 'map builder buildmap len = {}'.format(len(data_c))

        if self.mode == 'basic':
            self.first_index = raw_len

        elif self.mode == 'reverse':
            index = numpy.arange(len(data_c))
            l = self.length
            index = index*(1+self.parity*(-1)**(index/l))/2 +\
                ((index/l+1)*l-index%l-1)*(1+self.parity*(-1)**(index/l+1))/2

            self.parity = (-1)**(raw_len/l)
            data_c = data_c[index]
            made_copy = True
            if self.parity == 1:
                self.first_index = raw_len
            else:
                self.first_index = self.length*raw_len/self.length

        else:
            if self.transpose:
                index = numpy.lexsort((data_x, data_y))
            else:
                index = numpy.lexsort((data_y, data_x))
            data_c = data_c[index]
            made_copy = True

        if not self.transpose:
            return numpy.reshape(data_c, (self.length, -1), order = 'F'),\
                    made_copy
        else:
            return numpy.reshape(data_c,(self.length, -1), order = 'F').T,\
                    made_copy
Example #27
def build_rank_vectors(merged_peaks):
    # allocate memory for the ranks vector
    s1 = numpy.zeros(len(merged_peaks))
    s2 = numpy.zeros(len(merged_peaks))
    # add the signal
    for i, x in enumerate(merged_peaks):
        s1[i], s2[i] = x.signals

    rank1 = numpy.lexsort((numpy.random.random(len(s1)), s1)).argsort()
    rank2 = numpy.lexsort((numpy.random.random(len(s2)), s2)).argsort()
    
    return ( numpy.array(rank1, dtype=int), 
             numpy.array(rank2, dtype=int) )
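The pattern above — lexsort with a random array as the first key so ties are broken randomly, then .argsort() to turn the ordering into ranks — is a compact way to rank with random tie-breaking. A hedged sketch on a made-up signal vector:

import numpy as np

rng = np.random.RandomState(0)
signal = np.array([3.0, 1.0, 3.0, 2.0])

# signal (last key) is primary; the random key only decides among equal signals
order = np.lexsort((rng.random_sample(len(signal)), signal))
ranks = order.argsort()   # rank of each element, 0 = smallest signal

print(ranks)   # e.g. [2 0 3 1] or [3 0 2 1], depending on the tie break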
Example #28
 def coop(self,coldatax,coldatay):
     oper = self.paralib
     if oper['oper'] in  ['Shift','FlipSign','CutStart','CutEnd','Scale']:
         newcoldatax = self.oper(coldatax)
         newcoldatay = self.oper(coldatay)
         
     elif oper['oper'] == 'CutNegative':
         ''' cut the initial negative portion and until n continuous point reach certain value'''
         # which column to detect
         if 'mode' not in oper.keys():
             oper['mode'] = 'x'
         
         if 'nodenum' not in oper.keys():
             oper['nodenum'] = 10
         
         # operation
         if oper['mode'] == 'x':
             id = search_continue(coldatax,oper['nodenum'],0,mode='LargerThan')
             newcoldatax = coldatax[id:]
             newcoldatay = coldatay[id:]
             
     elif oper['oper'] == 'CutDrop':
         if 'mode' not in oper.keys():
             oper['mode'] = 'y'
         
         if oper['mode'] == 'y':  
             id = search_drop(coldatay,oper['scalar'])
             newcoldatax = coldatax[:id]
             newcoldatay = coldatay[:id]               
                 
     elif oper['oper'] == 'Sort':
         if 'mode' not in oper.keys():
             oper['mode'] = 'y'
         
         #datapair = np.vstack([[coldatax.T],[coldatay.T]])
         # sort based on y column
         if oper['mode'] == 'y':  
             ind = np.lexsort((coldatax, coldatay))     
             newcoldatax = coldatax[ind]
             newcoldatay = coldatay[ind]        
         # sort based on x column
         
         elif oper['mode'] == 'x':  
             ind = np.lexsort((coldatay, coldatax))     
             newcoldatax = coldatax[ind]
             newcoldatay = coldatay[ind]         
             
         else:
             raise KeyError("Operation '{}' is not defined\n".format(oper['oper']))
         
     return newcoldatax,newcoldatay
Example #29
    def compute_network(self): 
        """Solves arbitrary graphs instead of raster grids."""
        (g_graph, node_names) = self.read_graph(self.options.habitat_file)
        
        fp = None
        if self.options.scenario == 'pairwise':
            if self.options.use_included_pairs==True:
                self.state.included_pairs = IncludeExcludePairs(self.options.included_pairs_file)
            
            focal_nodes = self.read_focal_nodes(self.options.point_file)
            fp = FocalPoints(focal_nodes, self.state.included_pairs, True)            
        elif self.options.scenario == 'advanced':
            self.state.source_map = CSIO.read_point_strengths(self.options.source_file)
            self.state.ground_map = CSIO.read_point_strengths(self.options.ground_file)        
        
        g_habitat = HabitatGraph(g_graph=g_graph, node_names=node_names)
        out = Output(self.options, self.state, False, node_names)
        if self.options.write_cur_maps:
            out.alloc_c_map('')
        
        Compute.logger.info('Calling solver module.')
        Compute.logger.info('Graph has ' + str(g_habitat.num_nodes) + ' nodes and '+ str(g_habitat.num_components)+ ' components.')
        if self.options.scenario == 'pairwise':
            (resistances, solver_failed) = self.single_ground_all_pair_resistances(g_habitat, fp, out, True)
            if self.options.write_cur_maps:
                full_branch_currents, full_node_currents, _bca, _np = out.get_c_map('')            
            _resistances, resistances_3col = self.write_resistances(fp.point_ids, resistances)
            result1 = resistances_3col
        elif self.options.scenario == 'advanced':
            self.options.write_max_cur_maps = False
            voltages, current_map, solver_failed = self.advanced_module(g_habitat, out, self.state.source_map, self.state.ground_map)
            if self.options.write_cur_maps:
                full_branch_currents, full_node_currents, _bca, _np = current_map
            result1 = voltages
            
        if solver_failed == True:
            Compute.logger.error('Solver failed')
            
        if self.options.write_cur_maps:
            full_branch_currents = Output._convert_graph_to_3_col(full_branch_currents, node_names)
            full_node_currents = Output._append_names_to_node_currents(full_node_currents, node_names)

            ind = np.lexsort((full_branch_currents[:, 1], full_branch_currents[:, 0]))
            full_branch_currents = full_branch_currents[ind]

            ind = np.lexsort((full_node_currents[:, 1], full_node_currents[:, 0]))
            full_node_currents = full_node_currents[ind]

            CSIO.write_currents(self.options.output_file, full_branch_currents, full_node_currents, '',self.options)
            
        return result1, solver_failed
Example #30
    def __init__(self):
        """
        Read and initialize relation, concept, and assertion arrays from disk.
        """
        data = np.load(DATA_FILENAME)
        self.relations = data['relations']
        self.concepts = data['concepts']

        self.assertions = dict()
        for i, relation in enumerate(self.relations):
            edges = data[str(i)]
            self.assertions[relation] = edges[np.lexsort((edges[:, 1], edges[:, 0]))]

            flipped = np.fliplr(edges)
            self.assertions[('!', relation)] = flipped[np.lexsort((flipped[:, 1], flipped[:, 0]))]
Example #31
def transform2equi(c_pts, h_c_mean, h_f_mean, W, H, fp_size, scale):
    c_pts = c_pts.squeeze(1)

    c_cor = np_xy2coor(np.array(c_pts), h_c_mean, W, H, fp_size * scale,
                       fp_size * scale)
    f_cor = np_xy2coor(np.array(c_pts), -h_f_mean, W, H, fp_size * scale,
                       fp_size * scale)  ####based on the ceiling shape

    cor_count = len(c_cor)

    c_ind = np.lexsort((c_cor[:, 1], c_cor[:, 0]))
    f_ind = np.lexsort((f_cor[:, 1], f_cor[:, 0]))

    ####sorted by theta (pixels coords)
    c_cor = c_cor[c_ind]
    f_cor = f_cor[f_ind]

    cor_id = []

    for j in range(len(c_cor)):
        cor_id.append(c_cor[j])
        cor_id.append(f_cor[j])

    cor_id = np.array(cor_id)

    cor = np.roll(cor_id[:, :2], -2 * np.argmin(cor_id[::2, 0]), 0)

    ##print('cor shape',cor.shape)

    # Prepare 1d ceiling-wall/floor-wall boundary
    bon_ceil_x, bon_ceil_y = [], []
    bon_floor_x, bon_floor_y = [], []

    n_cor = len(cor)

    for i in range(n_cor // 2):
        xys = pano_connect_points(cor[i * 2], cor[(i * 2 + 2) % n_cor], z=-50)
        bon_ceil_x.extend(xys[:, 0])
        bon_ceil_y.extend(xys[:, 1])

        ##print('ceiling list',len(bon_ceil_x),len(bon_ceil_y))

    draw_floor_mask = True

    if draw_floor_mask:
        for i in range(n_cor // 2):
            ##NB expecting corner coords in pixel
            xys = pano_connect_points(cor[i * 2 + 1],
                                      cor[(i * 2 + 3) % n_cor],
                                      z=50)
            bon_floor_x.extend(xys[:, 0])
            bon_floor_y.extend(xys[:, 1])
        else:
            ##NB using only ceiling shape
            for i in range(n_cor // 2):
                ###NB expecting corner coords in pixel
                xys = pano_connect_points(cor[i * 2],
                                          cor[(i * 2 + 2) % n_cor],
                                          z=50)
                bon_floor_x.extend(xys[:, 0])
                bon_floor_y.extend(xys[:, 1])

    bon_ceil_x, bon_ceil_y = sort_xy_filter_unique(bon_ceil_x,
                                                   bon_ceil_y,
                                                   y_small_first=True)
    bon_floor_x, bon_floor_y = sort_xy_filter_unique(bon_floor_x,
                                                     bon_floor_y,
                                                     y_small_first=False)

    bon = np.zeros((2, W))
    bon[0] = np.interp(np.arange(W), bon_ceil_x, bon_ceil_y, period=W)
    bon[1] = np.interp(np.arange(W), bon_floor_x, bon_floor_y, period=W)

    ###normalize to image height (from px to 0-1)
    bon = ((bon + 0.5) / H - 0.5) * np.pi

    return bon
Example #32
 def argsort(self, *args, **kwargs) -> np.ndarray:
     return np.lexsort((self.right, self.left))
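This argsort orders intervals by left endpoint first (the key passed last), breaking ties by right endpoint. A tiny sketch of the same call on plain endpoint arrays:

import numpy as np

left = np.array([2, 0, 2, 1])
right = np.array([5, 3, 4, 2])

order = np.lexsort((right, left))   # primary: left, secondary: right
print(list(zip(left[order].tolist(), right[order].tolist())))
# [(0, 3), (1, 2), (2, 4), (2, 5)]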
Example #33
    def _deg_sort(self):
        """Sorts atoms by degree and reorders internal data structures.

        Sort the order of the atom_features by degree, maintaining original order
        whenever two atom_features have the same degree.
        """
        old_ind = range(self.get_num_atoms())
        deg_list = self.deg_list
        new_ind = list(np.lexsort((old_ind, deg_list)))

        num_atoms = self.get_num_atoms()

        # Reorder old atom_features
        self.atom_features = self.atom_features[new_ind, :]

        # Reorder old deg lists
        self.deg_list = [self.deg_list[i] for i in new_ind]

        # Sort membership
        self.membership = [self.membership[i] for i in new_ind]

        # Create old to new dictionary. not exactly intuitive
        old_to_new = dict(zip(new_ind, old_ind))

        # Reorder adjacency lists
        self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind]
        self.canon_adj_list = [[old_to_new[k] for k in self.canon_adj_list[i]]
                               for i in range(len(new_ind))]

        # Get numpy version of degree list for indexing
        deg_array = np.array(self.deg_list)

        # Initialize adj_lists, which supports min_deg = 1 only
        self.deg_adj_lists = (self.max_deg + 1 - self.min_deg) * [0]

        # Parse as deg separated
        for deg in range(self.min_deg, self.max_deg + 1):
            # Get indices corresponding to the current degree
            rng = np.array(range(num_atoms))
            indices = rng[deg_array == deg]

            # Extract and save adjacency list for the current degree
            to_cat = [self.canon_adj_list[i] for i in indices]
            if len(to_cat) > 0:
                adj_list = np.vstack([self.canon_adj_list[i] for i in indices])
                self.deg_adj_lists[deg - self.min_deg] = adj_list

            else:
                self.deg_adj_lists[deg - self.min_deg] = np.zeros(
                    [0, deg], dtype=np.int32)

        # Construct the slice information
        deg_slice = np.zeros([self.max_deg + 1 - self.min_deg, 2],
                             dtype=np.int32)

        for deg in range(self.min_deg, self.max_deg + 1):
            if deg == 0:
                deg_size = np.sum(deg_array == deg)
            else:
                deg_size = self.deg_adj_lists[deg - self.min_deg].shape[0]

            deg_slice[deg - self.min_deg, 1] = deg_size
            # Get the cumulative indices after the first index
            if deg > self.min_deg:
                deg_slice[deg - self.min_deg,
                          0] = (deg_slice[deg - self.min_deg - 1, 0] +
                                deg_slice[deg - self.min_deg - 1, 1])

        # Set indices with zero sized slices to zero to avoid indexing errors
        deg_slice[:, 0] *= (deg_slice[:, 1] != 0)
        self.deg_slice = deg_slice
Example #34
coords = exodus.variables['coord'][:].transpose()

#NOTE: careful of python index starting at 0
# a, = np.nonzero(ids == 17)[0] #kept as numpy array
ids = list(exodus.variables['ns_prop1'][:])
nsID = ids.index(nodesetID) + 1
# corresponding names:
#print [''.join(x) for x in exodus.variables['ns_names'][:]]
nsName = 'node_ns{}'.format(nsID)
nodes = exodus.variables[nsName][:] -1
surface = coords[nodes]

# Sort by x-coordinate for readability
#surface.sort(axis=0) #gotcha,,, works elementwise
#output = surface[surface[:,0].argsort()] # dirty method to sort by 1st column
# sort by 1st, then 2nd column by converting to record array:
#surface.view('f8,f8,f8').sort(order=['f0','f1'], axis=0)
# Confusing!!! Perhaps use pandas, but here is the short answer, NOTE: first column
# to sort on comes last in list!
ind = np.lexsort( (surface[:,1], surface[:,0]) )
output = surface[ind]

# Only output every n'th point:
n = 5
output = output[::n]

# Export to file
np.savetxt(outName, output, fmt=fmt, header=header)

print('Saved', outName)
Example #35
def inverse_kin(T06, a, d, l, debug=False):
    qs1 = []
    T = inv_mat(Tb0) * T06 * inv_mat(T6e)
    A = d[5] * T[1, 2] - T[1, 3]
    B = d[5] * T[0, 2] - T[0, 3]
    R = A * A + B * B
    if abs(A) < ZERO_THRESH:
        print '1: A low'
        return []
    elif abs(B) < ZERO_THRESH:
        print '1: B low'
        return []
    elif d[3] * d[3] > R:
        #print '1: Impossible solution'
        return []
    else:
        for i in range(2):
            qs1.append([0.] * 6)
        acos = arccos(d[3] / sqrt(R))
        atan = arctan2(B, A)
        pos = acos - atan
        neg = -acos - atan
        if pos >= 0.:
            qs1[0][0] = pos
        else:
            qs1[0][0] = 2. * pi + pos
        if neg >= 0.:
            qs1[1][0] = neg
        else:
            qs1[1][0] = 2. * pi + neg
        #if pos < 0:
        #    qs1[2][0] = pos + 2.*pi
        #else:
        #    qs1[2][0] = pos - 2.*pi
        #if neg < 0:
        #    qs1[3][0] = neg + 2.*pi
        #else:
        #    qs1[3][0] = neg - 2.*pi
    qs2 = []
    for i in range(len(qs1)):
        for j in range(2):
            qs2.append(copy.copy(qs1[i]))
        if debug:
            print 'h', T[0, 2] * sin(qs1[i][0]) - T[1, 2] * cos(qs1[i][0])
            print 'h2', T
            acos = arccos(T[0, 2] * sin(qs1[i][0]) - T[1, 2] * cos(qs1[i][0]))
            print 'h3', qs1[i][0]
            print 'h2', (T[0, 3] * sin(qs1[i][0]) - T[1, 3] * cos(qs1[i][0]) -
                         d[3]) / d[5]
        acos = arccos(
            (T[0, 3] * sin(qs1[i][0]) - T[1, 3] * cos(qs1[i][0]) - d[3]) /
            d[5])
        if acos >= 0.:
            qs2[i * 2 + 0][4] = acos
            qs2[i * 2 + 1][4] = 2. * pi - acos
        else:
            qs2[i * 2 + 0][4] = -acos
            qs2[i * 2 + 1][4] = 2. * pi + acos

    qs3 = []
    for i in range(len(qs2)):
        for j in range(2):
            qs3.append(copy.copy(qs2[i]))
        s4 = sin(qs2[i][4])
        #print 's4', s4
        #print 'h2', (T[0,0]*sin(qs2[i][0]) - T[1,0]*cos(qs2[i][0]))
        #c1, s1 = cos(qs2[i][0]), sin(qs2[i][0])
        #acos = arctan2(-(T[1,1]*c1-T[0,1]*s1), T[1,0]*c1-T[0,0]*s1)
        #acos = np.arctan(T[2,0]/ T[2,1])
        #print 'k', acos
        #print 'k2', acos
        #acos = ( (-1.)**(i%2+0)* np.sign(T[2,2])**2 *pi/2.
        #        +(-1.)**2* np.sign(T[2,2])**2 *arcsin(T[1,0])
        #        +(-1.)**2* np.sign(T[2,2])**2 *qs2[i][0])
        if abs(s4) < ZERO_THRESH:
            #print '6: s4 low'
            qs3[i][5] = 0.
            qs3[i + 1][5] = pi
        elif abs(abs(s4) - 1.) < ZERO_THRESH:
            acos = (-1.)**(i % 2) * pi / 2. + arcsin(T[1, 0]) + qs2[i][0]
            if acos >= 0.:
                if T[2, 2] >= 0.:
                    qs3[i * 2 + 0][5] = 2. * pi - acos
                    qs3[i * 2 + 1][5] = 2. * pi - acos
                else:
                    qs3[i * 2 + 0][5] = acos
                    qs3[i * 2 + 1][5] = acos
            else:
                if T[2, 2] >= 0.:
                    qs3[i * 2 + 0][5] = -acos
                    qs3[i * 2 + 1][5] = -acos
                else:
                    qs3[i * 2 + 0][5] = 2. * pi + acos
                    qs3[i * 2 + 1][5] = 2. * pi + acos
        else:
            acos = arccos(
                (T[0, 0] * sin(qs2[i][0]) - T[1, 0] * cos(qs2[i][0])) / s4)
            #if abs(cos(acos-qs2[i][0])) - abs(T[0,0]) > ZERO_THRESH:
            #    acos += pi
            #if qs2[0][0] < pi and T[2,2] < 0.:
            #    acos -= pi
            if acos >= 0.:
                #if T[2,2] >= 0.:
                #    qs3[i*1+0][5] = 2.*pi-acos
                #else:
                #    qs3[i*1+0][5] = acos
                qs3[i * 2 + 0][5] = 2. * pi - acos
                qs3[i * 2 + 1][5] = acos
            else:
                #if T[2,2] >= 0.:
                #    qs3[i*1+0][5] = -acos
                #else:
                #    qs3[i*1+0][5] = 2.*pi+acos
                qs3[i * 2 + 0][5] = -acos
                qs3[i * 2 + 1][5] = 2. * pi + acos
        #print 'ssss', s4, qs3[i*1+0][5], qs3[i*1+1][5]

        #print '1111111111111', cos(qs3[i][5])*sin(qs3[i][0])*sin(qs3[i][4]) - cos(qs3[i][0])*sin(qs3[i][5]),  cos(qs3[i][5])*sin(qs3[i][0])*sin(qs3[i][4]) + cos(qs3[i][0])*sin(qs3[i][5]), T[0,0], T[1,0]
        for k in [0, 1]:
            if abs(abs(s4) - 1.) < ZERO_THRESH:
                tmp1 = cos(qs3[2 * i + k][5]) * sin(qs3[2 * i + k][0]) * sin(
                    qs3[2 * i + k][4])
                tmp2 = cos(qs3[2 * i + k][0]) * sin(qs3[2 * i + k][5])
                #print sin(qs3[2*i+k][4])
                if abs(abs(tmp1 - tmp2) - abs(T[0, 0])) < ZERO_THRESH:
                    if np.sign(tmp1 - tmp2) != np.sign(T[0, 0]):
                        #qs3[2*i+k][5] -= pi
                        #qs3[2*i+k][0] *= -1
                        #qs3[i][5] *= -1
                        if sin(qs3[2 * i + k][4]) > 0:
                            qs3[2 * i +
                                k][5] = -qs3[2 * i + k][5] + 2 * qs3[2 * i +
                                                                     k][0]
                        else:
                            qs3[2 * i +
                                k][5] = -qs3[2 * i + k][5] - 2 * qs3[2 * i +
                                                                     k][0]
                        #print tmp1 - tmp2
                        #print T[0,0]
                        #print 'yo1'
                else:
                    if np.sign(tmp1 + tmp2) != np.sign(T[0, 0]):
                        #qs3[i][5] -= pi
                        #qs3[i][0] *= -1
                        #qs3[i][5] *= -1
                        if sin(qs3[2 * i + k][4]) < 0:
                            qs3[2 * i +
                                k][5] = -qs3[2 * i + k][5] + 2 * qs3[2 * i +
                                                                     k][0]
                        else:
                            qs3[2 * i +
                                k][5] = -qs3[2 * i + k][5] - 2 * qs3[2 * i +
                                                                     k][0]
                        #print tmp1 + tmp2
                        #print T[0,0]
                        #print 'yo2'
                while qs3[2 * i + k][5] < 0.:
                    qs3[2 * i + k][5] += 2. * pi
                while qs3[2 * i + k][5] > 2. * pi:
                    qs3[2 * i + k][5] -= 2. * pi
        if debug:
            print 'yeh', qs3[i]

        if False:
            print 'wwwwwwwwwwwwwwww', sin(qs3[i][5] +
                                          qs3[i][0]), sin(qs3[i][5] -
                                                          qs3[i][0]), T[0, 0]
            print 'qqqqqqqqqqqqqqqq', cos(qs3[i][5] +
                                          qs3[i][0]), cos(qs3[i][5] -
                                                          qs3[i][0]), T[0, 1]
            flip_sign_sin, flip_sign_cos, flip_sub_sin, flip_sub_cos = False, False, False, False
            flip_diff = False
            if abs(abs(sin(qs3[i][5] + qs3[i][0])) -
                   abs(T[0, 0])) > ZERO_THRESH:
                qs3[i][5] -= 2 * qs3[i][0]
                print 'a'
            print 'wwwwwwwwwwwwwwww', sin(qs3[i][5] +
                                          qs3[i][0]), sin(qs3[i][5] -
                                                          qs3[i][0]), T[0, 0]

            if abs(sin(qs3[i][5] + qs3[i][0]) - T[0, 0]) > ZERO_THRESH:
                flip_sign_sin = True
            if abs(cos(qs3[i][5] + qs3[i][0]) - T[0, 1]) > ZERO_THRESH:
                flip_sign_cos = True
            if flip_sign_sin:
                if flip_sign_cos:
                    qs3[i][5] += pi
                    print 'b'
                else:
                    qs3[i][5] = -qs3[i][5]
                    #qs3[i][5] = -qs3[i][5] - 2*qs3[i][0]
                    qs3[i][0] = -qs3[i][0]
                    print 'c'
            elif flip_sign_cos:
                qs3[i][5] = pi - qs3[i][5]
                #qs3[i][5] = pi -qs3[i][5] - 2*qs3[i][0]
                qs3[i][0] = -qs3[i][0]
                print 'd'
            print 'e'

            print '3333333333333333', sin(qs3[i][5] +
                                          qs3[i][0]), sin(qs3[i][5] -
                                                          qs3[i][0]), T[0, 0]
            print '4444444444444444', cos(qs3[i][5] +
                                          qs3[i][0]), cos(qs3[i][5] -
                                                          qs3[i][0]), T[0, 1]
            #qs3[i][0] -= pi
            #qs3[i][0] -= 2*acos
        #if T[0,1] >= 0.:
        #    if -T[2,2] >= 0.:
        #        qs3[i][5] -= pi
        #qs3[i*4+2][5] = 2.*pi - acos
        #qs3[i*4+3][5] = -2.*pi + acos
    qs4 = []
    for i in range(len(qs3)):
        c1, s1 = cos(qs3[i][0]), sin(qs3[i][0])
        c5, s5 = cos(qs3[i][4]), sin(qs3[i][4])
        c6, s6 = cos(qs3[i][5]), sin(qs3[i][5])
        x04x = -s5 * (T[0, 2] * c1 + T[1, 2] * s1) - c5 * (
            s6 * (T[0, 1] * c1 + T[1, 1] * s1) - c6 *
            (T[0, 0] * c1 + T[1, 0] * s1))
        x04y = c5 * (T[2, 0] * c6 - T[2, 1] * s6) - T[2, 2] * s5
        p04x = d[4] * (s6 * (T[0, 0] * c1 + T[1, 0] * s1) + c6 *
                       (T[0, 1] * c1 + T[1, 1] * s1)) - d[5] * (
                           T[0, 2] * c1 +
                           T[1, 2] * s1) + T[0, 3] * c1 + T[1, 3] * s1
        p04y = T[2, 3] - d[0] - d[5] * T[2, 2] + d[4] * (T[2, 1] * c6 +
                                                         T[2, 0] * s6)
        #_, Ts = forward_kin(qs3[i], a, d, l)
        #T14 = inv_mat(Ts[0]) * T * inv_mat(Ts[5]) * inv_mat(Ts[4])
        #qs_rrr = inverse_rrr(T14, a[1:4], d[1:4])
        if debug:
            print('lllh', p04x, p04y, x04x, x04y)
            print('kk', c1, s1, c5, s5, c6, s6)
        qs_rrr = inverse_rrr(p04x, p04y, x04x, x04y, a[1:4], d[1:4])
        for j in range(len(qs_rrr)):
            qsol = [
                qs3[i][0], qs_rrr[j][0], qs_rrr[j][1], qs_rrr[j][2], qs3[i][4],
                qs3[i][5]
            ]
            if abs(-sin(qsol[1] + qsol[2] + qsol[3]) * sin(qsol[4]) -
                   T[2, 2]) < ZERO_THRESH:
                qs4.append(qsol)
            #Tsol, _ = forward_kin(qsol, a, d, l)
            #print 'yo', qsol
            #print Tsol**-1 * T06
    if False:
        qs4 = np.array(qs4)[np.lexsort(np.mat(qs4).T)[0]]
        unique_sols = []
        qlast = np.array([-999.] * 6)
        for i in range(np.size(qs4, 0)):
            if np.sum(abs(qlast - qs4[i])) > ZERO_THRESH:
                unique_sols.append(qs4[i])
                qlast = qs4[i]
        return unique_sols
    else:
        return qs4
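
A minimal standalone sketch of the duplicate-filtering idea in the disabled branch above; the sample rows and the ZERO_THRESH tolerance are illustrative, not taken from the original:

import numpy as np

ZERO_THRESH = 1e-8  # assumed tolerance, mirroring the constant used in the function
sols = np.array([[0.1, 0.2], [0.1, 0.2 + 1e-12], [0.3, 0.4]])
sols = sols[np.lexsort(sols.T)]          # lexical sort; the last column is the primary key
unique_sols = [sols[0]]
for row in sols[1:]:
    if np.sum(np.abs(row - unique_sols[-1])) > ZERO_THRESH:
        unique_sols.append(row)
print(len(unique_sols))  # -> 2: the two near-identical rows collapse to one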
Example #36
0
def dat2hdf5(table_dir):
    """
    Convert the Marshall et al. (2006) map from *.dat.gz to *.hdf5.
    """

    import astropy.io.ascii as ascii
    import gzip
    from contextlib import closing

    readme_fname = os.path.join(table_dir, 'ReadMe')
    table_fname = os.path.join(table_dir, 'table1.dat.gz')
    h5_fname = os.path.join(table_dir, 'marshall.h5')

    # Extract the gzipped table
    with gzip.open(table_fname, 'rb') as f:
        # Read in the table using astropy's CDS table reader
        r = ascii.get_reader(ascii.Cds, readme=readme_fname)
        r.data.table_name = 'table1.dat' # Hack to deal with bug in CDS reader.
        table = r.read(f)
        print(table)

    # Reorder table entries according to Galactic (l, b)
    l = coordinates.Longitude(
        table['GLON'][:],
        wrap_angle=180.*units.deg)
    b = table['GLAT'][:]

    sort_idx = np.lexsort((b, l))

    l = l[sort_idx].astype('f4')
    b = b[sort_idx].astype('f4')
    l.shape = (801, 81)
    b.shape = (801, 81)

    # Extract arrays from the table
    chi2_all = np.reshape((table['x2all'][sort_idx]).astype('f4'), (801,81))
    chi2_giants = np.reshape((table['x2gts'][sort_idx]).astype('f4'), (801,81))

    A = np.empty((801*81,33), dtype='f4')
    sigma_A = np.empty((801*81,33), dtype='f4')
    dist = np.empty((801*81,33), dtype='f4')
    sigma_dist = np.empty((801*81,33), dtype='f4')

    for k in range(33):
        A[:,k] = table['ext{:d}'.format(k+1)][sort_idx]
        sigma_A[:,k] = table['e_ext{:d}'.format(k+1)][sort_idx]
        dist[:,k] = table['r{:d}'.format(k+1)][sort_idx]
        sigma_dist[:,k] = table['e_r{:d}'.format(k+1)][sort_idx]

    A.shape = (801,81,33)
    sigma_A.shape = (801,81,33)
    dist.shape = (801,81,33)
    sigma_dist.shape = (801,81,33)

    # Construct the HDF5 file
    h5_fname = os.path.join(table_dir, 'marshall.h5')
    filter_kwargs = dict(
        chunks=True,
        compression='gzip',
        compression_opts=3,
        # scaleoffset=4
    )

    with h5py.File(h5_fname, 'w') as f:
        dset = f.create_dataset('A', data=A, **filter_kwargs)
        dset.attrs['description'] = 'Extinction of each bin'
        dset.attrs['band'] = 'Ks (2MASS)'
        dset.attrs['units'] = 'mag'

        dset = f.create_dataset('sigma_A', data=sigma_A, **filter_kwargs)
        dset.attrs['description'] = 'Extinction uncertainty of each bin'
        dset.attrs['band'] = 'Ks (2MASS)'
        dset.attrs['units'] = 'mag'

        dset = f.create_dataset('dist', data=dist, **filter_kwargs)
        dset.attrs['description'] = 'Distance of each bin'
        dset.attrs['units'] = 'kpc'

        dset = f.create_dataset('sigma_dist', data=sigma_dist, **filter_kwargs)
        dset.attrs['description'] = 'Distance uncertainty of each bin'
        dset.attrs['units'] = 'kpc'

        dset = f.create_dataset('chi2_all', data=chi2_all, **filter_kwargs)
        dset.attrs['description'] = 'Chi^2, based on all the stars'
        dset.attrs['units'] = 'unitless'

        dset = f.create_dataset('chi2_giants', data=chi2_giants, **filter_kwargs)
        dset.attrs['description'] = 'Chi^2, based on giants only'
        dset.attrs['units'] = 'unitless'

        # filter_kwargs.pop('scaleoffset')

        dset = f.create_dataset('l', data=l, **filter_kwargs)
        dset.attrs['description'] = 'Galactic longitude'
        dset.attrs['units'] = 'deg'

        dset = f.create_dataset('b', data=b, **filter_kwargs)
        dset.attrs['description'] = 'Galactic latitude'
        dset.attrs['units'] = 'deg'
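
A quick standalone check of the key order used in the sort above: np.lexsort treats the last key in the tuple as the primary key, so (b, l) sorts by l first and breaks ties with b (demo values are made up):

import numpy as np

l_demo = np.array([10.0, 5.0, 10.0])
b_demo = np.array([1.0, 2.0, -1.0])
print(np.lexsort((b_demo, l_demo)))  # -> [1 2 0]: ordered by l, ties broken by b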
Example #37
0
    def test_01_01_test_3(self):
        expected = np.array(((0, 0), (1, 1), (2, 0), (2, 2), (3, 1), (3, 3)), int)
        result = np.array(z.get_zernike_indexes(4))
        order = np.lexsort((result[:, 1], result[:, 0]))
        result = result[order]
        self.assertTrue(np.all(expected == result))
Example #38
0
import numpy as np
import matplotlib.pyplot as plt
subsample = 100
subsample_axis = 10
position=np.loadtxt('emission random sample')
data = np.loadtxt('select emission')
plt.plot(position[:,0],position[:,1],'r.')
random_location = np.int_(np.linspace(0,len(position)-1,len(position)))
data_location   = np.int_(np.linspace(0,len(data)-1    ,len(data)    ))

x_bound = np.zeros(subsample_axis+1)
y_bound = np.zeros([subsample_axis,subsample_axis+1])
xdata   = np.zeros(subsample_axis)
data_order     = data[np.lexsort(data[:,::-1].T)]
position_order = position[np.lexsort(position[:,::-1].T)]
for i in range(subsample_axis+1):
	x_bound[i] =  np.percentile(position_order[:,0],100.0/subsample_axis*i)


random_loc0_list = []
random_loc1_list = []
random_temp      = []
random_xdata     = []
data_loc0_list = []
data_loc1_list = []
data_temp      = []
data_xdata     = []

for i in range(subsample_axis):
	random_loc0_list.append( np.where(position_order[:,0]<x_bound[i]))
	random_loc1_list.append( np.where(position_order[:,0]>=x_bound[i+1]))
Example #39
0
import sys

import numpy as np

if len(sys.argv) != 3:
    print("Usage: %s FILENAME OUTPUT" % sys.argv[0])
    sys.exit(1)

data = np.genfromtxt(sys.argv[1])

# cutoff necessary because i messed up
data[data[:, 2] > 1e3] = np.NaN
data = data.reshape(-1, samples, 3)

data = data.reshape(data.shape[0], -1, bin_size, 3)

data = np.nanmean(data, axis=2)

# find out whether there is still relaxation going on
t = np.arange(samples / bin_size)
st = np.std(t)
mt = np.mean(t)
tcorr = (np.nanmean(data[:, :, 2] * t, axis=1) -
         mt * np.nanmean(data[:, :, 2], axis=1)) / st / np.nanstd(
             data[:, :, 2], axis=1)

VTE = np.nanmean(data, axis=1)
sigE = np.nanstd(data[:, :, 2], ddof=1, axis=1) / (samples / bin_size)

# Volume, Temperature, Energy, sigEnergy, tcorrelation
c = np.hstack([VTE, sigE[:, None], tcorr[:, None]])
i = np.lexsort((c[:, 0], c[:, 1]), 0)

np.savetxt(sys.argv[2], c)
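
The tcorr expression above is the Pearson correlation of the (binned) energy column with time, used to flag runs that are still relaxing; a minimal single-series equivalent with made-up numbers:

import numpy as np

e = np.array([1.0, 1.5, 2.0, 2.5])   # an energy series that is still drifting
t = np.arange(len(e), dtype=float)
tcorr = (np.mean(e * t) - np.mean(t) * np.mean(e)) / (np.std(t) * np.std(e))
print(tcorr)  # -> 1.0: a strong trend, so this run has not finished relaxing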
Example #40
0
def multi_file_dicom(files_in, fname_out, tag, verbose):
    """Parse a list of Siemens DICOM files"""

    # Convert each file (combine after)
    data_list = []
    orientation_list = []
    dwelltime_list = []
    meta_list = []
    series_num = []
    inst_num = []
    reference = []
    str_suffix = []
    mainStr = ''
    for idx, fn in enumerate(files_in):
        if verbose:
            print(f'Converting dicom file {fn}')

        img = nibabel.nicom.dicomwrappers.wrapper_from_file(fn)

        mrs_type = svs_or_CSI(img)

        if mrs_type == 'SVS':
            specDataCmplx, orientation, dwelltime, meta_obj = process_siemens_svs(
                img, verbose=verbose)

            newshape = (1, 1, 1) + specDataCmplx.shape
            specDataCmplx = specDataCmplx.reshape(newshape)

        else:
            specDataCmplx, orientation, dwelltime, meta_obj = process_siemens_csi(
                img, verbose=verbose)

        data_list.append(specDataCmplx)
        orientation_list.append(orientation)
        dwelltime_list.append(dwelltime)
        meta_list.append(meta_obj)

        series_num.append(int(img.dcm_data.SeriesNumber))
        inst_num.append(int(img.dcm_data.InstanceNumber))

        ref_ind, str_suf = identify_integrated_references(
            img, img.dcm_data.InstanceNumber)
        reference.append(ref_ind)
        str_suffix.append(str_suf)

        if idx == 0:
            if fname_out:
                mainStr = fname_out
            elif 'SeriesDescription' in img.dcm_data:
                mainStr = img.dcm_data.SeriesDescription
            elif 'SeriesInstanceUID' in img.dcm_data:
                mainStr = img.dcm_data.SeriesInstanceUID
            else:
                raise missingTagError(
                    "Neither SeriesDescription or SeriesInstanceUID tags defined."
                    " Please specify an output filename using '-f'")

    # Sort by series and instance number
    data_list = np.asarray(data_list)
    orientation_list = np.asarray(orientation_list)
    dwelltime_list = np.asarray(dwelltime_list)
    meta_list = np.asarray(meta_list)
    series_num = np.asarray(series_num)
    inst_num = np.asarray(inst_num)
    reference = np.asarray(reference)
    str_suffix = np.asarray(str_suffix)
    files_in = np.asarray(files_in)

    sort_index = np.lexsort((inst_num, series_num))  # Sort by series then inst

    data_list = data_list[sort_index, :]
    orientation_list = orientation_list[sort_index]
    dwelltime_list = dwelltime_list[sort_index]
    meta_list = meta_list[sort_index]
    series_num = series_num[sort_index]
    inst_num = inst_num[sort_index]
    reference = reference[sort_index]
    str_suffix = str_suffix[sort_index]
    files_in = files_in[sort_index]

    group_ind = []
    for sn in np.unique(series_num):
        for rn in np.unique(reference):
            group_ind.append(
                list(
                    np.where(np.logical_and(series_num == sn,
                                            reference == rn))[0]))

    if verbose:
        print(f'Sorted series numbers: {series_num}')
        print(f'Sorted instance numbers: {inst_num}')
        print(f'Sorted reference index: {reference}')
        print(f'Output groups: {group_ind}')

    nifti_mrs_out, fnames_out = [], []
    for idx, gr in enumerate(group_ind):

        # If data shape, orientation, dwelltime match then
        # proceed
        def not_equal(lst):
            return lst[:-1] != lst[1:]

        if not_equal([d.shape for d in data_list[gr]])\
                or not_equal([o.Q44.tolist() for o in orientation_list[gr]])\
                or not_equal(list(dwelltime_list[gr])):
            raise inconsistentDataError(
                'Shape, orientation and dwelltime must match in combined data.'
            )

        fnames_out.append(mainStr + str_suffix[gr[0]])

        dt_used = dwelltime_list[gr[0]]
        or_used = orientation_list[gr[0]]

        # Add original files to nifti meta information.
        meta_used = meta_list[gr[0]]
        meta_used.set_standard_def('OriginalFile',
                                   [str(ff) for ff in files_in[gr]])

        # Combine data into 5th dimension if needed
        data_in_gr = data_list[gr]
        if len(data_in_gr) > 1:
            combined_data = np.stack(data_in_gr, axis=-1)
        else:
            combined_data = data_in_gr[0]

        # Add dimension information (if not None for default)
        if tag:
            meta_used.set_dim_info(0, tag)

        # Create NIFTI MRS object.
        nifti_mrs_out.append(
            nifti_mrs.NIfTI_MRS(combined_data, or_used.Q44, dt_used,
                                meta_used))

    # If there are any identical names then append an index
    seen = np.unique(fnames_out)
    if seen.size < len(fnames_out):
        seen_count = np.zeros(seen.shape, dtype=int)
        fnames_out_checked = []
        for fn in fnames_out:
            if fn in seen:
                seen_index = seen == fn
                fnames_out_checked.append(fn +
                                          f'_{seen_count[seen_index][0]:03}')
                seen_count[seen_index] += 1
            else:
                fnames_out_checked.append(fn)

        return nifti_mrs_out, fnames_out_checked
    else:
        return nifti_mrs_out, fnames_out
Example #41
0
    def run_image_pair_objects(self, workspace, first_image_name,
                               second_image_name, object_name):
        '''Calculate per-object correlations between intensities in two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        #
        # Crop both images to the size of the labels matrix
        #
        labels = objects.segmented
        try:
            first_pixels = objects.crop_image_similarly(first_image.pixel_data)
            first_mask = objects.crop_image_similarly(first_image.mask)
        except ValueError:
            first_pixels, m1 = cpo.size_similarly(labels,
                                                  first_image.pixel_data)
            first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
            first_mask[~m1] = False
        try:
            second_pixels = objects.crop_image_similarly(
                second_image.pixel_data)
            second_mask = objects.crop_image_similarly(second_image.mask)
        except ValueError:
            second_pixels, m1 = cpo.size_similarly(labels,
                                                   second_image.pixel_data)
            second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
            second_mask[~m1] = False
        mask = ((labels > 0) & first_mask & second_mask)
        first_pixels = first_pixels[mask]
        second_pixels = second_pixels[mask]
        labels = labels[mask]
        result = []
        first_pixel_data = first_image.pixel_data
        first_mask = first_image.mask
        first_pixel_count = np.product(first_pixel_data.shape)
        second_pixel_data = second_image.pixel_data
        second_mask = second_image.mask
        second_pixel_count = np.product(second_pixel_data.shape)
        #
        # Crop the larger image similarly to the smaller one
        #
        if first_pixel_count < second_pixel_count:
            second_pixel_data = first_image.crop_image_similarly(
                second_pixel_data)
            second_mask = first_image.crop_image_similarly(second_mask)
        elif second_pixel_count < first_pixel_count:
            first_pixel_data = second_image.crop_image_similarly(
                first_pixel_data)
            first_mask = second_image.crop_image_similarly(first_mask)
        mask = (first_mask & second_mask & (~np.isnan(first_pixel_data)) &
                (~np.isnan(second_pixel_data)))
        if np.any(mask):
            #
            # Perform the correlation, which returns:
            # [ [ii, ij],
            #   [ji, jj] ]
            #
            fi = first_pixel_data[mask]
            si = second_pixel_data[mask]

        n_objects = objects.count
        # Handle case when both images for the correlation are completely masked out

        if n_objects == 0:
            corr = np.zeros((0, ))
            overlap = np.zeros((0, ))
            K1 = np.zeros((0, ))
            K2 = np.zeros((0, ))
            M1 = np.zeros((0, ))
            M2 = np.zeros((0, ))
            RWC1 = np.zeros((0, ))
            RWC2 = np.zeros((0, ))
            C1 = np.zeros((0, ))
            C2 = np.zeros((0, ))
        elif np.where(mask)[0].__len__() == 0:
            corr = np.zeros((n_objects, ))
            corr[:] = np.NaN
            overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr
        else:
            #
            # The correlation is sum((x-mean(x))(y-mean(y)) /
            #                         ((n-1) * std(x) *std(y)))
            #
            lrange = np.arange(n_objects, dtype=np.int32) + 1
            area = fix(scind.sum(np.ones_like(labels), labels, lrange))
            mean1 = fix(scind.mean(first_pixels, labels, lrange))
            mean2 = fix(scind.mean(second_pixels, labels, lrange))
            #
            # Calculate the standard deviation times the population.
            #
            std1 = np.sqrt(
                fix(
                    scind.sum((first_pixels - mean1[labels - 1])**2, labels,
                              lrange)))
            std2 = np.sqrt(
                fix(
                    scind.sum((second_pixels - mean2[labels - 1])**2, labels,
                              lrange)))
            x = first_pixels - mean1[labels - 1]  # x - mean(x)
            y = second_pixels - mean2[labels - 1]  # y - mean(y)
            corr = fix(
                scind.sum(x * y / (std1[labels - 1] * std2[labels - 1]),
                          labels, lrange))
            # Explicitly set the correlation to NaN for masked objects
            corr[scind.sum(1, labels, lrange) == 0] = np.NaN
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Correlation coeff",
                "%.3f" % np.mean(corr)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Correlation coeff",
                           "%.3f" % np.median(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Correlation coeff",
                           "%.3f" % np.min(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Correlation coeff",
                           "%.3f" % np.max(corr)
                       ]]

            # Threshold as percentage of maximum intensity of objects in each channel
            tff = (self.thr.value / 100) * fix(
                scind.maximum(first_pixels, labels, lrange))
            tss = (self.thr.value / 100) * fix(
                scind.maximum(second_pixels, labels, lrange))

            combined_thresh = (first_pixels >= tff[labels - 1]) & (
                second_pixels >= tss[labels - 1])
            fi_thresh = first_pixels[combined_thresh]
            si_thresh = second_pixels[combined_thresh]
            tot_fi_thr = scind.sum(
                first_pixels[first_pixels >= tff[labels - 1]],
                labels[first_pixels >= tff[labels - 1]], lrange)
            tot_si_thr = scind.sum(
                second_pixels[second_pixels >= tss[labels - 1]],
                labels[second_pixels >= tss[labels - 1]], lrange)

            nonZero = (fi > 0) | (si > 0)
            xvar = np.var(fi[nonZero], axis=0, ddof=1)
            yvar = np.var(si[nonZero], axis=0, ddof=1)

            xmean = np.mean(fi[nonZero], axis=0)
            ymean = np.mean(si[nonZero], axis=0)

            z = fi[nonZero] + si[nonZero]
            zvar = np.var(z, axis=0, ddof=1)

            covar = 0.5 * (zvar - (xvar + yvar))

            denom = 2 * covar
            num = (yvar - xvar) + np.sqrt((yvar - xvar) * (yvar - xvar) + 4 *
                                          (covar * covar))
            a = (num / denom)
            b = (ymean - a * xmean)

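            # Costes automatic threshold search: step the candidate first-channel
            # threshold down from full scale in steps of 1/255 (0.003921...) until
            # the pixels below either channel's threshold are no longer positively
            # correlated.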
            i = 1
            while i > 0.003921568627:
                thr_fi_c = i
                thr_si_c = (a * i) + b
                combt = (fi < thr_fi_c) | (si < thr_si_c)
                costReg = scistat.pearsonr(fi[combt], si[combt])
                if costReg[0] <= 0:
                    break
                i = i - 0.003921568627

            # Costes' threshold for the entire image is applied to each object
            fi_above_thr = first_pixels > thr_fi_c
            si_above_thr = second_pixels > thr_si_c
            combined_thresh_c = fi_above_thr & si_above_thr
            fi_thresh_c = first_pixels[combined_thresh_c]
            si_thresh_c = second_pixels[combined_thresh_c]
            if np.any(fi_above_thr):
                tot_fi_thr_c = scind.sum(
                    first_pixels[first_pixels >= thr_fi_c],
                    labels[first_pixels >= thr_fi_c], lrange)
            else:
                tot_fi_thr_c = np.zeros(len(lrange))
            if np.any(si_above_thr):
                tot_si_thr_c = scind.sum(
                    second_pixels[second_pixels >= thr_si_c],
                    labels[second_pixels >= thr_si_c], lrange)
            else:
                tot_si_thr_c = np.zeros(len(lrange))

            # Manders Coefficient
            M1 = np.zeros(len(lrange))
            M2 = np.zeros(len(lrange))

            if np.any(combined_thresh):
                M1 = np.array(
                    scind.sum(fi_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                M2 = np.array(
                    scind.sum(si_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M2)
                       ]]

            # RWC Coefficient
            RWC1 = np.zeros(len(lrange))
            RWC2 = np.zeros(len(lrange))
            [Rank1] = np.lexsort(([labels], [first_pixels]))
            [Rank2] = np.lexsort(([labels], [second_pixels]))
            Rank1_U = np.hstack(
                [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]])
            Rank2_U = np.hstack(
                [[False],
                 second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]])
            Rank1_S = np.cumsum(Rank1_U)
            Rank2_S = np.cumsum(Rank2_U)
            Rank_im1 = np.zeros(first_pixels.shape, dtype=int)
            Rank_im2 = np.zeros(second_pixels.shape, dtype=int)
            Rank_im1[Rank1] = Rank1_S
            Rank_im2[Rank2] = Rank2_S

            R = max(Rank_im1.max(), Rank_im2.max()) + 1
            Di = abs(Rank_im1 - Rank_im2)
            weight = (R - Di) * 1.0 / R
            weight_thresh = weight[combined_thresh]

            if np.any(combined_thresh):
                RWC1 = np.array(
                    scind.sum(fi_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                RWC2 = np.array(
                    scind.sum(si_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)

            result += [[
                first_image_name, second_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC2)
                       ]]

            # Costes Automated Threshold
            C1 = np.zeros(len(lrange))
            C2 = np.zeros(len(lrange))
            if np.any(combined_thresh_c):
                C1 = np.array(
                    scind.sum(fi_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_fi_thr_c)
                C2 = np.array(
                    scind.sum(si_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_si_thr_c)
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C2)
                       ]]

            # Overlap Coefficient
            if np.any(combined_thresh):
                fpsq = scind.sum(first_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                spsq = scind.sum(second_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                pdt = np.sqrt(np.array(fpsq) * np.array(spsq))

                overlap = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / pdt)
                K1 = fix((scind.sum(
                    first_pixels[combined_thresh] *
                    second_pixels[combined_thresh], labels[combined_thresh],
                    lrange)) / (np.array(fpsq)))
                K2 = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / np.array(spsq))
            else:
                overlap = K1 = K2 = np.zeros(len(lrange))
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Overlap coeff",
                "%.3f" % np.mean(overlap)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Overlap coeff",
                           "%.3f" % np.median(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Overlap coeff",
                           "%.3f" % np.min(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Overlap coeff",
                           "%.3f" % np.max(overlap)
                       ]]

        measurement = ("Correlation_Correlation_%s_%s" %
                       (first_image_name, second_image_name))
        overlap_measurement = (F_OVERLAP_FORMAT %
                               (first_image_name, second_image_name))
        k_measurement_1 = (F_K_FORMAT % (first_image_name, second_image_name))
        k_measurement_2 = (F_K_FORMAT % (second_image_name, first_image_name))
        manders_measurement_1 = (F_MANDERS_FORMAT %
                                 (first_image_name, second_image_name))
        manders_measurement_2 = (F_MANDERS_FORMAT %
                                 (second_image_name, first_image_name))
        rwc_measurement_1 = (F_RWC_FORMAT %
                             (first_image_name, second_image_name))
        rwc_measurement_2 = (F_RWC_FORMAT %
                             (second_image_name, first_image_name))
        costes_measurement_1 = (F_COSTES_FORMAT %
                                (first_image_name, second_image_name))
        costes_measurement_2 = (F_COSTES_FORMAT %
                                (second_image_name, first_image_name))

        workspace.measurements.add_measurement(object_name, measurement, corr)
        workspace.measurements.add_measurement(object_name,
                                               overlap_measurement, overlap)
        workspace.measurements.add_measurement(object_name, k_measurement_1,
                                               K1)
        workspace.measurements.add_measurement(object_name, k_measurement_2,
                                               K2)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_1, M1)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_2, M2)
        workspace.measurements.add_measurement(object_name, rwc_measurement_1,
                                               RWC1)
        workspace.measurements.add_measurement(object_name, rwc_measurement_2,
                                               RWC2)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_1, C1)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_2, C2)

        if n_objects == 0:
            return [[
                first_image_name, second_image_name, object_name,
                "Mean correlation", "-"
            ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Median correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Min correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Max correlation", "-"
                    ]]
        else:
            return result
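
The RWC block above converts each channel's intensities into dense ranks (tied values share a rank) with a sort-and-cumsum trick; the same idea on a single 1-D array, with made-up values:

import numpy as np

pixels = np.array([0.2, 0.5, 0.2, 0.9, 0.5])
order = np.argsort(pixels)                      # one key, so argsort suffices here
is_new = np.hstack([[False], pixels[order[:-1]] != pixels[order[1:]]])
ranks = np.zeros(pixels.shape, dtype=int)
ranks[order] = np.cumsum(is_new)                # equal values get the same rank
print(ranks)  # -> [0 1 0 2 1]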
Example #42
0
        idate = [int(d) for d in idate]
        day = (date(idate[0], idate[1],
                    idate[2]).isoweekday()) - 1  # monday = 0, sunday = 6

        timeU70 = int(spt[2])
        if timeU70 > maxtime: maxtime = timeU70
        if timeU70 < mintime: mintime = timeU70
        rawdata[cnt_i - 1] = [
            cnt_dr,
            float(spt[0]),
            float(spt[1]),
            int(spt[2]), timegroup, day
        ]

        if (cnt_i) % buffersize == 0:
            rawdata = rawdata[np.lexsort(rawdata.T)]
            df = pd.DataFrame(
                data=rawdata,
                columns=['ID', 'x', 'y', 'timeU70', 'timegroup', 'day'])
            t4 = time.time()
            stdout("Adding rawdata...")
            cnt_success += add(h5dset, f5, df, tcutoff, velmin, silent=True)
            t5 = time.time()
            stdout(str(t5 - t4) + " seconds")
            df = None
            rawdata = np.empty(shape=[buffersize, 6])
            stdout(str(cnt_i) + " points added")
            cnt_i = 0

    cnt_dr += 1
Example #43
0
def slow_augmenting_row_reduction(n, ii, jj, idx, count, x, y, u, v, c):
    '''Perform the augmenting row reduction step from the Jonker-Volgenant algorithm
    
    n - the number of i and j in the linear assignment problem
    ii - the unassigned i
    jj - the j-index of every entry in c
    idx - the index of the first entry for each i
    count - the number of entries for each i
    x - the assignment of j to i
    y - the assignment of i to j
    u - the dual variable "u" which will be updated. It should be
        initialized to zero for the first reduction transfer.
    v - the dual variable "v" which will be reduced in-place
    c - the cost for each entry.
    
    returns the new unassigned i
    '''

    #######################################
    #
    # From Jonker:
    #
    # procedure AUGMENTING ROW REDUCTION;
    # begin
    #   LIST := {all unassigned rows};
    #   for all i in LIST do
    #     repeat
    #       u1 := min {c[i,j] - v[j] for j = 1..n};
    #       select j1 with c[i,j1] - v[j1] = u1;
    #       u2 := min {c[i,j] - v[j] for j = 1..n, j != j1};
    #       select j2 with c[i,j2] - v[j2] = u2 and j2 != j1;
    #       u[i] := u2;
    #       if u1 < u2 then v[j1] := v[j1] - (u2 - u1)
    #       else if j1 is assigned then j1 := j2;
    #       k := y[j1]; if k > 0 then x[k] := 0; x[i] := j1; y[j1] := i; i := k
    #     until u1 = u2 (* no reduction transfer *) or k = 0 (* augmentation *)
    # end
    ii = list(ii)
    k = 0
    limit = len(ii)
    free = []
    while k < limit:
        i = ii[k]
        k += 1
        j = jj[idx[i]:(idx[i] + count[i])]
        uu = c[idx[i]:(idx[i] + count[i])] - v[j]
        order = np.lexsort([uu])
        u1, u2 = uu[order[:2]]
        j1, j2 = j[order[:2]]
        i1 = y[j1]
        if u1 < u2:
            v[j1] = v[j1] - u2 + u1
        elif i1 != n:
            j1 = j2
            i1 = y[j1]
        if i1 != n:
            if u1 < u2:
                k -= 1
                ii[k] = i1
            else:
                free.append(i1)
        x[i] = j1
        y[j1] = i
    return np.array(free, np.uint32)
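
A hypothetical toy call of the routine above on the dense 2x2 cost matrix [[1, 2], [3, 1]], flattened into the sparse (jj, idx, count, c) layout it expects; all values here are illustrative:

import numpy as np

n = 2
jj = np.array([0, 1, 0, 1], dtype=np.uint32)   # column index of every entry
idx = np.array([0, 2])                          # first entry belonging to each row i
count = np.array([2, 2])                        # number of entries for each row i
c = np.array([1.0, 2.0, 3.0, 1.0])              # costs, row-major
x = np.full(n, n, dtype=np.uint32)              # j assigned to each i (n means unassigned)
y = np.full(n, n, dtype=np.uint32)              # i assigned to each j (n means unassigned)
u = np.zeros(n)
v = np.zeros(n)

free = slow_augmenting_row_reduction(n, np.arange(n), jj, idx, count, x, y, u, v, c)
print(x)     # -> [0 1]: row 0 takes column 0, row 1 takes column 1
print(free)  # rows left unassigned after this pass (empty here)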
Example #44
0
def make_hdf_spks(data, nev_hdf_fname):
    last_ts = 0
    units = []

    #### Open h5file: ####
    tf = tempfile.NamedTemporaryFile(delete=False)
    h5file = tables.openFile(tf.name, mode="w", title='BlackRock Nev Data')
    h5file.createGroup('/', 'channel')

    ### Spike Data First ###
    channels = data['spike_events']['ChannelID']
    base_str = 'channel00000'
    for ic, c in enumerate(channels):
        c_str = base_str[:-1*len(str(c))]+ str(c)
        h5file.createGroup('/channel', c_str)
        tab = h5file.createTable('/channel/'+c_str, 'spike_set', spike_set)

        for i, (ts, u, wv) in enumerate(zip(data['spike_events']['TimeStamps'][ic], data['spike_events']['Classification'][ic], data['spike_events']['Waveforms'][ic])):
            trial = tab.row
            last_ts = np.max([last_ts, ts])
            skip = False
            if u == 'none':
                u = 10
            elif u == 'noise':
                skip = True

            if not skip:
                trial['Unit'] = u
                trial['Wave'] = wv
                trial['TimeStamp'] = ts
                trial.append()

        #Check for non-zero units: 
        if len(data['spike_events']['TimeStamps'])>0:
            un = np.unique(data['spike_events']['Classification'][ic])
            for ci in un:
                #ci = 10
                if ci == 'none':
                    # Unsorted
                    units.append((c, 10))
                elif ci == 'noise':
                    pass
                else:
                    # Sorted (units are numbered )
                    units.append((c, int(ci)))

        tab.flush()

    ### Digital Data ###
    try:
        ts = data['dig_events']['TimeStamps']
        val = data['dig_events']['Data']

        for dchan in range(1, 1+len(ts)):
            h5file.createGroup('/channel', 'digital000'+str(dchan))
            dtab = h5file.createTable('/channel/digital000'+str(dchan), 'digital_set', digital_set)

            ts_chan = ts[dchan-1]
            val_chan = val[dchan-1]
            
            for ii, (tsi, vli) in enumerate(zip(ts_chan, val_chan)):
                trial = dtab.row
                trial['TimeStamp'] = tsi
                trial['Value'] = vli
                trial.append()
            last_ts = np.max([last_ts, tsi])
            dtab.flush()
    except Exception:
        print('no digital info in nev file')

    # Adding length / unit info: 
    tb = h5file.createTable('/', 'attr', mini_attr)
    rw = tb.row
    rw['last_ts'] = last_ts
    U = np.zeros((500, 2))
    n_units = len(units)
    U[:n_units, :] = np.vstack((units))

    rw['units'] = U
    rw['n_units'] = n_units
    rw.append()
    tb.flush()

    h5file.close()
    shutil.copyfile(tf.name, nev_hdf_fname)
    os.remove(tf.name)
    print('successfully made HDF file from NEV file: %s' % nev_hdf_fname)

    un_array = np.vstack((units))
    idx = np.lexsort((un_array[:, 1], un_array[:, 0]))
    units2 = [units[i] for i in idx]

    return last_ts, units2, h5file
Example #45
0
        for j in range(num_labels):
            ctype_predictions[m][i].append(
                (sigmoid(pred[m][i][j]), binary_to_kmer(x[i][0])))

# In[9]:

# Sorting each cancer type by prediction score
high_predictions_ctype = []
for m in range(num_models):
    high_predictions_ctype.append([])
    for i in range(num_labels):
        # Sorts kmers based on prediction score and selects top kmers
        predarray = np.asarray(ctype_predictions)[m, :, i]
        if len(predarray) == 0:
            continue
        ind = np.lexsort((predarray[:, 1], predarray[:, 0]))
        a = predarray[ind]

        float_a = []
        for i2 in a[:, 0]:
            float_a.append(float(i2))

        #if (i==4): top = 1000
        top = (zscore(float_a) > 1).sum()  # uses z-score to compute threshold

        high_predictions = a[-top:, :]  # selects top kmers for sequence logo
        high_predictions = high_predictions[::-1]
        high_predictions_ctype[m].append(high_predictions)
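
A small standalone illustration of the z-score cut used above (assuming zscore comes from scipy.stats, as the snippet appears to use); the scores are made up:

import numpy as np
from scipy.stats import zscore

scores = np.array([0.1, 0.2, 0.15, 0.9, 0.95])
top = (zscore(scores) > 1).sum()   # how many scores sit more than 1 sigma above the mean
print(top)                         # -> 2: only the two highest predictions survive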
Example #46
0
def lidar_to_top_cuda(lidar):
    # input:
    # lidar: (N, 4) 4->(x,y,z,i) in lidar coordinate
    lidar = np.copy(lidar)
    mod = cuda.module_from_buffer(module_buff)
    func = mod.get_function('_Z12lidar_to_topPfPiS0_S0_S_S_S0_')
    func_density = mod.get_function('_Z20lidar_to_top_densityPfPiS0_S0_S0_')
    # trunc
    idx = np.where(lidar[:, 0] > TOP_X_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 0] < TOP_X_MAX)
    lidar = lidar[idx]

    idx = np.where(lidar[:, 1] > TOP_Y_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 1] < TOP_Y_MAX)
    lidar = lidar[idx]

    idx = np.where(lidar[:, 2] > TOP_Z_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 2] < TOP_Z_MAX)
    lidar = lidar[idx]
    # shape
    X0, Xn = 0, int((TOP_X_MAX - TOP_X_MIN) // TOP_X_DIVISION) + 1
    Y0, Yn = 0, int((TOP_Y_MAX - TOP_Y_MIN) // TOP_Y_DIVISION) + 1
    Z0, Zn = 0, int((TOP_Z_MAX - TOP_Z_MIN) / TOP_Z_DIVISION)
    height = Xn - X0
    width = Yn - Y0
    channel = Zn - Z0 + 2
    # intensity and density channels are not calculated separately in the kernel function
    top = np.zeros(shape=(height, width, channel), dtype=np.float32)
    top_density = np.zeros(shape=(height, width, 1), dtype=np.float32)
    top_shape = np.array(top.shape).astype(np.int32)
    lidar_shape = np.array(lidar.shape).astype(np.int32)

    # voxelize lidar
    lidar[:,
          0] = ((lidar[:, 0] - TOP_X_MIN) // TOP_X_DIVISION).astype(np.int32)
    lidar[:,
          1] = ((lidar[:, 1] - TOP_Y_MIN) // TOP_Y_DIVISION).astype(np.int32)
    lidar[:, 2] = (lidar[:, 2] - TOP_Z_MIN) / TOP_Z_DIVISION

    lidar = lidar[np.lexsort((lidar[:, 2], lidar[:, 1], lidar[:, 0])), :]
    lidar_x = np.ascontiguousarray(lidar[:, 0].astype(np.int32))
    lidar_y = np.ascontiguousarray(lidar[:, 1].astype(np.int32))
    lidar_z = np.ascontiguousarray(lidar[:, 2])
    lidar_i = np.ascontiguousarray(lidar[:, 3])

    func(
        cuda.InOut(top),
        cuda.In(top_shape),
        cuda.In(lidar_x),
        cuda.In(lidar_y),
        cuda.In(lidar_z),
        cuda.In(lidar_i),
        cuda.In(lidar_shape),
        # intensity and density channels are not calculated separately
        block=(channel, 1, 1),  # a thread <-> a channel 
        grid=(int(lidar_shape[0]), 1, 1)  # a grid <-> a point in laser scan  
    )
    func_density(cuda.InOut(top_density),
                 cuda.In(lidar_x),
                 cuda.In(lidar_y),
                 cuda.In(lidar_shape),
                 cuda.In(top_shape),
                 block=(1, 1, 1),
                 grid=(1, 1, 1))
    top_density = (np.log(top_density.astype(np.int32) + 1) /
                   math.log(32)).clip(max=1).astype(np.float32)
    return np.dstack([top[:, :, :-1], top_density])
Example #47
0
def _select(input,
            labels=None,
            index=None,
            find_min=False,
            find_max=False,
            find_min_positions=False,
            find_max_positions=False,
            find_median=False):
    """Returns min, max, or both, plus their positions (if requested), and
    median."""

    input = numpy.asanyarray(input)

    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        positions = numpy.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [numpy.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = numpy.broadcast_arrays(input, labels)

    if index is None:
        mask = (labels > 0)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if numpy.isscalar(index):
        mask = (labels == index)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    # remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if (not _safely_castable_to_int(labels.dtype) or labels.min() < 0
            or labels.max() > labels.size):
        # remap labels, and indexes
        unique_labels, labels = numpy.unique(labels, return_inverse=True)
        idxs = numpy.searchsorted(unique_labels, index)

        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type, and there aren't too many.
        idxs = numpy.asanyarray(index, int).copy()
        found = (idxs >= 0) & (idxs <= labels.max())

    idxs[~found] = labels.max() + 1

    if find_median:
        order = numpy.lexsort((input.ravel(), labels.ravel()))
    else:
        order = input.ravel().argsort()
    input = input.ravel()[order]
    labels = labels.ravel()[order]
    if find_positions:
        positions = positions.ravel()[order]

    result = []
    if find_min:
        mins = numpy.zeros(labels.max() + 2, input.dtype)
        mins[labels[::-1]] = input[::-1]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = numpy.zeros(labels.max() + 2, int)
        minpos[labels[::-1]] = positions[::-1]
        result += [minpos[idxs]]
    if find_max:
        maxs = numpy.zeros(labels.max() + 2, input.dtype)
        maxs[labels] = input
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = numpy.zeros(labels.max() + 2, int)
        maxpos[labels] = positions
        result += [maxpos[idxs]]
    if find_median:
        locs = numpy.arange(len(labels))
        lo = numpy.zeros(labels.max() + 2, int)
        lo[labels[::-1]] = locs[::-1]
        hi = numpy.zeros(labels.max() + 2, int)
        hi[labels] = locs
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        result += [(input[lo] + input[hi]) / 2.0]

    return result
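
A hypothetical check of the per-label median path above, calling the helper as defined here (input values are illustrative):

import numpy
a = numpy.array([9, 1, 4, 2, 8, 7])
lbl = numpy.array([1, 1, 1, 2, 2, 2])
print(_select(a, lbl, index=[1, 2], find_median=True))  # -> [array([4., 7.])]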
Example #48
0
def get_iperf_data_single(iperf_out, protocol, streams, repetitions):
    '''
    Notice: all entries are counted from the end, as sometimes the beginning of an
    output row can be unreadable. This is also the reason for "errors='ignore'".
    '''
    iperf_data = []
    additional_fields = 0
    if protocol == 'UDP':
        additional_fields = 5

    with open(iperf_out, encoding='utf-8', errors='ignore') as inputfile:
        for line in inputfile:
            tmp_lst = line.strip().split(',')
            if (
                not tmp_lst[0].isdigit()
                or len(tmp_lst) != (9 + additional_fields)
                or (additional_fields and float(tmp_lst[-3]) <= 0)
                or float(tmp_lst[-3 - additional_fields].split('-')[-1]) > repetitions * 10.0
               ):
                continue

            if (int(tmp_lst[-4 - additional_fields]) > 0):
                # If the link number is positive (i.e if it is not a summary, where it's -1)...
                date = datetime.strptime(tmp_lst[0], '%Y%m%d%H%M%S')
                if not iperf_data:
                    first_date = date

                time_from_start = float((date - first_date).total_seconds())
                rate = float(tmp_lst[-1 - additional_fields])
                if additional_fields:
                    # For UDP: rate = rate * (total_datagrams - lost_datagrams) / total_datagrams
                    rate = rate * (float(tmp_lst[-3]) - float(tmp_lst[-4])) / float(tmp_lst[-3])
                if (int(tmp_lst[-2 - additional_fields]) < 0) or (rate < 0.0):
                    rate = np.nan
                iperf_data.append([ time_from_start, int(tmp_lst[-4 - additional_fields]), rate ])

    if not iperf_data:
        raise ValueError('Nothing reached the server.')

    iperf_data = np.array(iperf_data)
    conns = np.unique(iperf_data[:,1])
    num_conn = conns.shape[0]
    if num_conn < streams:
        raise ValueError(str(num_conn) + ' out of ' + str(streams) + ' streams reached the server.')
    elif num_conn > streams:
        raise ValueError(str(num_conn) + ' connections reached the server (' + str(streams) + ' expected).')

    # Sort by connection number, then by date. Get indices of the result.
    bi_sorted_indices = np.lexsort((iperf_data[:,0], iperf_data[:,1]))
    iperf_data = iperf_data[bi_sorted_indices]
    ### Mechanism to check if too few or too many connections received
    # Get the index of the line after the last of each connection
    conn_ranges = np.searchsorted(iperf_data[:,1], conns, side='right')
    # Get sizes of connection blocks
    conn_count = np.diff(np.insert(conn_ranges, 0, 0))
    server_fault = False
    conn_reached = conn_count.min()
    if conn_reached < repetitions:
        # If there was at least one occasion when there were fewer connections than expected
        server_fault = 'too_few'
        repetitions = conn_reached

    # Get indices of connection block sizes that are bigger than expected (if any)
    where_extra_conn = (conn_count > repetitions).nonzero()[0]
    if where_extra_conn.size:
        ## If there were connection blocks bigger than expected
        # Get indices of lines after the last (n+1) for removal
        remove_before_lines = conn_ranges[where_extra_conn]
        # Get the amount of extra lines
        amount_lines_to_remove = [remove_before_lines[0] - repetitions * (where_extra_conn[0] + 1)]
        for i in where_extra_conn[1:]:
            amount_lines_to_remove.append(conn_ranges[i] - repetitions * (i + 1) - sum(amount_lines_to_remove))

        # Get the first lines to remove
        first_for_removal = remove_before_lines - amount_lines_to_remove
        # Get the ranges of lines to remove
        lines_to_remove = np.array([
                                    np.arange(first_for_removal[i],remove_before_lines[i])
                                    for i in np.arange(first_for_removal.size)
                                   ]).flatten()
        # Remove the extra lines
        iperf_data = np.delete(iperf_data, lines_to_remove, axis=0)
        if not server_fault:
            server_fault = 'too_many'

    ### End connection amount check
    iperf_data = iperf_data[:,[0,2]].reshape((num_conn, iperf_data.shape[0]//num_conn, 2))
    iperf_data = np.ma.masked_array(iperf_data, np.isnan(iperf_data))
    mean_times = np.mean(iperf_data[:,:,0], axis=0)
    iperf_stdev = np.std(iperf_data[:,:,1], axis=0) * np.sqrt(num_conn)
    out_arr = np.vstack((mean_times, iperf_data[:,:,1].sum(axis=0), iperf_stdev)).filled(np.nan).T
    return out_arr, out_arr[:,1].mean(), out_arr[:,1].std(), server_fault
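
The block-size check above leans on searchsorted over the sorted connection column; a minimal illustration with made-up data:

import numpy as np

conn_col = np.array([1, 1, 1, 2, 2, 3])                 # already sorted by connection
conns = np.unique(conn_col)                              # -> [1 2 3]
ends = np.searchsorted(conn_col, conns, side='right')    # index after each block -> [3 5 6]
sizes = np.diff(np.insert(ends, 0, 0))                   # rows per connection -> [3 2 1]
print(sizes.min())                                       # smallest block, cf. conn_reached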
Example #49
0
    def combine_data(self, obj, **kwargs):
        r"""Combines multiple data sets

        Parameters
        ----------
        obj : Data_object
            Data_object with equivalent data columns

        tols : ndarray or float, optional
            Tolerances for combining two data sets. Default: 5e-4.

        ret : bool, optional
            Return the combined data set, or merge. Default: False

        """
        if not isinstance(obj, Data):
            raise TypeError(
                'You can only combine two Data objects: input object is the wrong format!'
            )

        tols = np.array(
            [5.e-4 for i in range(len(obj._data) - len(self.data_keys))])
        try:
            if kwargs['tols'] is not None:
                tols = np.array(kwargs['tols'])
        except KeyError:
            pass

        # combine
        _data_temp = copy.deepcopy(self._data)
        for i in range(len(obj._data[obj.data_keys['detector']])):
            new_vals = np.array([
                val[i] for k, val in obj._data.items()
                if k not in list(obj.data_keys.values())
            ])
            for j in range(len(self._data[self.data_keys['detector']])):
                orig_vals = np.array([
                    val[j] for k, val in self._data.items()
                    if k not in list(self.data_keys.values())
                ])
                if (np.abs(orig_vals - new_vals) <= tols).all():
                    for _key, _value in _data_temp.items():
                        if _key in list(self.data_keys.values()):
                            _data_temp[_key][j] += obj._data[_key][i]
                    break
            else:
                for _key, _value in _data_temp.items():
                    _data_temp[_key] = np.concatenate(
                        (_value, np.array([obj._data[_key][i]])))

        # sort
        ind = np.lexsort(
            tuple(value for key, value in _data_temp.items()
                  if key not in list(self.data_keys.values())))
        _data = OrderedDict()
        for key, value in _data_temp.items():
            _data[key] = value[ind]

        if 'ret' in kwargs and kwargs['ret']:
            output = Data()
            output._data = _data
            return output
        else:
            self._data = _data
Example #50
0
def lidar_to_front_cuda(lidar):
    # input:
    # lidar: (N, 4) 4->(x,y,z,i) in lidar coordinate

    mod = cuda.module_from_buffer(module_buff)
    func_add_points = mod.get_function('_Z25lidar_to_front_add_pointsPiS_S_S_')
    func_fill_front = mod.get_function(
        '_Z25lidar_to_front_fill_frontPfS_PiS0_')

    def cal_height(points):
        return np.clip(points[:, 2] + cfg.VELODYNE_HEIGHT, a_min=0,
                       a_max=None).astype(np.float32).reshape((-1, 1))

    def cal_distance(points):
        return np.sqrt(np.sum(points**2, axis=1)).astype(np.float32).reshape(
            (-1, 1))

    def cal_intensity(points):
        return points[:, 3].astype(np.float32).reshape((-1, 1))

    def to_front(points):
        return np.array([
            np.arctan2(points[:, 1], points[:, 0])/cfg.VELODYNE_ANGULAR_RESOLUTION,
            np.arctan2(points[:, 2], np.sqrt(points[:, 0]**2 + points[:, 1]**2)) \
                /cfg.VELODYNE_VERTICAL_RESOLUTION
        ], dtype=np.int32).T

    # using the same crop method as top view
    idx = np.where(lidar[:, 0] > TOP_X_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 0] < TOP_X_MAX)
    lidar = lidar[idx]

    idx = np.where(lidar[:, 1] > TOP_Y_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 1] < TOP_Y_MAX)
    lidar = lidar[idx]

    idx = np.where(lidar[:, 2] > TOP_Z_MIN)
    lidar = lidar[idx]
    idx = np.where(lidar[:, 2] < TOP_Z_MAX)
    lidar = lidar[idx]

    points = to_front(lidar)
    ind = np.where(cfg.FRONT_C_MIN < points[:, 0])
    points, lidar = points[ind], lidar[ind]
    ind = np.where(points[:, 0] < cfg.FRONT_C_MAX)
    points, lidar = points[ind], lidar[ind]
    ind = np.where(cfg.FRONT_R_MIN < points[:, 1])
    points, lidar = points[ind], lidar[ind]
    ind = np.where(points[:, 1] < cfg.FRONT_R_MAX)
    points, lidar = points[ind], lidar[ind]

    points[:, 0] += int(cfg.FRONT_C_OFFSET)
    points[:, 1] += int(cfg.FRONT_R_OFFSET)
    #points //= 2

    ind = np.where(0 <= points[:, 0])
    points, lidar = points[ind], lidar[ind]
    ind = np.where(points[:, 0] < cfg.FRONT_WIDTH)
    points, lidar = points[ind], lidar[ind]
    ind = np.where(0 <= points[:, 1])
    points, lidar = points[ind], lidar[ind]
    ind = np.where(points[:, 1] < cfg.FRONT_HEIGHT)
    points, lidar = points[ind], lidar[ind]

    # sort points so that the accumulation below is memory friendly
    idx = np.lexsort((points[:, 1], points[:, 0]))
    points = points[idx, :]
    lidar = lidar[idx, :]

    channel = 3  # height, distance, intensity
    front = np.zeros((cfg.FRONT_WIDTH, cfg.FRONT_HEIGHT, channel),
                     dtype=np.float32)
    weight_mask = np.zeros_like(front[:, :, 0]).astype(np.int32)
    # def _add(x):
    #     weight_mask[int(x[0]), int(x[1])] += 1
    # def _fill(x):
    #     front[int(x[0]), int(x[1]), :] += x[2:]
    # np.apply_along_axis(_add, 1, points)
    buf = np.hstack((points, cal_height(lidar), cal_distance(lidar),
                     cal_intensity(lidar))).astype(np.float32)
    # np.apply_along_axis(_fill, 1, buf)

    func_add_points(
        cuda.InOut(weight_mask),
        cuda.In(points),
        cuda.In(np.array(weight_mask.shape).astype(np.int32)),
        cuda.In(np.array(points.shape).astype(np.int32)),
        block=(1, 1, 1),
        grid=(1, 1, 1),  # points
    )
    weight_mask[weight_mask == 0] = 1  # empty cells get weight 1 so the division below is safe
    func_fill_front(
        cuda.InOut(front),
        cuda.In(buf),
        cuda.In(np.array(front.shape).astype(np.int32)),
        cuda.In(np.array(buf.shape).astype(np.int32)),
        block=(3, 1, 1),  # channel 
        grid=(1, 1, 1)  # points 
    )

    front /= weight_mask[:, :, np.newaxis]
    return front
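A minimal NumPy-only sketch of what the two kernel launches appear to compute, based on the commented-out _add/_fill helpers above. The function name fill_front_numpy is hypothetical; `points` and `buf` are assumed to be the arrays built in lidar_to_front_cuda.

import numpy as np

def fill_front_numpy(points, buf, front_shape):
    # points[:, 0] is the column (c) index, points[:, 1] the row (r) index;
    # buf[:, 2:] carries the height / distance / intensity features per point
    front = np.zeros(front_shape, dtype=np.float32)
    weight_mask = np.zeros(front_shape[:2], dtype=np.int32)
    cols, rows = points[:, 0], points[:, 1]
    np.add.at(weight_mask, (cols, rows), 1)        # count hits per cell
    np.add.at(front, (cols, rows), buf[:, 2:])     # sum features per cell
    weight_mask[weight_mask == 0] = 1              # avoid division by zero
    return front / weight_mask[:, :, np.newaxis]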
Example #51
0
def apply_caltable_uvfits(gaincaltable,
                          datastruct,
                          filename_out,
                          cal_amp=False):
    """apply a calibration table to a uvfits file
       Args:
        gaincaltable (str) : path to the gain calibration table (CSV file)
        datastruct (Datastruct) :  input data structure in EHTIM format
        filename_out (str) :  uvfits output file name
        cal_amp (bool): whether to do amplitude calibration
    """

    if datastruct.dtype != "EHTIM":
        raise Exception(
            "datastruct must be in EHTIM format in apply_caltable_uvfits!")

    gains0 = pd.read_csv(gaincaltable)
    polygain = {}
    mjd_start = {}
    polyamp = {}

    # determine which calibration entry to use when there are multiple options for different periods
    mjd_mean = datastruct.data['time'].mean() - MJD_0
    gains = gains0[(gains0.mjd_start <= mjd_mean)
                   & (gains0.mjd_stop >= mjd_mean)].reset_index(
                       drop=True).copy()

    for cou, row in gains.iterrows():
        polygain[row.station] = poly_from_str(str(row.ratio_phas))
        #if mjd0 provided, use it as mjd time reference offset, otherwise use mjd_start
        try:
            mjd_start[row.station] = row.mjd0
        except AttributeError:
            mjd_start[row.station] = row.mjd_start
        if cal_amp:
            polyamp[row.station] = poly_from_str(str(row.ratio_amp))
        else:
            polyamp[row.station] = poly_from_str('1.0')

    #print(gains0)
    #print(polygain)
    # interpolate the calibration  table
    rinterp = {}
    linterp = {}
    skipsites = []

    #-------------------------------------------
    # sort by baseline
    data = datastruct.data
    idx = np.lexsort((data['t2'], data['t1']))
    bllist = []
    for key, group in it.groupby(data[idx], lambda x: set((x['t1'], x['t2']))):
        bllist.append(np.array([obs for obs in group]))
    bllist = np.array(bllist)

    # apply the  calibration

    datatable = []
    coub = 0
    for bl_obs in bllist:
        t1 = bl_obs['t1'][0]
        t2 = bl_obs['t2'][0]
        coub = coub + 1
        print('Calibrating {}-{} baseline, {}/{}'.format(
            t1, t2, coub, len(bllist)))
        time_mjd = bl_obs['time'] - MJD_0  #dates are in mjd in Datastruct

        ###########################################################################################################################
        #OLD VERSION WHERE LCP IS SHIFTED TO RCP
        #        if t1 in skipsites:
        #            rscale1 = lscale1 = np.array(1.)
        #       else:
        #            try:
        #                rscale1 = 1./np.sqrt(polyamp[t1](time_mjd))
        #                lscale1 = np.sqrt(polyamp[t1](time_mjd))*np.exp(1j*polygain[t1](time_mjd - mjd_start[t1])*np.pi/180.)
        #            except KeyError:
        #                rscale1 = lscale1 = np.array(1.)
        #
        #        if t2 in skipsites:
        #            rscale2 = lscale2 = np.array(1.)
        #        else:
        #            try:
        #                rscale2 = 1./np.sqrt(polyamp[t2](time_mjd))
        #                lscale2 = np.sqrt(polyamp[t2](time_mjd))*np.exp(1j*polygain[t2](time_mjd - mjd_start[t2])*np.pi/180.)
        #            except KeyError:
        #                rscale2 = lscale2 = np.array(1.)
        ###########################################################################################################################

        ###########################################################################################################################
        #NEW VERSION WHERE RCP IS SHIFTED TO LCP // MW 2018/NOV/13
        if t1 in skipsites:
            rscale1 = lscale1 = np.array(1.)
        else:
            try:
                rscale1 = 1. / np.sqrt(polyamp[t1](time_mjd)) * np.exp(
                    -1j * polygain[t1](time_mjd - mjd_start[t1]) * np.pi /
                    180.)
                lscale1 = np.sqrt(polyamp[t1](time_mjd))
            except KeyError:
                rscale1 = lscale1 = np.array(1.)

        if t2 in skipsites:
            rscale2 = lscale2 = np.array(1.)
        else:
            try:
                rscale2 = 1. / np.sqrt(polyamp[t2](time_mjd)) * np.exp(
                    -1j * polygain[t2](time_mjd - mjd_start[t2]) * np.pi /
                    180.)
                lscale2 = np.sqrt(polyamp[t2](time_mjd))
            except KeyError:
                rscale2 = lscale2 = np.array(1.)
###########################################################################################################################

        rrscale = rscale1 * rscale2.conj()
        llscale = lscale1 * lscale2.conj()
        rlscale = rscale1 * lscale2.conj()
        lrscale = lscale1 * rscale2.conj()

        bl_obs['rr'] = (bl_obs['rr']) * rrscale
        bl_obs['ll'] = (bl_obs['ll']) * llscale
        bl_obs['rl'] = (bl_obs['rl']) * rlscale
        bl_obs['lr'] = (bl_obs['lr']) * lrscale

        bl_obs['rrweight'] = (bl_obs['rrweight']) / (np.abs(rrscale)**2)
        bl_obs['llweight'] = (bl_obs['llweight']) / (np.abs(llscale)**2)
        bl_obs['rlweight'] = (bl_obs['rlweight']) / (np.abs(rlscale)**2)
        bl_obs['lrweight'] = (bl_obs['lrweight']) / (np.abs(lrscale)**2)

        if len(datatable):
            datatable = np.hstack((datatable, bl_obs))
        else:
            datatable = bl_obs

    # put in uvfits format datastruct
    # telescope arrays
    tarr = datastruct.antenna_info
    tkeys = {tarr[i]['site']: i for i in range(len(tarr))}
    tnames = tarr['site']
    tnums = np.arange(1, len(tarr) + 1)
    xyz = np.array([[tarr[i]['x'], tarr[i]['y'], tarr[i]['z']]
                    for i in np.arange(len(tarr))])

    # uvfits format output data table
    bl_list = []
    for i in range(len(datatable)):
        entry = datatable[i]
        t1num = entry['t1']
        t2num = entry['t2']
        rl = entry['rl']
        lr = entry['lr']
        if tkeys[entry['t2']] < tkeys[
                entry['t1']]:  # reorder telescopes if necessary
            #print entry['t1'], tkeys[entry['t1']], entry['t2'], tkeys[entry['t2']]
            entry['t1'] = t2num
            entry['t2'] = t1num
            entry['u'] = -entry['u']
            entry['v'] = -entry['v']
            entry['rr'] = np.conj(entry['rr'])
            entry['ll'] = np.conj(entry['ll'])
            entry['rl'] = np.conj(lr)
            entry['lr'] = np.conj(rl)
            datatable[i] = entry
        bl_list.append(
            np.array((entry['time'], entry['t1'], entry['t2']), dtype=BLTYPE))
    _, unique_idx_anttime, idx_anttime = np.unique(bl_list,
                                                   return_index=True,
                                                   return_inverse=True)
    _, unique_idx_freq, idx_freq = np.unique(datatable['freq'],
                                             return_index=True,
                                             return_inverse=True)

    # random group params
    u = datatable['u'][unique_idx_anttime]
    v = datatable['v'][unique_idx_anttime]
    t1num = [tkeys[scope] + 1 for scope in datatable['t1'][unique_idx_anttime]]
    t2num = [tkeys[scope] + 1 for scope in datatable['t2'][unique_idx_anttime]]
    bls = 256 * np.array(t1num) + np.array(t2num)
    jds = datatable['time'][unique_idx_anttime]
    tints = datatable['tint'][unique_idx_anttime]

    # data table
    nap = len(unique_idx_anttime)
    nsubchan = 1
    nstokes = 4
    nchan = datastruct.obs_info.nchan

    outdat = np.zeros((nap, 1, 1, nchan, nsubchan, nstokes, 3))
    outdat[:, :, :, :, :, :, 2] = -1.0

    vistypes = ['rr', 'll', 'rl', 'lr']
    for i in range(len(datatable)):
        row_freq_idx = idx_freq[i]
        row_dat_idx = idx_anttime[i]

        for j in range(len(vistypes)):
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   0] = np.real(datatable[i][vistypes[j]])
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   1] = np.imag(datatable[i][vistypes[j]])
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   2] = datatable[i][vistypes[j] + 'weight']

    # package data for saving
    obsinfo_out = datastruct.obs_info
    antennainfo_out = Antenna_info(tnames, tnums, xyz)
    uvfitsdata_out = Uvfits_data(u, v, bls, jds, tints, outdat)
    datastruct_out = Datastruct(obsinfo_out, antennainfo_out, uvfitsdata_out)

    # save final file
    save_uvfits(datastruct_out, filename_out)
    return
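A small self-contained illustration of the lexsort + groupby pattern used above to split the data table into per-baseline chunks. The structured dtype and station names here are made up for the example; only the sorting/grouping idiom mirrors the code.

import itertools as it
import numpy as np

rec = np.array([(0.1, 'AA', 'BB'), (0.2, 'BB', 'AA'), (0.1, 'AA', 'CC')],
               dtype=[('time', 'f8'), ('t1', 'U2'), ('t2', 'U2')])
idx = np.lexsort((rec['t2'], rec['t1']))  # t1 is the primary key, t2 breaks ties
for stations, group in it.groupby(rec[idx], lambda x: set((x['t1'], x['t2']))):
    print(stations, len(list(group)))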
Example #52
0
def LCmap_speedup():
    packets = pipe.read_obs()
    packets = packets[:,2:]
    print('Sorting packets (takes a while)')
    ind = np.lexsort((packets[:, 1], packets[:, 2]))
    packets = packets[ind]

    print(packets.shape, sp.num_processes)
    # exit()
    LCmap = np.zeros((len(xlocs), len(ylocs), num_ints))
    # for i in range(num_chunks):
    #     packets_chunk = packets[i * max_photons:(i + 1) * max_photons]
    output = multiprocessing.Queue()
    inqueue = multiprocessing.Queue()
    packqueue = multiprocessing.Queue()
    jobs = []

    for i in range(sp.num_processes):
        p = multiprocessing.Process(target=LCmap_worker, args=(inqueue, output, packqueue))
        jobs.append(p)
        p.start()
        # print 'ip', i
    for idx in range(len(xlocs) * len(ylocs)):
        # print 'idx', idx
        inqueue.put(idx)
    lower = 0
    # for ix, iy in zip(xlocs,ylocs):
    for ix, iy in list(itertools.product(xlocs, ylocs)):
        print(ix, iy, 'ix iy')  # 'idx', idx
        # print inqueue.qsize(), output.qsize(), packqueue.qsize(), ' 3queues'
        diff = 0
        span = lower
        while diff == 0:
            # print diff, packets[span,1], iy,  span
            diff = packets[span,1] - iy
            span += 1
            if span == ap.star_photons*ap.numframes:
                break
        print('lower and span', lower, span)
        upper = span - 1
        packets_chunk = packets[lower:upper]
        lower = upper
        packqueue.put(packets_chunk)
        LClist = output.get()
        # if ix == 1 and iy == 1:
        #     exit()
        # print LClist, 'LClist'
        # binned_chunk = num_ints / num_chunks
        # print debprint((len(LClist[0]), binned_chunk))
        # for idx in idxs:
        # ix = idx / len(xlocs)
        # iy = idx % len(xlocs)
        # LCmap[ix, iy, i * binned_chunk:(i + 1) * binned_chunk] = LClist  # [idx]
        LCmap[ix, iy] = LClist  # [idx]
    print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line380 3queues')
    output.put(None)
    # packqueue.put(None)
    print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line383 3queues')
    for i in range(sp.num_processes):
        # Send the sentinel to tell the worker processes to end
        inqueue.put(sentinel)
        print('second i', i)
        print('line 205', tp.detector)
        print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line389 3queues')
    for i, p in enumerate(jobs):
        p.join()
        print('third i', i)

    return LCmap
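The while loop above scans the sorted packet array element by element to find where the current (x, y) block ends. A hedged alternative sketch, assuming the same lexsort order as above (column 2 as the primary key, column 1 as the secondary key): the start index of every contiguous (x, y) block can be precomputed in one pass. The helper name block_starts is made up for illustration.

import numpy as np

def block_starts(sorted_packets):
    # rows are already sorted by (column 2, column 1); a block boundary occurs
    # wherever either key changes between consecutive rows
    keys = sorted_packets[:, [2, 1]]
    changed = np.any(np.diff(keys, axis=0) != 0, axis=1)
    return np.r_[0, np.flatnonzero(changed) + 1]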
Example #53
0
    plt.grid()
    colors = np.arange(0, int(classes))
    d = 0
    for c, t in zip(colors, translation):
        d = c + 1
        plt.plot(t[:], linewidth=3, color=cmap(c), label='Class %s' % d)
    ticks = np.arange(2, iterations)
    plt.xlim(2, iterations)
    plt.legend(loc='best')
    pdf.savefig()
    #plt.show()

###########################################################################

### Sort group assignment array column by column
sortindices = np.lexsort(groupnumarray[:, 1:].T)
groupnumarraysorted = groupnumarray[sortindices]

### Heat map of group sizes
H = groupnumarraysorted[:, :]
cmap = plt.get_cmap('jet', int(classes))
norm = matplotlib.colors.Normalize(vmin=1, vmax=int(classes) + 1)
plt.figure(num=None, dpi=120, facecolor='white')
plt.title('Class assignments of each particle', fontsize=16, fontweight='bold')
plt.xlabel('Iteration #', fontsize=13)
plt.ylabel('Particle #', fontsize=13)
mat = plt.imshow(H,
                 aspect='auto',
                 interpolation="nearest",
                 cmap=cmap,
                 norm=norm)
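A tiny sketch of the row-sorting convention used above: np.lexsort treats its last key as the primary sort key, so passing arr[:, 1:].T sorts rows by the last column first and uses the earlier columns as tie-breakers. Toy data only.

import numpy as np

arr = np.array([[0, 2, 1],
                [1, 1, 1],
                [2, 2, 0]])
order = np.lexsort(arr[:, 1:].T)  # primary key: last column, then earlier columns
print(arr[order])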
Example #54
0
def LCmap_speedup_colors():
    allpackets = pipe.read_obs()
    # packets = packets[:,2:]

    phases = allpackets[:, 1] - allpackets[:, 0]
    # packets = np.vstack((phases,packets[:,2:]))
    print(phases[:50])
    print(np.shape(allpackets))
    wsamples = np.linspace(tp.band[0], tp.band[1], tp.nwsamp)
    print(wsamples)

    phasebins = spec.phase_cal(wsamples)
    print(phasebins)
    binnedphase = np.digitize(phases, phasebins)
    print(binnedphase[:20])
    LCmaps = np.zeros((len(phasebins), len(xlocs), len(ylocs), num_ints))
    for col in range(len(phasebins)):
        locs = np.where(binnedphase == col)[0]
        packets = allpackets[locs]
        # cube = pipe.arange_into_cube(packets)
        # image = pipe.make_intensity_map(cube)
        # quicklook_im(image)
        print('Sorting packets (takes a while)')
        ind = np.lexsort((packets[:, 3], packets[:, 4]))
        packets = packets[ind]

        print(packets.shape, sp.num_processes)
        # exit()
        LCmap = np.zeros((len(xlocs), len(ylocs), num_ints))
        # for i in range(num_chunks):
        #     packets_chunk = packets[i * max_photons:(i + 1) * max_photons]
        output = multiprocessing.Queue()
        inqueue = multiprocessing.Queue()
        packqueue = multiprocessing.Queue()
        jobs = []

        for i in range(sp.num_processes):
            p = multiprocessing.Process(target=LCmap_worker, args=(inqueue, output, packqueue))
            jobs.append(p)
            p.start()
            # print 'ip', i
        for idx in range(len(xlocs) * len(ylocs)):
            # print 'idx', idx
            inqueue.put(idx)
        lower = 0
        # for ix, iy in zip(xlocs,ylocs):
        for ix, iy in list(itertools.product(xlocs, ylocs)):
            print(ix, iy, 'ix iy')  # 'idx', idx
            # print inqueue.qsize(), output.qsize(), packqueue.qsize(), ' 3queues'
            diff = 0
            span = lower
            while diff == 0:
                diff = packets[span,3] - iy
                span += 1
                if span == len(packets):
                    break
            # print 'lower and span', lower, span
            upper = span - 1
            packets_chunk = packets[lower:upper]
            lower = upper
            packqueue.put(packets_chunk)
            LClist = output.get()
            # if ix == 1 and iy == 1:
            #     exit()
            # print LClist, 'LClist'
            # binned_chunk = num_ints / num_chunks
            # print debprint((len(LClist[0]), binned_chunk))
            # for idx in idxs:
            # ix = idx / len(xlocs)
            # iy = idx % len(xlocs)
            # LCmap[ix, iy, i * binned_chunk:(i + 1) * binned_chunk] = LClist  # [idx]
            LCmap[ix, iy] = LClist  # [idx]
            # print LClist
        print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line380 3queues')
        output.put(None)
        # packqueue.put(None)
        print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line383 3queues')
        for i in range(sp.num_processes):
            # Send the sentinel to tell the worker processes to end
            inqueue.put(sentinel)
            print('second i', i)
            print('line 205', tp.detector)
            print(inqueue.qsize(), output.qsize(), packqueue.qsize(), 'line389 3queues')
        for i, p in enumerate(jobs):
            p.join()
            print('third i', i)

        print(col, type(col))
        LCmaps[col] = LCmap
    return LCmaps
Example #55
0
    def run_image_pair_images(self, workspace, first_image_name,
                              second_image_name):
        '''Calculate the correlation between the pixels of two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        first_pixel_data = first_image.pixel_data
        first_mask = first_image.mask
        first_pixel_count = np.prod(first_pixel_data.shape)
        second_pixel_data = second_image.pixel_data
        second_mask = second_image.mask
        second_pixel_count = np.prod(second_pixel_data.shape)
        #
        # Crop the larger image similarly to the smaller one
        #
        if first_pixel_count < second_pixel_count:
            second_pixel_data = first_image.crop_image_similarly(
                second_pixel_data)
            second_mask = first_image.crop_image_similarly(second_mask)
        elif second_pixel_count < first_pixel_count:
            first_pixel_data = second_image.crop_image_similarly(
                first_pixel_data)
            first_mask = second_image.crop_image_similarly(first_mask)
        mask = (first_mask & second_mask & (~np.isnan(first_pixel_data)) &
                (~np.isnan(second_pixel_data)))
        result = []
        if np.any(mask):
            #
            # Perform the correlation, which returns:
            # [ [ii, ij],
            #   [ji, jj] ]
            #
            fi = first_pixel_data[mask]
            si = second_pixel_data[mask]
            corr = np.corrcoef((fi, si))[1, 0]
            #
            # Find the slope as a linear regression to
            # A * i1 + B = i2
            #
            coeffs = lstsq(np.array((fi, np.ones_like(fi))).transpose(), si)[0]
            slope = coeffs[0]
            result += [[
                first_image_name, second_image_name, "-", "Correlation",
                "%.3f" % corr
            ],
                       [
                           first_image_name, second_image_name, "-", "Slope",
                           "%.3f" % slope
                       ]]
            # Orthogonal Regression for Costes' automated threshold
            nonZero = (fi > 0) | (si > 0)

            xvar = np.var(fi[nonZero], axis=0, ddof=1)
            yvar = np.var(si[nonZero], axis=0, ddof=1)

            xmean = np.mean(fi[nonZero], axis=0)
            ymean = np.mean(si[nonZero], axis=0)

            z = fi[nonZero] + si[nonZero]
            zvar = np.var(z, axis=0, ddof=1)

            covar = 0.5 * (zvar - (xvar + yvar))

            denom = 2 * covar
            num = (yvar - xvar) + np.sqrt((yvar - xvar) * (yvar - xvar) + 4 *
                                          (covar * covar))
            a = (num / denom)
            b = (ymean - a * xmean)

            i = 1
            while i > 0.003921568627:  # 1/255, i.e. one 8-bit grey level
                Thr_fi_c = i
                Thr_si_c = (a * i) + b
                combt = (fi < Thr_fi_c) | (si < Thr_si_c)
                costReg = scistat.pearsonr(fi[combt], si[combt])
                if costReg[0] <= 0:
                    break
                i = i - 0.003921568627

            # Costes' threshold calculation
            combined_thresh_c = (fi > Thr_fi_c) & (si > Thr_si_c)
            fi_thresh_c = fi[combined_thresh_c]
            si_thresh_c = si[combined_thresh_c]
            tot_fi_thr_c = fi[(fi > Thr_fi_c)].sum()
            tot_si_thr_c = si[(si > Thr_si_c)].sum()

            # Threshold as percentage of maximum intensity in each channel
            thr_fi = self.thr.value * np.max(fi) / 100
            thr_si = self.thr.value * np.max(si) / 100
            combined_thresh = (fi > thr_fi) & (si > thr_si)
            fi_thresh = fi[combined_thresh]
            si_thresh = si[combined_thresh]
            tot_fi_thr = fi[(fi > thr_fi)].sum()
            tot_si_thr = si[(si > thr_si)].sum()

            # Manders Coefficient
            M1 = 0
            M2 = 0
            M1 = fi_thresh.sum() / tot_fi_thr
            M2 = si_thresh.sum() / tot_si_thr

            result += [[
                first_image_name, second_image_name, "-",
                "Manders Coefficient",
                "%.3f" % M1
            ],
                       [
                           second_image_name, first_image_name, "-",
                           "Manders Coefficient",
                           "%.3f" % M2
                       ]]

            # RWC Coefficient
            RWC1 = 0
            RWC2 = 0
            Rank1 = np.lexsort([fi])
            Rank2 = np.lexsort([si])
            Rank1_U = np.hstack([[False], fi[Rank1[:-1]] != fi[Rank1[1:]]])
            Rank2_U = np.hstack([[False], si[Rank2[:-1]] != si[Rank2[1:]]])
            Rank1_S = np.cumsum(Rank1_U)
            Rank2_S = np.cumsum(Rank2_U)
            Rank_im1 = np.zeros(fi.shape, dtype=int)
            Rank_im2 = np.zeros(si.shape, dtype=int)
            Rank_im1[Rank1] = Rank1_S
            Rank_im2[Rank2] = Rank2_S

            R = max(Rank_im1.max(), Rank_im2.max()) + 1
            Di = abs(Rank_im1 - Rank_im2)
            weight = ((R - Di) * 1.0) / R
            weight_thresh = weight[combined_thresh]
            RWC1 = (fi_thresh * weight_thresh).sum() / tot_fi_thr
            RWC2 = (si_thresh * weight_thresh).sum() / tot_si_thr
            result += [[
                first_image_name, second_image_name, "-", "RWC Coefficient",
                "%.3f" % RWC1
            ],
                       [
                           second_image_name, first_image_name, "-",
                           "RWC Coefficient",
                           "%.3f" % RWC2
                       ]]

            # Costes' Automated Threshold
            C1 = 0
            C2 = 0
            C1 = fi_thresh_c.sum() / tot_fi_thr_c
            C2 = si_thresh_c.sum() / tot_si_thr_c

            result += [[
                first_image_name, second_image_name, "-",
                "Manders Coefficient (Costes)",
                "%.3f" % C1
            ],
                       [
                           second_image_name, first_image_name, "-",
                           "Manders Coefficient (Costes)",
                           "%.3f" % C2
                       ]]

            # Overlap Coefficient
            overlap = 0
            overlap = (fi_thresh * si_thresh).sum() / np.sqrt(
                (fi_thresh**2).sum() * (si_thresh**2).sum())
            K1 = (fi_thresh * si_thresh).sum() / (fi_thresh**2).sum()
            K2 = (fi_thresh * si_thresh).sum() / (si_thresh**2).sum()
            result += [[
                first_image_name, second_image_name, "-",
                "Overlap Coefficient",
                "%.3f" % overlap
            ]]

        else:
            corr = np.NaN
            slope = np.NaN
            C1 = np.NaN
            C2 = np.NaN
            M1 = np.NaN
            M2 = np.NaN
            RWC1 = np.NaN
            RWC2 = np.NaN
            overlap = np.NaN
            K1 = np.NaN
            K2 = np.NaN

        #
        # Add the measurements
        #
        corr_measurement = F_CORRELATION_FORMAT % (first_image_name,
                                                   second_image_name)
        slope_measurement = F_SLOPE_FORMAT % (first_image_name,
                                              second_image_name)
        overlap_measurement = F_OVERLAP_FORMAT % (first_image_name,
                                                  second_image_name)
        k_measurement_1 = F_K_FORMAT % (first_image_name, second_image_name)
        k_measurement_2 = F_K_FORMAT % (second_image_name, first_image_name)
        manders_measurement_1 = F_MANDERS_FORMAT % (first_image_name,
                                                    second_image_name)
        manders_measurement_2 = F_MANDERS_FORMAT % (second_image_name,
                                                    first_image_name)
        rwc_measurement_1 = F_RWC_FORMAT % (first_image_name,
                                            second_image_name)
        rwc_measurement_2 = F_RWC_FORMAT % (second_image_name,
                                            first_image_name)
        costes_measurement_1 = F_COSTES_FORMAT % (first_image_name,
                                                  second_image_name)
        costes_measurement_2 = F_COSTES_FORMAT % (second_image_name,
                                                  first_image_name)

        workspace.measurements.add_image_measurement(corr_measurement, corr)
        workspace.measurements.add_image_measurement(slope_measurement, slope)
        workspace.measurements.add_image_measurement(overlap_measurement,
                                                     overlap)
        workspace.measurements.add_image_measurement(k_measurement_1, K1)
        workspace.measurements.add_image_measurement(k_measurement_2, K2)
        workspace.measurements.add_image_measurement(manders_measurement_1, M1)
        workspace.measurements.add_image_measurement(manders_measurement_2, M2)
        workspace.measurements.add_image_measurement(rwc_measurement_1, RWC1)
        workspace.measurements.add_image_measurement(rwc_measurement_2, RWC2)
        workspace.measurements.add_image_measurement(costes_measurement_1, C1)
        workspace.measurements.add_image_measurement(costes_measurement_2, C2)

        return result
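A minimal sketch of the dense-ranking trick used for the RWC coefficient above: np.lexsort on a single key is a stable argsort, and a cumulative sum over the "value changed" mask turns sorted positions into tied (dense) ranks. Toy data only.

import numpy as np

x = np.array([0.3, 0.1, 0.3, 0.7])
order = np.lexsort([x])                       # stable argsort of x
changed = np.hstack([[False], x[order][:-1] != x[order][1:]])
ranks = np.zeros(x.shape, dtype=int)
ranks[order] = np.cumsum(changed)             # tied values share a rank
print(ranks)                                  # -> [1 0 1 2]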
Example #56
0
    def fit(
        dataset,
        complete=True,
        verbose=0
    ):  #variables, values, transitions, conclusion_values=None, complete=True):
        """
        Preprocess state transitions and learn rules for all variables/values.

        Args:
            dataset: Dataset
                state transitions of a dynamical system, with its feature and target variables
            complete: bool
                if True learn rules with GULA (complete search), otherwise with the PRIDE heuristic
            verbose: int
                verbosity level

        Returns:
            CDMVLP
                - each rules/constraints are minimals
                - the output set explains/reproduces the input transitions
        """
        #eprint("Start LUST learning...")

        # Nothing to learn
        #if len(transitions) == 0:
        #    return LogicProgram(variables, values, [])

        #if conclusion_values == None:
        #    conclusion_values = values

        # 1) Use GULA to learn local possibilities
        #------------------------------------------
        if complete:
            rules = GULA.fit(dataset)
        else:
            rules = PRIDE.fit(dataset)

        # 2) Learn constraints
        #------------------------------------------

        #negatives = [list(i)+list(j) for i,j in data] # next state value appear before current state
        #extended_variables = variables + variables # current state variables id are now += len(variables)
        #extended_values = conclusion_values + values

        # DBG
        #eprint("variables:\n", extended_variables)
        #eprint("values:\n", extended_values)
        #eprint("positives:\n", positives)
        #eprint("negatives:\n", negatives)

        encoded_data = Algorithm.encode_transitions_set(
            dataset.data, dataset.features, dataset.targets)

        negatives = np.array(
            [tuple(s1) + tuple(s2) for s1, s2 in encoded_data])
        if len(negatives) > 0:
            negatives = negatives[np.lexsort(
                tuple([
                    negatives[:, col]
                    for col in reversed(range(0, len(dataset.features)))
                ]))]

        if complete:
            constraints = GULA.fit_var_val(dataset.features + dataset.targets,
                                           -1, -1, negatives)
        else:
            # Extract occurences of each transition
            next_states = dict()
            for (i, j) in encoded_data:
                s_i = tuple(i)
                s_j = tuple(j)
                # new init state
                if s_i not in next_states:
                    next_states[s_i] = (s_i, [])
                # new next state
                next_states[s_i][1].append(s_j)

            # DBG
            #eprint("Transitions grouped:\n", next_states)

            impossible = set()
            for i in next_states:
                # Extract all possible value of each variable in next state
                domains = [set() for var in dataset.targets]

                for s in next_states[i][1]:
                    for var in range(0, len(s)):
                        domains[var].add(s[var])
                # DBG
                #eprint("domain: ", domains)
                combinations = Synchronizer.partial_combinations(
                    next_states[i][1], domains)
                #eprint("output: ", combinations)
                #exit()
                # DBG

                # Extract unobserved combinations
                if Synchronizer.HEURISTIC_PARTIAL_IMPOSSIBLE_STATE:
                    missings = [(next_states[i][0], tuple(j))
                                for j in combinations]
                else:
                    missings = [(next_states[i][0], j)
                                for j in list(itertools.product(*domains))
                                if j not in next_states[i][1]]

                if missings != []:
                    impossible.update(missings)
                #eprint("Missings: ", missings)
            # DBG
            #eprint("Synchronous impossible transitions:\n", impossible)

            # convert impossible transition for PRIDE input
            positives = [list(i) + list(j) for i, j in impossible]

            constraints = PRIDE.fit_var_val(
                -1, -1,
                len(dataset.features) + len(dataset.targets), positives,
                negatives)

        # DBG
        #eprint("Learned constraints:\n", [r.logic_form(variables, values, None, len(variables)) for r in constraints])

        # 3) Discard non-applicable constraints
        #---------------------------------------
        necessary_constraints = []
        if not complete:
            necessary_constraints = constraints
        else:
            # Heuristic: clean constraint with not even a rule for each target condition
            for constraint in constraints:
                #eprint(features)
                #eprint(targets)
                #eprint(constraint, " => ", constraint.logic_form(features+targets,targets))
                applicable = True
                for (var, val) in constraint.body:
                    #eprint(var)
                    # Each condition on targets must be achievable by a rule head
                    if var >= len(dataset.features):
                        head_var = var - len(dataset.features)
                        #eprint(var," ",val)
                        matching_rule = False
                        # The conditions of the rule must be in the constraint
                        for rule in rules:
                            #eprint(rule)
                            if rule.head_variable == head_var and rule.head_value == val:
                                matching_conditions = True
                                for (cond_var, cond_val) in rule.body:
                                    if constraint.has_condition(
                                            cond_var
                                    ) and constraint.get_condition(
                                            cond_var) != cond_val:
                                        matching_conditions = False
                                        #eprint("conflict on: ",cond_var,"=",cond_val)
                                        break
                                if matching_conditions:
                                    matching_rule = True
                                    break
                        if not matching_rule:
                            #eprint("USELESS")
                            applicable = False
                            break
                if applicable:
                    #eprint("OK")
                    necessary_constraints.append(constraint)

        constraints = necessary_constraints

        # Clean remaining constraints
        # TODO
        necessary_constraints = []
        for constraint in constraints:
            # Get applicables rules
            compatible_rules = []
            for (var, val) in constraint.body:
                #eprint(var)
                # Each condition on targets must be achievable by a rule head
                if var >= len(dataset.features):
                    compatible_rules.append([])
                    head_var = var - len(dataset.features)
                    #eprint(var," ",val)
                    # The conditions of the rule must be in the constraint
                    for rule in rules:
                        #eprint(rule)
                        if rule.head_variable == head_var and rule.head_value == val:
                            matching_conditions = True
                            for (cond_var, cond_val) in rule.body:
                                if constraint.has_condition(
                                        cond_var) and constraint.get_condition(
                                            cond_var) != cond_val:
                                    matching_conditions = False
                                    #eprint("conflict on: ",cond_var,"=",cond_val)
                                    break
                            if matching_conditions:
                                compatible_rules[-1].append(rule)

            # DBG
            #eprint()
            #eprint(constraint.logic_form(dataset.features,dataset.targets))
            #eprint(compatible_rules)

            nb_combinations = np.prod([len(l) for l in compatible_rules])
            done = 0

            applicable = False
            for combination in itertools.product(*compatible_rules):
                done += 1
                #eprint(done,"/",nb_combinations)

                condition_variables = set()
                conditions = set()
                valid_combo = True
                for r in combination:
                    for var, val in r.body:
                        if var not in condition_variables:
                            condition_variables.add(var)
                            conditions.add((var, val))
                        elif (var, val) not in conditions:
                            valid_combo = False
                            break
                    if not valid_combo:
                        break

                if valid_combo:
                    #eprint("valid combo: ", combination)
                    applicable = True
                    break

            if applicable:
                necessary_constraints.append(constraint)

        return rules, necessary_constraints
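Step 2 above sorts the encoded negative examples row-wise with column 0 as the primary key by passing the feature columns to np.lexsort in reversed order (lexsort treats its last key as the most significant). A toy illustration of that reversal, with made-up data:

import numpy as np

negatives = np.array([[1, 0, 2],
                      [0, 1, 1],
                      [1, 0, 0]])
n_features = 3
order = np.lexsort(tuple(negatives[:, col] for col in reversed(range(n_features))))
print(negatives[order])  # rows ordered by column 0, then column 1, then column 2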
Example #57
0
    def test_03_01_graph(self):
        """Make a simple graph"""
        #
        # The skeleton looks something like this:
        #
        #   .   .
        #    . .
        #     .
        #     .
        i, j = np.mgrid[-10:11, -10:11]
        skel = (i < 0) & (np.abs(i) == np.abs(j))
        skel[(i >= 0) & (j == 0)] = True
        #
        # Put a single label at the bottom
        #
        labels = np.zeros(skel.shape, int)
        labels[(i > 8) & (np.abs(j) < 2)] = 1
        np.random.seed(31)
        intensity = np.random.uniform(size=skel.shape)
        workspace, module = self.make_workspace(labels,
                                                skel,
                                                intensity_image=intensity,
                                                wants_graph=True)
        module.prepare_run(workspace)
        module.run(workspace)
        edge_graph = self.read_graph_file(EDGE_FILE)
        vertex_graph = self.read_graph_file(VERTEX_FILE)
        vidx = np.lexsort((vertex_graph["j"], vertex_graph["i"]))
        #
        # There should be two vertices at the bottom of the array - these
        # are bogus artifacts of the object hitting the edge of the image
        #
        for vidxx in vidx[-2:]:
            self.assertEqual(vertex_graph["i"][vidxx], 20)
        vidx = vidx[:-2]

        expected_vertices = ((0, 0), (0, 20), (10, 10), (17, 10))
        self.assertEqual(len(vidx), len(expected_vertices))
        for idx, v in enumerate(expected_vertices):
            vv = vertex_graph[vidx[idx]]
            self.assertEqual(vv["i"], v[0])
            self.assertEqual(vv["j"], v[1])

        #
        # Get rid of edges to the bogus vertices
        #
        for v in ("v1", "v2"):
            edge_graph = edge_graph[vertex_graph["i"][edge_graph[v] - 1] != 20]

        eidx = np.lexsort((
            vertex_graph["j"][edge_graph["v1"] - 1],
            vertex_graph["i"][edge_graph["v1"] - 1],
            vertex_graph["j"][edge_graph["v2"] - 1],
            vertex_graph["i"][edge_graph["v2"] - 1],
        ))
        expected_edges = (
            ((0, 0), (10, 10), 11,
             np.sum(intensity[(i <= 0) & (j <= 0) & skel])),
            ((0, 20), (10, 10), 11,
             np.sum(intensity[(i <= 0) & (j >= 0) & skel])),
            ((10, 10), (17, 10), 8,
             np.sum(intensity[(i >= 0) & (i <= 7) & skel])),
        )
        for i, (v1, v2, length, total_intensity) in enumerate(expected_edges):
            ee = edge_graph[eidx[i]]
            for ve, v in ((v1, ee["v1"]), (v2, ee["v2"])):
                self.assertEqual(ve[0], vertex_graph["i"][v - 1])
                self.assertEqual(ve[1], vertex_graph["j"][v - 1])
            self.assertEqual(length, ee["length"])
            self.assertAlmostEqual(total_intensity, ee["total_intensity"], 4)
Example #58
0
def RandomSeedOnLine(sim, L_x, N, vola, volb, R):

    # begin by randomly assigning cells one of 2 types
    Nb = int(numpy.floor(N * 0.5))  # half the cells will be type 'b'
    shuffledInds = list(range(0, N))  # get inds 0 to N-1
    numpy.random.shuffle(shuffledInds)  # shuffle the inds
    Types = numpy.zeros(N)  # all cells are type 'a' by default
    Types[shuffledInds[0:Nb]] = 1  # make the first Nb cells in shuffledInds 'b'

    # initial volumes and lengths
    V0s = vola * numpy.ones(N)
    V0s[shuffledInds[0:Nb]] = volb
    Vols = V0s * (
        numpy.random.random(N) + 1
    )  # volumes are drawn randomly from U(V0, 2*V0); V0 depends on type
    Lens = Vols / (
        math.pi * R**2
    ) - 4 * R / 3.0  # all cells have same radius; work lengths out from vols irrespective of type

    # having assigned cell types and volumes, randomly assign positions
    Spans = 0.5 * (Lens + 2 * R
                   )  # half the footprint each cell makes on the x axis

    # create a table of position data to sort

    #Data = numpy.zeros((N+2.3), dtype=[('x',float), ('y',float), ('z',int)])
    Data = numpy.zeros((N + 2, 3))
    Data[1:N + 1, 1] = Spans
    Data[1:N + 1, 2] = 1
    Data[-1, 0] = L_x

    # loop where we check for overlapping cells and remove them
    num_flags = N
    iter = 0
    while num_flags > 0:

        # any cell with a flag gets a new position drawn
        Data[Data[:, 2] > 0, 0] = L_x * numpy.random.random(num_flags)

        # sort cells by x coordinate
        temp = Data.view(numpy.ndarray)
        Data = temp[numpy.lexsort((temp[:, 0], ))]

        # flags are reset, then recomputed
        Data[:, 2] = 0
        Data[numpy.diff(Data[:, 0]) - Data[0:N + 1, 1] - Data[1:N + 2, 1] < 0,
             2] = 1

        # if left wall has been flagged, pass flag to 1st cell on left
        if Data[0, 2] == 1:
            Data[0, 2] = 0
            Data[1, 2] = 1

        # update the flag and iter counts
        num_flags = numpy.sum(Data[:, 2])
        iter += 1

    print('Random seed on line complete after %i iterations' % iter)
    Pos = numpy.zeros((N, 3))
    Pos[:, 0] = Data[1:N + 1, 0]
    Pos[:, 1] = R

    # add cells to the population
    for i in range(0, N):
        sim.addCell(cellType=Types[i],
                    len=Lens[i],
                    rad=R,
                    pos=tuple(Pos[i, :]),
                    dir=tuple([1, 0, 0]))
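Note on the sort inside the placement loop above: with a single key, numpy.lexsort reduces to a stable argsort on that key, so the same row order could be obtained with numpy.argsort. A tiny check with toy values:

import numpy
x = numpy.array([0.7, 0.2, 0.5])
assert (numpy.lexsort((x,)) == numpy.argsort(x, kind='stable')).all()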
Example #59
0
 def argsort(self, *args, **kwargs):
     return np.lexsort((self.right, self.left))
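This orders interval-like data by left endpoint first and breaks ties with the right endpoint, because the last key passed to np.lexsort is the primary one. A toy check with made-up endpoints:

import numpy as np

left = np.array([2, 0, 2])
right = np.array([5, 1, 3])
print(np.lexsort((right, left)))  # -> [1 2 0]: intervals (0,1), (2,3), (2,5)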
Example #60
0
def adaboost(X_train, Y_train):
    ## weak classifier: decision stumps thresholding one feature at a time
    ## calculate D_i (sample weights)
    ## minimize weighted error

    result = np.zeros((1000, 3))
    loss0 = 0
    loss1 = 1
    k = 0
    train = np.c_[X_train, Y_train]
    beta = np.zeros((1000, 1))
    f_x = 0
    ## initial weight
    D = np.ones((X_train.shape[0], 1)) / X_train.shape[0]
    while (k < 100):
        error = np.zeros((X_train.shape[0], X_train.shape[1]))
        for j in range(0, X_train.shape[1]):
            temp = train[np.lexsort((train[:, j], ))].copy()
            X_train_ = temp[:, 0:2].copy()
            Y_train_ = temp[:, 2].copy()
            #print(j)
            for i in range(0, X_train.shape[0]):
                a1 = -1
                a2 = 1
                flag = 0
                ## tree weighted loss
                #print(sum(D[0:(i+1)]*np.reshape((1-(Y_train_[0:(i+1)]*a1))*0.5,(i+1,1))))
                error[i, j] = sum(
                    np.multiply(
                        np.reshape(D[0:(i + 1)], (i + 1, 1)),
                        np.reshape(
                            (1 - (Y_train_[0:(i + 1)] * a1)) * 0.5,
                            (i + 1, 1)))) + sum(
                                np.multiply(
                                    np.reshape(D[(i + 1):],
                                               (X_train_.shape[0] - i - 1, 1)),
                                    np.reshape(
                                        (1 - (Y_train_[(i + 1):] * a2)) * 0.5,
                                        (X_train.shape[0] - i - 1, 1))))
                if (error[i, j] > 0.5):
                    error[i, j] = sum(
                        np.multiply(
                            np.reshape(D[0:(i + 1)], (i + 1, 1)),
                            np.reshape(
                                (1 - (Y_train_[0:(i + 1)] * a2)) * 0.5,
                                (i + 1, 1)))) + sum(
                                    np.multiply(
                                        np.reshape(
                                            D[(i + 1):],
                                            (X_train_.shape[0] - i - 1, 1)),
                                        np.reshape(
                                            (1 -
                                             (Y_train_[(i + 1):] * a1)) * 0.5,
                                            (X_train.shape[0] - i - 1, 1))))
        ij_min = np.c_[np.where(error == error.min())[0][0],
                       np.where(error == error.min())[1][0]]

        print(ij_min)
        #print("error.min")
        #print(error.min())
        beta[k] = 0.5 * np.log((1 - error.min()) / error.min())
        temp = train[np.lexsort((train[:, ij_min[0, 1]], ))].copy()
        X_train = temp[:, 0:2].copy()
        Y_train = temp[:, 2].copy()

        check_error = sum(
            np.multiply(
                np.reshape(D[0:(ij_min[0, 0] + 1)], (ij_min[0, 0] + 1, 1)),
                np.reshape(
                    (1 - (Y_train[0:(ij_min[0, 0] + 1)] * a1)) * 0.5,
                    (ij_min[0, 0] + 1, 1)))) + sum(
                        np.multiply(
                            np.reshape(
                                D[(ij_min[0, 0] + 1):],
                                (X_train.shape[0] - ij_min[0, 0] - 1, 1)),
                            np.reshape(
                                (1 -
                                 (Y_train[(ij_min[0, 0] + 1):] * a2)) * 0.5,
                                (X_train.shape[0] - ij_min[0, 0] - 1, 1))))
        if (check_error < 0.5):
            f_x = f_x + beta[k] * np.r_[-np.ones(ij_min[0, 0] + 1),
                                        np.ones(X_train.shape[0] -
                                                ij_min[0, 0] - 1)]
        else:
            f_x = f_x + beta[k] * np.r_[np.ones(ij_min[
                0, 0] + 1), -np.ones(X_train.shape[0] - ij_min[0, 0] - 1)]
        #print("f_x")
        #print(f_x)
        ## ex loss
        loss0 = loss1
        #print("beta")
        #print(beta[k])
        loss1 = (1 - error.min()) * np.exp(-beta[k]) + error.min() * np.exp(
            beta[k])
        print(loss1)
        result[k] = np.c_[beta[k], X_train[ij_min[0, 0], ij_min[0, 1]],
                          ij_min[0, 1]]
        k = k + 1
        D = np.exp(-Y_train * f_x)
        D = D / sum(D)
        #print("D")
        #print(D[100:105])

    return result
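A hypothetical usage sketch for the routine above, on synthetic two-feature data with labels in {-1, +1}; the data, seed, and sample size are made up. Each returned row holds (beta_k, split value, feature index) as assembled at the end of every boosting round.

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.randn(100, 2)  # two features, as the code above assumes
Y_train = np.where(X_train[:, 0] + X_train[:, 1] > 0, 1.0, -1.0)
result = adaboost(X_train, Y_train)
print(result[:5])  # first few (beta, split value, feature index) rows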