Code Example #1
    def auto_truncate(self,
                      update=True,
                      zero_tol=None,
                      return_update_data=False):
        """Automatically reduces the bond-dimension in case of rank-deficiency.
        
        Canonical form is required. Always perform self.restore_CF() first!
        
        Parameters
        ----------
            update : bool (True)
                Whether to call self.update() after truncation.
            zero_tol : float
                Tolerance for interpretation of values as zero.
            return_update_data : bool
                Whether to return additional data needed to perform a minimal update.
        Returns
        -------
            truncated : bool
                Whether truncation was performed (if return_update_data == False).
            data : stuff
                Additional data needed by self._update_after_truncate() (if return_update_data == True).
        """
        if zero_tol is None:
            zero_tol = self.zero_tol

        new_D = self.D.copy()

        if self.canonical_form == 'right':
            for n in xrange(1, self.N + 1):
                try:
                    ldiag = self.l[n].diag
                except AttributeError:
                    ldiag = self.l[n].diagonal()

                new_D[n] = sp.count_nonzero(abs(ldiag) > zero_tol)
        else:
            for n in xrange(1, self.N + 1):
                try:
                    rdiag = self.r[n].diag
                except AttributeError:
                    rdiag = self.r[n].diagonal()

                new_D[n] = sp.count_nonzero(abs(rdiag) > zero_tol)

        if not sp.all(new_D == self.D):
            data = self.truncate(new_D,
                                 update=update,
                                 return_update_data=return_update_data)

            if update:
                self.update()

            if return_update_data:
                return data
            else:
                return True
        else:
            return False
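At its core the check above only counts how many entries of the (diagonal) density matrix exceed the tolerance. A minimal standalone sketch of that step, with numpy.count_nonzero in place of the sp alias and a made-up spectrum:

import numpy as np

zero_tol = 1e-12
# hypothetical Schmidt spectrum with two numerically-zero entries
ldiag = np.array([0.7, 0.3, 1e-15, 3e-16])

new_D = np.count_nonzero(np.abs(ldiag) > zero_tol)
print(new_D)  # 2: the bond dimension can be truncated from 4 to 2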
Code Example #2
File: otsu.py Project: kleinfeld/medpy
def otsu (img, bins=64):
    """
    Implementation of Otsu's method to find the optimal threshold separating an image into
    fore- and background.
    
    This rather expensive method iterates over a number of thresholds to separate the
    image's histogram into two parts with minimal intra-class variance.
    
    An increase in the number of bins increases the algorithm's specificity at the cost of
    slowing it down.
    
    @param img the image for which to determine the threshold
    @type img numpy.ndarray
    @param bins an integer determining the number of histogram bins
    @type bins int
    
    @return the otsu threshold
    @rtype number
    """
    # cast bins parameter to int
    bins = int(bins)
    
    # cast img parameter to scipy array
    img = scipy.asarray(img)
    
    # check supplied parameters
    if bins <= 1:
        raise AttributeError('At least two bins have to be provided.')
    
    # determine initial threshold and threshold step-length
    steplength = (img.max() - img.min()) / float(bins)
    initial_threshold = img.min() + steplength
    
    # initialize best value variables
    best_bcv = 0
    best_threshold = initial_threshold
    
    # iterate over the thresholds and find highest between class variance
    for threshold in scipy.arange(initial_threshold, img.max(), steplength):
        mask_fg = (img >= threshold)
        mask_bg = (img < threshold)
        
        wfg = scipy.count_nonzero(mask_fg)
        wbg = scipy.count_nonzero(mask_bg)
        
        if 0 == wfg or 0 == wbg: continue
        
        mfg = img[mask_fg].mean()
        mbg = img[mask_bg].mean()
        
        bcv = wfg * wbg * math.pow(mbg - mfg, 2)
        
        if bcv > best_bcv:
            best_bcv = bcv
            best_threshold = threshold
        
    return best_threshold
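As a quick sanity check of the between-class-variance sweep above, here is a self-contained sketch on a synthetic bimodal array; the sample data, random seed and bin count are assumptions, and numpy stands in for the scipy/math calls of the original file:

import numpy as np

rng = np.random.default_rng(0)
img = np.concatenate([rng.normal(2.0, 0.5, 500), rng.normal(8.0, 0.5, 500)])

bins = 64
steplength = (img.max() - img.min()) / float(bins)

best_bcv, best_threshold = 0.0, img.min() + steplength
for threshold in np.arange(img.min() + steplength, img.max(), steplength):
    wfg = np.count_nonzero(img >= threshold)
    wbg = np.count_nonzero(img < threshold)
    if wfg == 0 or wbg == 0:
        continue
    bcv = wfg * wbg * (img[img >= threshold].mean() - img[img < threshold].mean()) ** 2
    if bcv > best_bcv:
        best_bcv, best_threshold = bcv, threshold

print(best_threshold)  # lands between the two modes (roughly 4 to 6)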
Code Example #3
 def auto_truncate(self, update=True, zero_tol=None, return_update_data=False):
     """Automatically reduces the bond-dimension in case of rank-deficiency.
     
     Canonical form is required. Always perform self.restore_CF() first!
     
     Parameters
     ----------
         update : bool (True)
             Whether to call self.update() after truncation.
         zero_tol : float
             Tolerance for interpretation of values as zero.
         return_update_data : bool
             Whether to return additional data needed to perform a minimal update.
     Returns
     -------
         truncated : bool
             Whether truncation was performed (if return_update_data == False).
         data : stuff
             Additional data needed by self._update_after_truncate() (if return_update_data == True).
     """
     if zero_tol is None:
         zero_tol = self.zero_tol
     
     new_D = self.D.copy()
     
     if self.canonical_form == 'right':
         for n in xrange(1, self.N + 1):
             try:
                 ldiag = self.l[n].diag
             except AttributeError:
                 ldiag = self.l[n].diagonal()
             
             new_D[n] = sp.count_nonzero(abs(ldiag) > zero_tol)
     else:
         for n in xrange(1, self.N + 1):
             try:
                 rdiag = self.r[n].diag
             except AttributeError:
                 rdiag = self.r[n].diagonal()
             
             new_D[n] = sp.count_nonzero(abs(rdiag) > zero_tol)
     
     if not sp.all(new_D == self.D):
         data = self.truncate(new_D, update=update, return_update_data=return_update_data)
     
         if update:
             self.update()
         
         if return_update_data:
             return data
         else:
             return True
     else:
         return False
Code Example #4
File: triangulate.py Project: jdranczewski/Magic2
 def __init__(self, dt, index, points, values):
     # Copy the vertex and neighbours info
     self.vertices = dt.simplices[index]
     self.vert_coordinates = points[self.vertices]
     self.neighbours = dt.neighbors[index]
     self.index = index
     # Check whether the triangle is flat
     self.flat = (values[self.vertices[0]] == values[self.vertices[1]]
                  and values[self.vertices[1]] == values[self.vertices[2]])
     if self.flat:
         # If the triangle is flat, it is important to check which of the
         # edges are not parts of the contour - we can flip those without
         # getting lines that cut the contours. The contour lines are always
         # sqrt(2) or shorter
         self.long_edges = [
             distance(points, self.vertices[1],
                      self.vertices[2]) > sp.sqrt(2),
             distance(points, self.vertices[2],
                      self.vertices[0]) > sp.sqrt(2),
             distance(points, self.vertices[0], self.vertices[1]) >
             sp.sqrt(2)
         ]
         # If none of the edges is longer than sqrt(2), the triangle lies
         # within the contour and it doesn't make sense to fix it.
         self.flat = self.flat and sp.count_nonzero(self.long_edges)
     # This will not be used, but assign a value for consistency
     else:
         self.long_edges = [True, True, True]
Code Example #5
File: cross_validation.py Project: rev112/pcml_mnist
    def compute_estimator(self, svm, validation_set):
        """Compute least-square estimator for validation set"""
        val_set_dp = validation_set['dtp']
        val_set_cl = validation_set['cl']
        estimator = 0.0
        validation_size = len(val_set_dp)
        print "Computing estimator with validation part..."

        val_output = svm.get_output_2d(val_set_dp)

        # Compute estimator
        diff = val_set_cl - val_output
        estimator = diff.dot(diff).sum()
        # See p. 183
        estimator *= 1.0 * self.M / self.n

        classify_vect = s.vectorize(svm.classify_output)
        output_classes = classify_vect(val_output)

        diff_classes = output_classes - val_set_cl
        errors = s.count_nonzero(diff_classes)
        classified_correctly = validation_size - errors

        print "Classified correctly: %u/%u (%.2f%%)" % \
              (classified_correctly, validation_size,
               100.0 * classified_correctly / validation_size)
        return estimator
Code Example #6
 def _generate_masked_mesh(self, cell_mask=None):
     r"""
     Generates the mesh based on the cell mask provided
     """
     #
     if cell_mask is None:
         cell_mask = sp.ones(self.data_map.shape, dtype=bool)
     #
     # initializing arrays
     self._edges = sp.ones(0, dtype=str)
     self._merge_patch_pairs = sp.ones(0, dtype=str)
     self._create_blocks(cell_mask)
     #
     # building face arrays
     mapper = sp.ravel(sp.array(cell_mask, dtype=int))
     mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
     mapper = sp.reshape(mapper, (self.nz, self.nx))
     mapper[~cell_mask] = -sp.iinfo(int).max
     #
     boundary_dict = {
         'bottom':
             {'bottom': mapper[0, :][cell_mask[0, :]]},
         'top':
             {'top': mapper[-1, :][cell_mask[-1, :]]},
         'left':
             {'left': mapper[:, 0][cell_mask[:, 0]]},
         'right':
             {'right': mapper[:, -1][cell_mask[:, -1]]},
         'front':
             {'front': mapper[cell_mask]},
         'back':
             {'back': mapper[cell_mask]},
         'internal':
             {'bottom': [], 'top': [], 'left': [], 'right': []}
     }
     #
     # determining cells linked to a masked cell
     cell_mask = sp.where(~sp.ravel(cell_mask))[0]
     inds = sp.in1d(self._field._cell_interfaces, cell_mask)
     inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
     inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
     inds = (inds == 1)
     links = self._field._cell_interfaces[inds]
     #
     # adjusting order so masked cells are all on links[:, 1]
     swap = sp.in1d(links[:, 0], cell_mask)
     links[swap] = links[swap, ::-1]
     #
     # setting side based on index difference
     sides = sp.ndarray(len(links), dtype='<U6')
     sides[sp.where(links[:, 1] == links[:, 0]-self.nx)[0]] = 'bottom'
     sides[sp.where(links[:, 1] == links[:, 0]+self.nx)[0]] = 'top'
     sides[sp.where(links[:, 1] == links[:, 0]-1)[0]] = 'left'
     sides[sp.where(links[:, 1] == links[:, 0]+1)[0]] = 'right'
     #
     # adding each block to the internal face dictionary
     inds = sp.ravel(mapper)[links[:, 0]]
     for side, block_id in zip(sides, inds):
         boundary_dict['internal'][side].append(block_id)
     self.set_boundary_patches(boundary_dict, reset=True)
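The mapper built above relies on a compact renumbering idiom: cells kept by the mask receive consecutive indices in raveled order, while masked cells get a large negative sentinel. A standalone toy version of just that idiom (the mask is made up; numpy replaces the sp alias):

import numpy as np

cell_mask = np.array([[True, False, True],
                      [True, True, False]])

mapper = np.ravel(cell_mask.astype(int))
mapper[mapper == 1] = np.arange(np.count_nonzero(mapper))  # 0..n_kept-1 in raveled order
mapper = mapper.reshape(cell_mask.shape)
mapper[~cell_mask] = -np.iinfo(int).max                    # sentinel for masked cells

print(mapper)  # kept cells numbered 0..3 in raveled order; masked cells hold the sentinel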
Code Example #7
 def count_nonzero(self,array):
     if hasattr(numpy,'count_nonzero'):
         return numpy.count_nonzero(array)
     elif hasattr(scipy,'count_nonzero'):
         return scipy.count_nonzero(array)
     else:
         return (array != 0).sum()
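A short usage note: all three branches of this helper agree, which a plain-NumPy check illustrates (the sample array is arbitrary; the scipy branch only applies to old SciPy versions that re-exported NumPy's function):

import numpy

arr = numpy.array([0.0, 1.5, 0.0, -2.0, 3.0])
assert numpy.count_nonzero(arr) == (arr != 0).sum() == 3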
Code Example #8
File: drazin.py Project: jmorrise/Labs
def drazin(A, tol):
    CB = A.copy()

    Bs = []
    Cs = []
    k = 1

    while not (sp.absolute(CB) < tol).all() and sp.absolute(la.det(CB)) < tol:
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1

    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if (sp.absolute(CB) < tol).all():
        D = sp.dot(D, CB)
    else:
        D = sp.dot(D, np.linalg.matrix_power(CB, -k))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
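The result can be sanity-checked against the defining properties of the Drazin inverse: A D = D A, D A D = D and A^(k+1) D = A^k, where k is the index of A. A hedged sketch (the 3x3 test matrix and its index k = 1 are assumptions; it also assumes drazin above, and the older SciPy namespace it relies on, are importable):

import numpy as np

A = np.array([[1., 3., 0.],
              [0., 1., 0.],
              [0., 0., 0.]])
k = 1                       # assumed index of A: the zero eigenvalue has a trivial Jordan block
D = drazin(A, 1e-9)         # uses the function defined above

print(np.allclose(A @ D, D @ A))                          # A D = D A
print(np.allclose(D @ A @ D, D))                          # D A D = D
print(np.allclose(np.linalg.matrix_power(A, k + 1) @ D,
                  np.linalg.matrix_power(A, k)))          # A^(k+1) D = A^k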
Code Example #9
def calc_BB_2s(Y, Vlh, Vrh_p1, l_si_m1, r_si_p1, dD_max=16, sv_tol=1E-14):
    try:
        U, sv, Vh = la.svd(Y)
    except la.LinAlgError:
        return None, None, 0
    
    dDn = min(sp.count_nonzero(sv > sv_tol), dD_max)
    
    sv = mm.simple_diag_matrix(sv[:dDn])
    
    ss = sv.sqrt()
    
    Z1 = ss.dot_left(U[:, :dDn])
    
    Z2 = ss.dot(Vh[:dDn, :])
    
    BB12n = sp.zeros((Vlh.shape[0], l_si_m1.shape[0], dDn), dtype=Y.dtype)
    
    for s in xrange(Vlh.shape[0]):
        BB12n[s] = l_si_m1.dot(Vlh[s].conj().T).dot(Z1)
    
    BB21np1 = sp.zeros((Vrh_p1.shape[0], dDn, Vrh_p1.shape[1]), dtype=Y.dtype)
    
    try:
        for s in xrange(Vrh_p1.shape[0]):
            BB21np1[s] = r_si_p1.dot_left(Z2.dot(Vrh_p1[s].conj().T))
    except AttributeError:
        for s in xrange(Vrh_p1.shape[0]):
            BB21np1[s] = Z2.dot(Vrh_p1[s].conj().T).dot(r_si_p1)
        
    return BB12n, BB21np1, dDn
Code Example #10
File: adc.py Project: xzf125244170/medpy
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)

    # check if images are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError(
            'The input images shapes differ i.e. {} != {}.'.format(
                b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError(
            'The input images voxel spacing differs i.e. {} != {}.'.format(
                header.get_pixel_spacing(b0hdr),
                header.get_pixel_spacing(bxhdr)))

    # check if supplied threshold value as well as the b value is above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError(
            'The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.'
        )
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')

    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 2.  # divide by 2 to decrease impact
        bxthr = otsu(bximg, 32) / 2.
        if 0 >= b0thr:
            raise ArgumentError(
                'The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError(
                'The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold

    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))

    # threshold b0 + bx DW image to obtain a mask
    # the b0 mask avoids division by 0, the bx mask avoids a zero in the ln(x) computation
    mask = (b0img > b0thr) & (bximg > bxthr)

    logger.debug(
        'excluding {} of {} voxels from the computation and setting them to zero'
        .format(scipy.count_nonzero(mask), scipy.prod(mask.shape)))

    # compute the ADC
    adc = scipy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * scipy.log(bximg[mask] / b0img[mask])

    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
Code Example #11
File: tdvp_common.py Project: wfBranch/evoMPS
def calc_BB_2s(Y, Vlh, Vrh_p1, l_si_m1, r_si_p1, dD_max=16, sv_tol=1E-14):
    try:
        U, sv, Vh = la.svd(Y)
    except la.LinAlgError:
        return None, None, 0

    dDn = min(sp.count_nonzero(sv > sv_tol), dD_max)

    sv = mm.simple_diag_matrix(sv[:dDn])

    ss = sv.sqrt()

    Z1 = ss.dot_left(U[:, :dDn])

    Z2 = ss.dot(Vh[:dDn, :])

    BB12n = sp.zeros((Vlh.shape[0], l_si_m1.shape[0], dDn), dtype=Y.dtype)

    for s in xrange(Vlh.shape[0]):
        BB12n[s] = l_si_m1.dot(Vlh[s].conj().T).dot(Z1)

    BB21np1 = sp.zeros((Vrh_p1.shape[0], dDn, Vrh_p1.shape[1]), dtype=Y.dtype)

    try:
        for s in xrange(Vrh_p1.shape[0]):
            BB21np1[s] = r_si_p1.dot_left(Z2.dot(Vrh_p1[s].conj().T))
    except AttributeError:
        for s in xrange(Vrh_p1.shape[0]):
            BB21np1[s] = Z2.dot(Vrh_p1[s].conj().T).dot(r_si_p1)

    return BB12n, BB21np1, dDn
Code Example #12
    def add_boundary_pores(self, labels=['top', 'bottom', 'front', 'back',
                                         'left', 'right'], offset=None):
        r"""
        Add boundary pores to the specified faces of the network

        Pores are offset from the faces of the domain.

        Parameters
        ----------
        labels : string or list of strings
            The labels indicating the pores defining each face where boundary
            pores are to be added (e.g. 'left' or ['left', 'right'])

        offset : scalar or array_like
            The spacing of the network (e.g. [1, 1, 1]).  This must be given
            since it can be quite difficult to infer from the network,
            for instance if boundary pores have already been added to other faces.

        """
        offset = sp.array(offset)
        if offset.size == 1:
            offset = sp.ones(3)*offset
        for item in labels:
            Ps = self.pores(item)
            coords = sp.absolute(self['pore.coords'][Ps])
            axis = sp.count_nonzero(sp.diff(coords, axis=0), axis=0) == 0
            ax_off = sp.array(axis, dtype=int)*offset
            if sp.amin(coords) == sp.amin(coords[:, sp.where(axis)[0]]):
                ax_off = -1*ax_off
            topotools.add_boundary_pores(network=self, pores=Ps, offset=ax_off,
                                         apply_label=item + '_boundary')
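The axis detection above hinges on a single line: for pores lying on one face exactly one coordinate is constant, so count_nonzero over the row-wise coordinate differences is zero along that axis. A toy illustration (coordinates and spacing are made up; numpy replaces the sp alias):

import numpy as np

# three pores on the x = 0 face of a unit-spaced network
coords = np.array([[0.0, 1.0, 2.0],
                   [0.0, 2.0, 1.0],
                   [0.0, 3.0, 3.0]])

axis = np.count_nonzero(np.diff(coords, axis=0), axis=0) == 0
print(axis)    # [ True False False] -> the face is perpendicular to the x-axis

offset = np.array([1.0, 1.0, 1.0])
ax_off = axis.astype(int) * offset
print(ax_off)  # [1. 0. 0.] -> boundary pores are offset along x only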
Code Example #13
def count_nonzero(array):
    if hasattr(numpy,'count_nonzero'):
        return numpy.count_nonzero(array)
    elif hasattr(scipy,'count_nonzero'):
        return scipy.count_nonzero(array)
    else:
        return (array != 0).sum()
Code Example #14
def drazin(A, tol):
    CB = A.copy()

    Bs = []
    Cs = []
    k = 1

    while (not (sp.absolute(CB) < tol).all()
           and sp.absolute(la.det(CB)) < tol):
        U, s, Vh = la.svd(CB)
        S = sp.diag(s)
        S = S * (S > tol)
        r = sp.count_nonzero(S)
        B = sp.dot(U, sp.sqrt(S))
        C = sp.dot(sp.sqrt(S), Vh)
        B = B[:, 0:r]
        Bs.append(B)
        C = C[0:r, :]
        Cs.append(C)
        CB = sp.dot(C, B)
        k += 1

    D = sp.eye(A.shape[0])
    for B in Bs:
        D = sp.dot(D, B)
    if ((sp.absolute(CB) < tol).all()):
        D = sp.dot(D, CB)
    else:
        D = sp.dot(D, np.linalg.matrix_power(CB, -k))
    for C in reversed(Cs):
        D = sp.dot(D, C)
    return D
Code Example #15
File: train_mnist.py Project: rev112/pcml_mnist
def evaluate(svm, datapoints, classes):
    size = len(datapoints)
    output_classes = svm.classify_2d(datapoints)
    diff_classes = classes - output_classes
    errors = s.count_nonzero(diff_classes)
    classified_correctly = size - errors
    print "Correct: %u/%u, %.2f%%" % (classified_correctly, size,
                                      100.0 * classified_correctly/size)
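The error count is simply count_nonzero applied to the difference of predicted and true labels. A minimal standalone version of that bookkeeping with made-up labels:

import numpy as np

classes = np.array([1, -1, 1, 1, -1])          # true labels
output_classes = np.array([1, 1, 1, -1, -1])   # predicted labels

errors = np.count_nonzero(classes - output_classes)
classified_correctly = len(classes) - errors
print("Correct: %u/%u, %.2f%%" % (classified_correctly, len(classes),
                                  100.0 * classified_correctly / len(classes)))
# Correct: 3/5, 60.00%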
Code Example #16
File: helperfunctions.py Project: Martin-Jung/QSDM
def count_nonzero(array):
    if hasattr(numpy,'count_nonzero'):
        return numpy.count_nonzero(array)
    try:
        import scipy
        if hasattr(scipy,'count_nonzero'):
            return scipy.count_nonzero(array)
        else:
            return (array != 0).sum()
    except ImportError:
        return (array != 0).sum()
Code Example #17
File: helperfunctions.py Project: Martin-Jung/QSDM
def count_nonzero(array):
    if hasattr(numpy, 'count_nonzero'):
        return numpy.count_nonzero(array)
    try:
        import scipy
        if hasattr(scipy, 'count_nonzero'):
            return scipy.count_nonzero(array)
        else:
            return (array != 0).sum()
    except ImportError:
        return (array != 0).sum()
Code Example #18
def reduce_poles(poles, residues):
    """
    This removes the second half of the complex conjugate pair for the poles and residues.
    Each pair together will make an RLC branch
    :param poles: poles from the vector fitting algorithm
    :param residues: residues from the vector fitting algorithm
    :return: poles, residues with the pairs removed.
    """

    number_of_imaginary_poles = scipy.count_nonzero(poles[0, :].imag)
    for pole in range(0, poles.shape[1] - number_of_imaginary_poles // 2):
        if poles[0, pole].imag:
            poles = np.delete(poles, pole + 1, 1)
            residues = np.delete(residues, pole + 1, 1)
    return poles, residues
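The pairing assumption behind reduce_poles is that every pole with a non-zero imaginary part comes together with its complex conjugate, so count_nonzero over the imaginary parts is even and half of those columns are redundant. A toy illustration of that count (the pole values are assumptions):

import numpy as np

poles = np.array([[-1.0 + 0.0j, -2.0 + 3.0j, -2.0 - 3.0j, -4.0 + 0.0j]])

n_imag = np.count_nonzero(poles[0, :].imag)
print(n_imag)        # 2: one conjugate pair
print(n_imag // 2)   # 1 column of that pair can be dropped; the survivor defines the RLC branch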
Code Example #19
    def run(self, center, close, far, maxiters):
        """Loss function:
        L(M) = sum(c in close)max((x-c)^T M (x-c) - 1,0)
               + sum(f in far)max(1-(x-f)^T M (x-f),0)
        """
        if self.M == None:
            self.M = sp.eye(len(center))

        print "SVM learning,", len(close), "near points", len(
            far), "far points"
        for c in close:
            assert (len(c) == len(center))
        for c in far:
            assert (len(c) == len(center))
        if len(close) + len(far) == 0:
            return
        self.loss, grad = svmGradient(center, close, far, self.mu, self.M)
        grad = mask(grad, self.mask)
        if self.alpha == None:
            self.alpha = 1.0 / self.linalg.norm(grad)
        while maxiters > 0:
            #if maxiters % 20 == 0:
            if maxiters % 1 == 0:
                print "loss", self.loss, "step size", self.alpha
            maxiters -= 1
            Mnew, numNegative = projectSemidef(self.M - self.alpha * grad)
            if LA.norm(self.M - Mnew) < self.xtol:
                return "converged"
            if sp.count_nonzero(Mnew) == 0:
                self.alpha *= 0.5
            else:
                futureLoss, futureGrad = svmGradient(center, close, far,
                                                     self.mu, Mnew)
                if futureLoss <= self.loss:
                    self.loss = futureLoss
                    self.M = Mnew
                    grad = mask(futureGrad, self.mask)
                    self.alpha *= 1.2
                else:
                    self.alpha *= 0.5
        return "maxiters reached"
Code Example #20
    def run(self, clusters):
        """Runs to local convergence or maxiters iterations.
        - clusters: a TrainingClusters instance
        """
        maxiter = self.maxiter
        if self.Ms == None:
            M = sp.eye(clusters.problemSpaceDims())
            #whiten the training data along each axis
            for i in xrange(M.shape[0]):
                vi = sp.var([f[i] for f in clusters.problemFeatures])
                if vi != 0:
                    M[i, i] = 1.0 / vi
            self.Ms = [M for i in clusters.library.primitives]
        #impostorupdatefreq = 10
        impostorupdatefreq = 1
        with open(self.logPrefix + '/logs/localiter.txt', 'w') as stop:
            stop.write('local LMNN ' + self.logPrefix + ' beginning ' +
                       str(datetime.now()) + '\r\n')

        #sortallneighborsglobal(clusters, Mprev)
        zeromat = sp.zeros(self.Ms[0].shape)
        t0 = time.time()
        self.loss, currGrad, currimpostors = lossGradientFull(
            clusters, self.mu, self.Ms, self.logPrefix)
        for i, g in enumerate(currGrad):
            currGrad[i] = mask(g, self.mask)
        self.bestLoss = self.loss
        self.Mbests = self.Ms
        if self.alpha == None:
            self.alpha = 1.0 / sum(sp.linalg.norm(g) for g in currGrad)
        print "Full gradient time", time.time() - t0
        lastimpostorupdate = maxiter
        while maxiter > 0:
            print "Loss:", self.loss, "gradient norm", sum(
                sp.linalg.norm(g) for g in currGrad), "step size", self.alpha
            t0 = time.time()
            maxiter -= 1
            Mnew = []
            for M, g in zip(self.Ms, currGrad):
                Mproj, numNegative = projectSemidef(M - self.alpha * g)
                Mnew.append(Mproj)
            #line search
            while all(sp.count_nonzero(M) == 0 for M in Mnew):
                maxiter -= 1
                self.alpha = self.alpha * .7
                Mnew = []
                for M, g in zip(self.Ms, currGrad):
                    Mproj, numNegative = projectSemidef(M - self.alpha * g)
                    Mnew.append(Mproj)
                #with open(prefix+'/logs/globaliter.txt', 'a') as stop:
                #stop.write(str(maxiter) + ' nonzeroing at ' + str(datetime.now()) + '\r\n')
            if maxiter <= lastimpostorupdate - impostorupdatefreq:
                lastimpostorupdate = maxiter
                with open(self.logPrefix + '/logs/localiter.txt', 'a') as stop:
                    stop.write(
                        str(maxiter) + ' full at ' + str(datetime.now()) +
                        '\r\n')
                #with open(self.logPrefix+'/dumps/Ms.txt', 'wb') as M:
                #    pickle.dump(self.Ms, M)
                futureLoss, futureGrad, currimpostors = lossGradientFull(
                    clusters, self.mu, Mnew, self.logPrefix)
                if futureLoss < self.bestLoss:
                    self.Mbests = Mnew
                    self.bestLoss = futureLoss
            else:
                futureLoss, futureGrad = lossGradientEstimate(
                    clusters, self.mu, Mnew, currimpostors, self.logPrefix)

            if futureLoss < self.loss:
                self.alpha *= 1.2
            else:
                self.alpha *= 0.5
            #print "new alpha:",alpha
            Mchange = sum(
                sp.linalg.norm(M - Mi) for (M, Mi) in zip(self.Ms, Mnew))
            self.Ms = Mnew
            currGrad = [mask(g, self.mask) for g in futureGrad]
            self.loss = futureLoss
            if Mchange < self.xtol:
                return "converged"
        return "maxiters reached"
Code Example #21
import scipy as sp
import scipy.linalg as la
import numpy.linalg as npla

#Problem 1
A = sp.array([[.75, .50], [.25, .50]])
npla.matrix_power(A, 2)[0, 0]  #Part a: 0.6875
npla.matrix_power(A, 100)[0, 0]  #Part b: 66.7%

#Problem 2
A = sp.array([[1. / 4, 1. / 3, 1. / 2], [1. / 4, 1. / 3, 1. / 3],
              [1. / 2, 1. / 3, 1. / 6]])
A  #Part a
npla.matrix_power(A, 2)[1, 0]  #Part b: 0.3125
V = la.eig(A)[1]
x = V[:, 0]
la.eig(A)[1]
x = x / sp.sum(x)
x  #Part c: array([ 0.35955056,  0.30337079,  0.33707865])

#Problem 3
bucky = sp.loadtxt("bucky.csv", delimiter=",")

sp.count_nonzero(bucky)
sp.count_nonzero(npla.matrix_power(bucky, 2))
sp.count_nonzero(npla.matrix_power(bucky, 3))

sp.count_nonzero(npla.matrix_power(
    bucky, 9))  #3600, all atoms are connected, so path length 9
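The count_nonzero checks on matrix powers count reachable pairs: a non-zero entry of A^k means at least one walk of length k connects the corresponding atoms. The same idea on a tiny path graph (the adjacency matrix is an assumption, not the bucky data):

import numpy as np

# path graph 0 - 1 - 2 - 3
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]])

for k in (1, 2, 3):
    print(k, np.count_nonzero(np.linalg.matrix_power(A, k)))
# the count of non-zero entries grows with k as more pairs become reachable by length-k walks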
Code Example #22
def graph_from_voxels(fg_markers,
                        bg_markers,
                        regional_term = False,
                        boundary_term = False,
                        regional_term_args = False,
                        boundary_term_args = False):
    """
    Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood.
    
    Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a
    :math:`ndim * 2` neighbourhood.
    
    Every voxel of the image is regarded as a node. They are connected to their immediate
    neighbours via arcs. Whether two voxels are neighbours is determined using
    :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arcs weights
    (n-weights) are computed using the supplied ``boundary_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).
    
    Implicitly the graph holds two additional nodes: the source and the sink, so called
    terminal nodes. These are connected with all other nodes through arcs of an initial
    weight (t-weight) of zero.
    All voxels that are under the foreground markers are considered to be tightly bound
    to the source: The t-weight of the arc from source to these nodes is set to a maximum
    value. The same goes for the background markers: The covered voxels receive a maximum
    (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
    
    All other t-weights are set using the supplied ``regional_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).
    
    Parameters
    ----------
    fg_markers : ndarray
        The foreground markers as binary array of the same shape as the original image.
    bg_markers : ndarray
        The background markers as binary array of the same shape as the original image.
    regional_term : function
        This can be either `False`, in which case all t-weights are set to 0, except for
        the nodes that are directly connected to the source or sink; or a function, in
        which case the supplied function is used to compute the t_edges. It has to
        have the following signature *regional_term(graph, regional_term_args)*, and is
        supposed to compute (source_t_weight, sink_t_weight) for all voxels of the image
        and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
        have only to be computed for nodes where they do not equal zero. Additional
        parameters can be passed to the function via the ``regional_term_args`` parameter.
    boundary_term : function
        This can be either `False`, in which case all n-edges, i.e. between all nodes
        that are not source or sink, are set to 0; or a function, in which case the
        supplied function is used to compute the edge weights. It has to have the
        following signature *boundary_term(graph, boundary_term_args)*, and is supposed
        to compute the edges between the graphs nodes and to add them to the supplied
        `~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to
        the function via the ``boundary_term_args`` parameter.
    regional_term_args : tuple
        Use this to pass some additional parameters to the ``regional_term`` function.
    boundary_term_args : tuple    
        Use this to pass some additional parameters to the ``boundary_term`` function.
    
    Returns
    -------
    graph : `~medpy.graphcut.maxflow.GraphDouble`
        The created graph, ready to execute the graph-cut.
    
    Raises
    ------
    AttributeError
        If an argument is malformed.
    FunctionError
        If one of the supplied functions returns unexpected results.
    
    Notes
    -----
    If a voxel is marked as both foreground and background, the background marker
    is given higher priority.
     
    All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
    """
    # prepare logger
    logger = Logger.getInstance()
    
    # prepare result graph
    logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape)) 
    graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))
    
    logger.info('Performing attribute tests...')
    
    # check, set and convert all supplied parameters
    fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
    bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
    
    # set dummy functions if not supplied
    if not regional_term: regional_term = __regional_term_voxel
    if not boundary_term: boundary_term = __boundary_term_voxel
    
    # check supplied functions and their signature
    if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]):
        raise AttributeError('regional_term has to be a callable object which takes two parameters.')
    if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]):
        raise AttributeError('boundary_term has to be a callable object which takes two parameters.')

    logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size,
                                                                        len(fg_markers.ravel().nonzero()[0]),
                                                                        len(bg_markers.ravel().nonzero()[0])))
    
    # compute the weights of all edges from the source and to the sink i.e.
    # compute the weights of the t_edges Wt
    logger.info('Computing and adding terminal edge weights...')
    regional_term(graph, regional_term_args)

    # compute the weights of the edges between the neighbouring nodes i.e.
    # compute the weights of the n_edges Wr
    logger.info('Computing and adding inter-node edge weights...')
    boundary_term(graph, boundary_term_args)
    
    # collect all voxels that are under the foreground resp. background markers i.e.
    # collect all nodes that are connected to the source resp. sink
    logger.info('Setting terminal weights for the markers...')
    if not 0 == scipy.count_nonzero(fg_markers):
        graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
    if not 0 == scipy.count_nonzero(bg_markers):
        graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])    
    
    return graph.get_graph()
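The marker handling at the end boils down to two NumPy idioms: guarding against an empty marker set with count_nonzero, and flattening a boolean marker volume into node indices for set_source_nodes/set_sink_nodes. A standalone sketch of just those two steps (toy marker array; the graph object itself is omitted):

import numpy as np

fg_markers = np.zeros((2, 3), dtype=bool)
fg_markers[0, 1] = True
fg_markers[1, 2] = True

if np.count_nonzero(fg_markers):                 # only touch the graph when markers exist
    source_nodes = fg_markers.ravel().nonzero()[0]
    print(source_nodes)                          # [1 5] -> raveled voxel indices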
Code Example #23
import scipy as sp
import scipy.linalg as la
import numpy.linalg as npla

#Problem 1
A = sp.array([[.75,.50],[.25,.50]])
npla.matrix_power(A,2)[0,0]#Part a: 0.6875
npla.matrix_power(A,100)[0,0]#Part b: 66.7%

#Problem 2
A = sp.array([[1./4,1./3,1./2],[1./4,1./3,1./3],[1./2,1./3,1./6]]);A #Part a
npla.matrix_power(A,2)[1,0]#Part b: 0.3125
V = la.eig(A)[1]
x = V[:,0]
la.eig(A)[1]
x = x/sp.sum(x);x #Part c: array([ 0.35955056,  0.30337079,  0.33707865])

#Problem 3
bucky = sp.loadtxt ( "bucky.csv" , delimiter = "," )

sp.count_nonzero(bucky)
sp.count_nonzero(npla.matrix_power(bucky,2))
sp.count_nonzero(npla.matrix_power(bucky,3))

sp.count_nonzero(npla.matrix_power(bucky,9)) #3600, all atoms are connected, so path length 9


Code Example #24
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # constants
    result_percentage = 0.1
    minimal_fill = 1/2.
    initial_radius = 5
    
    # loading input image
    img, hdr = load(args.input)

    # Note:
    # DW, T2 tra, flair: 3rd to iterate over
    # T1, T2 sag: ? to iterate over

    # normalize input image
    img -= img.min() # move to contain only positive values
    img /= img.max() # normalize to values between 0 and 1
    
    if args.intermediate:
        intermediates = []
        bestcircles = []
    
    # run iteratively with increasing circle radius
    r = initial_radius
    condition = True
    while (condition):
        logger.debug('Executing for radius {}...'.format(r))
        
        # prepare sphere
        sphere = template_sphere(r, img.ndim - 1)
        #sphere = template_ellipsoid((r*2, r*2, 2))
    
        # compute threshold for stopping condition
        threshold = scipy.count_nonzero(sphere) * minimal_fill
        
        # execute slice-wise general hough transform
        hough_image = __slicewise_ght(img, sphere, args.dim)
        #hough_image = ght(img, sphere)
        
        # count the number of areas that exceed the threshold
        hits = scipy.count_nonzero(hough_image > threshold)
        logger.debug('...got {} hits.'.format(hits))
        
        # update trackers and increment
        condition = hits > 0
        r += 1
        
        if args.intermediate:
            intermediates.append(hough_image / float(scipy.count_nonzero(sphere))) # normalize
            bestcircles.append(hough_image > threshold)
            
    # saving the best percent of the last iteration as result segmentation
    hough_image[hough_image < hough_image.max() - result_percentage * hough_image.max()] = 0
    save(hough_image, args.output, hdr, args.force)
    
    if args.intermediate:
        logger.info('Saving intermediate images:')
        logger.info('(1) {} holds the hough transform images of each iteration step'.format(args.output + '_intermediate.nii.gz'))
        intermediate_images = scipy.zeros(list(hough_image.shape) + [len(intermediates)], hough_image.dtype)
        for i in range(intermediate_images.shape[-1]):
            intermediate_images[...,i] = intermediates[i]
        save(intermediate_images, args.output + '_intermediate.nii.gz', hdr, args.force)
        logger.info('(2) {} holds the centers of the best circles of each iteration step.'.format(args.output + '_bestcircles.nii.gz'))
        bestcircles_images = scipy.zeros(list(hough_image.shape) + [len(bestcircles)], hough_image.dtype)
        for i in range(bestcircles_images.shape[-1]):
            bestcircles_images[...,i] = bestcircles[i]
        save(bestcircles_images, args.output + '_bestcircles.nii.gz', hdr, args.force)
Code Example #25
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # constants
    result_percentage = 0.1
    minimal_fill = 1 / 2.
    initial_radius = 5

    # loading input image
    img, hdr = load(args.input)

    # Note:
    # DW, T2 tra, flair: 3rd to iterate over
    # T1, T2 sag: ? to iterate over

    # normalize input image
    img -= img.min()  # move to contain only positive values
    img /= img.max()  # normalize to values between 0 and 1

    if args.intermediate:
        intermediates = []
        bestcircles = []

    # run iteratively with increasing circle radius
    r = initial_radius
    condition = True
    while (condition):
        logger.debug('Executing for radius {}...'.format(r))

        # prepare sphere
        sphere = template_sphere(r, img.ndim - 1)
        #sphere = template_ellipsoid((r*2, r*2, 2))

        # compute threshold for stopping condition
        threshold = scipy.count_nonzero(sphere) * minimal_fill

        # execute slice-wise general hough transform
        hough_image = __slicewise_ght(img, sphere, args.dim)
        #hough_image = ght(img, sphere)

        # count the number of areas that exceed the threshold
        hits = scipy.count_nonzero(hough_image > threshold)
        logger.debug('...got {} hits.'.format(hits))

        # update trackers and increment
        condition = hits > 0
        r += 1

        if args.intermediate:
            intermediates.append(
                hough_image / float(scipy.count_nonzero(sphere)))  # normalize
            bestcircles.append(hough_image > threshold)

    # saving the best percent of the last iteration as result segmentation
    hough_image[hough_image < hough_image.max() -
                result_percentage * hough_image.max()] = 0
    save(hough_image, args.output, hdr, args.force)

    if args.intermediate:
        logger.info('Saving intermediate images:')
        logger.info(
            '(1) {} holds the hough transform images of each iteration step'.
            format(args.output + '_intermediate.nii.gz'))
        intermediate_images = scipy.zeros(
            list(hough_image.shape) + [len(intermediates)], hough_image.dtype)
        for i in range(intermediate_images.shape[-1]):
            intermediate_images[..., i] = intermediates[i]
        save(intermediate_images, args.output + '_intermediate.nii.gz', hdr,
             args.force)
        logger.info(
            '(2) {} holds the centers of the best circles of each iteration step.'.format(
                args.output + '_bestcircles.nii.gz'))
        bestcircles_images = scipy.zeros(
            list(hough_image.shape) + [len(bestcircles)], hough_image.dtype)
        for i in range(bestcircles_images.shape[-1]):
            bestcircles_images[..., i] = bestcircles[i]
        save(bestcircles_images, args.output + '_bestcircles.nii.gz', hdr,
             args.force)
Code Example #26
File: mnist.py Project: suji0131/MNIST
 def errorInd(self, x):
     '''Calculates the number of misclassifications in the test set.'''
     a = self.classify(x, self.data.test)
     b = self.data.test[1]
     return sp.count_nonzero(b.astype(int) - a.astype(int))
Code Example #27
File: main.py Project: falquaddoomi/disting
def processInput(data, notify=default_notify):
    # take in the data in the given format and extract all of the fields from it
    inputParams = [re.sub(r"\s\s+", " ", str(line.strip().split('=')[1])) for line in data.split('\n')]

    A = scipy.mat(inputParams[0])
    B = scipy.mat(inputParams[1])
    C = scipy.mat(inputParams[2])
    tempAdj = scipy.mat(inputParams[3])
    AdjMat = tempAdj.T

    n = B.size
    m = scipy.count_nonzero(B)
    newB = scipy.zeros((n,m))
    cnt = 0
    rng = scipy.arange(n)
    for i in rng:
      if B[i,0]==1:
        newB[i,cnt] = 1
        cnt=cnt+1
    B = scipy.mat(newB)
    B=B.astype(int)

    n = C.size
    m = scipy.count_nonzero(C)
    newC = scipy.zeros((m,n))
    cnt = 0
    rng = scipy.arange(n)
    for i in rng:
      if C[0,i]==1:
        newC[cnt,i] = 1
        cnt=cnt+1
    C = scipy.mat(newC)
    C=C.astype(int)

    #num compartments
    n = int(inputParams[4])

    #num inputs
    r = int(inputParams[5])

    #num outputs
    m = int(inputParams[6])

    output = StringIO()

    #A = scipy.mat('[1 1 0 0; 1 1 0 0; 1 0 1 1; 0 0 1 1]')
    #B = scipy.mat('[1; 0; 0; 0]')
    #C = scipy.mat('[1 0 0 0; 0 0 1 0]')

    #in adjMat we need to replace all Aii's with the leaks
    #AdjMat = A.T


    #num compartments
    #n = 4

    #num inputs
    #r = 1

    #num outputs
    #m = 2

    notify(" => Inside input processor!")

    # ============================================================================
    # === STEP 1. generate the original graph model
    # ============================================================================

    #after this everything we need is computed, the Jacobian, alphas, betas, etc
    myGraphModel = graphModel.graphModel(A, B, C, n, r, m, AdjMat, notify=notify)

    notify(" => completed graphModel.graphModel")
#     ######
# 	##MB Additions/Modifications
# #    if myGraphModel.QRank != n:
# #        output.write('q rank != n nonobservable')
# #        notify('q rank != n nonobservable')
# #        notify('process ended')
# #        return "model nonobservable"
# #
# #    if myGraphModel.RRank != n:
# #        output.write('r rank != n noncontrollable')
# #        notify('r rank != n noncontrollable')
# #        notify('process ended')
# #        return "model noncontrollable"
#
#
#     if laplaceTools.hasComplexEigenvalues(A):
#         output.write('Complex eigenvalues--not all models may be discovered! (Michael Bilow)')
#     ## <end> MB Additions/Modifications

    nonobservable = False
    noncontrollable = False

    if myGraphModel.QRank != n:
        output.write('q rank != n nonobservable')
        notify('q rank != n nonobservable')
        nonobservable = True

    if myGraphModel.RRank != n:
        output.write('r rank != n noncontrollable')
        notify('r rank != n noncontrollable')
        noncontrollable = True

    if noncontrollable or nonobservable:
        notify('process ended')

        error_msg = "%s %s" % (
            "model nonbservable" if nonobservable else "",
            "model noncontrollable" if noncontrollable else ""
        )
        return error_msg

    notify('DONE MAKING ORIG MODEL')

    notify(n)
    notify(myGraphModel.Rank)

    # ============================================================================
    # === STEP 2. generate all other graphs, check properties
    # ============================================================================

    allCandidates = graphTools.generateAllGraphs(myGraphModel.myGraph, n, myGraphModel.Rank, myGraphModel.Rank)
    notify('my original model graph (transposed)')
    notify( networkx.to_numpy_matrix(myGraphModel.myGraph))
    notify('my original model edges')
    notify(list(myGraphModel.myGraph.edges()))
    #find number of paths from node to observation
    numOrigPathsToObs = graphTools.findNumPathsToObs(myGraphModel, C)

    #make sure every node has a path from an input, to itself
    hasInputConnOrig = graphTools.ensureInputConn(myGraphModel, B)

    #find number of paths from the input node to the other nodes
    numOrigPathsFromInput = graphTools.findNumPathsFromIn(myGraphModel, B)

    #make sure every node has a path from an output, to itself
    hasOuputConnOrig = graphTools.ensureOutputConn(myGraphModel, C)

    #compare the shortest paths
    shortestPathsOrig = graphTools.shortestInOutPaths(myGraphModel, B, C)
    origPathsList = []

    if len(shortestPathsOrig) > 0:
        try:
            origPathsList.append([p for p in shortestPathsOrig[0]])
        except networkx.exception.NetworkXNoPath:
            notify("No Orig Path")
    else:
        output.write('No path from input to output in the original graph')
        output.close()
        exit


    #find number of traps
    numTrapsOrig = graphTools.findNumTraps(myGraphModel)
    passGraphCand = []

    numPathToObsWrong = 0
    numhasInputConnWrong = 0
    numCandPathsFromInputWrong = 0
    hasOuputConnWrong = 0
    numTrapsWrong = 0
    numPathsListWrong = 0

    notify('number of candidates: %d' % len(allCandidates))
    output.write('num of candidates %d' % len(allCandidates))
    output.write('\n')

    # if we never had any candidates, terminate here
    if len(allCandidates) <= 0:
        result = output.getvalue()
        output.close()
        return result

    for candidate in allCandidates:
        #find number of paths from node to observation
        numCandPathsToObs = graphTools.findNumPathsToObs(candidate, C)

        #make sure every node has a path from an input, to itself
        hasInputConn = graphTools.ensureInputConn(candidate, B)

        #find number of paths from the input node to the other nodes
        numCandPathsFromInput = graphTools.findNumPathsFromIn(candidate, B)

        #make sure every node has a path from an output, to itself
        hasOuputConn = graphTools.ensureOutputConn(candidate, C)

        #compare the shortest paths
        shortestPaths = graphTools.shortestInOutPaths(candidate, B, C)

        #find number of traps took 3620 out
        numTraps = graphTools.findNumTraps(candidate)

        origListResult = (numOrigPathsToObs, hasInputConnOrig, numOrigPathsFromInput, hasOuputConnOrig, numTrapsOrig)
        candListResult = (numCandPathsToObs, hasInputConn, numCandPathsFromInput, hasOuputConn, numTraps)

        candPathsList = []

        try:
            candPathsList.append([q for q in shortestPaths[0]])
        except networkx.exception.NetworkXNoPath:
            a = 1


        if numOrigPathsToObs != numCandPathsToObs:
            numPathToObsWrong = numPathToObsWrong + 1
        if hasInputConnOrig != hasInputConn:
            numhasInputConnWrong = numhasInputConnWrong + 1
        if numOrigPathsFromInput != numCandPathsFromInput:
            numCandPathsFromInputWrong = numCandPathsFromInputWrong + 1
        if hasOuputConnOrig != hasOuputConn:
            hasOuputConnWrong = hasOuputConnWrong + 1
        if origPathsList != candPathsList:
            numPathsListWrong = numPathsListWrong + 1
        if numTrapsOrig != numTraps:
            numTrapsWrong = numTrapsWrong + 1


        #paths match
        if origPathsList == candPathsList:
            #all other results equal
            #if numOrigPathsToObs == numCandPathsToObs and hasInputConnOrig == hasInputConn and numOrigPathsFromInput == numCandPathsFromInput:
            #    if hasOuputConnOrig == hasOuputConnOrig and numTrapsOrig == numTraps:
            #        passGraphCand.append(candidate)
            #if numOrigPathsToObs == numCandPathsToObs:
            #    passGraphCand.append(candidate)
            if origListResult == candListResult:
                passGraphCand.append(candidate)



    notify('num of candidates left after graph properties checked: %d out of %d' % (len(passGraphCand), len(allCandidates)))
    output.write('num of candidates left after graph properties checked: %d out of %d' % (len(passGraphCand), len(allCandidates)))
    output.write('\n')

    output.write('number of candidates that had incorrect # paths to obs %d' % numPathToObsWrong)
    output.write('\n')

    output.write('number of candidates that dont have input connectivity %d' % numhasInputConnWrong)
    output.write('\n')

    output.write('number of candidates that had incorrect # paths from input %d' % numCandPathsFromInputWrong)
    output.write('\n')

    output.write('number of candidates that dont have output connectivity %d' % hasOuputConnWrong)
    output.write('\n')

    output.write('number of candidates that have incorrect # traps %d' % numTrapsWrong)
    output.write('\n')

    output.write('number of candidates that have incorrect shortestpaths %d' % numPathsListWrong)
    output.write('\n')

    # if we don't have any candidates left, terminate here
    if len(passGraphCand) <= 0:
        result = output.getvalue()
        output.close()
        return result

    # ============================================================================
    # === STEP 3. check alphas, betas
    # ============================================================================

    numRank = 0
    rankFailedCand = None
    passLaplaceCand = []
    failAlphaLaplaceCand = []
    failBetaLaplaceCand = []

    for cand in passGraphCand:
        #step one rank(A|B) = n
        candAB = laplaceTools.makeSymMat(numpy.append(cand.A, B, axis=1))

        #if (cand.A == myMatch).all() or (cand.A == myMatch2).all() or (cand.A == myMatch4).all():
        #    print "--------------FOUND MATCH!!!!!--------------"
        #    candTF, candCharEqn, candAlphas, candBetas = laplaceTools.calcTF(cand.A, B, C, n)
        #    print candAlphas
        #    print candBetas
        #    print candTF
        #    print cand.A

        # candABRank = laplaceTools.calcRank(candAB)
        candABRank = n
        if candABRank == n:
            #step 2 compare the moment invariants!
            candTF, candCharEqn, candAlphas, candBetas = laplaceTools.calcTF(cand.A, B, C, n)
            cand.Alphas = candAlphas
            cand.Betas = candBetas

            #print "Alpha Keys"
            currNum = 0
            alphasMatch = True
            candAlphaKeysList = []
            for currDict in candAlphas:
                candAlphaKeys = laplaceTools.getOrderedKeys(currDict)
                #print candAlphaKeys
                #print myGraphModel.alphaKeys[currNum]
                candAlphaKeysList.append(candAlphaKeys)
                if candAlphaKeys != myGraphModel.alphaKeys[currNum]:
                    alphasMatch = False

                currNum = currNum + 1


            #print "Beta Keys"
            currNum = 0
            for currDict in candBetas:
                #print laplaceTools.getOrderedKeys(currDict)
                #print myGraphModel.betaKeys[currNum]
                currNum = currNum + 1

            if alphasMatch:
                currNum = 0
                candBetaKeysList = []
                betasMatch = True
                for currDict in candBetas:
                    candBetaKeys = laplaceTools.getOrderedKeys(currDict)
                    candBetaKeysList.append(candBetaKeys)
                    if candBetaKeys != myGraphModel.betaKeys[currNum]:
                        betasMatch = False

                    currNum = currNum + 1

                if betasMatch:
                    passLaplaceCand.append(cand)
                    #print len(passLaplaceCand)
                    #print 'FOUND CANDIDATE'
                    #print candTF
                    #print cand.A
                    #print candAlphaKeysList
                    #print candBetaKeysList

    notify('the alphas: %s' % str(myGraphModel.alphaKeys))
    notify('the betas: %s' % str(myGraphModel.betaKeys))
    output.write('the alphas: %s' % str(myGraphModel.alphaKeys))
    output.write('\n')
    output.write('the betas: %s' % str(myGraphModel.betaKeys))
    output.write('\n')

    notify('num of candidates left alpha betas checked: %d out of %d' % (len(passLaplaceCand), len(allCandidates)))
    output.write('num of candidates left alpha betas checked %d out of %d' % (len(passLaplaceCand), len(allCandidates)))
    output.write('\n')

    # if we don't have any candidates left, terminate here
    if len(passLaplaceCand) <= 0:
        result = output.getvalue()
        output.close()
        return result

    # ============================================================================
    # === STEP 3. check submatrix jacobians
    # ============================================================================

    passJacobianRank = []
    for cand in passLaplaceCand:
        #now calculate the jacobian
        cand.calcJacobianRank()
        if myGraphModel.Rank == cand.Rank:
            passJacobianRank.append(cand)

    notify('num of candidates left after calc rank full jacobian: %d out of %d' % (len(passJacobianRank), len(allCandidates)))
    output.write('num of candidates left after calc rank full jacobian %d out of %d' % (len(passJacobianRank), len(allCandidates)))
    output.write('\n')

    # if we don't have any candidates left, terminate here
    if len(passJacobianRank) <= 0:
        result = output.getvalue()
        output.close()
        return result

    notify("About to compute laplaceTools.reducedJacMat()...")

    #first get the simplified jacobian
    myGraphModel.Jac = laplaceTools.reducedJacMat(myGraphModel.Jac)

    notify("About to compute laplaceTools.calcAllSubranks()...")

    #get the ranks
    myGraphModel.JacComboRanks = laplaceTools.calcAllSubranks(myGraphModel.Jac, myGraphModel.Rank)

    notify("About to process candidates (distributed: %s)..." % DISTRIBUTED)

    if DISTRIBUTED:
        # compute ranks of all submatrices of the jacobian of the original model
        passSubJacobianRankTask = processSingleTotalJacobian.chunks([(myGraphModel, i) for i in passJacobianRank], 10)()
        result = passSubJacobianRankTask.get()
        passSubJacobianRank = [item for sublist in result for item in sublist if item]
    else:
        # for now, we'll do it in the non-iterative way
        passSubJacobianRank = [x for x in [processSingleTotalJacobian(myGraphModel, i) for i in passJacobianRank] if x is not None]

    notify('num of candidates left at the end: %d out of %d' % (len(passSubJacobianRank), len(allCandidates)))
    output.write('num of candidates left at the end: %d out of %d' % (len(passSubJacobianRank), len(allCandidates)))
    output.write('\n')

    # if we don't have any candidates left, terminate here
    if len(passSubJacobianRank) <= 0:
        result = output.getvalue()
        output.close()
        return result

    output.write('Adjacency graphs\n')
    output.write('Original Model\n')
    output.write("%s\n" % str(networkx.to_numpy_matrix(myGraphModel.myGraph).T))
    output.write('A matrix %s \n' % str(myGraphModel.A))
    output.write('\n')

    for i, cand in enumerate(passSubJacobianRank):
        output.write('Model %d \n' % (i+1))
        output.write(str(networkx.to_numpy_matrix(cand.myGraph).T))
        output.write('\n')
        output.write('A matrix \n%s \n' % str(cand.A))
        output.write('\n')
        output.write('\n')

    result = output.getvalue()
    output.close()

    return result
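The B and C preprocessing above expands an indicator vector into a selection matrix whose column count equals the number of non-zero entries. A compact standalone version of the same expansion (toy indicator; plain NumPy arrays instead of scipy.mat):

import numpy as np

B = np.array([[1], [0], [1], [0]])          # which compartments receive an input

n = B.size
m = np.count_nonzero(B)
newB = np.zeros((n, m), dtype=int)
cnt = 0
for i in range(n):
    if B[i, 0] == 1:
        newB[i, cnt] = 1
        cnt += 1

print(newB)
# [[1 0]
#  [0 0]
#  [0 1]
#  [0 0]]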
Code Example #28
File: test_script.py Project: tollbran/Astro
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt

count=0
gradient = []
grad_set=3
while count < 50:
    data = pd.read_excel("data/test/test16C/file_test%d.xlsx" % (count))
    mag_column = data.loc[:,'Instrumental Magnitude']
    mag = mag_column.values
    sp.asarray(mag)
    magn_counter = 0
    y_fit=[]
    x_fit = []
    while magn_counter < 20:
        number_count = sp.count_nonzero(mag < magn_counter)
        if 13 <= magn_counter <= 17: 
            x_fit.append(magn_counter)
            y_fit.append(sp.log10(number_count))
        
        #simple poisson statistics for now
        #euclid_num = 0.6*magn_counter - 7.5
        magn_counter += 1
    
    fit,cov =sp.polyfit(x_fit,y_fit,deg=1,w=sp.array([1,1,1,1,1]),cov=True) 
    func = sp.poly1d(fit)
    gradient.append(sp.asarray(fit[0]))
    count +=1

grad = sp.asarray(gradient)
Code Example #29
t1 = time.time()
for index in range(len(number_of_atoms)):
    if number_of_atoms[index] == 0:
        pass
    else:
        # Objects _____________________________________________________________________
        the_atom = atom(initial_state, decay_matrix, decay_to)
        the_hamiltonian_p = hamiltonian_construct(dipole_operator,
                                                  field_amplitude,
                                                  frequencies[index])
        single_simulation = single_atom_simulation(the_atom,
                                                   [the_hamiltonian_p], nt, dt)
        the_simulation = inhomogeneous_broadening(single_simulation,
                                                  ib_linewidth,
                                                  number_of_atoms[index])

        # Run the simulation __________________________________________________________
        the_flop = the_flop + the_simulation.broadened_time_evolution()

print("Time elapsed = " + str(round(time.time() - t1, 4)) + " seconds")

the_flop = the_flop / sp.count_nonzero(number_of_atoms)

# Save the data _______________________________________________________________
populations_plot(the_times * 1e6, the_flop, loc)
crystal_pop_compare(the_times * 1e6, the_flop, loc)
coherence_plot(the_times * 1e6, the_flop, loc)
total_coherence_7(the_times * 1e6, the_flop, loc)
sp.save(loc + "/data.txt", the_flop)
sp.save(loc + "/times.txt", the_times)
コード例 #30
0
        older_hashtags = [hashtag * ((timestamp - hashtags_init.get(hashtag,timestamp)) > 60) for hashtag in hashtags_init.keys()]
        older_hashtags = filter(None, older_hashtags)

        for hashtag in older_hashtags:
            del hashtags_init[hashtag]
            graph = remove_graph(graph, hashtag)
        # Clean duplicates
        hashtags_init.update(new_hashtags_init)
        for hashtag in new_hashtags_init.keys():
            graph[hashtag] = list(sp.unique(graph.get(hashtag,[]) + new_hashtags_init.keys()))
            #graph(hashtag) = list(sp.unique(graph.get(hashtag,[])+ new_hashtags_init.keys()))
            graph[hashtag].remove(hashtag)

        # Calculate avg degree in the graph
        degrees = [len(graph[node]) for node in graph.keys()]

        # Use the Decimal class to format every value as 0.00
        try:
            avg_degree = str(Decimal(str(1.00*sum(degrees)/sp.count_nonzero(degrees))).quantize(Decimal('0.00')))
        except:
            avg_degree = str(Decimal('0.00').quantize(Decimal('0.00')))
        # Combine the data and ready to write file
        output.append(avg_degree)
# Write the file
with open(output_dir,'w') as out_f:
    out_f.write(os.linesep.join(output))




コード例 #31
0
 def _generate_masked_mesh(self, cell_mask=None):
     r"""
     Generates the mesh based on the cell mask provided
     """
     #
     if cell_mask is None:
         cell_mask = sp.ones(self.data_map.shape, dtype=bool)
     #
     # initializing arrays
     self._edges = sp.ones(0, dtype=str)
     self._merge_patch_pairs = sp.ones(0, dtype=str)
     self._create_blocks(cell_mask)
     #
     # building face arrays
     mapper = sp.ravel(sp.array(cell_mask, dtype=int))
     mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
     mapper = sp.reshape(mapper, (self.nz, self.nx))
     mapper[~cell_mask] = -sp.iinfo(int).max
     #
     boundary_dict = {
         'bottom': {
             'bottom': mapper[0, :][cell_mask[0, :]]
         },
         'top': {
             'top': mapper[-1, :][cell_mask[-1, :]]
         },
         'left': {
             'left': mapper[:, 0][cell_mask[:, 0]]
         },
         'right': {
             'right': mapper[:, -1][cell_mask[:, -1]]
         },
         'front': {
             'front': mapper[cell_mask]
         },
         'back': {
             'back': mapper[cell_mask]
         },
         'internal': {
             'bottom': [],
             'top': [],
             'left': [],
             'right': []
         }
     }
     #
     # determining cells linked to a masked cell
     cell_mask = sp.where(~sp.ravel(cell_mask))[0]
     inds = sp.in1d(self._field._cell_interfaces, cell_mask)
     inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
     inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
     inds = (inds == 1)
     links = self._field._cell_interfaces[inds]
     #
     # adjusting order so masked cells are all on links[:, 1]
     swap = sp.in1d(links[:, 0], cell_mask)
     links[swap] = links[swap, ::-1]
     #
     # setting side based on index difference
     sides = sp.ndarray(len(links), dtype='<U6')
     sides[sp.where(links[:, 1] == links[:, 0] - self.nx)[0]] = 'bottom'
     sides[sp.where(links[:, 1] == links[:, 0] + self.nx)[0]] = 'top'
     sides[sp.where(links[:, 1] == links[:, 0] - 1)[0]] = 'left'
     sides[sp.where(links[:, 1] == links[:, 0] + 1)[0]] = 'right'
     #
     # adding each block to the internal face dictionary
     inds = sp.ravel(mapper)[links[:, 0]]
     for side, block_id in zip(sides, inds):
         boundary_dict['internal'][side].append(block_id)
     self.set_boundary_patches(boundary_dict, reset=True)
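The mapper trick above (zeroing the masked cells, then renumbering the surviving ones with sp.arange(sp.count_nonzero(mapper))) is easy to check in isolation. A minimal sketch of the same renumbering with plain NumPy, independent of the class above:

import numpy as np

# a 3x4 grid in which two cells are masked out
cell_mask = np.array([[True,  True,  False, True],
                      [True,  False, True,  True],
                      [True,  True,  True,  True]])

# flatten to 0/1, then renumber the kept cells 0..n-1 in flat (row-major) order
mapper = np.ravel(np.array(cell_mask, dtype=int))
mapper[mapper == 1] = np.arange(np.count_nonzero(mapper))
mapper = np.reshape(mapper, cell_mask.shape)
mapper[~cell_mask] = -np.iinfo(int).max   # large negative sentinel for masked cells

print(mapper)   # kept cells are numbered 0..9 row by row; masked entries hold the sentinel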
コード例 #32
0
        ]
        older_hashtags = filter(None, older_hashtags)

        for hashtag in older_hashtags:
            del hashtags_init[hashtag]
            graph = remove_graph(graph, hashtag)
        # Clean duplicates
        hashtags_init.update(new_hashtags_init)
        for hashtag in new_hashtags_init.keys():
            graph[hashtag] = list(
                sp.unique(graph.get(hashtag, []) + new_hashtags_init.keys()))
            #graph(hashtag) = list(sp.unique(graph.get(hashtag,[])+ new_hashtags_init.keys()))
            graph[hashtag].remove(hashtag)

        # Calculate avg degree in the graph
        degrees = [len(graph[node]) for node in graph.keys()]

        # Use the Decimal class to format every value as 0.00
        try:
            avg_degree = str(
                Decimal(str(1.00 * sum(degrees) /
                            sp.count_nonzero(degrees))).quantize(
                                Decimal('0.00')))
        except:
            avg_degree = str(Decimal('0.00').quantize(Decimal('0.00')))
        # Combine the data and ready to write file
        output.append(avg_degree)
# Write the file
with open(output_dir, 'w') as out_f:
    out_f.write(os.linesep.join(output))
コード例 #33
0
    def run(self, clusters):
        """Runs to local convergence or maxiter iterations.
        - clusters: a TrainingClusters instance
        """
        maxiter = self.maxiter
        if self.M is None:
            self.M = sp.eye(clusters.problemSpaceDims())
            #whiten the training data along each axis
            """
            for i in xrange(self.M.shape[0]):
                vi = sp.var([f[i] for f in clusters.problemFeatures])
                if vi != 0:
                    self.M[i,i] = 1.0/vi
            """
        #impostorupdatefreq = 10
        impostorupdatefreq = 1
        with open(self.logPrefix + '/logs/globaliter.txt', 'w') as stop:
            stop.write('global ' + self.logPrefix + ' beginning ' +
                       str(datetime.now()) + '\r\n')

        #sortallneighborsglobal(clusters, Mprev)
        zeromat = sp.zeros(self.M.shape)
        t0 = time.time()
        currLoss, currGrad, currimpostors = lossGradientFull(
            clusters, self.mu, self.M, self.logPrefix)
        currGrad = mask(currGrad, self.mask)
        if self.alpha is None:
            self.alpha = 0.5 / sp.linalg.norm(currGrad)
        print "Full gradient time", time.time() - t0
        self.Mbest = self.M
        self.bestLoss = self.loss = currLoss
        lastimpostorupdate = maxiter
        while maxiter > 0:
            print "Loss:", self.loss, "gradient norm", sp.linalg.norm(
                currGrad), "step size", self.alpha
            #print gradient diagonal?
            #print [currGrad[i,i] for i in xrange(currGrad.shape[0])]
            t0 = time.time()
            maxiter -= 1
            Mnew, numNegative = projectSemidef(self.M - self.alpha * currGrad)
            #line search
            while sp.count_nonzero(Mnew) == 0:
                maxiter -= 1
                self.alpha = self.alpha * .7
                Mnew, numNegative = projectSemidef(self.M -
                                                   self.alpha * currGrad)
                #with open(prefix+'/logs/globaliter.txt', 'a') as stop:
                #stop.write(str(maxiter) + ' nonzeroing at ' + str(datetime.now()) + '\r\n')
            print numNegative, "Negative eigenvalues"
            if maxiter <= lastimpostorupdate - impostorupdatefreq:
                lastimpostorupdate = maxiter
                with open(self.logPrefix + '/logs/globaliter.txt',
                          'a') as stop:
                    stop.write(
                        str(maxiter) + ' full at ' + str(datetime.now()) +
                        '\r\n')
                with open(self.logPrefix + '/dumps/M.txt', 'wb') as M:
                    pickle.dump(self.M, M)
                futureLoss, futureGrad, currimpostors = lossGradientFull(
                    clusters, self.mu, Mnew, self.logPrefix)
                if futureLoss < self.bestLoss:
                    self.Mbest = Mnew
                    self.bestLoss = futureLoss
            else:
                futureLoss, futureGrad = lossGradientEstimate(
                    clusters, self.mu, Mnew, currimpostors, self.logPrefix)

            if futureLoss < self.loss:
                self.alpha *= 1.2
            else:
                self.alpha *= 0.5
            #print "new alpha:",alpha
            Mchange = sp.linalg.norm(self.M - Mnew)
            self.M = Mnew
            currGrad = mask(futureGrad, self.mask)
            self.loss = futureLoss
            if Mchange < self.xtol:
                return "converged"
        return "maxiters reached"
コード例 #34
0
ファイル: mcta.py プロジェクト: SinanSalman/mcta
def SolveMCTA(lengths, lanes, P, VehiclesCount=1, Objectives=['D','K','C','PI','E'], FreeFlowSpeeds=[], SkipChecksForSpeed=False):
	"""Solve markov chain traffic assignment problem
	SolveMCTA(lengths, lanes, P, VehiclesCount=1, Objectives=['D','K','C','PI','E'], SkipChecksForSpeed=False)
		lengths			links' length in km
		lanes			links' number of lanes
		P				State transition probability matrix (turning probabilities)
		VehiclesCount	number of vehicles on the network at any time
		Objectives		list of objectives to produce; 'D' = Density,
													'K' = Kemeny constant,
													'C' = clusters,
													'PI' = probability distribution,
													'E' = CO Emissions
		FreeFlowSpeeds	links' free-flow speeds in km/h (required when 'E' is in Objectives)
		SkipChecksForSpeed	True to speed up the code by skipping verification and validation steps; it will also turn verbosity off"""
	global _results
	if _verbose and not SkipChecksForSpeed:
		print ('\nMCTA results:')
		import time
		tmr = time.time()

	pi, c, eigenvalues = _FindEigen(P, 'C' in Objectives, SkipChecksForSpeed)  # solve MC
	if 'C' in Objectives:
		_results['Clusters'] = c
	if 'PI' in Objectives:
		_results['StationaryProb'] = pi

	if 'K' in Objectives:	
		# Kemeny constant; where K = Ki, i = 1..n (using eigenvalues)
		# if 1 in [round(x,6) for x in eigenvalues[1:]]:
		if any([x>0.999999 for x in eigenvalues[1:]]):  # removed round call for speed
			K = float("inf")
			ResultMsg = 'reducible network with multiple communicating classes (K=inf)'
		else:
			K = (1/(1 - eigenvalues[1:])).sum()
			ClosedStatesCount = P.shape[0]-_sp.count_nonzero(P.sum(axis=0))
			if ClosedStatesCount > 0: # are there any closed but not deleted states? (when mcta_edit.Delete_States = False)
				K -= ClosedStatesCount
				if _verbose and not SkipChecksForSpeed:
					assert _sp.count_nonzero(P.sum(axis=0) == 0) == _sp.count_nonzero(P.sum(axis=1) == 0), "Error, number of closed states in P columns and rows do not match!"
			ResultMsg = 'success'
		if not SkipChecksForSpeed:
			mfpt = _MFPT(P,pi)  # calculate mean first passage time matrix
			K1 = 0
			# Kemeny constant; where K = Ki, i = 1..n (using mean first passage times); with ClosedStatesCount added to Kemeny to adjust for closed states
			for j in range(P.shape[0]):
				K1 += mfpt[0,j] * pi[j]
			assert round(K-K1,6)==0,"Kemeny constant calculation methods do not agree! ( {:} != {:} )".format(K, K1)
		_results['KemenyConst'] = K

	if 'D' in Objectives:
		D = VehiclesCount * pi/(lengths*lanes)  # calculate vehicle density - vehicles/(km.lane)
		# if D.min()<0:
		# 	print(f'found non-ergodic network with negative Densities\nD_min = {D.min()}\nK={K}')
		D = _sp.clip(D, 0, None)	# clip Density values to the interval [0,inf), as non-ergodic networks will produce
									# negative eigenvalues, and hence negative densities. The negative values (PI and D) will also
									# be accompanied by extremely large values in other states, and these solutions will
									# be eliminated by the optimizer. In these cases K = inf.
		_results['Density'] = D

	if 'E' in Objectives:
		assert len(FreeFlowSpeeds) == len(lanes), f'Missing FreeFlowSpeeds for some or all lanes ({FreeFlowSpeeds})'
		Speeds, _ = _em.Speed_via_Density(D,'veh/(km.lane)',FreeFlowSpeeds,'km/h') # estimate vehicles speed on each link
		_results['Speeds']=Speeds
		EC, _ = _em.ExternalCost(Speeds, 'km/h', Method='TRANSYT7f') # Calculate average vehicle emissions cost $/(km.veh)
		_results['EmissionCost'] = EC * VehiclesCount * pi  # Calculate average link emissions $/km (in less operations)
		_results['TotalNetworkEmissionCost'] = sum(EC * lengths * D * Speeds)  # Calculate total network emissions $/hr (Emissions * flow, while flow = Density * Speed)

	if _verbose and not SkipChecksForSpeed:
		print ('\tMCTA Message: ' + ResultMsg)
		if 'K' in Objectives:	
			print ('\tKemeny constant: {:.4f} '.format(K) + ' steps ({:.4f} '.format(K*_results['StepTime']*60) + ' min)')
			print ('\texpected time to mixing: {:.4f} '.format(K+1) + ' steps ({:.4f} '.format((K+1)*_results['StepTime']*60) + ' min)')
		if 'D' in Objectives:
			print ('\tMax density: {:.4f} '.format(max(D)) + ' vehicles/(km.lane)')
		if 'E' in Objectives:
			print(f'\tMin/Avg/Max network speeds (based on BPR): {min(Speeds):.2f}/{Speeds.mean():.2f}/{max(Speeds):.2f} km/h')
			print(f'\tMin/Avg/Max vehicle emission cost on a road (based on its speed): {min(EC):.5f}/{EC.mean():.5f}/{max(EC):.5f} g/(km.veh)')
			print(f"\tMin/Avg/Max link emissions cost: {min(_results['EmissionCost']):.2f}/{_results['EmissionCost'].mean():.2f}/{max(_results['EmissionCost']):.2f} $/km")
			print(f"\tTotal network emission cost: {_results['TotalNetworkEmissionCost']:.2f} $/hr")
		print ('\tcompleted MCTA in {:.3f} seconds.'.format(time.time()-tmr))

	_results['Message'] = ResultMsg
	return _results
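For reference, the two Kemeny-constant computations cross-checked above are the eigenvalue form and the mean-first-passage-time form. For an ergodic chain with stationary distribution \pi and eigenvalues 1 = \lambda_1, \lambda_2, \ldots, \lambda_n they agree:

    K \;=\; \sum_{i=2}^{n} \frac{1}{1 - \lambda_i} \;=\; \sum_{j=1}^{n} m_{kj}\,\pi_j \qquad (m_{jj} = 0,\ \text{independent of the start state } k)

which is why the code can assert that both evaluations match (after subtracting the count of closed-but-undeleted states).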
コード例 #35
0
ファイル: data_analysis.py プロジェクト: tollbran/Astro
def magnitude_graph_cumu3(excel_file1, excel_file2, excel_file3):
    data1 = pd.read_excel(excel_file1)
    data2 = pd.read_excel(excel_file2)
    data3 = pd.read_excel(excel_file3)
    mag_column1 = data1.loc[:, 'Instrumental Magnitude']
    mag1 = mag_column1.values
    sp.asarray(mag1)
    mag_column2 = data2.loc[:, 'Instrumental Magnitude']
    mag2 = mag_column2.values
    sp.asarray(mag2)
    mag_column3 = data3.loc[:, 'Instrumental Magnitude']
    mag3 = mag_column3.values
    sp.asarray(mag3)
    magn_counter = 0
    x = []
    xerr = sp.array([11, 12, 13, 14, 15, 16, 17, 18, 20])
    y1 = []
    y2 = []
    y3 = []
    yline = []

    y_fit1 = []
    y_fit2 = []
    y_fit3 = []
    x_fit = []
    while magn_counter < 20:
        number_count1 = sp.count_nonzero(mag1 < magn_counter)
        number_count2 = sp.count_nonzero(mag2 < magn_counter)
        number_count3 = sp.count_nonzero(mag3 < magn_counter)
        x.append(magn_counter)
        y1.append(sp.log10(number_count1))
        y2.append(sp.log10(number_count2))
        y3.append(sp.log10(number_count3))
        if 13 <= magn_counter <= 17:
            x_fit.append(magn_counter)
            y_fit1.append(sp.log10(number_count1))
            y_fit2.append(sp.log10(number_count2))
            y_fit3.append(sp.log10(number_count3))
        #simple poisson statistics for now
        #error.append(sp.log10(sp.sqrt(number_count)))
        euclid_num = 0.6 * magn_counter - 5.5
        yline.append(euclid_num)
        magn_counter += 1

    fit1, cov1 = sp.polyfit(x_fit, y_fit1, deg=1, w=[1, 1, 1, 1, 1], cov=True)
    func1 = sp.poly1d(fit1)

    sp.asarray(x)
    sp.asarray(y1)
    sp.asarray(y2)
    sp.asarray(y3)
    sp.asarray(x_fit)
    sp.asarray(y_fit1)
    sp.asarray(y_fit2)
    sp.asarray(y_fit3)
    sp.asarray(yline)
    error_up3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.10, 0.061, 0.038, 0.023, 0.015,
        0.010, 0.006, 0.003, 0.0015
    ])
    error_down3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.13, 0.071, 0.041, 0.0245, 0.0160,
        0.0104, 0.0064, 0.0032, 0.0015
    ])
    error_up4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 0.063, 0.04, 0.024, 0.0168,
        0.011, 0.006, 0.002, 0.0007
    ])
    error_down4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.148, 0.078, 0.045, 0.0269, 0.0168,
        0.011, 0.0055, 0.002, 0.001
    ])
    error_up5 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 0.066, 0.042, 0.025, 0.0165,
        0.01, 0.0045, 0.0013, 0.0005
    ])
    error_down5 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.147, 0.078, 0.046, 0.0268, 0.0172,
        0.0102, 0.0046, 0.0013, 0.0005
    ])
    error3 = [error_down3, error_up3]
    error4 = [error_down4, error_up4]
    error5 = [error_down5, error_up5]

    #fig, axs = plt.subplots(1, 3,sharey=True)
    #plt.scatter(x,y1,marker='^',color='b',alpha=0.5)
    plt.errorbar(x, y1, yerr=error3, capsize=2, elinewidth=0.5, fmt='.b')
    plt.ylim(0.5, 4)
    plt.xlim(10, 20)
    plt.ylabel('log(N(<m))')
    plt.xlabel('Calibrated magnitude')
    plt.grid()
    #plt.scatter(x,y2,marker='o',color='g',alpha=0.5)
    plt.errorbar(x, y2, yerr=error4, capsize=2, elinewidth=0.5, fmt='.g')
    #plt.scatter(x,y3,marker='s',color='r',alpha=0.5)
    plt.errorbar(x, y3, yerr=error5, capsize=2, elinewidth=0.5, fmt='.r')
    plt.legend(['3σ threshold', '4σ threshold', '5σ threshold'], loc=4)
    plt.show()
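The euclid_num line above encodes the classical Euclidean source-count prediction: for a homogeneous, non-evolving population in flat space the limiting distance of a flux-limited survey scales as d \propto 10^{0.2 m}, so

    N(<m) \;\propto\; d^{3} \;\propto\; 10^{0.6\,m} \quad\Longrightarrow\quad \log_{10} N(<m) \;=\; 0.6\,m + \mathrm{const},

and the fitted slopes are compared against 0.6 (the intercept, -5.5 here, is survey-specific).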
コード例 #36
0
ファイル: generate.py プロジェクト: kleinfeld/medpy
def graph_from_voxels(
    fg_markers, bg_markers, regional_term=False, boundary_term=False, regional_term_args=False, boundary_term_args=False
):
    """
    Create a graphcut.maxflow.GraphDouble object for all voxels of an image with a
    ndim * 2 neighbourhood.
    
    Every voxel of the image is regarded as a node. They are connected to their immediate
    neighbours via arcs. Whether two voxels are neighbours is determined using
    ndim*2-connectedness (e.g. 3*2=6 for 3D). In the next step the arc weights
    (n-weights) are computed using the supplied boundary_term function.
    
    Implicitly the graph holds two additional nodes: the source and the sink, so-called
    terminal nodes. These are connected with all other nodes through arcs of an initial
    weight (t-weight) of zero.
    All voxels that are under the foreground markers are considered to be tightly bound
    to the source: The t-weight of the arc from source to these nodes is set to a maximum
    value. The same goes for the background markers: The covered voxels receive a maximum
    (graphcut.graph.GCGraph.MAX) t-weight for their arc towards the sink.
    
    @note If a voxel is marked as both foreground and background, the background marker
    is given higher priority.
     
    @note all arcs whose weight is not explicitly set are assumed to carry a weight of
    zero.
    
    @param fg_markers The foreground markers as binary array of the same shape as the original image.
    @type fg_markers ndarray
    @param bg_markers The background markers as binary array of the same shape as the original image.
    @type bg_markers ndarray
    @param regional_term This can be either
                         False - all t-weights are set to 0, except for the nodes that are
                         directly connected to the source or sink.
                         , or a function - 
                         The supplied function is used to compute the t_edges. It has to
                         have the following signature
                         regional_term(graph, regional_term_args),
                         and is supposed to compute (source_t_weight, sink_t_weight) for
                         all voxels of the image and add these to the passed graph.GCGraph
                         object. The weights have only to be computed for nodes where
                         they do not equal zero. Additional parameters can be passed via
                         the regional_term_args argument.
    @type regional_term function
    @param boundary_term This can be either
                         False - 
                         In which case the weight of all n_edges i.e. between all nodes
                         that are not source or sink, are set to 0.
                         , or a function -
                         In which case it is used to compute the edges weights. The
                         supplied function has to have the following signature
                         fun(graph, boundary_term_args), and is supposed to compute the
                         edges between the graphs node and to add them to the supplied
                         graph.GCGraph object. Additional parameters
                         can be passed via the boundary_term_args argument.
    @type boundary_term function
    @param regional_term_args Use this to pass some additional parameters to the
                              regional_term function.
    @param boundary_term_args Use this to pass some additional parameters to the
                              boundary_term function.
    
    @return the created graph
    @rtype graphcut.maxflow.GraphDouble
    
    @raise AttributeError If an argument is malformed.
    @raise FunctionError If one of the supplied functions returns unexpected results.
    """
    # prepare logger
    logger = Logger.getInstance()

    # prepare result graph
    logger.debug(
        "Assuming {} nodes and {} edges for image of shape {}".format(
            fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape
        )
    )
    graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))

    logger.info("Performing attribute tests...")

    # check, set and convert all supplied parameters
    fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
    bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)

    # set dummy functions if not supplied
    if not regional_term:
        regional_term = __regional_term_voxel
    if not boundary_term:
        boundary_term = __boundary_term_voxel

    # check supplied functions and their signature
    if not hasattr(regional_term, "__call__") or not 2 == len(inspect.getargspec(regional_term)[0]):
        raise AttributeError("regional_term has to be a callable object which takes two parameter.")
    if not hasattr(boundary_term, "__call__") or not 2 == len(inspect.getargspec(boundary_term)[0]):
        raise AttributeError("boundary_term has to be a callable object which takes two parameters.")

    logger.debug(
        "#nodes={}, #hardwired-nodes source/sink={}/{}".format(
            fg_markers.size, len(fg_markers.ravel().nonzero()[0]), len(bg_markers.ravel().nonzero()[0])
        )
    )

    # compute the weights of all edges from the source and to the sink i.e.
    # compute the weights of the t_edges Wt
    logger.info("Computing and adding terminal edge weights...")
    regional_term(graph, regional_term_args)

    # compute the weights of the edges between the neighbouring nodes i.e.
    # compute the weights of the n_edges Wr
    logger.info("Computing and adding inter-node edge weights...")
    boundary_term(graph, boundary_term_args)

    # collect all voxels that are under the foreground resp. background markers i.e.
    # collect all nodes that are connected to the source resp. sink
    logger.info("Setting terminal weights for the markers...")
    if not 0 == scipy.count_nonzero(fg_markers):
        graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
    if not 0 == scipy.count_nonzero(bg_markers):
        graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])

    return graph.get_graph()
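A usage sketch for the function above. The marker arrays and the boundary term here are hypothetical and purely illustrative; only the signatures documented in the docstring are assumed:

import scipy  # legacy alias, matching the snippet's own imports

# toy 3D image and hand-drawn markers of the same shape
image = scipy.arange(27, dtype=float).reshape((3, 3, 3))
fg_markers = scipy.zeros(image.shape, dtype=scipy.bool_)
bg_markers = scipy.zeros(image.shape, dtype=scipy.bool_)
fg_markers[1, 1, 1] = True   # a seed voxel inside the object
bg_markers[0, :, :] = True   # a slab of known background

def toy_boundary_term(graph, args):
    # obeys the documented fun(graph, boundary_term_args) signature; a real
    # implementation would derive n-edge weights from `args` (e.g. intensity
    # differences between neighbouring voxels) and add them to `graph`
    pass

gcgraph = graph_from_voxels(fg_markers, bg_markers,
                            boundary_term=toy_boundary_term,
                            boundary_term_args=image)
# gcgraph is a graphcut.maxflow.GraphDouble ready for the max-flow computation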
コード例 #37
0
ファイル: adc.py プロジェクト: AlexanderRuesch/medpy
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)

    # check if image are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr)))
    
    # check if supplied threshold value as well as the b value is above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError('The supplied threshold value must be greater than 0, otherwise a division by 0 might occur.')
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')
    
    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 2. # divide by 2 to decrease impact
        bxthr = otsu(bximg, 32) / 2.
        if 0 >= b0thr:
            raise ArgumentError('The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError('The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold
    
    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))
    
    # threshold b0 + bx DW image to obtain a mask
    # b0 mask avoid division through 0, bx mask avoids a zero in the ln(x) computation
    mask = (b0img > b0thr) & (bximg > bxthr)
    
    logger.debug('excluding {} of {} voxels from the computation and setting them to zero'.format(scipy.prod(mask.shape) - scipy.count_nonzero(mask), scipy.prod(mask.shape)))
    
    # compute the ADC
    adc = scipy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * scipy.log(bximg[mask] / b0img[mask])
            
    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
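The threshold mask computed in main() above exists because the ADC computation assumes the usual mono-exponential diffusion-weighting model, whose log-ratio is undefined wherever the b=0 signal is zero or either image is non-positive:

    S_b \;=\; S_0 \, e^{-b\,\mathrm{ADC}} \qquad\text{so that}\qquad \ln\!\bigl(S_b / S_0\bigr) \;=\; -\,b\,\mathrm{ADC}.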
コード例 #38
0
ファイル: average_degree.py プロジェクト: LouisChang/src
            new_hashtags_init = {}
        
        expired_hashtags = [hashtag*((timestamp - hashtags_init.get(hashtag, timestamp)) > 60) for hashtag in hashtags_init.keys()]
        expired_hashtags =  filter(None, expired_hashtags)
        
        
        for hashtag in expired_hashtags:
            del hashtags_init[hashtag]
            graph = remove_edges(graph, hashtag)

        hashtags_init.update(new_hashtags_init)
        for hashtag in new_hashtags_init.keys():
            graph[hashtag] = list(sp.unique(graph.get(hashtag,[]) + new_hashtags_init.keys()))
            graph[hashtag].remove(hashtag)
        
        # Calculate the average degree over the current graph
        degrees = [len(graph[node]) for node in graph.keys()]

        try:
            avg_degree = str(round(1.0*sum(degrees)/sp.count_nonzero(degrees),2))
        except:
            avg_degree = str(0)

        output_data.append(avg_degree)



with open(output_dir,'w') as output_file:
    output_file.write(os.linesep.join(output_data))
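A worked toy example of the average-degree formula used above (illustrative data only, using NumPy in place of the legacy scipy alias):

import numpy as np

# illustrative 60-second window: two hashtags co-occur, one is isolated
graph = {'#spark': ['#hadoop'], '#hadoop': ['#spark'], '#storm': []}
degrees = [len(graph[node]) for node in graph]                 # [1, 1, 0]
avg_degree = round(1.0 * sum(degrees) / np.count_nonzero(degrees), 2)
print(avg_degree)   # 1.0 -- isolated nodes are excluded from the denominator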

コード例 #39
0
ファイル: data_analysis.py プロジェクト: tollbran/Astro
def magnitude_graph_cumu(excel_file):
    data = pd.read_excel(excel_file)
    mag_column = data.loc[:, 'Instrumental Magnitude']
    mag = mag_column.values
    sp.asarray(mag)
    print(mag)
    magn_counter = 0
    x = []
    y = []
    error = []
    y_fit = []
    x_fit = []
    while magn_counter < 20:

        number_count = sp.count_nonzero(mag < magn_counter)
        x.append(magn_counter)
        y.append(sp.log10(number_count))
        if 13 <= magn_counter <= 16:
            x_fit.append(magn_counter)
            y_fit.append(sp.log10(number_count))

        #simple poisson statistics for now
        error.append(sp.log10(sp.sqrt(number_count)))
        #euclid_num = 0.6*magn_counter - 7.5
        magn_counter += 1

    error_up3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.10, 0.061, 0.038, 0.023, 0.015,
        0.010, 0.006, 0.003, 0.0015
    ])
    error_down3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.13, 0.071, 0.041, 0.0245, 0.0160,
        0.0104, 0.0064, 0.0032, 0.0015
    ])
    error_up4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 0.063, 0.04, 0.024, 0.0168,
        0.011, 0.006, 0.002, 0.0007
    ])
    error_down4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.148, 0.078, 0.045, 0.0269, 0.0168,
        0.011, 0.0055, 0.002, 0.001
    ])
    error3 = [error_down3, error_up3]
    error4 = [error_down4, error_up4]

    fit, cov = sp.polyfit(x_fit,
                          y_fit,
                          deg=1,
                          w=1 / sp.array([0.045, 0.0269, 0.0168, 0.011]),
                          cov=True)
    func = sp.poly1d(fit)
    sp.asarray(x)
    sp.asarray(y)
    sp.asarray(x_fit)
    sp.asarray(y_fit)
    sp.asarray(error)
    plt.scatter(x, y, color='k', marker='x')
    plt.plot(x_fit, func(x_fit), color='r', ls='--')
    plt.errorbar(x, y, yerr=error4, capsize=2, elinewidth=0.5, fmt='.g')
    plt.grid()
    plt.xlabel('Magnitude')
    plt.ylabel('log(N(<m))')
    plt.ylim(0, 4)
    plt.xlim(10, 20)
    plt.show()
    print('Slope = %.3e Error = %.3e' % (fit[0], sp.sqrt(cov[0, 0])))  # error = sqrt of the covariance diagonal
    return fit[0]
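The w argument handed to sp.polyfit here follows the NumPy convention: the weights multiply the unsquared residuals, so for Gaussian uncertainties one passes 1/\sigma_i (as done above), not 1/\sigma_i^2. The quantity being minimized is

    \min_{p} \;\sum_i \bigl( w_i \, [\, y_i - p(x_i) \,] \bigr)^2, \qquad w_i = 1/\sigma_i .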