def calVCR(segment):
    trackpoints = segment['track_points']
    if len(trackpoints) < 3:
        return 0
    else:
        Pv = 0
        for (i, point) in enumerate(trackpoints[:-2]):
            currPoint = point
            nextPoint = trackpoints[i+1]
            nexNextPt = trackpoints[i+2]
            velocity1 = calSpeed(currPoint, nextPoint)
            velocity2 = calSpeed(nextPoint, nexNextPt)
            if velocity1 is not None and velocity2 is not None:
                if velocity1 != 0:
                    VC = old_div(abs(velocity2 - velocity1),velocity1)
                else:
                    VC = 0
            else:
                VC = 0

            if VC > 0.7:
                Pv += 1

        segmentDist = segment['distance']
        if segmentDist is not None and segmentDist != 0:
            return old_div(Pv,segmentDist)
        else:
            return 0
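# A minimal, self-contained sketch of how calVCR might be called. calSpeed is
# not shown above, so a hypothetical stub that just reads a 'speed' field is
# defined here purely for illustration; the real calSpeed presumably derives
# speed from coordinates and timestamps.
def calSpeed(p1, p2):          # hypothetical stand-in, illustration only
    return p2.get('speed')

segment = {
    'track_points': [{'speed': s} for s in (1.0, 1.2, 3.0, 3.1)],
    'distance': 2.0,
}
print(calVCR(segment))         # 0.5: one velocity change above 0.7, over distance 2.0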
Example #2
    def time_std(self):
        if hasattr(self, '_time_std'):
            return self._time_std
        if self.savedir is not None:
            try:
                with open(join(self.savedir, 'time_std.pkl'),
                          'rb') as f:
                    time_std = pickle.load(f)
            except IOError:
                pass
            else:
                # Same protocol as the averages. Make sure the
                # std is a single 4D (zyxc) array and if not just
                # re-calculate the time std.
                if isinstance(time_std, np.ndarray):
                    self._time_std = time_std
                    return self._time_std

        sums = np.zeros(self.frame_shape)
        sums_squares = np.zeros(self.frame_shape)
        counts = np.zeros(self.frame_shape)
        for frame in it.chain.from_iterable(self):
            sums += np.nan_to_num(frame)
            sums_squares += np.square(np.nan_to_num(frame))
            counts[np.isfinite(frame)] += 1
        means = old_div(sums, counts)
        mean_of_squares = old_div(sums_squares, counts)
        std = np.sqrt(mean_of_squares-np.square(means))
        if self.savedir is not None and not self._read_only:
            with open(join(self.savedir, 'time_std.pkl'), 'wb') as f:
                pickle.dump(std, f, pickle.HIGHEST_PROTOCOL)
        self._time_std = std
        return self._time_std
    def _execute_GRAPHCUT(self, roi, result):
        data = self.Input(roi.start, roi.stop).wait()
        data = vigra.taggedView(data, self.Input.meta.axistags)
        data_zyx = data[0,...,0]

        beta = self.GraphcutBeta.value
        ft = self.FinalThreshold.value

        # The segmentGC() function will implicitly threshold at 0.5,
        # but we want to respect the user's FinalThreshold setting.
        # Here, we scale from input pixels --> gc potentials in the following way:
        #
        # pixels below FT:     0.0..FT  --> 0.0..0.5
        # pixels at/above FT:  FT..1.0  --> 0.5 and above
        #
        # For instance, input pixels that match the user's FT exactly will map to 0.5,
        # and graphcut will place them on the threshold border.

        above_threshold_mask = (data_zyx >= ft)
        below_threshold_mask = ~above_threshold_mask

        data_zyx[below_threshold_mask] *= old_div(0.5,ft)
        data_zyx[above_threshold_mask] = 0.5 + old_div((data_zyx[above_threshold_mask] - ft),(1-ft))
        
        binary_seg_zyx = segmentGC( data_zyx, beta ).astype(np.uint8)
        del data_zyx
        vigra.analysis.labelMultiArrayWithBackground(binary_seg_zyx, out=result[0,...,0])
Example #4
def _minolta2float(inVal):
    """Takes a number, or numeric array (any shape) and returns the appropriate
    float.

    Minolta stores:
        +ve values as val * 10000
        -ve values as -val * 10000 + 50000

    >>> _minolta2float(50347)  # NB returns a single float
    -0.0347
    >>> _minolta2float(10630)
    1.063
    >>> _minolta2float([10635, 50631])  # NB returns a numpy array
    array([ 1.0635, -0.0631])

    """
    # convert to array if needed
    arr = numpy.asarray(inVal)
    # handle single vals
    if arr.shape == ():
        if inVal < 50000:
            return old_div(inVal, 10000.0)
        else:
            return old_div((-inVal + 50000.0), 10000.0)
    # handle arrays
    negs = (arr > 50000)  # find negative values
    out = old_div(arr, 10000.0)  # these are the positive values
    out[negs] = old_div((-arr[negs] + 50000.0), 10000.0)
    return out
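# Quick self-contained check of the Minolta encoding handled above (numpy and
# old_div are assumed to be imported in this module, as the function requires).
print(_minolta2float(10630))           # 1.063 (single value -> float)
print(_minolta2float([10635, 50631]))  # array([ 1.0635, -0.0631])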
def mean_psd(y, method = 'logmexp'):
    """
    Averaging the PSD
    Inputs:
    y: np.ndarray
        PSD values
    method: string
        method of averaging the noise.
        Choices:
            'mean': Mean
            'median': Median
            'logmexp': Exponential of the mean of the logarithm of PSD (default)
    """

    if method == 'mean':
        mp = np.sqrt(np.mean(old_div(y,2),axis=-1))
    elif method == 'median':
        mp = np.sqrt(np.median(old_div(y,2),axis=-1))
    else:
        mp = np.log(old_div((y+1e-10),2))
        mp = np.mean(mp,axis=-1)
        mp = np.exp(mp)
        mp = np.sqrt(mp)
#        mp = np.sqrt(np.exp(np.mean(np.log(y/2),axis=-1)))

    return mp
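# Small sanity check for mean_psd (np and old_div are assumed to be imported in
# this module, as the function requires). For a flat PSD all methods agree.
flat_psd = np.full(8, 2.0)                    # constant PSD of 2.0
print(mean_psd(flat_psd, method='mean'))      # 1.0 == sqrt(2.0 / 2)
print(mean_psd(flat_psd, method='median'))    # 1.0
print(mean_psd(flat_psd, method='logmexp'))   # ~1.0 (up to the 1e-10 offset)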
Example #6
    def fitGammaFun(self, x, y):
        """
        Fits a gamma function to the monitor calibration data.

        **Parameters:**
            - x values are the monitor look-up-table values, either 0-255 or 0.0-1.0
            - y values are the measured luminances from a photometer/spectrometer

        """
        minGamma = 0.8
        maxGamma = 20.0
        gammaGuess = 2.0
        y = numpy.asarray(y)
        minLum = y[0]
        maxLum = y[-1]
        if self.eq == 4:
            aGuess = old_div(minLum, 5.0)
            kGuess = (maxLum - aGuess)**(old_div(1.0, gammaGuess)) - aGuess
            guess = [gammaGuess, aGuess, kGuess]
            bounds = [[0.8, 5.0], [0.00001, minLum - 0.00001], [2, 200]]
        else:
            guess = [gammaGuess]
            bounds = [[0.8, 5.0]]
        # gamma = optim.fmin(self.fitGammaErrFun, guess, (x, y, minLum, maxLum))
        # gamma = optim.fminbound(self.fitGammaErrFun,
        #    minGamma, maxGamma,
        #    args=(x,y, minLum, maxLum))
        params = optim.fmin_tnc(self.fitGammaErrFun, numpy.array(guess),
                                approx_grad=True,
                                args=(x, y, minLum, maxLum),
                                bounds=bounds, messages=0)
        return minLum, maxLum, params[0]
Example #7
def GetSn(fluor, range_ff=[0.25, 0.5], method='logmexp'):
    """
    Estimate noise power through the power spectral density over the range of large frequencies

    Inputs:
    ----------

    fluor    : np.ndarray
        One dimensional array containing the fluorescence intensities with
        one entry per time-bin.

    range_ff : (1,2) array, nonnegative, max value <= 0.5
        range of frequency (x Nyquist rate) over which the spectrum is averaged

    method   : string
        method of averaging: mean, median, or exponentiated mean of log values ('logmexp', default)

    Returns:
    -----------
    sn       : noise standard deviation
    """

    ff, Pxx = scipy.signal.welch(fluor)
    ind1 = ff > range_ff[0]
    ind2 = ff < range_ff[1]
    ind = np.logical_and(ind1, ind2)
    Pxx_ind = Pxx[ind]
    sn = {
        'mean': lambda Pxx_ind: np.sqrt(np.mean(old_div(Pxx_ind, 2))),
        'median': lambda Pxx_ind: np.sqrt(np.median(old_div(Pxx_ind, 2))),
        'logmexp': lambda Pxx_ind: np.sqrt(np.exp(np.mean(np.log(old_div(Pxx_ind, 2)))))
    }[method](Pxx_ind)

    return sn
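# Rough usage sketch for GetSn: estimate the noise level of a synthetic trace
# (np, scipy.signal and old_div are assumed to be imported in this module).
rng = np.random.RandomState(0)
trace = np.sin(np.linspace(0, 20, 2048)) + 0.5 * rng.randn(2048)
print(GetSn(trace))   # roughly 0.5, the standard deviation of the injected noise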
    def test_call_with_linear_momentum_fix(self):
        toy_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=[1.0, 2.0],
            subset_mask=[1, 2],
            remove_linear_momentum=True
        )
        new_toy_snap = toy_modifier(self.toy_snapshot)
        velocities = new_toy_snap.velocities
        momenta = velocities * new_toy_snap.masses[:, np.newaxis]
        assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
        double_ke = sum(sum(momenta * velocities))
        assert_almost_equal(double_ke, 86.0)

        u_vel = old_div(u.nanometer, u.picosecond)
        u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)

        openmm_modifier = SingleAtomVelocityDirectionModifier(
            delta_v=1.2*u_vel,
            remove_linear_momentum=False
        )
        new_openmm_snap = openmm_modifier(self.openmm_snap)
        velocities = new_openmm_snap.velocities
        momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
        zero_momentum = 0 * u_vel * u_mass
        total_momenta = sum(momenta, zero_momentum)
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*3) * u_vel * u_mass)
Example #9
 def applyNormalization(self):
     nrow, ncol, _ = self.data.shape
     # column-wise normalization factor
     self.colcal = np.zeros((ncol,))
     for col in np.arange(ncol):
         rowmask = self.datamask[:, col]
         if not rowmask.any():
             continue
         self.colcal[col] = np.mean(
             self.data[rowmask.nonzero(), col, 0:old_div(self.ntime, 20)])
     # boxcar filter column-wise normalization factor
     tmp = self.colcal.copy()
     for col in np.arange(ncol):
         if col == 0 or col == ncol - 1:
             continue
         d = self.colcal[col - 1:col + 2]
         if np.count_nonzero(d) != 3:
             continue
         tmp[col] = np.mean(d)
     self.colcal = tmp.copy()
     # apply normalization to data array
     for row in np.arange(nrow):
         for col in np.arange(ncol):
             if self.datamask[row, col]:
                 self.data[row, col, :] = self.data[row, col, :] * \
                     self.colcal[col] / \
                     np.mean(self.data[row, col, 0:old_div(self.ntime, 20)])
    def OnRenderEvent(self, renderer, event):

        x, y, z = self.GetPosition()

        if renderer not in self._ActorDict:
            return

        actors = self._ActorDict[renderer]
        if actors:
            camera = renderer.GetActiveCamera()
            if camera.GetParallelProjection():
                worldsize = camera.GetParallelScale()
            else:
                cx, cy, cz = camera.GetPosition()
                worldsize = math.sqrt((x - cx) ** 2 + (y - cy) ** 2 +
                                      (z - cz) ** 2) * \
                    math.tan(0.5 * camera.GetViewAngle() / 57.296)
            windowWidth, windowHeight = renderer.GetSize()
            if windowWidth > 0 and windowHeight > 0:
                pitch = old_div(worldsize, windowHeight)
                for actor in actors:
                    # ignore resize of caption
                    if not actor.IsA('vtkCaptionActor2D'):
                        old_pitch = actor.GetScale()[0]
                        ratio = old_div(pitch, float(old_pitch))
                        if ratio > 1.5 or ratio < 0.66:
                            actor.SetScale(pitch)
    def test_remove_momentum_rescale_energy_openmm(self):
        # don't actually need to do everything with OpenMM, but do need to
        # add units
        u_vel = old_div(u.nanometer, u.picosecond)
        u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
        u_energy = old_div(u.kilojoule_per_mole, u.AVOGADRO_CONSTANT_NA)

        velocities = \
                np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]]) * u_vel
        masses = np.array([1.0, 1.5, 4.0]) * u_mass
        new_vel = self.openmm_modifier._remove_linear_momentum(
            velocities=velocities,
            masses=masses
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta, new_momenta[0])
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)

        new_vel = self.openmm_modifier._rescale_kinetic_energy(
            velocities=velocities,
            masses=masses,
            double_KE=20.0 * u_energy
        )
        new_momenta = new_vel * masses[:, np.newaxis]
        total_momenta = sum(new_momenta, new_momenta[0])
        zero_energy = 0.0 * u_energy
        new_ke = sum(sum(new_momenta * new_vel, zero_energy), zero_energy)
        # tests require that the linear momentum be 0, and KE be correct
        assert_array_almost_equal(total_momenta,
                                  np.array([0.0]*2) * u_vel * u_mass)
        assert_equal(new_ke.unit, (20.0 * u_energy).unit)
        assert_almost_equal(new_ke._value, (20.0 * u_energy)._value)
Example #12
 def _getNextFrame(self):
     """get next frame info ( do not decode frame yet)
     """
     while self.status == PLAYING:
         if self._video_stream.grab():
             self._prev_frame_index = self._next_frame_index
             self._prev_frame_sec = self._next_frame_sec
             self._next_frame_index = self._video_stream.get(
                 cv2.CAP_PROP_POS_FRAMES)
             self._next_frame_sec = old_div(self._video_stream.get(
                 cv2.CAP_PROP_POS_MSEC), 1000.0)
             self._video_perc_done = self._video_stream.get(
                 cv2.CAP_PROP_POS_AVI_RATIO)
             self._next_frame_displayed = False
             halfInterval = old_div(self._inter_frame_interval, 2.0)
             if self.getTimeToNextFrameDraw() > -halfInterval:
                 return self._next_frame_sec
             else:
                 self.nDroppedFrames += 1
                 if self.nDroppedFrames < reportNDroppedFrames:
                     msg = "MovieStim2 dropping video frame index: %d"
                     logging.warning(msg % self._next_frame_index)
                 elif self.nDroppedFrames == reportNDroppedFrames:
                     msg = ("Multiple Movie frames have occurred - "
                            "I'll stop bothering you about them!")
                     logging.warning(msg)
         else:
             self._onEos()
             break
Example #13
    def partition_FOV_KMeans(self, tradeoff_weight=.5, fx=.25, fy=.25, n_clusters=4, max_iter=500):
        """
        Partition the FOV into clusters that group pixels close in space and in mutual correlation

        Parameters
        ------------------------------
        tradeoff_weight: between 0 and 1, weights the contributions of distance and correlation in the overall metric
        fx,fy: downsampling factors to apply to the movie
        n_clusters,max_iter: KMeans algorithm parameters

        Outputs
        -------------------------------
        fovs: 2D array encoding the partitions of the FOV
        mcoef: matrix of pairwise correlation coefficients
        distanceMatrix: matrix of pixel distances

        """

        _, h1, w1 = self.shape
        self.resize(fx, fy)
        T, h, w = self.shape
        Y = np.reshape(self, (T, h * w))
        mcoef = np.corrcoef(Y.T)
        idxA, idxB = np.meshgrid(list(range(w)), list(range(h)))
        coordmat = np.vstack((idxA.flatten(), idxB.flatten()))
        distanceMatrix = euclidean_distances(coordmat.T)
        distanceMatrix = old_div(distanceMatrix, np.max(distanceMatrix))
        estim = KMeans(n_clusters=n_clusters, max_iter=max_iter)
        kk = estim.fit(tradeoff_weight * mcoef - (1 - tradeoff_weight) * distanceMatrix)
        labs = kk.labels_
        fovs = np.reshape(labs, (h, w))
        fovs = cv2.resize(np.uint8(fovs), (w1, h1), old_div(1., fx), old_div(1., fy),
                          interpolation=cv2.INTER_NEAREST)
        return np.uint8(fovs), mcoef, distanceMatrix
Example #14
def combine(items, k=None):
    """
    Create a matrix in which each row is a tuple containing one of the
    solutions, or return only the k-th solution when ``k`` is given.
    """
    length_items = len(items)
    lengths = [len(i) for i in items]
    length = reduce(lambda x, y: x * y, lengths)
    repeats = [reduce(lambda x, y: x * y, lengths[i:])
               for i in range(1, length_items)] + [1]
    if k is not None:
        k = k % length
        # Integer (floor) division is intended here; old_div keeps the Python 2 behaviour
        indices = [old_div((k % (lengths[i] * repeats[i])), repeats[i])
                   for i in range(length_items)]
        return [items[i][indices[i]] for i in range(length_items)]
    else:
        matrix = []
        for i, item in enumerate(items):
            row = []
            for subset in item:
                row.extend([subset] * repeats[i])
            times = old_div(length, len(row))
            matrix.append(row * times)
        # Transpose the matrix, i.e. return the columns instead of rows
        return list(zip(*matrix))
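# Worked example for combine (pure Python apart from old_div; functools.reduce
# is assumed to be imported as `reduce` in this module).
print(combine([[1, 2], ['a', 'b', 'c']]))
# [(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c')]
print(combine([[1, 2], ['a', 'b', 'c']], k=4))
# [2, 'b']  -- matches the tuple at index 4 of the full matrix above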
Example #15
def test_XSFANode():
    T = 5000
    N = 3
    src = numx_rand.random((T, N))*2-1
    # create three sources with different speeds
    fsrc = numx_fft.rfft(src, axis=0)

    for i in range(N):
        fsrc[(i+1)*(old_div(T,10)):, i] = 0.

    src = numx_fft.irfft(fsrc,axis=0)
    src -= src.mean(axis=0)
    src /= src.std(axis=0)

    #mix = sigmoid(numx.dot(src, mdp.utils.random_rot(3)))
    mix = src

    flow = mdp.Flow([mdp.nodes.XSFANode()])
    # let's also test chunk-mode training
    flow.train([[mix[:old_div(T,2), :], mix[old_div(T,2):, :]]])

    out = flow(mix)
    #import bimdp
    #tr_filename = bimdp.show_training(flow=flow,
    #                                  data_iterators=[[mix[:T/2, :], mix[T/2:, :]]])
    #ex_filename, out = bimdp.show_execution(flow, x=mix)

    corrs = mdp.utils.cov_maxima(mdp.utils.cov2(out, src))
    assert min(corrs) > 0.8, ('source/estimate minimal'
                              ' covariance: %g' % min(corrs))
Example #16
    def draw_fft(self):
        """
        This method is slow.
        But, computers are fast.
        """
        #start = time.clock()
        if self.btn_receiving_audio.text() != "":
            self.btn_receiving_audio.setText("FFT Data Streaming")
        fft_data = self.mixer.audio.fft[0]
        self.fft_max.append(max(fft_data))
        if len(self.fft_max) > 64:
            self.fft_max.pop(0)
        max_val = max(self.fft_max)

        width = 256
        height = 64

        if self.fft_pixmap is None:
            self.fft_pixmap = np.full([height, width * 4], 0, dtype=np.uint8)

        for row in range(height - 1):
            self.fft_pixmap[row] = self.fft_pixmap[row + 1]

        if max_val > 0:
            for x in range(0, width * 4, 4):
                f = np.interp(old_div(x, 4), np.arange(len(fft_data)), fft_data)# / max_val
                #f = math.sqrt(math.sqrt(f))
                self.fft_pixmap[height - 1][x:x + 4] = \
                    (hsv_float_to_rgb_uint8((old_div(x, (4.0 * width)), 1.0, f)) + (255,))


        pm = self.fft_pixmap.flatten()
        img = QImage(pm, width, height, QImage.Format_ARGB32)
        self.fft_graphics_view.setPixmap(QPixmap.fromImage(img))
Example #17
def train(nn, reader, args):
    """Trains a neural network for the selected task."""
    num_sents = len(reader.sentences)
    logger.debug("----------------------------------------------------")
    logger.debug("Starting training with %d sentences" % num_sents)
    
    avg_len = old_div(sum(len(x) for x in reader.sentences), float(num_sents))
    logger.debug("Average sentence length is %.2f tokens" % avg_len)
    
    logger.debug("Network learning rate: %.2f" % nn.learning_rate)
    logger.debug("L2 normalization factor set to %.2f" % nn.l2_factor)
    logger.debug("Dropout factor set to %.2f" % nn.dropout)
    logger.debug("Maximum weight norm set to %.2f (0 means disabled)" % nn.max_norm)
    logger.debug("----------------------------------------------------\n")
    
    intervals = max(old_div(args.iterations, 200), 1)
    np.seterr(over='raise', divide='raise', invalid='raise')
    
    if args.task.startswith('srl') and args.task != 'srl_predicates':
        arg_limits = None if args.task != 'srl_classify' else reader.arg_limits
        
        nn.train(reader.sentences, reader.predicates, reader.tags, 
                 args.iterations, intervals, args.accuracy, arg_limits)
    
    elif args.task.endswith('dependency'): 
        if args.labeled:
            nn.train(reader.sentences, reader.heads, args.iterations, 
                     intervals, args.accuracy, reader.labels)
        else:
            nn.train(reader.sentences, reader.heads, args.iterations, 
                     intervals, args.accuracy)

    else:
        nn.train(reader.sentences, reader.tags, 
                 args.iterations, intervals, args.accuracy)
Example #18
def newton_Bt ( psixy, Rxy, Btxy, Bpxy, pxy, hthe, mesh):
    #global  psi, a, b
    MU = 4.e-7*numpy.pi
  
    s = numpy.shape(Rxy)
    nx = s[0]
    ny = s[1]
  
    axy = old_div(DDX(psixy, Rxy), Rxy)
    bxy = MU*DDX(psixy, pxy) - Bpxy*DDX(psixy, Bpxy*hthe)/hthe
        
    Btxy2 = numpy.zeros((nx, ny))
    for i in range (ny) :
        psi = psixy[:,i]
        a = axy[:,i]
        b = bxy[:,i]
        print("Solving f for y=", i)
        sol=root(Bt_func, Btxy[:,i], args=(psi, a, b) )
        Btxy2[:,i] = sol.x
        
       
  
    # Average f over flux surfaces
    fxy = surface_average(Btxy2*Rxy, mesh)
    
    return old_div(fxy, Rxy)
Example #19
def _demixing_matrix(dataset):
    """Calculate the linear transformation to demix two channels.

    Parameters
    ----------
    dataset : ImagingDataset
        The dataset which is to be demixed. This must have two channels.

    Returns
    -------
    array
        Matrix by which the data can be (left) multiplied to be demixed.

    """

    from mdp.nodes import FastICANode

    # Make matrix of the time averaged channels.
    time_avgs = np.concatenate(
        [im.reshape(-1, 1) for im in dataset.time_averages], axis=1)

    # Perform ICA on the time averaged data.
    node = FastICANode()  # TODO: switch to ICA from scikit-learn ?
    node(time_avgs)
    W = np.dot(node.white.v, node.filters).T

    # Reorder and normalize the rows so that the diagonal coefficients
    # are 1 and the off diagonals have minimal magnitude.
    if abs(old_div(W[0, 0], W[0, 1])) < abs(old_div(W[1, 0], W[1, 1])):
        W = W[::-1]
    W[0] /= W[0, 0]
    W[1] /= W[1, 1]
    assert np.allclose(np.diag(W), 1.)

    return W
Example #20
def order_components(A, C):
    """Order components based on their maximum temporal value and size

    Parameters:
    -----------
    A:   sparse matrix (d x K)
         spatial components

    C:   matrix or np.ndarray (K x T)
         temporal components

    Returns:
    -------
    A_or:  np.ndarray
        ordered spatial components

    C_or:  np.ndarray
        ordered temporal components

    srt:   np.ndarray
        sorting mapping

    """
    A = np.array(A.todense())
    nA2 = np.sqrt(np.sum(A**2, axis=0))
    K = len(nA2)
    A = np.array(np.matrix(A) * spdiags(old_div(1, nA2), 0, K, K))
    nA4 = np.sum(A**4, axis=0)**0.25
    C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
    mC = np.ndarray.max(np.array(C), axis=1)
    srt = np.argsort(nA4 * mC)[::-1]
    A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
    C_or = spdiags(old_div(1., nA2[srt]), 0, K, K) * (C[srt, :])

    return A_or, C_or, srt
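# Minimal usage sketch for order_components with tiny random inputs (np,
# scipy.sparse.spdiags and old_div are assumed to be imported in this module).
from scipy.sparse import csc_matrix
rng = np.random.RandomState(1)
A = csc_matrix(rng.rand(50, 3))        # d x K spatial components
C = rng.rand(3, 100)                   # K x T temporal components
A_or, C_or, srt = order_components(A, C)
print(A_or.shape, C_or.shape, srt)     # (50, 3) (3, 100) and a permutation of [0 1 2]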
Example #21
def force_balance ( psixy, Rxy, Bpxy, Btxy, hthe, pxy):
    MU =4.e-7*numpy.pi
  
    a = old_div(DDX(psixy, Rxy), Rxy)
    b = MU*DDX(psixy, pxy) - Bpxy*DDX(psixy, Bpxy*hthe)/hthe
  
    return DDX(psixy, Btxy) + a*Btxy + old_div(b,Btxy)
Example #22
def fft_deriv(var):
    #on_error, 2

    n = numpy.size(var)

    F = old_div(numpy.fft.fft(var), n)  #different definition between IDL - python

    imag = numpy.complex128(1j)  # numpy.complex / numpy.complex_ are deprecated aliases

    F[0] = 0.0

    if (n % 2) == 0:
        # even number
        for i in range(1, old_div(n, 2)):
            a = imag*2.0*numpy.pi*float(i)/float(n)
            F[i] = F[i] * a         # positive frequencies
            F[n-i] = - F[n-i] * a   # negative frequencies

        F[old_div(n, 2)] = F[old_div(n, 2)] * (imag*numpy.pi)
    else:
        # odd number
        for i in range(1, old_div((n-1), 2)+1):
            a = imag*2.0*numpy.pi*float(i)/float(n)
            F[i] = F[i] * a
            F[n-i] = - F[n-i] * a

    result = numpy.fft.ifft(F)*n  #different definition between IDL - python

    return result
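# Quick check of fft_deriv (numpy and old_div assumed imported): it returns the
# derivative with respect to the sample index over one full period, so for
# var = sin(2*pi*j/n) the result should be (2*pi/n) * cos(2*pi*j/n).
n = 64
theta = 2.0 * numpy.pi * numpy.arange(n) / n
d = fft_deriv(numpy.sin(theta))
print(numpy.allclose(d.real, (2.0 * numpy.pi / n) * numpy.cos(theta)))  # True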
Example #23
def cart2sph(z, y, x):
    """Convert from cartesian coordinates (x,y,z) to spherical (elevation,
    azimuth, radius). Output is in degrees.

    usage:
        array3xN[el,az,rad] = cart2sph(array3xN[x,y,z])
        OR
        elevation, azimuth, radius = cart2sph(x,y,z)

        If working in DKL space, z = Luminance, y = S and x = LM
    """
    width = len(z)

    elevation = numpy.empty([width, width])
    radius = numpy.empty([width, width])
    azimuth = numpy.empty([width, width])

    radius = numpy.sqrt(x**2 + y**2 + z**2)
    azimuth = numpy.arctan2(y, x)
    # Calculating the elevation from x,y up
    elevation = numpy.arctan2(z, numpy.sqrt(x**2 + y**2))

    # convert azimuth and elevation angles into degrees
    azimuth *= (old_div(180.0, numpy.pi))
    elevation *= (old_div(180.0, numpy.pi))

    sphere = numpy.array([elevation, azimuth, radius])
    sphere = numpy.rollaxis(sphere, 0, 3)

    return sphere
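# Tiny numeric check of cart2sph (numpy and old_div assumed imported). The
# function expects square 2-D inputs (e.g. a grid in DKL space); a 1x1 grid
# holding a unit vector along +x gives elevation 0, azimuth 0, radius 1.
z = numpy.array([[0.0]])
y = numpy.array([[0.0]])
x = numpy.array([[1.0]])
print(cart2sph(z, y, x))   # [[[ 0.  0.  1.]]]  i.e. (elevation, azimuth, radius)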
Example #24
def gomoryCut(lp, integerIndices = None, sense = '>=', sol = None,
              rowInds = None, value = None, epsilon = .01):
    '''Return the Gomory cut of rows in ``rowInds`` of lp 
    (a CyClpSimplex object)'''
    cuts = []
    if sol is None:
        sol = lp.primalVariableSolution['x']
    if rowInds is None:
        rowInds = list(range(lp.nConstraints))
    if integerIndices is None:
        integerIndices = list(range(lp.nVariables))
    for row in rowInds:
        basicVarInd = lp.basicVariables[row]
        if (basicVarInd in integerIndices) and (not isInt(sol[basicVarInd], epsilon)):
            f0 = getFraction(sol[basicVarInd])
            f = []
            for i in range(lp.nVariables):
                if i in lp.basicVariables:
                    #This is to try to avoid getting very small numbers that 
                    #should be zero
                    f.append(0)
                else:
                    f.append(getFraction(lp.tableau[row, i]))
            pi = np.array([old_div(f[j],f0) if f[j] <= f0 
                           else old_div((1-f[j]),(1-f0)) for j in range(lp.nVariables)])
            pi_slacks = np.array([old_div(x,f0) if x > 0 else old_div(-x,(1-f0))  
                                 for x in lp.tableau[row, lp.nVariables:]])
            pi -= pi_slacks * lp.coefMatrix
            pi0 = (1 - np.dot(pi_slacks, lp.constraintsUpper) if sense == '<='
                   else 1 + np.dot(pi_slacks, lp.constraintsUpper))
            if sense == '>=':
                cuts.append((pi, pi0))
            else:
                cuts.append((-pi, -pi0))
    return cuts, []
Example #25
def back_translate(aln, nuc, alphabet):
    'Back translate a nucleotide sequence based on an amino acid (gapped) sequence'
    prot_seq = aln.seq
    nucl_seq = nuc.seq
    gaps = 0
    bt_seq = ''
    if len(nucl_seq)%3 != 0:
        print('Nucleotide sequence is not divisible by 3, removing excess nucleotides.')
        nucl_seq = nucl_seq[:-(len(nucl_seq)%3)]
    if old_div(len(nucl_seq), 3) < len(str(prot_seq).replace('-', '')):
        print(old_div(len(nucl_seq), 3), str(prot_seq).replace('-', ''))
        raise ValueError('Nucleotide sequence is smaller than protein sequence times 3!')
    for n, aa in enumerate(list(prot_seq)):
        if aa == '-':
            bt_seq += '---'
            gaps += 1
        else:
            pos = n * 3 - gaps * 3
            codon = nucl_seq[pos:pos+3]
            translated = codon.translate(table=alphabet)
            if aa != str(translated):
                print('Translation error!')
                print('aminoacid/position:', aa, n)
                print('codon/translated', codon, translated)
                print(nuc.id, aln.id)
                return 0
            bt_seq += str(codon)
    return bt_seq
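# Hypothetical usage sketch for back_translate with Biopython records. The
# function above only needs objects exposing .seq and .id plus a codon table
# accepted by Seq.translate; SeqRecord and table 1 are assumptions here.
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

prot = SeqRecord(Seq("M-K"), id="prot1")    # gapped amino-acid alignment row
nucl = SeqRecord(Seq("ATGAAA"), id="nuc1")  # ungapped coding sequence
print(back_translate(prot, nucl, 1))        # ATG---AAA (gaps become '---')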
    def __init__(self, window, warper):
        self.window = window
        self.warper = warper

        self.stimT = TextStim(self.window, text='Null warper', units = 'pix', pos=(0, -140), alignHoriz='center', height=20)

        self.bl = old_div(-window.size, 2.0)
        self.tl = (self.bl[0], -self.bl[1])
        self.tr = old_div(window.size, 2.0)

        self.stims = []
        self.degrees = 120
        nLines = 12
        for x in range(-nLines, nLines+1):
            t = GratingStim(window,tex=None,units='deg',size=[2,window.size[1]],texRes=128,color=foregroundColor, pos=[float(x) / nLines * self.degrees,0])
            self.stims.append (t)

        for y in range (-nLines, nLines+1):
            t = GratingStim(window,tex=None,units='deg',size=[window.size[0],2],texRes=128,color=foregroundColor,pos=[0,float(y)/nLines * self.degrees])
            self.stims.append (t)

        for c in range (1, nLines+1):
            t = Circle (window, radius=c * 10, edges=128, units='deg', lineWidth=4)
            self.stims.append (t)

        self.updateInfo()

        self.keys = key.KeyStateHandler()
        window.winHandle.push_handlers(self.keys)
        self.mouse = event.Mouse(win=self.window)
Example #27
def branin():
    """
    The Branin, or Branin-Hoo, function has three global minima,
    and is roughly an angular trough across a 2D input space.

        f(x, y) = a (y - b x ** 2 + c x - r ) ** 2 + s (1 - t) cos(x) + s

    The recommended values of a, b, c, r, s and t are:
        a = 1
        b = 5.1 / (4 pi ** 2)
        c = 5 / pi
        r = 6
        s = 10
        t = 1 / (8 * pi)

    Global Minima:
      [(-pi, 12.275),
       (pi, 2.275),
       (9.42478, 2.475)]

    Source: http://www.sfu.ca/~ssurjano/branin.html
    """
    x = hp.uniform('x', -5., 10.)
    y = hp.uniform('y', 0., 15.)
    pi = float(np.pi)
    loss = ((y - (old_div(5.1, (4 * pi ** 2))) * x ** 2 + 5 * x / pi - 6) ** 2 +
            10 * (1 - old_div(1, (8 * pi))) * scope.cos(x) + 10)
    return {'loss': loss,
            'loss_variance': 0,
            'status': base.STATUS_OK}
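# Plain-numpy spot check of the Branin formula used above, evaluated at the
# known minimizer (pi, 2.275); the minimum value is about 0.397887. This check
# is independent of hyperopt's hp/scope machinery.
import numpy as np
bx, by = np.pi, 2.275
f = ((by - 5.1 / (4 * np.pi ** 2) * bx ** 2 + 5 * bx / np.pi - 6) ** 2
     + 10 * (1 - 1 / (8 * np.pi)) * np.cos(bx) + 10)
print(round(f, 6))   # 0.397887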
Example #28
    def __init__(self, low, high, q):
        low, high, q = list(map(float, (low, high, q)))
        qlow = np.round(old_div(low, q)) * q
        qhigh = np.round(old_div(high, q)) * q
        if qlow == qhigh:
            xs = [qlow]
            ps = [1.0]
        else:
            lowmass = 1 - (old_div((low - qlow + .5 * q), q))
            assert 0 <= lowmass <= 1.0, (lowmass, low, qlow, q)
            highmass = old_div((high - qhigh + .5 * q), q)
            assert 0 <= highmass <= 1.0, (highmass, high, qhigh, q)
            # -- xs: qlow to qhigh inclusive
            xs = np.arange(qlow, qhigh + .5 * q, q)
            ps = np.ones(len(xs))
            ps[0] = lowmass
            ps[-1] = highmass
            ps /= ps.sum()

        self.low = low
        self.high = high
        self.q = q
        self.qlow = qlow
        self.qhigh = qhigh
        self.xs = np.asarray(xs)
        self.ps = np.asarray(ps)
def link_functions_gaussian():
  print("Read in prostate data.")
  h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
  h2o_data.head()

  sm_data = pd.read_csv(zipfile.ZipFile(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip")).
                        open("prostate_complete.csv")).as_matrix()
  sm_data_response = sm_data[:,9]
  sm_data_features = sm_data[:,1:9]

  print("Testing for family: GAUSSIAN")
  print("Set variables for h2o.")
  myY = "GLEASON"
  myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]

  print("Create models with canonical link: IDENTITY")
  h2o_model = H2OGeneralizedLinearEstimator(family="gaussian", link="identity",alpha=0.5, Lambda=0)
  h2o_model.train(x=myX, y=myY, training_frame=h2o_data)
  sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features,
                    family=sm.families.Gaussian(sm.families.links.identity)).fit()

  print("Compare model deviances for link function identity")
  h2o_deviance = old_div(h2o_model.residual_deviance(), h2o_model.null_deviance())
  sm_deviance = old_div(sm_model.deviance, sm_model.null_deviance)
  assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measure"
Example #30
def _colorf(color,alpha,default):
    if color is None:
        return (default,default,default,alpha)
    if type(color) is dom3ds.COLOR_24:
        return (old_div(color.red,255.0),old_div(color.green,255.0),
                old_div(color.blue,255.0),alpha)
    return (color.red,color.green,color.blue,alpha)
Example #31
                                    ISI=.25,
                                    draw_rois=False,
                                    plot_traces=False,
                                    mov_filt_1d=True,
                                    window_lp=5)
    t_end = time() - t_start
    print(t_end)
    #%%
    np.savez(base_folder + 'behavioral_traces.npz', **res_bt)
    #%%
    with np.load(base_folder + 'behavioral_traces.npz') as ld:
        res_bt = dict(**ld)
    #%%
    pl.close()
    tm = res_bt['time']
    f_rate_bh = old_div(1, np.median(np.diff(tm)))
    ISI = res_bt['trial_info'][0][3] - res_bt['trial_info'][0][2]
    eye_traces = np.array(res_bt['eyelid'])
    idx_CS_US = res_bt['idx_CS_US']
    idx_US = res_bt['idx_US']
    idx_CS = res_bt['idx_CS']

    idx_ALL = np.sort(np.hstack([idx_CS_US, idx_US, idx_CS]))
    eye_traces, amplitudes_at_US, trig_CRs = gc.process_eyelid_traces(
        eye_traces,
        tm,
        idx_CS_US,
        idx_US,
        idx_CS,
        thresh_CR=.15,
        time_CR_on=-.1,
Example #32
def episodios(item):
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url, canonical=canonical).data
    scrapedplot = scrapertools.find_single_match(
        data, '<meta name="description" content="([^"]+)"/>')
    scrapedthumbnail = scrapertools.find_single_match(
        data, '<div class="separedescrip">.*?src="([^"]+)"')
    idserie = scrapertools.find_single_match(
        data, r"ajax/pagination_episodes/(\d+)/")
    logger.info("idserie=" + idserie)
    if " Eps" in item.extra and "Desc" not in item.extra:
        caps_x = item.extra
        caps_x = caps_x.replace(" Eps", "")
        capitulos = int(caps_x)
        paginas = old_div(capitulos, 10) + (capitulos % 10 > 0)
    else:
        paginas, capitulos = get_pages_and_episodes(data)
    for num_pag in range(1, paginas + 1):
        numero_pagina = str(num_pag)
        headers = {"Referer": item.url}
        data2 = httptools.downloadpage(host +
                                       "ajax/pagination_episodes/%s/%s/" %
                                       (idserie, numero_pagina),
                                       headers=headers,
                                       canonical=canonical).data
        patron = r'"number"\:"(\d+)","title"\:"([^"]+)"'
        matches = scrapertools.find_multiple_matches(data2, patron)
        for numero, scrapedtitle in matches:
            try:
                int(numero.strip())
            except:
                pass
            infoLabels = item.infoLabels
            infoLabels["season"] = 1
            infoLabels["episode"] = numero
            title = scrapedtitle.strip()
            url = item.url + numero
            plot = scrapedplot
            itemlist.append(
                item.clone(action="findvideos",
                           infoLabels=infoLabels,
                           title=title,
                           url=url,
                           plot=plot))
    if len(itemlist) == 0:
        try:
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title="Serie por estrenar",
                     url="",
                     thumbnail=scrapedthumbnail,
                     fanart=scrapedthumbnail,
                     plot=scrapedplot,
                     server="directo",
                     folder=False))
        except:
            pass
    tmdb.set_infoLabels(itemlist, True)
    return itemlist
Example #33
    def read_bytes(
        self
    ):  # return a tuple with boolean for OK and array of bytes (isOK, List)

        MAXCOUNT = self.MAXCOUNT
        MAXSAMPLING = self.MAXSAMPLING

        #Set pin to output.
        GPIO.setup(self._dout_pin, GPIO.OUT)
        GPIO.output(self._dout_pin, 1)
        time.sleep(0.001)
        # Set pin low for _t_init_low. This tells the sensor to start measuring and get back the data
        GPIO.output(self._dout_pin, 0)
        time.sleep(self._t_init_low)
        GPIO.output(self._dout_pin, 1)
        time.sleep(0.001)
        # Set pin to input, ready to receive data (pull-up configuration)
        GPIO.setup(self._dout_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

        cyclewait = 0.001
        numcycles = int(old_div(self._t_wait_sensor, cyclewait))
        print("number of cycles --------------------------->", numcycles)

        # Wait for sensor to pull pin low.
        count = 0
        while (GPIO.input(self._dout_pin)) and (numcycles > count):
            count = count + 1
            time.sleep(cyclewait)

        print("Count --------------------------->", count)
        if (count >= numcycles):
            # Timeout waiting for response.
            print(
                "error reading the SlowWire sensor: Wait too long for sensor answer"
            )
            logger.error(
                "error reading the SlowWire sensor: Wait too long for sensor answer"
            )
            return False, 0

        # Record pulse widths for the self.PULSES bits expected from the sensor
        LowpulseCounts = []
        HighpulseCounts = []
        n = MAXSAMPLING
        exitcondition = False
        while (n > 0) and (not exitcondition):
            #for i in range(0,self.PULSES*2,2): # i starts from zero and increase by +2
            # Count how long pin is low and store in pulseCounts[i]
            thispulsecount = 0
            while (not GPIO.input(self._dout_pin)) and (not exitcondition):
                thispulsecount = thispulsecount + 1
                time.sleep(0.0001)
                if (thispulsecount >= MAXCOUNT):
                    # Timeout waiting for pulse length.
                    exitcondition = True
            if (not exitcondition) and (thispulsecount):
                LowpulseCounts.append(thispulsecount)

            # Count how long pin is high and store in pulseCounts[i+1]
            thispulsecount = 0
            while GPIO.input(self._dout_pin) and (not exitcondition):
                thispulsecount = thispulsecount + 1
                time.sleep(0.0001)
                if (thispulsecount >= MAXCOUNT):
                    # Timeout waiting for pulse length.
                    exitcondition = True
            if (not exitcondition) and (thispulsecount):
                HighpulseCounts.append(thispulsecount)

        print("High pulse count ------------------------------------>",
              HighpulseCounts)
        #check data consistency:
        if len(HighpulseCounts) > 7:
            print("lengths High=%d Low=%d" %
                  (len(HighpulseCounts), len(LowpulseCounts)))
            if not ((len(HighpulseCounts) + 1) == len(LowpulseCounts)):
                #data mismatch
                print("error reading the SlowWire sensor: Data mismatch ")
                logger.error(
                    "error reading the SlowWire sensor: Data mismatch ")
                return False, 0
        else:
            print("error reading the SlowWire sensor: Insufficient data")
            logger.error(
                "error reading the SlowWire sensor: Insufficient data")
            return False, 0

        # Compute the average low pulse width in terms of number of samples
        # Ignore the first reading because it is not relevant.
        threshold = 0
        for i in range(
                1, len(LowpulseCounts)):  # skip the first low pulse
            threshold = threshold + LowpulseCounts[i]

        threshold /= len(LowpulseCounts) - 1
        threshold /= 2
        print(
            "Slow Wire Threshold: -------------------------------------------- ",
            threshold)
        #Interpret each high pulse as a 0 or 1 by comparing it to the average size of the low pulses.

        data = []
        databyte = 0
        # skip the first 1 pulse
        for i in range(1, len(HighpulseCounts)):
            databyte = (databyte >> 1)
            if (HighpulseCounts[i] <= threshold):
                # One bit for long pulse.
                databyte |= 0x80
            # Else zero bit for short pulse.
            if (i % 8 == 0):  # got one byte
                data.append(databyte)
                databyte = 0

        print("Slow Wire Data: -------------------------------------------- ",
              data)
        for item in data:
            print("The hexadecimal data", hex(item))

        # Verify checksum of received data.
        if len(data) >= 2:
            if self.checkCRC(data):
                print("CRC OK --------------------")
                data.pop()  # remove last byte from list as this is the CRC
                return True, data
            else:
                print("error reading the SlowWire sensor: Data Checksum error")
                logger.error(
                    "error reading the SlowWire sensor: Data Checksum error")
                return False, 0
        else:
            print(
                "error reading the SlowWire sensor: Not enough bytes of data")
            logger.error(
                "error reading the SlowWire sensor: Not enough bytes of data")

        return False, 0
Example #34
def genPoly(polyfileBase="blockDomain", nx=4, ny=4, Lx=1.0, Ly=1.0):
    """
    create a simple block domain in 2d
    """
    dx = old_div(Lx, nx)
    dy = old_div(Ly, ny)

    vertices = []
    for j in range(ny + 1):
        for i in range(nx + 1):
            vertices.append((0.0 + dx * i, 0.0 + dy * j))
        #
    #
    nVertices = len(vertices)
    assert nVertices == (nx + 1) * (ny + 1)

    #
    boundaryTags = {
        'bottom': 1,
        'right': 2,
        'top': 3,
        'left': 5,
        'interior': 0
    }

    base = 1
    #write a segment for each edge
    nSegmentsTotal = ny * (nx + 1) + nx * (ny + 1)
    segments = {}
    nSegments = 0
    #xfaces
    #i=0, left, i = Nx right
    for i in range(nx + 1):
        for j in range(ny):
            #vertical edges
            vb = i + j * (nx + 1)
            vt = i + (j + 1) * (nx + 1)
            tag = boundaryTags['interior']
            if i == 0: tag = boundaryTags['left']
            if i == nx: tag = boundaryTags['right']
            #segment number, start vertex, end vertex, id
            segments[nSegments] = (nSegments, vb, vt, tag)
            nSegments += 1
    #
    #yfaces
    #j=0, bottom, j=Ny, top
    for j in range(ny + 1):
        for i in range(nx):
            vl = i + j * (nx + 1)
            vr = i + 1 + j * (nx + 1)
            tag = boundaryTags['interior']
            if j == 0: tag = boundaryTags['bottom']
            if j == ny: tag = boundaryTags['top']
            segments[nSegments] = (nSegments, vl, vr, tag)
            nSegments += 1
    #
    assert nSegments == nSegmentsTotal

    #return a table to identify regions by a unique flag too
    regions = {}
    curRegion = 0
    for j in range(ny):
        for i in range(nx):
            #region number, x,y, region id
            regions[(i, j)] = (curRegion + base, 0. + (i + 0.5) * dx,
                               0. + (j + 0.5) * dy, curRegion)
            curRegion += 1
        #
    #
    polyfile = open(polyfileBase + '.poly', 'w')
    polyfile.write('#poly file for [%s,%s] domain with %s x %s blocks \n' %
                   (Lx, Ly, nx, ny))
    polyfile.write('%d %d %d %d \n' % (nVertices, 2, 1, 0))
    polyfile.write('#vertices \n')
    for iv in range(len(vertices)):
        polyfile.write('%d %12.5e %12.5e %d \n' %
                       (iv + base, vertices[iv][0], vertices[iv][1], 1))
    #

    #write a segment for each edge
    polyfile.write('%d %d \n' % (nSegments, 1))
    polyfile.write('#segments \n')
    for seg in range(nSegments):
        polyfile.write('%d %d %d %d \n ' %
                       (segments[seg][0] + base, segments[seg][1] + base,
                        segments[seg][2] + base, segments[seg][3]))
    polyfile.write('#holes\n 0\n')
    polyfile.write('#regions\n')
    nRegions = nx * ny
    polyfile.write('%d \n' % nRegions)
    curRegion = 0
    for j in range(ny):
        for i in range(nx):
            polyfile.write('%d %12.5e %12.5e %d \n' %
                           (regions[(i, j)][0], regions[(i, j)][1],
                            regions[(i, j)][2], regions[(i, j)][3]))
    #
    polyfile.close()
    return (Lx, Ly, 1.0), boundaryTags, regions
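# Minimal usage sketch: write a 2 x 2 block domain to "blockDomain.poly"
# (old_div from past.utils is assumed to be imported, as the function requires).
L, tags, regions = genPoly(polyfileBase="blockDomain", nx=2, ny=2, Lx=1.0, Ly=1.0)
print(L)              # (1.0, 1.0, 1.0)
print(sorted(tags))   # ['bottom', 'interior', 'left', 'right', 'top']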
Example #35
    def clone(
        self,
        new_counts=None,
        new_count_errors=None,
        new_exposure=None,
        new_scale_factor=None,
    ):
        """
        make a new spectrum with new counts and errors and all other
        parameters the same


        :param new_exposure: the new exposure for the clone
        :param new_scale_factor: the new scale factor for the clone

        :param new_counts: new counts for the spectrum
        :param new_count_errors: new errors from the spectrum
        :return: new pha spectrum
        """

        if new_exposure is None:

            new_exposure = self.exposure

        if new_counts is None:
            new_counts = self.counts
            new_count_errors = self.count_errors

        if new_count_errors is None:
            stat_err = None

        else:

            stat_err = old_div(new_count_errors, new_exposure)

        if self._tstart is None:

            tstart = 0

        else:

            tstart = self._tstart

        if self._tstop is None:

            telapse = new_exposure

        else:

            telapse = self._tstop - tstart

        if new_scale_factor is None:

            new_scale_factor = self.scale_factor

        # create a new PHAII instance

        pha = PHAII(
            instrument_name=self.instrument,
            telescope_name=self.mission,
            tstart=tstart,
            telapse=telapse,
            channel=list(range(1,
                               len(self) + 1)),
            rate=old_div(new_counts, self.exposure),
            stat_err=stat_err,
            quality=self.quality.to_ogip(),
            grouping=self.grouping,
            exposure=new_exposure,
            backscale=new_scale_factor,
            respfile=None,
            ancrfile=None,
            is_poisson=self.is_poisson,
        )

        return pha
Example #36
def _read_pha_or_pha2_file(
    pha_file_or_instance,
    spectrum_number=None,
    file_type="observed",
    rsp_file=None,
    arf_file=None,
    treat_as_time_series=False,
):
    """
    A function to extract information from pha and pha2 files. It is kept separate because the same method is
    used for reading time series (MUCH faster than building a lot of individual spectra) and single spectra.


    :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
    :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
    :param file_type: observed or background
    :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
    :param arf_file: (optional) an ARF filename
    :param treat_as_time_series:
    :return:
    """

    assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
        pha_file_or_instance,
        PHAII), "Must provide a FITS file name or PHAII instance"

    if isinstance(pha_file_or_instance, six.string_types):

        ext = os.path.splitext(pha_file_or_instance)[-1]

        if "{" in ext:
            spectrum_number = int(ext.split("{")[-1].replace("}", ""))

            pha_file_or_instance = pha_file_or_instance.split("{")[0]

        # Read the data

        filename = pha_file_or_instance

        # create a FITS_FILE instance

        pha_file_or_instance = PHAII.from_fits_file(pha_file_or_instance)

    # If this is already a FITS_FILE instance,

    elif isinstance(pha_file_or_instance, PHAII):

        # we simply create a dummy filename

        filename = "pha_instance"

    else:

        raise RuntimeError("This is a bug")

    file_name = filename

    assert file_type.lower() in [
        "observed",
        "background",
    ], "Unrecognized filetype keyword value"

    file_type = file_type.lower()

    try:

        HDUidx = pha_file_or_instance.index_of("SPECTRUM")

    except:

        raise RuntimeError("The input file %s is not in PHA format" %
                           (pha_file_or_instance))

    # spectrum_number = spectrum_number

    spectrum = pha_file_or_instance[HDUidx]

    data = spectrum.data
    header = spectrum.header

    # We don't support yet the rescaling

    if "CORRFILE" in header:

        if (header.get("CORRFILE").upper().strip() !=
                "NONE") and (header.get("CORRFILE").upper().strip() != ""):
            raise RuntimeError("CORRFILE is not yet supported")

    # See if there is a QUALITY==0 in the header

    if "QUALITY" in header:

        has_quality_column = False

        if header["QUALITY"] == 0:

            is_all_data_good = True

        else:

            is_all_data_good = False

    else:

        if "QUALITY" in data.columns.names:

            has_quality_column = True

            is_all_data_good = False

        else:

            has_quality_column = False

            is_all_data_good = True

            warnings.warn(
                "Could not find QUALITY in columns or header of PHA file. This is not a valid OGIP file. Assuming QUALITY =0 (good)"
            )

    # looking for tstart and tstop

    tstart = None
    tstop = None

    has_tstart = False
    has_tstop = False
    has_telapse = False

    if "TSTART" in header:

        has_tstart_column = False

        has_tstart = True

    else:

        if "TSTART" in data.columns.names:

            has_tstart_column = True

            has_tstart = True

    if "TELAPSE" in header:

        has_telapse_column = False

        has_telapse = True

    else:

        if "TELAPSE" in data.columns.names:
            has_telapse_column = True

            has_telapse = True

    if "TSTOP" in header:

        has_tstop_column = False

        has_tstop = True

    else:

        if "TSTOP" in data.columns.names:
            has_tstop_column = True

            has_tstop = True

    if has_tstop and has_telapse:

        warnings.warn(
            "Found TSTOP and TELAPSE. This file is invalid. Using TSTOP.")

        has_telapse = False

    # Determine if this file contains COUNTS or RATES

    if "COUNTS" in data.columns.names:

        has_rates = False
        data_column_name = "COUNTS"

    elif "RATE" in data.columns.names:

        has_rates = True
        data_column_name = "RATE"

    else:

        raise RuntimeError(
            "This file does not contain a RATE nor a COUNTS column. "
            "This is not a valid PHA file")

    # Determine if this is a PHA I or PHA II
    if len(data.field(data_column_name).shape) == 2:

        typeII = True

        if spectrum_number is None and not treat_as_time_series:
            raise RuntimeError(
                "This is a PHA Type II file. You have to provide a spectrum number"
            )

    else:

        typeII = False

    # Collect information from mandatory keywords

    keys = _required_keywords[file_type]

    gathered_keywords = {}

    for k in keys:

        internal_name, keyname = k.split(":")

        key_has_been_collected = False

        if keyname in header:
            if (keyname in _required_keyword_types
                    and type(header.get(keyname))
                    is not _required_keyword_types[keyname]):
                warnings.warn(
                    "unexpected type of %(keyname)s, expected %(expected_type)s\n found %(found_type)s: %(found_value)s"
                    % dict(
                        keyname=keyname,
                        expected_type=_required_keyword_types[keyname],
                        found_type=type(header.get(keyname)),
                        found_value=header.get(keyname),
                    ))
            else:
                gathered_keywords[internal_name] = header.get(keyname)

                # Fix "NONE" in None
                if (gathered_keywords[internal_name] == "NONE"
                        or gathered_keywords[internal_name] == "none"):
                    gathered_keywords[internal_name] = None

                key_has_been_collected = True

        # Note that we check again because the content of the column can override the content of the header

        if keyname in _might_be_columns[file_type] and typeII:

            # Check if there is a column with this name

            if keyname in data.columns.names:
                # This will set the exposure, among other things

                if not treat_as_time_series:

                    # if we just want a single spectrum

                    gathered_keywords[internal_name] = data[keyname][
                        spectrum_number - 1]

                else:

                    # else get all the columns

                    gathered_keywords[internal_name] = data[keyname]

                # Fix "NONE" in None
                if (gathered_keywords[internal_name] == "NONE"
                        or gathered_keywords[internal_name] == "none"):
                    gathered_keywords[internal_name] = None

                key_has_been_collected = True

        if not key_has_been_collected:

            # The keyword POISSERR is a special case, because even if it is missing,
            # it is assumed to be False if there is a STAT_ERR column in the file

            if keyname == "POISSERR" and "STAT_ERR" in data.columns.names:

                warnings.warn(
                    "POISSERR is not set. Assuming non-poisson errors as given in the "
                    "STAT_ERR column")

                gathered_keywords["poisserr"] = False

            elif keyname == "ANCRFILE":

                # Some non-compliant files have no ARF because they don't need one. Don't fail, but issue a
                # warning

                warnings.warn(
                    "ANCRFILE is not set. This is not a compliant OGIP file. Assuming no ARF."
                )

                gathered_keywords["ancrfile"] = None

            elif keyname == "FILTER":

                # Some non-compliant files have no FILTER because they don't need one. Don't fail, but issue a
                # warning

                warnings.warn(
                    "FILTER is not set. This is not a compliant OGIP file. Assuming no FILTER."
                )

                gathered_keywords["filter"] = None

            else:

                raise RuntimeError(
                    "Keyword %s not found. File %s is not a proper PHA "
                    "file" % (keyname, filename))

    is_poisson = gathered_keywords["poisserr"]

    exposure = gathered_keywords["exposure"]

    # now we need to get the response file so that we can extract the EBOUNDS

    if file_type == "observed":

        if rsp_file is None:

            # this means it should be specified in the header
            rsp_file = gathered_keywords["respfile"]

            if arf_file is None:
                arf_file = gathered_keywords["ancrfile"]

                # Read in the response

        if isinstance(rsp_file, six.string_types) or isinstance(rsp_file, str):
            rsp = OGIPResponse(rsp_file, arf_file=arf_file)

        else:

            # assume a fully formed OGIPResponse
            rsp = rsp_file

    if file_type == "background":
        # we need the rsp ebounds from response to build the histogram

        assert isinstance(
            rsp_file, InstrumentResponse
        ), "You must supply an OGIPResponse to extract the energy bounds"

        rsp = rsp_file

    # Now get the data (counts or rates) and their errors. If counts, transform them in rates

    if typeII:

        # PHA II file
        if has_rates:

            if not treat_as_time_series:

                rates = data.field(data_column_name)[spectrum_number - 1, :]

                rate_errors = None

                if not is_poisson:
                    rate_errors = data.field("STAT_ERR")[spectrum_number -
                                                         1, :]

            else:

                rates = data.field(data_column_name)

                rate_errors = None

                if not is_poisson:
                    rate_errors = data.field("STAT_ERR")

        else:

            if not treat_as_time_series:

                rates = old_div(
                    data.field(data_column_name)[spectrum_number - 1, :],
                    exposure)

                rate_errors = None

                if not is_poisson:
                    rate_errors = old_div(
                        data.field("STAT_ERR")[spectrum_number - 1, :],
                        exposure)

            else:
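                # For a time series the exposure is one value per spectrum, so reshape it
                # to a column vector and let numpy broadcasting divide each row of the
                # (n_spectra, n_channels) counts array by its own exposure.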

                rates = old_div(data.field(data_column_name),
                                np.atleast_2d(exposure).T)

                rate_errors = None

                if not is_poisson:
                    rate_errors = old_div(data.field("STAT_ERR"),
                                          np.atleast_2d(exposure).T)

        if "SYS_ERR" in data.columns.names:

            if not treat_as_time_series:

                sys_errors = data.field("SYS_ERR")[spectrum_number - 1, :]

            else:

                sys_errors = data.field("SYS_ERR")

        else:

            sys_errors = np.zeros(rates.shape)

        if has_quality_column:

            if not treat_as_time_series:

                try:

                    quality = data.field("QUALITY")[spectrum_number - 1, :]

                except IndexError:

                    # GBM CSPEC files do not follow OGIP conventions and instead
                    # list simply QUALITY=0 for each spectrum
                    # so we have to read them differently

                    quality_element = data.field("QUALITY")[spectrum_number -
                                                            1]

                    warnings.warn(
                        "The QUALITY column has the wrong shape. This PHAII file does not follow OGIP standards"
                    )

                    if quality_element == 0:

                        quality = np.zeros_like(rates, dtype=int)

                    else:

                        quality = np.zeros_like(rates, dtype=int) + 5

            else:

                # we need to be careful again because the QUALITY column is not always the correct shape

                quality_element = data.field("QUALITY")

                if quality_element.shape == rates.shape:

                    # This is the proper way for the quality to be stored

                    quality = quality_element

                else:

                    quality = np.zeros_like(rates, dtype=int)

                    for i, q in enumerate(quality_element):

                        if q != 0:
                            quality[i, :] = 5

        else:

            if is_all_data_good:

                quality = np.zeros_like(rates, dtype=int)

            else:

                quality = np.zeros_like(rates, dtype=int) + 5
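            # (OGIP quality flags: 0 marks a good channel, 5 marks a channel flagged bad by
            # the user, hence the all-zeros vs. all-fives fallback above.)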

        if has_tstart:

            if has_tstart_column:

                if not treat_as_time_series:

                    tstart = data.field("TSTART")[spectrum_number - 1]

                else:

                    tstart = data.field("TSTART")

        if has_tstop:

            if has_tstop_column:

                if not treat_as_time_series:

                    tstop = data.field("TSTOP")[spectrum_number - 1]

                else:

                    tstop = data.field("TSTOP")

        if has_telapse:

            if has_telapse_column:

                if not treat_as_time_series:

                    tstop = tstart + data.field("TELAPSE")[spectrum_number - 1]

                else:

                    tstop = tstart + data.field("TELAPSE")

    elif typeII == False:

        assert (
            not treat_as_time_series
        ), "This is not a PHAII file but you specified to treat it as a time series"

        # PHA 1 file
        if has_rates:

            rates = data.field(data_column_name)

            rate_errors = None

            if not is_poisson:
                rate_errors = data.field("STAT_ERR")

        else:

            rates = old_div(data.field(data_column_name), exposure)

            rate_errors = None

            if not is_poisson:
                rate_errors = old_div(data.field("STAT_ERR"), exposure)

        if "SYS_ERR" in data.columns.names:

            sys_errors = data.field("SYS_ERR")

        else:

            sys_errors = np.zeros(rates.shape)

        if has_quality_column:

            quality = data.field("QUALITY")

        else:

            if is_all_data_good:

                quality = np.zeros_like(rates, dtype=int)

            else:

                quality = np.zeros_like(rates, dtype=int) + 5

        # read start and stop times if needed

        if has_tstart:

            if has_tstart_column:

                tstart = data.field("TSTART")

            else:

                tstart = header["TSTART"]

        if has_tstop:

            if has_tstop_column:

                tstop = data.field("TSTOP")

            else:

                tstop = header["TSTOP"]

        if has_telapse:

            if has_telapse_column:

                tstop = tstart + data.field("TELAPSE")

            else:

                tstop = tstart + header["TELAPSE"]

        # Now that we have read it, some safety checks

        assert rates.shape[0] == gathered_keywords["detchans"], (
            "The data column (RATES or COUNTS) has a different number of entries than the "
            "DETCHANS declared in the header")

    quality = Quality.from_ogip(quality)

    if not treat_as_time_series:

        counts = rates * exposure

        if not is_poisson:

            count_errors = rate_errors * exposure

        else:

            count_errors = None

    else:

        exposure = np.atleast_2d(exposure).T

        counts = rates * exposure

        if not is_poisson:

            count_errors = rate_errors * exposure

        else:

            count_errors = None

    out = collections.OrderedDict(
        counts=counts,
        count_errors=count_errors,
        rates=rates,
        rate_errors=rate_errors,
        sys_errors=sys_errors,
        exposure=exposure,
        is_poisson=is_poisson,
        rsp=rsp,
        gathered_keywords=gathered_keywords,
        quality=quality,
        file_name=file_name,
        tstart=tstart,
        tstop=tstop,
    )

    return out
Example #37
0
def JCAMP_calc_xsec(jcamp_dict,
                    wavemin=None,
                    wavemax=None,
                    skip_nonquant=True,
                    debug=False):
    '''
    Given a parsed JCAMP-DX (JDX) spectrum dictionary, extract the spectrum information and transform the absorption spectrum
    from existing units to absorption cross-section.

    This function also corrects for unphysical data (such as negative transmittance values, or
    transmission above 1.0), and calculates absorbance if transmittance is given. Rather than
    returning a value, the function inserts the results into the input dictionary.

    Note that the conversion assumes that the measurements were collected for gas at a temperature of
    296K (23 degC).

    Parameters
    ----------
    jcamp_dict : dict
        A JCAMP spectrum dictionary.
    wavemin : float, optional
        The shortest wavelength in the spectrum to limit the calculation to.
    wavemax : float, optional
        The longest wavelength in the spectrum to limit the calculation to.
    skip_nonquant: bool
        If True then return "None" if the spectrum is missing quantitative data. If False, then try \
        to fill in missing quantitative values with defaults.
    '''

    x = jcamp_dict['x']
    y = jcamp_dict['y']

    T = 296.0  ## the temperature (23 degC) used by NIST when collecting spectra
    R = 1.0355E-25  ## the constant for converting data (includes the gas constant)

    ## Note: normally when we convert from wavenumber to wavelength units, the ordinate must be nonuniformly
    ## rescaled in order to compensate. But this is only true if we resample the abscissa to a uniform sampling
    ## grid. In this case here, we keep the sampling grid nonuniform in wavelength space, such that each digital
    ## bin retains its proportionality to energy, which is what we want.
    if (jcamp_dict['xunits'].lower() in ('1/cm', 'cm-1', 'cm^-1')):
        jcamp_dict['wavenumbers'] = array(
            x)  ## note that array() always performs a copy
        x = old_div(10000.0, x)
        jcamp_dict['wavelengths'] = x
    elif (jcamp_dict['xunits'].lower()
          in ('micrometers', 'um', 'wavelength (um)')):
        jcamp_dict['wavelengths'] = x
        jcamp_dict['wavenumbers'] = old_div(10000.0, x)
    elif (jcamp_dict['xunits'].lower()
          in ('nanometers', 'nm', 'wavelength (nm)')):
        x = old_div(x, 1000.0)  ## convert nm to um (1 um = 1000 nm)
        jcamp_dict['wavelengths'] = x
        jcamp_dict['wavenumbers'] = old_div(10000.0, x)
    else:
        raise ValueError(
            'Don\'t know how to convert the spectrum\'s x units ("' +
            jcamp_dict['xunits'] + '") to micrometers.')

    ## Correct for any unphysical negative values.
    y[y < 0.0] = 0.0

    ## Make sure "y" refers to absorbance.
    if (jcamp_dict['yunits'].lower() == 'transmittance'):
        ## If in transmittance, then any y > 1.0 are unphysical.
        y[y > 1.0] = 1.0

        ## Convert to absorbance.
        okay = (y > 0.0)
        y[okay] = log10(old_div(1.0, y[okay]))
        y[logical_not(okay)] = nan

        jcamp_dict['absorbance'] = y
    elif (jcamp_dict['yunits'].lower() == 'absorbance'):
        pass
    elif (jcamp_dict['yunits'].lower() == '(micromol/mol)-1m-1 (base 10)'):
        jcamp_dict['yunits'] = 'xsec (m^2)'
        jcamp_dict['xsec'] = old_div(y, 2.687e19)
        return
    else:
        raise ValueError(
            'Don\'t know how to convert the spectrum\'s y units ("' +
            jcamp_dict['yunits'] + '") to absorbance.')

    ## Determine the effective path length "ell" of the measurement chamber, in meters.
    if ('path length' in jcamp_dict):
        (val, unit) = jcamp_dict['path length'].lower().split()[0:2]
        if (unit == 'cm'):
            ell = old_div(float(val), 100.0)
        elif (unit == 'm'):
            ell = float(val)
        elif (unit == 'mm'):
            ell = old_div(float(val), 1000.0)
        else:
            ell = 0.1
    else:
        if skip_nonquant:
            return ({'info': None, 'x': None, 'xsec': None, 'y': None})
        ell = 0.1
        if debug:
            print(
                'Path length variable not found. Using 0.1m as a default ...')

    assert (alen(x) == alen(y))

    if ('npoints' in jcamp_dict):
        if (alen(x) != jcamp_dict['npoints']):
            npts_retrieved = str(alen(x))
            msg = '"' + jcamp_dict['title'] + '": Number of data points retrieved (' + npts_retrieved + \
                  ') does not equal the expected length (npoints = ' + str(jcamp_dict['npoints']) + ')!'
            raise ValueError(msg)

    ## For each gas, manually define the pressure "p" at which the measurement was taken (in units of mmHg).
    ## These values are obtained from the NIST Infrared spectrum database, which for some reason did not
    ## put the partial pressure information into the header.
    if ('partial_pressure' in jcamp_dict):
        p = float(jcamp_dict['partial_pressure'].split()[0])
        p_units = jcamp_dict['partial_pressure'].split()[1]
        if (p_units.lower() == 'mmhg'):
            pass
        elif (p_units.lower() == 'ppm'):
            p = p * 759.8 * 1.0E-6  ## scale PPM units at atmospheric pressure to partial pressure in mmHg
    else:
        if debug:
            print('Partial pressure variable value for ' +
                  jcamp_dict['title'] +
                  ' is missing. Using the default p = 150.0 mmHg ...')
        if skip_nonquant:
            return ({'info': None, 'x': None, 'xsec': None, 'y': None})
        p = 150.0

    ## Convert the absorbance units to cross-section in meters squared per molecule.
    xsec = y * T * R / (p * ell)

    ## Add the "xsec" values to the data dictionary.
    jcamp_dict['xsec'] = xsec
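    ## Rationale for the formula (a best-guess reading of the constant R): with base-10
    ## absorbance A = sigma*n*ell and ideal-gas number density n = p/(k_B*T), the cross-section
    ## is sigma = A*k_B*T/(p*ell). Numerically R = 1.0355e-25 ~= k_B/133.322, i.e. Boltzmann's
    ## constant folded together with the mmHg-to-Pa conversion, so xsec comes out in m^2 per
    ## molecule with p in mmHg and ell in m.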

    return
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
    if split == 'train':
        model.train()
    else:
        model.eval()
    Loss, Acc = AverageMeter(), AverageMeter()
    preds = []

    nIters = len(dataLoader)
    bar = Bar('{}'.format(opt.expID), max=nIters)

    for i, (input, targets, action, meta) in enumerate(dataLoader):
        input_var = torch.autograd.Variable(input).float().cuda(opt.GPU)
        target_var = []
        for t in range(len(targets)):
            target_var.append(
                torch.autograd.Variable(targets[t]).float().cuda(opt.GPU))
        z = []
        for k in range(opt.numNoise):
            noise = torch.autograd.Variable(
                torch.randn((input_var.shape[0], 1, 64, 64))).cuda(opt.GPU)
            z.append(noise)

        output, samples = model(input_var, z, action)
        pred_sample = maximumExpectedUtility(samples, criterion)
        target = maximumExpectedUtility(target_var, criterion)

        if opt.DEBUG >= 2:
            gt = getPreds(target.cpu().numpy()) * 4
            pred = getPreds((pred_sample.data).cpu().numpy()) * 4
            debugger = Debugger()
            img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(
                np.uint8).copy()
            debugger.addImg(img)
            debugger.addPoint2D(pred[0], (255, 0, 0))
            debugger.addPoint2D(gt[0], (0, 0, 255))
            debugger.showAllImg(pause=True)

        loss = DiscoLoss(output, samples, target_var, criterion)

        Loss.update(loss.item(), input.size(0))
        Acc.update(
            Accuracy((pred_sample.data).cpu().numpy(),
                     (target.data).cpu().numpy()))
        if split == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
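            # Evaluation-time flip augmentation: run a horizontally flipped copy of the input
            # through the model, un-flip the predicted heatmaps (swapping left/right joints
            # with ShuffleLR), and average them with the original prediction.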
            input_ = input.cpu().numpy()
            input_[0] = Flip(input_[0]).copy()
            inputFlip_var = torch.autograd.Variable(
                torch.from_numpy(input_).view(1, input_.shape[1], ref.inputRes,
                                              ref.inputRes)).float().cuda(
                                                  opt.GPU)
            _, samplesFlip = model(inputFlip_var, z, action)
            pred_sample_flip = maximumExpectedUtility(samplesFlip, criterion)
            outputFlip = ShuffleLR(
                Flip((pred_sample_flip.data).cpu().numpy()[0])).reshape(
                    1, ref.nJoints, ref.outputRes, ref.outputRes)
            output_ = old_div(((pred_sample.data).cpu().numpy() + outputFlip),
                              2)
            preds.append(
                finalPreds(output_, meta['center'], meta['scale'],
                           meta['rotate'])[0])

        Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(
            epoch,
            i,
            nIters,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=Loss,
            Acc=Acc,
            split=split)
        bar.next()

    bar.finish()
    return {'Loss': Loss.avg, 'Acc': Acc.avg}, preds
Example #39
0
def class_average(images,ref=None,focused=None,niter=1,normproc=("normalize.edgemean",{}),prefilt=0,align=("rotate_translate_flip",{}),
		aligncmp=("ccc",{}),ralign=None,raligncmp=None,averager=("mean",{}),scmp=("ccc",{}),keep=1.5,keepsig=1,automask=0,saveali=0,verbose=0,callback=None,center="xform.center"):
	"""Create a single class-average by iterative alignment and averaging.
	images - may either be a list/tuple of images OR a tuple containing a filename followed by integer image numbers
	ref - optional reference image (EMData).
	niter - Number of alignment/averaging iterations. If 0, will align to the reference with no further iterations.
	normproc - a processor tuple, normalization applied to particles before alignments
	prefilt - boolean. If set will 'match' reference to particle before alignment
	align - aligner tuple to align particle to averaged
	aligncmp - cmp for aligner
	ralign - aligner tuple for refining alignment
	raligncmp - cmp for ralign
	averager - averager tuple to generate class-average
	scmp - cmp tuple for comparing particle to reference for purposes of discarding bad particles
	keep - 'keep' value. Meaning depends on keepsig.
	keepsig - if set, keep is a 'sigma multiplier', otherwise keep is a fractional value (ie - 0.9 means discard the worst 10% of particles)

	returns (average,((cmp,xform,used),(cmp,xform,used),...))
	"""

	if verbose>2 : print("class_average(",images,ref,niter,normproc,prefilt,align,aligncmp,ralign,raligncmp,averager,scmp,keep,keepsig,automask,verbose,callback,center,")")
	if focused==None: focused=ref

	# nimg is the number of particles we have to align/average
	if isinstance(images[0],EMData) : nimg=len(images)
	elif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1
	else : raise Exception("Bad images list (%s)"%str(images))

	if verbose>2 : print("Average %d images"%nimg)

	# If one image and no reference, just return it
	if nimg==1 and ref==None : return (get_image(images,0,normproc),[(0,Transform(),1)])

	# If one particle and reference, align and return
	if nimg==1:
		if averager[0]!="mean" : raise Exception("Cannot perform correct average of single particle")
		ali=align_one(get_image(images,0,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)
		try: ali["model_id"]=ref["model_id"]
		except: pass
		sim=ali.cmp(scmp[0],ref,scmp[1])			# compare similarity to reference (may use a different cmp() than the aligner)
		return (ali,[(sim,ali["xform.align2d"],1)])

	# If we don't have a reference image, we need to make one
	if ref==None :
		if verbose : print("Generating reference")
#		sigs=[(get_image(i)["sigma"],i) for i in range(nimg)]		# sigma for each input image, inefficient
#		ref=get_image(images,max(sigs)[1])
		ref=get_image(images,0,normproc)										# just start with the first, as EMAN1

		# now align and average the set to the gradually improving average
		for i in range(1,nimg):
			if verbose>1 :
				print(".", end=' ')
				sys.stdout.flush()
			ali=align_one(get_image(images,i,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)
			ref.add(ali)

		# A little masking and centering
		try:
			gmw=max(5,old_div(ref["nx"],16))		# gaussian mask width
			#ref.process_inplace("filter.highpass.gauss",{"cutoff_pixels":min(ref["nx"]/10,5)})	# highpass to reduce gradient issues
			#ref.process_inplace("normalize.circlemean")
			#ref2=ref.process("mask.gaussian",{"inner_radius":ref["nx"]/2-gmw,"outer_radius":gmw/1.3})
			#ref2.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.07})	# highpass to reduce gradient issues
			#ref2.process_inplace("normalize.circlemean")
			#ref2.process_inplace("threshold.binary",{"value":ref["mean"]+ref["sigma"]*1.5})
			#ref2.process_inplace("xform.centerofmass",{"threshold":0.5})						# TODO: should probably check how well this works
			#fxf=ref2["xform.align2d"]
			#ref.translate(fxf.get_trans())
			if center!=None : ref.process_inplace(center)
			ref.process_inplace("normalize.circlemean",{"radius":old_div(ref["nx"],2)-gmw})
			ref.process_inplace("mask.gaussian",{"inner_radius":old_div(ref["nx"],2)-gmw,"outer_radius":old_div(gmw,1.3)})
			ref_orient=None
		except:
			traceback.print_exc()
	else:
		try: ref_orient=ref["xform.projection"]
		except: ref_orient=None

		try: ref_model=ref["model_id"]
		except: ref_model=0

	if verbose>1 : print("")

	init_ref=ref.copy()

	# Iterative alignment
	ptcl_info=[None]*nimg		# empty list of particle info

	# This is really niter+1.5 iterations: the loop is terminated halfway through the final pass.
	# This makes sure that inclusion/exclusion is updated at the end.
	for it in range(niter+2):
		if verbose : print("Starting iteration %d"%it)
		if callback!=None : callback(int(old_div(it*100,(niter+2))))

		mean,sigma=0.0,1.0		# defaults for when similarity isn't computed

		# Evaluate quality from last iteration, and set a threshold for keeping particles
		if it>0:
			# measure statistics of quality values
			mean,sigma=0,0
			for sim,xf,use in ptcl_info:
				mean+=sim
				sigma+=sim**2
			mean/=len(ptcl_info)
			sigma=sqrt(old_div(sigma,len(ptcl_info))-mean**2)

			# set a threshold based on statistics and options
			if keepsig:					# keep a relative fraction based on the standard deviation of the similarity values
				thresh=mean+sigma*keep
				if verbose>1 : print("mean = %f\tsigma = %f\tthresh=%f"%(mean,sigma,thresh))
			else:						# keep an absolute fraction of the total
				l=[i[0] for i in ptcl_info]
				l.sort()
				try: thresh=l[int(len(l)*keep)]
				except:
					if verbose: print("Keeping all particles")
					thresh=l[-1]+1.0

			if verbose:
				print("Threshold = %1.4f   Quality: min=%f max=%f mean=%f sigma=%f"%(thresh,min(ptcl_info)[0],max(ptcl_info)[0],mean,sigma))

			# mark the particles to keep and exclude
			nex=0
			for i,pi in enumerate(ptcl_info):
				if pi[0]>thresh :
					nex+=1
					ptcl_info[i]=(pi[0],pi[1],0)
				elif pi[2]==0:
					ptcl_info[i]=(pi[0],pi[1],1)

			if verbose : print("%d/%d particles excluded"%(nex,len(ptcl_info)))

			# if all of the particles were thrown out for some reason, we keep the best one
			if nex==len(ptcl_info) :
				best=ptcl_info.index(min(ptcl_info))
				ptcl_info[best]=(ptcl_info[best][0],ptcl_info[best][1],1)
				if verbose : print("Best particle reinstated")

		if it==niter+1 : break		# This is where the loop actually terminates. This makes sure that inclusion/exclusion is updated at the end

		# Now align and average
		avgr=Averagers.get(averager[0], averager[1])
		for i in range(nimg):
			if callback!=None and nimg%10==9 : callback(int((it+old_div(i,float(nimg)))*100/(niter+2.0)))
			ptcl=get_image(images,i,normproc)					# get the particle to align
			ali=align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp,focused)  # align to reference
			sim=ali.cmp(scmp[0],ref,scmp[1])			# compare similarity to reference (may use a different cmp() than the aligner)
			if saveali and it==niter : ali.write_image("aligned.hdf",-1)

			try: use=ptcl_info[i][2]
			except: use=1
			if use :
				avgr.add_image(ali)				# only include the particle if we've tagged it as good
				if verbose>1 :
					sys.stdout.write(".")
					sys.stdout.flush()
			elif verbose>1:
				sys.stdout.write("X")
				sys.stdout.flush()
			ptcl_info[i]=(sim,ali["xform.align2d"],use)

		if verbose>1 : print("")

		ref=avgr.finish()
		ref["class_ptcl_qual"]=mean
		ref["class_ptcl_qual_sigma"]=sigma

		# A little masking before the next iteration
		gmw=max(5,old_div(ref["nx"],12))		# gaussian mask width
		ref.process_inplace("normalize.circlemean",{"radius":old_div(ref["nx"],2)-gmw})
		if automask :
			ref.process_inplace("mask.auto2d",{"nmaxseed":10,"nshells":gmw-2,"nshellsgauss":gmw,"sigma":0.2})
		else :
			ref.process_inplace("mask.gaussian",{"inner_radius":old_div(ref["nx"],2)-gmw,"outer_radius":old_div(gmw,1.3)})

	if ref_orient!=None :
		ref["xform.projection"]=ref_orient
		ref["model_id"]=ref_model
	return [ref,ptcl_info]
Example #40
0
def channel_search(item):
    logger.info(item)

    start = time.time()
    searching = list()
    searching_titles = list()
    results = list()
    valid = list()
    ch_list = dict()
    mode = item.mode
    max_results = 10
    if item.infoLabels['title']:
        item.text = item.infoLabels['title']

    searched_id = item.infoLabels['tmdb_id']

    channel_list, channel_titles = get_channels(item)

    searching += channel_list
    searching_titles += channel_titles
    cnt = 0

    progress = platformtools.dialog_progress(
        config.get_localized_string(30993) % item.title,
        config.get_localized_string(70744) % len(channel_list),
        ', '.join(searching_titles))
    config.set_setting('tmdb_active', False)

    search_action_list = []
    module_dict = {}
    for ch in channel_list:
        try:
            module = __import__('channels.%s' % ch,
                                fromlist=["channels.%s" % ch])
            mainlist = getattr(module, 'mainlist')(Item(channel=ch,
                                                        global_search=True))

            module_dict[ch] = module
            search_action_list.extend([
                elem for elem in mainlist if elem.action == "search" and (
                    mode == 'all' or elem.contentType == mode)
            ])
            if progress.iscanceled():
                return []
        except:
            import traceback
            logger.error('error importing/getting search items of ' + ch)
            logger.error(traceback.format_exc())

    total_search_actions = len(search_action_list)
    with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor:
        c_results = []
        for search_action in search_action_list:
            c_results.append(
                executor.submit(get_channel_results, item, module_dict,
                                search_action))
            if progress.iscanceled():
                break

        for res in futures.as_completed(c_results):
            search_action = res.result()[0]
            channel = search_action.channel
            if res.result()[1]:
                if channel not in ch_list:
                    ch_list[channel] = []
                ch_list[channel].extend(res.result()[1])

            if progress.iscanceled():
                break

            search_action_list.remove(search_action)
            # check whether any search action for this channel remains; the for-else
            # below runs its else branch only when none does (the channel has finished)
            for it in search_action_list:
                if it.channel == channel:
                    break
            else:
                cnt += 1
                searching_titles.remove(
                    searching_titles[searching.index(channel)])
                searching.remove(channel)
                progress.update(
                    old_div(((total_search_actions - len(search_action_list)) *
                             100), total_search_actions),
                    config.get_localized_string(70744) %
                    str(len(channel_list) - cnt), ', '.join(searching_titles))

    progress.close()

    cnt = 0
    progress = platformtools.dialog_progress(
        config.get_localized_string(30993) % item.title,
        config.get_localized_string(60295), config.get_localized_string(60293))

    config.set_setting('tmdb_active', True)
    # res_count = 0
    for key, value in ch_list.items():
        ch_name = channel_titles[channel_list.index(key)]
        grouped = list()
        cnt += 1
        progress.update(old_div((cnt * 100), len(ch_list)),
                        config.get_localized_string(60295),
                        config.get_localized_string(60293))
        if len(value) <= max_results and item.mode != 'all':
            if len(value) == 1:
                if not value[0].action or config.get_localized_string(
                        70006).lower() in value[0].title.lower():
                    continue
            for elem in value:
                if not elem.infoLabels.get('year', ""):
                    elem.infoLabels['year'] = '-'
            tmdb.set_infoLabels_itemlist(value, True, forced=True)
            for elem in value:
                if elem.infoLabels['tmdb_id'] == searched_id:
                    elem.from_channel = key
                    if not config.get_setting('unify'):
                        elem.title += ' [%s]' % key
                    valid.append(elem)

        for it in value:
            if it.channel == item.channel:
                it.channel = key
            if it in valid:
                continue
            if mode == 'all' or (it.contentType and mode == it.contentType):
                if config.get_setting('result_mode') != 0:
                    if config.get_localized_string(30992) not in it.title:
                        it.title += typo(ch_name, '_ [] color kod bold')
                        results.append(it)
                else:
                    grouped.append(it)
            elif (mode == 'movie'
                  and it.contentTitle) or (mode == 'tvshow' and
                                           (it.contentSerieName or it.show)):
                grouped.append(it)
            else:
                continue

        if not grouped:
            continue
        # to_temp[key] = grouped
        if config.get_setting('result_mode') == 0:
            if not config.get_setting('unify'):
                title = typo(ch_name, 'bold') + typo(str(len(grouped)),
                                                     '_ [] color kod bold')
            else:
                title = typo(
                    '%s %s' %
                    (len(grouped), config.get_localized_string(70695)), 'bold')
            # res_count += len(grouped)
            plot = ''

            for it in grouped:
                plot += it.title + '\n'
            ch_thumb = channeltools.get_channel_parameters(key)['thumbnail']
            results.append(
                Item(channel='search',
                     title=title,
                     action='get_from_temp',
                     thumbnail=ch_thumb,
                     itemlist=[ris.tourl() for ris in grouped],
                     plot=plot,
                     page=1))

    progress.close()
    # "All Together" and movie mode -> search servers
    if config.get_setting('result_mode') == 1 and mode == 'movie':
        progress = platformtools.dialog_progress(
            config.get_localized_string(30993) % item.title,
            config.get_localized_string(60683))
        valid_servers = []
        with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor:
            c_results = [
                executor.submit(get_servers, v, module_dict) for v in valid
            ]
            completed = 0

            for res in futures.as_completed(c_results):
                if progress.iscanceled():
                    break
                if res.result():
                    completed += 1
                    valid_servers.extend(res.result())
                    progress.update(old_div(completed * 100, len(valid)))
        valid = valid_servers
        progress.close()

    # send_to_temp(to_temp)

    results = sorted(results, key=lambda it: it.title)
    results_statistic = config.get_localized_string(59972) % (
        item.title, time.time() - start)
    if mode == 'all':
        results.insert(
            0,
            Item(title=typo(results_statistic, 'color kod bold'),
                 thumbnail=get_thumb('search.png')))
    else:
        valid.insert(
            0,
            Item(title=typo(results_statistic, 'color kod bold'),
                 thumbnail=get_thumb('search.png')))

        if results:
            results.insert(
                0,
                Item(title=typo(config.get_localized_string(30025),
                                'color kod bold'),
                     thumbnail=get_thumb('search.png')))
    # logger.debug(results_statistic)
    return valid + results
Example #41
0
def manually_refine_components(Y,
                               xxx_todo_changeme,
                               A,
                               C,
                               Cn,
                               thr=0.9,
                               display_numbers=True,
                               max_number=None,
                               cmap=None,
                               **kwargs):
    """Plots contour of spatial components

     against a background image and allows the user to interactively add new components by clicking with the mouse

     Parameters
     -----------
     Y: ndarray
               movie as a 3D array (d1 x d2 x T)

     (dx,dy): tuple
               dimensions of the square used to identify neurons (should be set to the value of gSiz)

     A:   np.ndarray or sparse matrix
               Matrix of Spatial components (d x K)

     Cn:  np.ndarray (2D)
               Background image (e.g. mean, correlation)

     thr: scalar between 0 and 1
               Energy threshold for computing contours (default 0.9)

     display_numbers:     Boolean
               Display number of ROIs if checked (default True)

     max_number:    int
               Display the number for only the first max_number components (default None, display all numbers)

     cmap:     string
               User specifies the colormap (default None, default colormap)



     Returns
     --------
     A: np.ndarray
         matrix A of estimated spatial component contributions

     C: np.ndarray
         array of estimated calcium traces

    """
    (dx, dy) = xxx_todo_changeme
    if issparse(A):
        A = np.array(A.todense())
    else:
        A = np.array(A)

    d1, d2 = np.shape(Cn)
    d, nr = np.shape(A)
    if max_number is None:
        max_number = nr

    x, y = np.mgrid[0:d1:1, 0:d2:1]

    pl.imshow(Cn, interpolation=None, cmap=cmap)
    cm = com(A, d1, d2)

    Bmat = np.zeros((np.minimum(nr, max_number), d1, d2))
    for i in range(np.minimum(nr, max_number)):
        indx = np.argsort(A[:, i], axis=None)[::-1]
        cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
        cumEn /= cumEn[-1]
        Bvec = np.zeros(d)
        Bvec[indx] = cumEn
        Bmat[i] = np.reshape(Bvec, np.shape(Cn), order='F')
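        # Bmat[i] now holds, per pixel, the cumulative fraction of component i's energy
        # carried by all pixels at least as strong as that pixel; contouring it at `thr`
        # outlines the region containing a fraction `thr` of the component's energy.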

    T = np.shape(Y)[-1]

    pl.close()
    fig = pl.figure()
    ax = pl.gca()
    ax.imshow(Cn,
              interpolation=None,
              cmap=cmap,
              vmin=np.percentile(Cn[~np.isnan(Cn)], 1),
              vmax=np.percentile(Cn[~np.isnan(Cn)], 99))
    for i in range(np.minimum(nr, max_number)):
        pl.contour(y, x, Bmat[i], [thr])

    if display_numbers:
        for i in range(np.minimum(nr, max_number)):
            ax.text(cm[i, 1], cm[i, 0], str(i + 1))

    A3 = np.reshape(A, (d1, d2, nr), order='F')
    while True:
        pts = fig.ginput(1, timeout=0)

        if pts != []:
            print(pts)
            xx, yy = np.round(pts[0]).astype(np.int)
            coords_y = np.array(list(range(yy - dy, yy + dy + 1)))
            coords_x = np.array(list(range(xx - dx, xx + dx + 1)))
            coords_y = coords_y[(coords_y >= 0) & (coords_y < d1)]
            coords_x = coords_x[(coords_x >= 0) & (coords_x < d2)]
            a3_tiny = A3[coords_y[0]:coords_y[-1] + 1,
                         coords_x[0]:coords_x[-1] + 1, :]
            y3_tiny = Y[coords_y[0]:coords_y[-1] + 1,
                        coords_x[0]:coords_x[-1] + 1, :]

            dy_sz, dx_sz = np.shape(a3_tiny)[:-1]
            y2_tiny = np.reshape(y3_tiny, (dx_sz * dy_sz, T), order='F')
            a2_tiny = np.reshape(a3_tiny, (dx_sz * dy_sz, nr), order='F')
            y2_res = y2_tiny - a2_tiny.dot(C)

            y3_res = np.reshape(y2_res, (dy_sz, dx_sz, T), order='F')
            a__, c__, center__, b_in__, f_in__ = greedyROI(
                y3_res,
                nr=1,
                gSig=[
                    np.floor(old_div(dx_sz, 2)),
                    np.floor(old_div(dy_sz, 2))
                ],
                gSiz=[dx_sz, dy_sz])

            a_f = np.zeros((d, 1))
            idxs = np.meshgrid(coords_y, coords_x)
            a_f[np.ravel_multi_index(idxs, (d1, d2),
                                     order='F').flatten()] = a__

            A = np.concatenate([A, a_f], axis=1)
            C = np.concatenate([C, c__], axis=0)
            indx = np.argsort(a_f, axis=None)[::-1]
            cumEn = np.cumsum(a_f.flatten()[indx]**2)
            cumEn /= cumEn[-1]
            Bvec = np.zeros(d)
            Bvec[indx] = cumEn
            bmat = np.reshape(Bvec, np.shape(Cn), order='F')
            pl.contour(y, x, bmat, [thr])
            pl.pause(.01)

        elif pts == []:
            break

        nr += 1
        A3 = np.reshape(A, (d1, d2, nr), order='F')

    return A, C
Example #42
0
def voc_eval(detpath, annopath, imageset_file, classname, annocache, ovthresh=0.5, use_07_metric=False):
    """
    pascal voc evaluation
    :param detpath: detection results detpath.format(classname)
    :param annopath: annotations annopath.format(classname)
    :param imageset_file: text file containing list of images
    :param classname: category name
    :param annocache: caching annotations
    :param ovthresh: overlap threshold
    :param use_07_metric: whether to use voc07's 11 point ap computation
    :return: rec, prec, ap
    """
    with open(imageset_file, 'r') as f:
        lines = f.readlines()
    image_filenames = [x.strip() for x in lines]

    # load annotations from cache
    if not os.path.isfile(annocache):
        recs = {}
        for ind, image_filename in enumerate(image_filenames):
            recs[image_filename] = parse_voc_rec(annopath.format(image_filename))
            if ind % 100 == 0:
                print('reading annotations for {:d}/{:d}'.format(ind + 1, len(image_filenames)))
        print('saving annotations cache to {:s}'.format(annocache))
        with open(annocache, 'wb') as f:
            pickle.dump(recs, f, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(annocache, 'rb') as f:
            recs = pickle.load(f)

    # extract objects in :param classname:
    class_recs = {}
    npos = 0
    for image_filename in image_filenames:
        objects = [obj for obj in recs[image_filename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in objects])
        difficult = np.array([x['difficult'] for x in objects]).astype(np.bool)
        det = [False] * len(objects)  # stand for detected
        npos = npos + sum(~difficult)
        class_recs[image_filename] = {'bbox': bbox,
                                      'difficult': difficult,
                                      'det': det}

    # read detections
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    bbox = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence
    sorted_inds = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    bbox = bbox[sorted_inds, :]
    image_ids = [image_ids[x] for x in sorted_inds]

    # go down detections and mark true positives and false positives
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        r = class_recs[image_ids[d]]
        bb = bbox[d, :].astype(float)
        ovmax = -np.inf
        bbgt = r['bbox'].astype(float)

        if bbgt.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(bbgt[:, 0], bb[0])
            iymin = np.maximum(bbgt[:, 1], bb[1])
            ixmax = np.minimum(bbgt[:, 2], bb[2])
            iymax = np.minimum(bbgt[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (bbgt[:, 2] - bbgt[:, 0] + 1.) *
                   (bbgt[:, 3] - bbgt[:, 1] + 1.) - inters)

            overlaps = old_div(inters, uni)
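            # Worked example with illustrative boxes: bb = [0, 0, 9, 9] and a ground-truth
            # box [5, 5, 14, 14] overlap in a 5x5 region, so inters = 25,
            # uni = 100 + 100 - 25 = 175, and the resulting overlap (IoU) is 25/175 ~= 0.14.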
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not r['difficult'][jmax]:
                if not r['det'][jmax]:
                    tp[d] = 1.
                    r['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = old_div(tp, float(npos))
    # avoid division by zero in case the first detection matches a difficult ground truth
    prec = old_div(tp, np.maximum(tp + fp, np.finfo(np.float64).eps))
    ap = voc_ap(rec, prec, use_07_metric)
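    # (For reference, the VOC07 11-point rule that voc_ap is expected to apply when
    # use_07_metric is True: ap = mean over t in {0.0, 0.1, ..., 1.0} of max(prec[rec >= t]),
    # with 0 used when no recall level reaches t.)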

    return rec, prec, ap
Example #43
0
    def extractImage(self, nameOffset=0, action=True, parse=True,
            width=None, length = None):
        """
           Use gdal python bindings to extract image
        """
        try:
            from osgeo import gdal
        except ImportError:
            raise Exception('GDAL python bindings not found. Need this for RSAT2/ TandemX / Sentinel1.')

        if parse:
            self.parse()


        ###If not specified, for single slice, use width and length from first burst
        if width is None:
            width = self.bursts[0].numberOfSamples

        if length is None:
            length = self.bursts[0].numberOfLines

        src = gdal.Open(self.tiff.strip(), gdal.GA_ReadOnly)
        band = src.GetRasterBand(1)

        print('Total Width  = %d'%(src.RasterXSize))
        print('Total Length = %d'%(src.RasterYSize))

        if os.path.isdir(self.outdir):
            print('Output directory {0} already exists.'.format(self.outdir))
        else:
            print('Creating directory {0} '.format(self.outdir))
            os.makedirs(self.outdir)

        for index, burst in enumerate(self.bursts):
            outfile = os.path.join(self.outdir, 'burst_%02d'%(nameOffset+index+1) + '.slc')
            originalWidth = burst.numberOfSamples
            originalLength = burst.numberOfLines

            if action:
                ###Write original SLC to file
                fid = open(outfile, 'wb')

                ####Use burstnumber to look into tiff file
                lineOffset = (burst.burstNumber-1) * burst.numberOfLines

                ###Read whole burst for debugging. Only valid part is used.
                data = band.ReadAsArray(0, lineOffset, burst.numberOfSamples, burst.numberOfLines)

                ###Create output array and copy in valid part only
                ###Easier than appending lines and columns.
                outdata = np.zeros((length,width), dtype=np.complex64)
                outdata[burst.firstValidLine:burst.lastValidLine, burst.firstValidSample:burst.lastValidSample] =  data[burst.firstValidLine:burst.lastValidLine, burst.firstValidSample:burst.lastValidSample]

                ###################################################################################
                # If the IPF version is 2.36, we need to correct for the Elevation Antenna Pattern
                if burst.IPFversion == '002.36':
                   print('The IPF version is 2.36. Correcting the Elevation Antenna Pattern ...')
                   Geap = self.elevationAntennaPattern(burst)
                   for i in range(burst.firstValidLine, burst.lastValidLine):
                       outdata[i, burst.firstValidSample:burst.lastValidSample] = old_div(outdata[i, burst.firstValidSample:burst.lastValidSample],Geap[burst.firstValidSample:burst.lastValidSample])
                ########################

                outdata.tofile(fid)
                fid.close()

                #Updated width and length to match extraction
                burst.numberOfSamples = width
                burst.numberOfLines = length

            ####Render ISCE XML
            slcImage = isceobj.createSlcImage()
            slcImage.setByteOrder('l')
            slcImage.setFilename(outfile)
            slcImage.setAccessMode('read')
            slcImage.setWidth(burst.numberOfSamples)
            slcImage.setLength(burst.numberOfLines)
            slcImage.setXmin(0)
            slcImage.setXmax(burst.numberOfSamples)
            slcImage.renderHdr()
            burst.image = slcImage

        band = None
        src = None
Example #44
0
def main():
	progname = os.path.basename(sys.argv[0])
	usage = """prog <output> [options]

	This program produces iterative class-averages, one of the secrets to EMAN's rapid convergence.
	Normal usage is to provide a stack of particle images and a classification matrix file defining
	class membership. Members of each class are then iteratively aligned to each other and averaged
	together with (optional) CTF correction.  It is also possible to use this program on all of the
	images in a single stack.

	"""

	parser = EMArgumentParser(usage=usage,version=EMANVERSION)

	parser.add_argument("--input", type=str, help="The name of the input particle stack", default=None)
	parser.add_argument("--output", type=str, help="The name of the output class-average stack", default=None)
	parser.add_argument("--oneclass", type=int, help="Create only a single class-average. Specify the number.",default=None)
	parser.add_argument("--classmx", type=str, help="The name of the classification matrix specifying how particles in 'input' should be grouped. If omitted, all particles will be averaged.", default=None)
	parser.add_argument("--focused",type=str,help="Name of a reference projection file to read 1st iteration refine alignment references from.", default=None)
	parser.add_argument("--ref", type=str, help="Reference image(s). Used as an initial alignment reference and for final orientation adjustment if present. Also used to assign euler angles to the generated classes. This is typically the projections that were used for classification.", default=None)
	parser.add_argument("--storebad", action="store_true", help="Even if a class-average fails, write to the output. Forces 1->1 numbering in output",default=False)
	parser.add_argument("--decayedge", action="store_true", help="Applies an edge decay to zero on the output class-averages. A very good idea if you plan on 3-D reconstruction.",default=False)
	parser.add_argument("--resultmx",type=str,help="Specify an output image to store the result matrix. This contains 5 images where row is particle number. Rows in the first image contain the class numbers and in the second image consist of 1s or 0s indicating whether or not the particle was included in the class. The corresponding rows in the third, fourth and fifth images are the refined x, y and angle (respectively) used in the final alignment, these are updated and accurate, even if the particle was excluded from the class.", default=None)
	parser.add_argument("--iter", type=int, help="The number of iterations to perform. Default is 1.", default=1)
	parser.add_argument("--prefilt",action="store_true",help="Filter each reference (c) to match the power spectrum of each particle (r) before alignment and comparison",default=False)
	parser.add_argument("--prectf",action="store_true",help="Apply particle CTF to each reference before alignment",default=False)
	parser.add_argument("--align",type=str,help="This is the aligner used to align particles to the previous class average. Default is None.", default=None)
	parser.add_argument("--aligncmp",type=str,help="The comparitor used for the --align aligner. Default is ccc.",default="ccc")
	parser.add_argument("--ralign",type=str,help="This is the second stage aligner used to refine the first alignment. This is usually the \'refine\' aligner.", default=None)
	parser.add_argument("--raligncmp",type=str,help="The comparitor used by the second stage aligner.",default="ccc")
	parser.add_argument("--averager",type=str,help="The type of averager used to produce the class average.",default="mean")
	parser.add_argument("--setsfref",action="store_true",help="This will impose the 1-D structure factor of the reference on the class-average (recommended when a reference is available)",default=False)
	parser.add_argument("--cmp",type=str,help="The comparitor used to generate quality scores for the purpose of particle exclusion in classes, strongly linked to the keep argument.", default="ccc")
	parser.add_argument("--keep",type=float,help="The fraction of particles to keep in each class.",default=1.0)
	parser.add_argument("--keepsig", action="store_true", help="Causes the keep argument to be interpreted in standard deviations.",default=False)
	parser.add_argument("--automask",action="store_true",help="Applies a 2-D automask before centering. Can help with negative stain data, and other cases where centering is poor.")
	parser.add_argument("--center",type=str,default="xform.center",help="If the default centering algorithm (xform.center) doesn't work well, you can specify one of the others here (e2help.py processor center), or the word 'nocenter' for no centering")
	parser.add_argument("--bootstrap",action="store_true",help="Ignored. Present for historical reasons only.")
	parser.add_argument("--normproc",type=str,help="Normalization processor applied to particles before alignment. Default is normalize.edgemean. If you want to turn this option off specify \'None\'", default="normalize.edgemean")
	parser.add_argument("--usefilt", dest="usefilt", default=None, help="Specify a particle data file that has been low pass or Wiener filtered. Has a one to one correspondence with your particle data. If specified will be used to align particles to the running class average, however the original particle will be used to generate the actual final class average")
	parser.add_argument("--idxcache", default=False, action="store_true", help="Ignored. Present for historical reasons.")
	parser.add_argument("--dbpath", help="Ignored. Present for historical reasons.", default=".")
	parser.add_argument("--resample",action="store_true",help="If set, will perform bootstrap resampling on the particle data for use in making variance maps.",default=False)
	parser.add_argument("--odd", default=False, help="Used by EMAN2 when running eotests. Includes only odd numbered particles in class averages.", action="store_true")
	parser.add_argument("--even", default=False, help="Used by EMAN2 when running eotests. Includes only even numbered particles in class averages.", action="store_true")
	parser.add_argument("--parallel", default=None, help="parallelism argument")
	parser.add_argument("--force", "-f",dest="force",default=False, action="store_true",help="Force overwrite the output file if it exists.")
	parser.add_argument("--saveali",action="store_true",help="Writes aligned particle images to aligned.hdf. Normally resultmx produces more useful informtation. This can be used for debugging.",default=False)
	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n",type=int, default=0, help="verbose level [0-9], higner number means higher level of verboseness")
	parser.add_argument("--debug","-d",action="store_true",help="Print debugging infromation while the program is running. Default is off.",default=False)
	parser.add_argument("--nofilecheck",action="store_true",help="Turns file checking off in the check functionality - used by e2refine.py.",default=False)
	parser.add_argument("--check","-c",action="store_true",help="Performs a command line argument check only.",default=False)
	parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)

	(options, args) = parser.parse_args()

	if (options.check): options.verbose = 9 # turn verbose on if the user is only checking...

	error = check(options,True)

	if options.align : options.align=parsemodopt(options.align)
	if options.ralign : options.ralign=parsemodopt(options.ralign)
	if options.aligncmp : options.aligncmp=parsemodopt(options.aligncmp)
	if options.raligncmp : options.raligncmp=parsemodopt(options.raligncmp)
	if options.averager : options.averager=parsemodopt(options.averager)
	if options.cmp : options.cmp=parsemodopt(options.cmp)
	if options.normproc : options.normproc=parsemodopt(options.normproc)
	if options.center.lower()[:5]=="nocen" : options.center=None
	if options.resultmx!=None : options.storebad=True

	if (options.verbose>0):
		if (error):
			print("e2classaverage.py command line arguments test.... FAILED")
		else:
			print("e2classaverage.py command line arguments test.... PASSED")

	# returning a different error code is currently important to e2refine.py - returning 0 tells e2refine.py that it has enough
	# information to execute this script
	if error : exit(1)
	if options.check: exit(0)

	logger=E2init(sys.argv,options.ppid)
	print("Class averaging beginning")

	try:
		classmx=EMData.read_images(options.classmx)		# we keep the entire classification matrix in memory, since we need to update it in most cases
		ncls=int(classmx[0]["maximum"])+1
	except:
		ncls=1
		if options.resultmx!=None :
			print("resultmx can only be specified in conjunction with a valid classmx input.")
			sys.exit(1)

	nptcl=EMUtil.get_image_count(options.input)

	try: apix=EMData(options.input,0,True)["apix_x"]
	except:
		apix=1.0
		print("WARNING: could not get apix from first image. Setting to 1.0. May impact results !")

	# Initialize parallelism
	if options.parallel :
		from EMAN2PAR import EMTaskCustomer
		etc=EMTaskCustomer(options.parallel, module="e2classaverage.ClassAvTask")
		pclist=[options.input]
		if options.ref: pclist.append(options.ref)
		if options.usefilt: pclist.append(options.usefilt)
		etc.precache(pclist)

	if options.prefilt and options.prectf :
		print("ERROR: only one of prefilt and prectf can be specified")
		sys.exit(1)
	if options.prectf: options.prefilt=2
	elif options.prefilt : options.prefilt=1
	else : options.prefilt=0
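	# prefilt is now an int flag: 0 = no pre-treatment, 1 = match the reference's power
	# spectrum to the particle (--prefilt), 2 = apply the particle CTF to the reference (--prectf)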

	# prepare tasks
	tasks=[]
	if ncls>1:
		if options.oneclass==None : clslst=list(range(ncls))
		else : clslst=[options.oneclass]

		for cl in clslst:
			ptcls=classmx_ptcls(classmx[0],cl)
			if options.resample : ptcls=[random.choice(ptcls) for i in ptcls]	# this implements bootstrap resampling of the class-average
			if options.odd : ptcls=[i for i in ptcls if i%2==1]
			if options.even: ptcls=[i for i in ptcls if i%2==0]
			tasks.append(ClassAvTask(options.input,ptcls,options.usefilt,options.ref,options.focused,options.iter,options.normproc,options.prefilt,
			  options.align,options.aligncmp,options.ralign,options.raligncmp,options.averager,options.cmp,options.keep,options.keepsig,
			  options.automask,options.saveali,options.setsfref,options.verbose,cl,options.center))

	else:
		ptcls=list(range(nptcl))
		if options.resample : ptcls=[random.choice(ptcls) for i in ptcls]
		if options.odd : ptcls=[i for i in ptcls if i%2==1]
		if options.even: ptcls=[i for i in ptcls if i%2==0]
		tasks.append(ClassAvTask(options.input,list(range(nptcl)),options.usefilt,options.ref,options.focused,options.iter,options.normproc,options.prefilt,
			  options.align,options.aligncmp,options.ralign,options.raligncmp,options.averager,options.cmp,options.keep,options.keepsig,
			  options.automask,options.saveali,options.setsfref,options.verbose,0,options.center))

	# execute task list
	if options.parallel:				# run in parallel
		taskids=etc.send_tasks(tasks)
		alltaskids=taskids[:]

		while len(taskids)>0 :
			curstat=etc.check_task(taskids)
			for i,j in enumerate(curstat):
				if j==100 :
					rslt=etc.get_results(taskids[i])
					if rslt[1]["average"]!=None:
						rslt[1]["average"]["class_ptcl_src"]=options.input
						if options.decayedge:
							nx=rslt[1]["average"]["nx"]
							rslt[1]["average"].process_inplace("normalize.circlemean",{"radius":old_div(nx,2)-old_div(nx,15)})
							rslt[1]["average"].process_inplace("mask.gaussian",{"inner_radius":old_div(nx,2)-old_div(nx,15),"outer_radius":old_div(nx,20)})
							#rslt[1]["average"].process_inplace("mask.decayedge2d",{"width":nx/15})

						if options.ref!=None : rslt[1]["average"]["projection_image"]=options.ref
#						print("write",rslt[1]["n"])
						if options.storebad : rslt[1]["average"].write_image(options.output,rslt[1]["n"])
						else: rslt[1]["average"].write_image(options.output,-1)


						# Update the resultsmx if requested
						if options.resultmx!=None:
							allinfo=rslt[1]["info"]				# the info result array list of (qual,xform,used) tuples
							pnums=rslt[0].data["images"][2]		# list of image numbers corresponding to information

							for n,info in enumerate(allinfo):
								y=pnums[n]		# actual particle number

								# find the matching class in the existing classification matrix
								for x in range(classmx[0]["nx"]):
									if classmx[0][x,y]==rslt[1]["n"] :		# if the class number in the classmx matches the current class-average number
										break
								else :
									print("Resultmx error: no match found ! (%d %d %d)"%(x,y,rslt[1]["n"]))
									continue
								xform=info[1].get_params("2d")
								classmx[1][x,y]=info[2]					# used
								classmx[2][x,y]=xform["tx"]				# dx
								classmx[3][x,y]=xform["ty"]				# dy
								classmx[4][x,y]=xform["alpha"]			# da
								classmx[5][x,y]=xform["mirror"]			# flip
								try: classmx[6][x,y]=xform["scale"]
								except: pass
					# failed average
					elif options.storebad :
						blk=EMData(options.ref,0)
						apix=blk["apix_x"]
						blk=EMData(blk["nx"],blk["ny"],1)
						blk["apix_x"]=apix
						blk.to_zero()
						blk.set_attr("ptcl_repr", 0)
						blk.set_attr("apix_x",apix)
						blk.write_image(options.output,rslt[1]["n"])

			taskids=[j for i,j in enumerate(taskids) if curstat[i]!=100]

			if options.verbose and 100 in curstat :
				print("%d/%d tasks remain"%(len(taskids),len(alltaskids)))
			if 100 in curstat :
				E2progress(logger,1.0-(old_div(float(len(taskids)),len(alltaskids))))

			time.sleep(3)


		if options.verbose : print("Completed all tasks")

	# single thread
	else:
		for t in tasks:
			rslt=t.execute()
			if rslt==None : sys.exit(1)

			if rslt["average"]!=None :
				rslt["average"]["class_ptcl_src"]=options.input
				if options.decayedge:
					nx=rslt["average"]["nx"]
					rslt["average"].process_inplace("normalize.circlemean",{"radius":old_div(nx,2)-old_div(nx,15)})
					rslt["average"].process_inplace("mask.gaussian",{"inner_radius":old_div(nx,2)-old_div(nx,15),"outer_radius":old_div(nx,20)})
					#rslt["average"].process_inplace("mask.decayedge2d",{"width":nx/15})
				if options.ref!=None : rslt["average"]["projection_image"]=options.ref
				try:
					if options.storebad : rslt["average"].write_image(options.output,t.options["n"])
					else: rslt["average"].write_image(options.output,-1)
				except:
					traceback.print_exc()
					print("Error writing class average {} to {}".format(t.options["n"],options.output))
					print("Image attr: ",rslt["average"].get_attr_dict())
					display(rslt["average"])
					sys.exit(1)

				# Update the resultsmx if requested
				if options.resultmx!=None:
					allinfo=rslt["info"]				# the info result array list of (qual,xform,used) tuples
					pnums=t.data["images"][2]		# list of image numbers corresponding to information
					for n,info in enumerate(allinfo):
						y=pnums[n]		# actual particle number

						# find the matching class in the existing classification matrix
						for x in range(classmx[0]["nx"]):
							if classmx[0][x,y]==rslt["n"] :		# if the class number in the classmx matches the current class-average number
								break
						else :
							print("Resultmx error: no match found ! (%d %d %d)"%(x,y,rslt[1]["n"]))
							continue
						xform=info[1].get_params("2d")
						classmx[1][x,y]=info[2]					# used
						classmx[2][x,y]=xform["tx"]				# dx
						classmx[3][x,y]=xform["ty"]				# dy
						classmx[4][x,y]=xform["alpha"]			# da
						classmx[5][x,y]=xform["mirror"]			# flip
						try: classmx[6][x,y]=xform["scale"]
						except: pass

			# Failed average
			elif options.storebad :
				blk=EMData(options.ref,0)
				apix=blk["apix_x"]
				blk=EMData(blk["nx"],blk["ny"],1)
				blk["apix_x"]=apix
				blk.to_zero()
				blk.set_attr("ptcl_repr", 0)
				blk.set_attr("apix_x",apix)
				blk.write_image(options.output,t.options["n"])

	if options.resultmx!=None:
		if options.verbose : print("Writing results matrix")
		for i,j in enumerate(classmx) : j.write_image(options.resultmx,i)

	print("Class averaging complete")
	E2end(logger)
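The decayedge branch above relies on EMAN2's normalize.circlemean and mask.gaussian processors to flatten the background ring and taper the average toward the box edge. Below is a rough numpy-only sketch of the same idea; the inner radius nx/2 - nx/15 and falloff width nx/20 mirror the values used above, while the helper name and everything else are illustrative rather than the EMAN2 implementation.

import numpy as np

def soft_edge_mask(img):
    # Illustrative stand-in for normalize.circlemean + mask.gaussian:
    # zero-mean the outer ring, then taper it with a Gaussian falloff.
    nx = img.shape[0]                      # assumes a square image
    inner = nx / 2 - nx / 15               # radius of the flat (unmasked) region
    width = nx / 20                        # Gaussian falloff width
    y, x = np.indices(img.shape)
    r = np.hypot(x - nx / 2, y - nx / 2)
    img = img - img[r >= inner].mean()     # remove the mean of the edge ring
    falloff = np.exp(-0.5 * ((r - inner) / width) ** 2)
    return img * np.where(r <= inner, 1.0, falloff)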
Example #45
def main():
    """
    NAME
        aarm_magic.py

    DESCRIPTION
        Converts AARM data to best-fit tensor (6 elements plus sigma)
         Original program ARMcrunch written to accommodate ARM anisotropy data
          collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
          off-axis remanence terms to construct the tensor. A better way to
          do the anisotropy of ARMs is to use 9,12 or 15 measurements in
          the Hext rotational scheme.

    SYNTAX
        aarm_magic.py [-h][command line options]

    OPTIONS
        -h prints help message and quits
        -usr USER:   identify user, default is ""
        -f FILE: specify input file, default is aarm_measurements.txt
        -crd [s,g,t] specify coordinate system, requires samples file
        -fsa  FILE: specify er_samples.txt file, default is er_samples.txt (2.5) or samples.txt (3.0)
        -Fa FILE: specify anisotropy output file, default is arm_anisotropy.txt (MagIC 2.5 only)
        -Fr FILE: specify results output file, default is aarm_results.txt (MagIC 2.5 only)
        -Fsi FILE: specify output file, default is specimens.txt (MagIC 3 only)
        -DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3

    INPUT
        Input for the present program is a series of baseline, ARM pairs.
      The baseline should be the AF demagnetized state (3 axis demag is
      preferable) for the following ARM acquisition. The order of the
      measurements is:

           positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions)
           positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions)
           positions 1-15 (for 15 positions)
    """
    # initialize some parameters
    args = sys.argv

    if "-h" in args:
        print(main.__doc__)
        sys.exit()

    user = ""
    meas_file = "aarm_measurements.txt"
    rmag_anis = "arm_anisotropy.txt"
    rmag_res = "aarm_results.txt"
    dir_path = '.'
    #
    # get name of file from command line
    #
    data_model_num = int(pmag.get_named_arg_from_sys("-DM", 3))
    spec_file = pmag.get_named_arg_from_sys("-Fsi", "specimens.txt")
    if data_model_num == 3:
        samp_file = pmag.get_named_arg_from_sys("-fsa", "samples.txt")
    else:
        samp_file = pmag.get_named_arg_from_sys("-fsa", "er_samples.txt")
    if '-WD' in args:
        ind = args.index('-WD')
        dir_path = args[ind + 1]
    if "-usr" in args:
        ind = args.index("-usr")
        user = sys.argv[ind + 1]
    if "-f" in args:
        ind = args.index("-f")
        meas_file = sys.argv[ind + 1]
    coord = '-1'
    if "-crd" in sys.argv:
        ind = sys.argv.index("-crd")
        coord = sys.argv[ind + 1]
        if coord == 's':
            coord = '-1'
        if coord == 'g':
            coord = '0'
        if coord == 't':
            coord = '100'
    if "-Fa" in args:
        ind = args.index("-Fa")
        rmag_anis = args[ind + 1]
    if "-Fr" in args:
        ind = args.index("-Fr")
        rmag_res = args[ind + 1]
    meas_file = dir_path + '/' + meas_file
    samp_file = dir_path + '/' + samp_file
    rmag_anis = dir_path + '/' + rmag_anis
    rmag_res = dir_path + '/' + rmag_res
    spec_file = os.path.join(dir_path, spec_file)
    # read in data
    if data_model_num == 3:
        meas_data = []
        meas_data3, file_type = pmag.magic_read(meas_file)
        if file_type != 'measurements':
            print(file_type,
                  "This is not a valid MagIC 3.0. measurements file ")
            sys.exit()
        # convert meas_data to 2.5
        for rec in meas_data3:
            meas_map = map_magic.meas_magic3_2_magic2_map
            meas_data.append(map_magic.mapping(rec, meas_map))
    else:
        meas_data, file_type = pmag.magic_read(meas_file)
        if file_type != 'magic_measurements':
            print(file_type,
                  "This is not a valid MagIC 2.5 magic_measurements file ")
            sys.exit()
    # fish out relevant data
    meas_data = pmag.get_dictitem(meas_data, 'magic_method_codes', 'LP-AN-ARM',
                                  'has')

    # figure out how to do this with 3 vs. 2.5
    if coord != '-1':  # need to read in sample data
        if data_model_num == 3:
            samp_data3, file_type = pmag.magic_read(samp_file)
            if file_type != 'samples':
                print(file_type, "This is not a valid er_samples file ")
                print("Only specimen coordinates will be calculated")
                coord = '-1'
            else:
                # translate to 2
                samp_data = []
                samp_map = map_magic.samp_magic3_2_magic2_map
                for rec in samp_data3:
                    samp_data.append(map_magic.mapping(rec, samp_map))
        else:
            samp_data, file_type = pmag.magic_read(samp_file)
            if file_type != 'er_samples':
                print(file_type, "This is not a valid er_samples file ")
                print("Only specimen coordinates will be calculated")
                coord = '-1'
    #
    # sort the specimen names
    #
    ssort = []
    for rec in meas_data:
        spec = rec["er_specimen_name"]
        if spec not in ssort:
            ssort.append(spec)
    if len(ssort) > 1:
        sids = sorted(ssort)
    else:
        sids = ssort
    #
    # work on each specimen
    #
    specimen = 0
    RmagSpecRecs, RmagResRecs = [], []
    SpecRecs, SpecRecs3 = [], []
    while specimen < len(sids):
        s = sids[specimen]
        data = []
        RmagSpecRec = {}
        RmagResRec = {}
        method_codes = []
        #
        # find the data from the meas_data file for this sample
        #
        data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
        #
        # find out the number of measurements (9, 12 or 15)
        #
        npos = old_div(len(data), 2)
        if npos == 9:
            #
            # get dec, inc, int and convert to x,y,z
            #
            # B matrix made from design matrix for positions
            B, H, tmpH = pmag.designAARM(npos)
            X = []
            for rec in data:
                Dir = []
                Dir.append(float(rec["measurement_dec"]))
                Dir.append(float(rec["measurement_inc"]))
                Dir.append(float(rec["measurement_magn_moment"]))
                X.append(pmag.dir2cart(Dir))
        #
        # subtract baseline and put in a work array
        #
            work = numpy.zeros((npos, 3), 'f')
            for i in range(npos):
                for j in range(3):
                    work[i][j] = X[2 * i + 1][j] - X[2 * i][j]
        #
        # calculate tensor elements
        # first put ARM components in w vector
        #
            w = numpy.zeros((npos * 3), 'f')
            index = 0
            for i in range(npos):
                for j in range(3):
                    w[index] = work[i][j]
                    index += 1
            s = numpy.zeros((6), 'f')  # initialize the s matrix
            for i in range(6):
                for j in range(len(w)):
                    s[i] += B[i][j] * w[j]
            trace = s[0] + s[1] + s[2]  # normalize by the trace
            for i in range(6):
                s[i] = old_div(s[i], trace)
            a = pmag.s2a(s)
            #------------------------------------------------------------
            #  Calculating dels is different than in the Kappabridge
            #  routine. Use trace normalized tensor (a) and the applied
            #  unit field directions (tmpH) to generate model X,Y,Z
            #  components. Then compare these with the measured values.
            #------------------------------------------------------------
            S = 0.
            comp = numpy.zeros((npos * 3), 'f')
            for i in range(npos):
                for j in range(3):
                    index = i * 3 + j
                    compare = a[j][0] * tmpH[i][0] + a[j][1] * \
                        tmpH[i][1] + a[j][2] * tmpH[i][2]
                    comp[index] = compare
            for i in range(npos * 3):
                d = old_div(w[i], trace) - comp[i]  # del values
                S += d * d
            nf = float(npos * 3 - 6)  # number of degrees of freedom
            if S > 0:
                sigma = numpy.sqrt(old_div(S, nf))
            else:
                sigma = 0
            RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
            RmagSpecRec["er_location_name"] = data[0].get(
                "er_location_name", "")
            RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
            RmagSpecRec["er_sample_name"] = data[0].get("er_sample_name", "")
            RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
            RmagSpecRec["magic_experiment_names"] = RmagSpecRec[
                "rmag_anisotropy_name"] + ":AARM"
            RmagSpecRec["er_citation_names"] = "This study"
            RmagResRec[
                "rmag_result_name"] = data[0]["er_specimen_name"] + ":AARM"
            RmagResRec["er_location_names"] = data[0].get(
                "er_location_name", "")
            RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
            RmagResRec["er_sample_names"] = data[0].get("er_sample_name", "")
            RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
            RmagResRec["magic_experiment_names"] = RmagSpecRec[
                "rmag_anisotropy_name"] + ":AARM"
            RmagResRec["er_citation_names"] = "This study"
            if "magic_instrument_codes" in list(data[0].keys()):
                RmagSpecRec["magic_instrument_codes"] = data[0][
                    "magic_instrument_codes"]
            else:
                RmagSpecRec["magic_instrument_codes"] = ""
            RmagSpecRec["anisotropy_type"] = "AARM"
            RmagSpecRec[
                "anisotropy_description"] = "Hext statistics adapted to AARM"
            if coord != '-1':  # need to rotate s
                # set orientation priorities
                SO_methods = []
                for rec in samp_data:
                    if "magic_method_codes" not in rec:
                        rec['magic_method_codes'] = 'SO-NO'
                    if "magic_method_codes" in rec:
                        methlist = rec["magic_method_codes"]
                        for meth in methlist.split(":"):
                            if "SO" in meth and "SO-POM" not in meth.strip():
                                if meth.strip() not in SO_methods:
                                    SO_methods.append(meth.strip())
                SO_priorities = pmag.set_priorities(SO_methods, 0)
                # continue here
                redo, p = 1, 0
                if len(SO_methods) <= 1:
                    az_type = SO_methods[0]
                    orient = pmag.find_samp_rec(RmagSpecRec["er_sample_name"],
                                                samp_data, az_type)
                    if orient["sample_azimuth"] != "":
                        method_codes.append(az_type)
                    redo = 0
                while redo == 1:
                    if p >= len(SO_priorities):
                        print("no orientation data for ", s)
                        orient["sample_azimuth"] = ""
                        orient["sample_dip"] = ""
                        method_codes.append("SO-NO")
                        redo = 0
                    else:
                        az_type = SO_methods[SO_methods.index(
                            SO_priorities[p])]
                        orient = pmag.find_samp_rec(
                            RmagSpecRec["er_sample_name"], samp_data, az_type)
                        if orient["sample_azimuth"] != "":
                            method_codes.append(az_type)
                            redo = 0
                    p += 1
                az, pl = orient['sample_azimuth'], orient['sample_dip']
                s = pmag.dosgeo(s, az, pl)  # rotate to geographic coordinates
                if coord == '100':
                    sample_bed_dir, sample_bed_dip = orient[
                        'sample_bed_dip_direction'], orient['sample_bed_dip']
                    # rotate to geographic coordinates
                    s = pmag.dostilt(s, sample_bed_dir, sample_bed_dip)
            hpars = pmag.dohext(nf, sigma, s)
            #
            # prepare for output
            #
            RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
            RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
            RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
            RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
            RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
            RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
            RmagSpecRec["anisotropy_mean"] = '%8.3e' % (old_div(trace, 3))
            RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
            RmagSpecRec["anisotropy_unit"] = "Am^2"
            RmagSpecRec["anisotropy_n"] = '%i' % (npos)
            RmagSpecRec["anisotropy_tilt_correction"] = coord
            # used by thellier_gui - must be taken out for uploading
            RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
            # used by thellier_gui - must be taken out for uploading
            RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
            RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
            RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
            RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
            RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
            RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
            RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
            RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
            RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
            RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
            RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
            RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
            RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
            RmagResRec["result_description"] = 'Critical F: ' + \
                hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
            if hpars["e12"] > hpars["e13"]:
                RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
                    hpars['v2_dec'])
                RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
                    hpars['v2_inc'])
                RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
                RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
                    hpars['v3_dec'])
                RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
                    hpars['v3_inc'])
                RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
            else:
                RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
                    hpars['v3_dec'])
                RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
                    hpars['v3_inc'])
                RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
                RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
                    hpars['v2_dec'])
                RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
                    hpars['v2_inc'])
                RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
            if hpars["e23"] > hpars['e12']:
                RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e23'])
                RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                    hpars['v3_dec'])
                RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                    hpars['v3_inc'])
                RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e23'])
                RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                    hpars['v2_dec'])
                RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                    hpars['v2_inc'])
                RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
                RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
            else:
                RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e12'])
                RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
                RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e23'])
                RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                    hpars['v2_dec'])
                RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                    hpars['v2_inc'])
                RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                    hpars['e13'])
                RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                    hpars['v1_dec'])
                RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                    hpars['v1_inc'])
                RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                    hpars['e23'])
                RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                    hpars['v3_dec'])
                RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                    hpars['v3_inc'])
            RmagResRec["tilt_correction"] = '-1'
            RmagResRec["anisotropy_type"] = 'AARM'
            RmagResRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
            RmagSpecRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
            RmagResRec["magic_software_packages"] = pmag.get_version()
            RmagSpecRec["magic_software_packages"] = pmag.get_version()
            specimen += 1
            RmagSpecRecs.append(RmagSpecRec)
            RmagResRecs.append(RmagResRec)
            if data_model_num == 3:
                SpecRec = RmagResRec.copy()
                SpecRec.update(RmagSpecRec)
                SpecRecs.append(SpecRec)

        else:
            print('skipping specimen', s, ': only 9 positions are supported;',
                  'this specimen has', npos)
            specimen += 1

    if data_model_num == 3:
        # translate records
        for rec in SpecRecs:
            rec3 = map_magic.convert_aniso('magic3', rec)
            SpecRecs3.append(rec3)

        # write output to 3.0 specimens file
        pmag.magic_write(spec_file, SpecRecs3, 'specimens')
        print("specimen data stored in {}".format(spec_file))

    else:
        if rmag_anis == "":
            rmag_anis = "rmag_anisotropy.txt"
        pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
        print("specimen tensor elements stored in ", rmag_anis)
        if rmag_res == "":
            rmag_res = "rmag_results.txt"
        pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
        print("specimen statistics and eigenparameters stored in ", rmag_res)
Example #46
def main(iargs=None):
    inps = cmdLineParse(iargs)

    slices = []
    relTimes = []
    burstWidths = []
    burstLengths = []
    numSlices = len(inps.tiff)

    ####Stage 1: Gather all the different slices
    for kk in range(numSlices):
        obj = Sentinel1_TOPS()
        obj.configure()
        obj.xml = inps.xml[kk]
        obj.tiff = inps.tiff[kk]
        obj.manifest = inps.manifests[kk]
        obj.outdir = inps.outdir
        obj.orbitFile = inps.orbit
        obj.auxFile = inps.auxprod
        obj.auxDir = inps.auxdir
        obj.orbitDir = inps.orbitdir
        obj.parse()
        if kk == 0:
           dt = (obj.bursts[1].sensingStart - obj.bursts[0].sensingStart).total_seconds()  # assuming dt is constant! should be checked

        if inps.bbox is not None:
            obj.crop(inps.bbox)

        if obj.numberBursts != 0:
            ##Add to list of slices
            slices.append(obj)
            relTimes.append((obj.bursts[0].sensingStart - slices[0].bursts[0].sensingStart).total_seconds())
            burstWidths.append(obj.bursts[0].numberOfSamples)
            burstLengths.append(obj.bursts[0].numberOfLines)

    ###Adjust the number of slices to account for cropping
    numSlices = len(slices)

    if numSlices == 0:
        raise Exception('No bursts left to process')
    elif numSlices == 1:
        obj = slices[0]
        obj.extractImage(parse=False)
    else:
        print('Stitching slices')
        indices = np.argsort(relTimes)
        commonWidth = max(burstWidths)
        commonLength = max(burstLengths)
        firstSlice = slices[indices[0]]

        t0 = firstSlice.bursts[0].sensingStart
      #  if len(firstSlice.bursts)==1:
      #     dt = (firstSlice.bursts[0].sensingStop - t0).total_seconds()
      #  else:
      #     dt = (firstSlice.bursts[1].sensingStart - t0).total_seconds()

        obj = Sentinel1_TOPS()
        obj.xml = inps.xml
        obj.tiff = inps.tiff
        obj.outdir = inps.outdir
        obj.orbitFile = inps.orbit
        obj.orbitDir = inps.orbitdir
        obj.auxDir = inps.auxdir
        obj.auxFile = inps.auxprod
        obj.IPFversion = firstSlice.IPFversion

        for index in indices:
            slc = slices[index]
            print('slc.numberBursts:', slc.numberBursts)
            offset = int(np.rint(old_div((slc.bursts[0].sensingStart - t0).total_seconds(),dt)))
            slc.extractImage(parse=False, nameOffset=offset,
                    width=commonWidth, length=commonLength)
            print ('offset: ',offset)
            for kk in range(slc.numberBursts):
                ###Overwrite previous copy if one exists

                if (offset + kk) < len(obj.bursts):
                    print('Overwrite previous copy')
                    print (obj.bursts[offset+kk].sensingStart,obj.bursts[offset+kk].sensingStop)
                    print (slc.bursts[kk].sensingStart,slc.bursts[kk].sensingStop)
                    obj.bursts[offset+kk] = slc.bursts[kk]
                ###Else append new burst
                elif (offset+kk) == len(obj.bursts):
                    obj.bursts.append(slc.bursts[kk])
                else:
                    print('Offset indices = ', indices)
                    raise Exception('There seems to be a gap between slices.')

        obj.numberBursts = len(obj.bursts)
        print(obj.numberBursts)
        ####Reparsing the orbit file
        obj.orbitFile = firstSlice.orbitFile
        if obj.orbitFile is None:
            raise Exception('Need restituted / precise orbits for stitching slices')
        else:
            orb = obj.extractPreciseOrbit()

            for burst in obj.bursts:
                burst.orbit = Orbit()
                burst.orbit.configure()
                burst.orbit.setOrbitSource('Header')

                for sv in orb:
                    burst.orbit.addStateVector(sv)


    sname = os.path.join(inps.outdir, 'data')

    ###Reindex all the bursts for later use
    for ind, burst in enumerate(obj.bursts):
        burst.burstNumber = ind+1

    with shelve.open(sname) as db:
        db['swath'] = obj
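The stitching loop above places each slice's bursts by rounding its start-time offset to a whole number of burst intervals. A minimal sketch of that bookkeeping, assuming (as the code above does) a constant burst spacing dt in seconds; the helper name is hypothetical:

import numpy as np

def burst_offset(slice_start, t0, dt):
    # slice_start, t0: datetime objects for this slice's and the reference slice's first burst
    # dt: burst-to-burst sensingStart spacing in seconds, assumed constant
    return int(np.rint((slice_start - t0).total_seconds() / dt))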
Example #47
def nf_match_neurons_in_binary_masks(masks_gt,
                                     masks_comp,
                                     thresh_cost=.7,
                                     min_dist=10,
                                     print_assignment=False,
                                     plot_results=False,
                                     Cn=None,
                                     labels=None,
                                     cmap='viridis',
                                     D=None,
                                     enclosed_thr=None):
    """
    Match neurons expressed as binary masks. Uses Hungarian matching algorithm

    Parameters:
    -----------

    masks_gt: bool ndarray  components x d1 x d2
        ground truth masks

    masks_comp: bool ndarray  components x d1 x d2
        mask to compare to

    thresh_cost: double
        max cost accepted

    min_dist: scalar
        minimum distance between centers of mass

    print_assignment: bool
        print the matched pairs found by the Hungarian algorithm

    plot_results: bool

    Cn:
        correlation image or median

    D: list of ndarrays
        list of distances matrices

    enclosed_thr: float
        if not None set distance to at most the specified value when ground truth is a subset of inferred

    Returns:
    --------
    idx_tp_1:
        indices of true positives in the ground truth masks

    idx_tp_2:
        indices of true positives in the comparison masks

    idx_fn_1:
        indices of false negatives

    idx_fp_2:
        indices of false positives

    performance: dict
        recall, precision, accuracy and f1_score, with masks_gt taken as ground truth

    """

    _, d1, d2 = np.shape(masks_gt)
    dims = d1, d2

    # transpose to have a sparse list of components, then reshape to a 1D matrix read in Fortran order
    A_ben = scipy.sparse.csc_matrix(
        np.reshape(masks_gt[:].transpose([1, 2, 0]), (
            np.prod(dims),
            -1,
        ),
                   order='F'))
    A_cnmf = scipy.sparse.csc_matrix(
        np.reshape(masks_comp[:].transpose([1, 2, 0]), (
            np.prod(dims),
            -1,
        ),
                   order='F'))

    # have the center of mass of each element of the two masks
    cm_ben = [scipy.ndimage.center_of_mass(mm) for mm in masks_gt]
    cm_cnmf = [scipy.ndimage.center_of_mass(mm) for mm in masks_comp]

    if D is None:
        #% find distances and matches
        # find the distance between each masks
        D = distance_masks([A_ben, A_cnmf], [cm_ben, cm_cnmf],
                           min_dist,
                           enclosed_thr=enclosed_thr)
        level = 0.98
    else:
        level = .98

    matches, costs = find_matches(D, print_assignment=print_assignment)
    matches = matches[0]
    costs = costs[0]

    #%% compute precision and recall
    TP = np.sum(np.array(costs) < thresh_cost) * 1.
    FN = np.shape(masks_gt)[0] - TP
    FP = np.shape(masks_comp)[0] - TP
    TN = 0

    performance = dict()
    performance['recall'] = old_div(TP, (TP + FN))
    performance['precision'] = old_div(TP, (TP + FP))
    performance['accuracy'] = old_div((TP + TN), (TP + FP + FN + TN))
    performance['f1_score'] = 2 * TP / (2 * TP + FP + FN)
    print(performance)
    #%%
    idx_tp = np.where(np.array(costs) < thresh_cost)[0]
    idx_tp_ben = matches[0][idx_tp]  # ground truth
    idx_tp_cnmf = matches[1][idx_tp]  # algorithm - comp

    idx_fn = np.setdiff1d(list(range(np.shape(masks_gt)[0])),
                          matches[0][idx_tp])

    idx_fp = np.setdiff1d(list(range(np.shape(masks_comp)[0])),
                          matches[1][idx_tp])

    idx_fp_cnmf = idx_fp

    idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp = idx_tp_ben, idx_tp_cnmf, idx_fn, idx_fp_cnmf

    if plot_results:
        try:  # Plotting function
            pl.rcParams['pdf.fonttype'] = 42
            font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10}
            pl.rc('font', **font)
            lp, hp = np.nanpercentile(Cn, [5, 95])
            ses_1 = mpatches.Patch(color='red', label='Session 1')
            ses_2 = mpatches.Patch(color='white', label='Session 2')
            pl.subplot(1, 2, 1)
            pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='w',
                           linewidths=1) for mm in masks_comp[idx_tp_comp]
            ]
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='r',
                           linewidths=1) for mm in masks_gt[idx_tp_gt]
            ]
            if labels is None:
                pl.title('MATCHES')
            else:
                pl.title('MATCHES: ' + labels[1] + '(w), ' + labels[0] + '(r)')
            pl.legend(handles=[ses_1, ses_2])
            pl.show()
            pl.axis('off')
            pl.subplot(1, 2, 2)
            pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='w',
                           linewidths=1) for mm in masks_comp[idx_fp_comp]
            ]
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='r',
                           linewidths=1) for mm in masks_gt[idx_fn_gt]
            ]
            if labels is None:
                pl.title('FALSE POSITIVE (w), FALSE NEGATIVE (r)')
            else:
                pl.title(labels[1] + '(w), ' + labels[0] + '(r)')
            pl.legend(handles=[ses_1, ses_2])
            pl.show()
            pl.axis('off')
        except Exception as e:
            print(
                "not able to plot precision recall usually because we are on travis"
            )
            print(e)
    return idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, performance
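find_matches above wraps the Hungarian algorithm; a self-contained sketch of the same matching and scoring is shown below, assuming a dense precomputed cost matrix D with ground-truth masks on the rows and inferred masks on the columns (the helper name is hypothetical):

import numpy as np
from scipy.optimize import linear_sum_assignment

def match_and_score(D, thresh_cost=0.7):
    row, col = linear_sum_assignment(D)    # Hungarian assignment on the cost matrix
    good = D[row, col] < thresh_cost       # keep only sufficiently cheap pairs
    TP = float(np.sum(good))
    FN = D.shape[0] - TP                   # unmatched ground-truth masks
    FP = D.shape[1] - TP                   # unmatched inferred masks
    return {'recall': TP / (TP + FN),
            'precision': TP / (TP + FP),
            'f1_score': 2 * TP / (2 * TP + FP + FN)}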
Example #48
    def populateBurstSpecificMetadata(self):
        '''
        Extract burst specific metadata from the xml file.
        '''

        burstList = self.getxmlelement('swathTiming/burstList')
        for index, burst in enumerate(burstList.getchildren()):
            bb = self.bursts[index]
            bb.sensingStart = self.convertToDateTime(burst.find('azimuthTime').text)
            deltaT = datetime.timedelta(seconds=(bb.numberOfLines - 1)*bb.azimuthTimeInterval)
            bb.sensingStop = bb.sensingStart + deltaT

            bb.sensingMid = bb.sensingStart + datetime.timedelta(seconds = 0.5 * deltaT.total_seconds())

            bb.startUTC = self.convertToDateTime(burst.find('sensingTime').text)
            deltaT = datetime.timedelta(seconds=old_div((bb.numberOfLines-1),bb.prf))
            bb.stopUTC = bb.startUTC + deltaT
            bb.midUTC  = bb.startUTC + datetime.timedelta(seconds = 0.5*deltaT.total_seconds())

            firstValidSample = [int(val) for val in burst.find('firstValidSample').text.split()]
            lastValidSample = [int(val) for val in burst.find('lastValidSample').text.split()]

            first=False
            last=False
            count=0
            for ii, val in enumerate(firstValidSample):
                if (val >= 0) and (not first):
                    first = True
                    bb.firstValidLine = ii

                if (val < 0) and (first) and (not last):
                    last = True
                    bb.numValidLines = ii - bb.firstValidLine

            lastLine = bb.firstValidLine + bb.numValidLines - 1

            bb.firstValidSample = max(firstValidSample[bb.firstValidLine], firstValidSample[lastLine])
            lastSample = min(lastValidSample[bb.firstValidLine], lastValidSample[lastLine])

            bb.numValidSamples = lastSample - bb.firstValidSample

        ####Read in fm rates separately
        fmrateList = self.getxmlelement('generalAnnotation/azimuthFmRateList')
        fmRates = []
        for index, burst in enumerate(fmrateList.getchildren()):
            r0 = 0.5 * Const.c * float(burst.find('t0').text)
            try:
                c0 = float(burst.find('c0').text)
                c1 = float(burst.find('c1').text)
                c2 = float(burst.find('c2').text)
                coeffs = [c0,c1,c2]
            except AttributeError:
                coeffs = [float(val) for val in burst.find('azimuthFmRatePolynomial').text.split()]

            refTime = self.convertToDateTime(burst.find('azimuthTime').text)
            poly = Poly1D.Poly1D()
            poly.initPoly(order=len(coeffs)-1)
            poly.setMean(r0)
            poly.setNorm(0.5*Const.c)
            poly.setCoeffs(coeffs)

            fmRates.append((refTime, poly))

        for index, burst in enumerate(self.bursts):

            dd = [ np.abs((burst.sensingMid - val[0]).total_seconds()) for val in fmRates]

            arg = np.argmin(dd)
            burst.azimuthFMRate = fmRates[arg][1]

#            print('FM rate matching: Burst %d to Poly %d'%(index, arg))



        dcList = self.getxmlelement('dopplerCentroid/dcEstimateList')
        dops = [ ]
        for index, burst in enumerate(dcList.getchildren()):

            r0 = 0.5 * Const.c* float(burst.find('t0').text)
            refTime = self.convertToDateTime(burst.find('azimuthTime').text)
            coeffs = [float(val) for val in burst.find('dataDcPolynomial').text.split()]
            poly = Poly1D.Poly1D()
            poly.initPoly(order=len(coeffs)-1)
            poly.setMean(r0)
            poly.setNorm(0.5*Const.c)
            poly.setCoeffs(coeffs)

            dops.append((refTime, poly))

        for index, burst in enumerate(self.bursts):

            dd = [np.abs((burst.sensingMid - val[0]).total_seconds()) for val in dops]

            arg = np.argmin(dd)
            burst.doppler = dops[arg][1]
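Both the FM-rate and Doppler blocks above attach to each burst the polynomial whose reference azimuth time lies closest to the burst's sensingMid. A minimal sketch of that nearest-time lookup, assuming a list of (refTime, poly) pairs built as above (the helper name is hypothetical):

import numpy as np

def nearest_poly(sensing_mid, candidates):
    # Pick the polynomial whose reference time is closest to the burst's mid time.
    dt = [abs((sensing_mid - ref).total_seconds()) for ref, _poly in candidates]
    return candidates[int(np.argmin(dt))][1]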
Example #49
def compute_event_exceptionality(
        traces: np.ndarray,
        robust_std: bool = False,
        N: int = 5,
        use_mode_fast: bool = False,
        sigma_factor: float = 3.) -> Tuple[np.ndarray, np.ndarray, Any, Any]:
    """
    Define a metric and order components according to the probability of some "exceptional events" (like a spike).

    Such probability is defined as the likelihood of observing the actual trace value over N samples given an estimated noise distribution.
    The function first estimates the noise distribution by considering the dispersion around the mode.
    This is done only using values lower than the mode. The estimation of the noise std is made robust by using the approximation std=iqr/1.349.
    Then, the probability of having N consecutive events is estimated.
    This probability is used to order the components.

    Args:
        traces: ndarray
            fluorescence traces (components x time)

        robust_std: bool
            use a robust, percentile-based estimate of the noise standard deviation

        N: int
            number of consecutive events

        use_mode_fast: bool
            use the faster mode estimator

        sigma_factor: float
            multiplicative factor for noise estimate (added for backwards compatibility)

    Returns:
        fitness: ndarray
            value estimate of the quality of components (the lower the better)

        erfc: ndarray
            probability at each time step of observing the N consecutive actual trace values given the distribution of noise

        noise_est: ndarray
            estimated noise standard deviation of each trace

        md: ndarray
            estimated mode of each trace
    """

    T = np.shape(traces)[-1]
    if use_mode_fast:
        md = mode_robust_fast(traces, axis=1)
    else:
        md = mode_robust(traces, axis=1)

    ff1 = traces - md[:, None]

    # only consider values under the mode to determine the noise standard deviation
    ff1 = -ff1 * (ff1 < 0)
    if robust_std:

        # compute 25 percentile
        ff1 = np.sort(ff1, axis=1)
        ff1[ff1 == 0] = np.nan
        Ns = np.round(np.sum(ff1 > 0, 1) * .5)
        iqr_h = np.zeros(traces.shape[0])

        for idx, _ in enumerate(ff1):
            iqr_h[idx] = ff1[idx, -int(Ns[idx])]

        # approximate standard deviation as iqr/1.349
        sd_r = 2 * iqr_h / 1.349

    else:
        Ns = np.sum(ff1 > 0, 1)
        sd_r = np.sqrt(old_div(np.sum(ff1**2, 1), Ns))

    # compute z value
    z = old_div((traces - md[:, None]), (sigma_factor * sd_r[:, None]))

    # probability of observing values larger or equal to z given normal
    # distribution with mean md and std sd_r
    #erf = 1 - norm.cdf(z)

    # use logarithm so that multiplication becomes sum
    #erf = np.log(erf)
    # compute with this numerically stable function
    erf = scipy.special.log_ndtr(-z)

    # moving sum
    erfc = np.cumsum(erf, 1)
    erfc[:, N:] -= erfc[:, :-N]

    # select the maximum value of such probability for each trace
    fitness = np.min(erfc, 1)

    return fitness, erfc, sd_r, md
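The fitness score above is a moving sum, over N samples, of log survival probabilities of the z-scored trace. A condensed sketch of that final step, assuming the per-trace mode md and noise estimate sd_r have already been computed as above (the function name is hypothetical):

import numpy as np
import scipy.special

def exceptionality(traces, md, sd_r, N=5, sigma_factor=3.0):
    z = (traces - md[:, None]) / (sigma_factor * sd_r[:, None])
    log_p = scipy.special.log_ndtr(-z)   # log P(noise >= observed value), numerically stable
    score = np.cumsum(log_p, axis=1)     # running sum of log-probabilities
    score[:, N:] -= score[:, :-N]        # turn it into a moving sum over N samples
    return np.min(score, axis=1)         # most "exceptional" window per trace (lower = better)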
Example #50
def register_ROIs(A1,
                  A2,
                  dims,
                  template1=None,
                  template2=None,
                  align_flag=True,
                  D=None,
                  thresh_cost=.7,
                  max_dist=10,
                  enclosed_thr=None,
                  print_assignment=False,
                  plot_results=False,
                  Cn=None,
                  cmap='viridis'):
    """
    Register ROIs across different sessions using an intersection over union metric
    and the Hungarian algorithm for optimal matching

    Parameters:
    -----------

    A1: ndarray or csc_matrix  # pixels x # of components
        ROIs from session 1

    A2: ndarray or csc_matrix  # pixels x # of components
        ROIs from session 2

    dims: list or tuple
        dimensionality of the FOV

    template1: ndarray dims
        template from session 1

    template2: ndarray dims
        template from session 2

    align_flag: bool
        align the templates before matching

    D: ndarray
        matrix of distances in the event they are pre-computed

    thresh_cost: scalar
        maximum distance considered

    max_dist: scalar
        max distance between centroids

    enclosed_thr: float
        if not None set distance to at most the specified value when ground truth is a subset of inferred

    print_assignment: bool
        print pairs of matched ROIs

    plot_results: bool
        create a plot of matches and mismatches

    Cn: ndarray
        background image for plotting purposes

    cmap: string
        colormap for background image

    Returns:
    --------
    matched_ROIs1: list
        indices of matched ROIs from session 1

    matched_ROIs2: list
        indices of matched ROIs from session 2

    non_matched1: list
        indices of non-matched ROIs from session 1

    non_matched2: list
        indices of non-matched ROIs from session 2

    performance:  list
        (precision, recall, accuracy, f_1 score) with A1 taken as ground truth

    """

    if template1 is None or template2 is None:
        align_flag = False

    if align_flag:  # first align ROIs from session 2 to the template from session 1
        template2, shifts, _, xy_grid = tile_and_correct(
            template2,
            template1 - template1.min(),
            [int(dims[0] / 4), int(dims[1] / 4)], [16, 16], [10, 10],
            add_to_movie=template2.min(),
            shifts_opencv=True)
        A_2t = np.reshape(A2.toarray(), dims + (-1, ),
                          order='F').transpose(2, 0, 1)
        dims_grid = tuple(
            np.max(np.stack(xy_grid, axis=0), axis=0) -
            np.min(np.stack(xy_grid, axis=0), axis=0) + 1)
        _sh_ = np.stack(shifts, axis=0)
        shifts_x = np.reshape(_sh_[:, 1], dims_grid,
                              order='C').astype(np.float32)
        shifts_y = np.reshape(_sh_[:, 0], dims_grid,
                              order='C').astype(np.float32)
        x_grid, y_grid = np.meshgrid(
            np.arange(0., dims[0]).astype(np.float32),
            np.arange(0., dims[1]).astype(np.float32))
        x_remap = (-np.resize(shifts_x, dims) + x_grid).astype(np.float32)
        y_remap = (-np.resize(shifts_y, dims) + y_grid).astype(np.float32)
        A2 = np.stack([
            cv2.remap(img.astype(np.float32), x_remap, y_remap,
                      cv2.INTER_CUBIC) for img in A_2t
        ],
                      axis=0)
        A2 = np.reshape(A2.transpose(1, 2, 0), (A1.shape[0], A_2t.shape[0]),
                        order='F')

    if D is None:
        if 'csc_matrix' not in str(type(A1)):
            A1 = scipy.sparse.csc_matrix(A1)
        if 'csc_matrix' not in str(type(A2)):
            A2 = scipy.sparse.csc_matrix(A2)

        cm_1 = com(A1, dims[0], dims[1])
        cm_2 = com(A2, dims[0], dims[1])
        A1_tr = (A1 > 0).astype(float)
        A2_tr = (A2 > 0).astype(float)
        D = distance_masks([A1_tr, A2_tr], [cm_1, cm_2],
                           max_dist,
                           enclosed_thr=enclosed_thr)

    matches, costs = find_matches(D, print_assignment=print_assignment)
    matches = matches[0]
    costs = costs[0]

    #%% store indices

    idx_tp = np.where(np.array(costs) < thresh_cost)[0]
    if len(idx_tp) > 0:
        matched_ROIs1 = matches[0][idx_tp]  # ground truth
        matched_ROIs2 = matches[1][idx_tp]  # algorithm - comp
        non_matched1 = np.setdiff1d(list(range(D[0].shape[0])),
                                    matches[0][idx_tp])
        non_matched2 = np.setdiff1d(list(range(D[0].shape[1])),
                                    matches[1][idx_tp])
        TP = np.sum(np.array(costs) < thresh_cost) * 1.
    else:
        TP = 0.
        plot_results = False
        matched_ROIs1 = []
        matched_ROIs2 = []
        non_matched1 = list(range(D[0].shape[0]))
        non_matched2 = list(range(D[0].shape[1]))

    #%% compute precision and recall

    FN = D[0].shape[0] - TP
    FP = D[0].shape[1] - TP
    TN = 0

    performance = dict()
    performance['recall'] = old_div(TP, (TP + FN))
    performance['precision'] = old_div(TP, (TP + FP))
    performance['accuracy'] = old_div((TP + TN), (TP + FP + FN + TN))
    performance['f1_score'] = 2 * TP / (2 * TP + FP + FN)
    print(performance)

    if plot_results:
        if Cn is None:
            if template1 is not None:
                Cn = template1
            elif template2 is not None:
                Cn = template2
            else:
                Cn = np.reshape(A1.sum(1) + A2.sum(1), dims, order='F')

        masks_1 = np.reshape(A1.toarray(), dims + (-1, ),
                             order='F').transpose(2, 0, 1)
        masks_2 = np.reshape(A2.toarray(), dims + (-1, ),
                             order='F').transpose(2, 0, 1)
        #        try : #Plotting function
        level = 0.98
        pl.rcParams['pdf.fonttype'] = 42
        font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10}
        pl.rc('font', **font)
        lp, hp = np.nanpercentile(Cn, [5, 95])
        pl.subplot(1, 2, 1)
        pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1)
            for mm in masks_1[matched_ROIs1]
        ]
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1)
            for mm in masks_2[matched_ROIs2]
        ]
        pl.title('Matches')
        pl.axis('off')
        pl.subplot(1, 2, 2)
        pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1)
            for mm in masks_1[non_matched1]
        ]
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1)
            for mm in masks_2[non_matched2]
        ]
        pl.title('Mismatches')
        pl.axis('off')
#        except Exception as e:
#            print("not able to plot precision recall usually because we are on travis")
#            print(e)

    return matched_ROIs1, matched_ROIs2, non_matched1, non_matched2, performance
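distance_masks above builds the cost matrix from an intersection-over-union style metric gated by a maximum centroid distance. A hedged numpy sketch of a single entry of such a matrix is given below; the real implementation works on sparse matrices and also handles the enclosed_thr case, and the helper name is hypothetical.

import numpy as np

def iou_cost(mask1, mask2, cm1, cm2, max_dist=10):
    # Worst cost (1.0) if the centers of mass are too far apart, else 1 - IoU.
    if np.linalg.norm(np.asarray(cm1) - np.asarray(cm2)) > max_dist:
        return 1.0
    m1, m2 = mask1.astype(bool), mask2.astype(bool)
    union = np.logical_or(m1, m2).sum()
    if union == 0:
        return 1.0
    return 1.0 - np.logical_and(m1, m2).sum() / union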
Example #51
def swift2MJD(tswift):
   return old_div(tswift,86400.0)  + 51910.0
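swift2MJD converts Swift mission elapsed time in seconds to a Modified Julian Date by dividing by 86400 and adding 51910.0, the MJD of the implied 2001-01-01 reference epoch. For example:

# swift2MJD(0.0)      -> 51910.0   (the reference epoch itself)
# swift2MJD(86400.0)  -> 51911.0   (one day, 86400 s, of mission elapsed time later)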
Example #52
class myconfig(object):
    expname = 'roianMouse'
    baseName = 'Base'
    fineName = 'Fine'  #_resize'
    mrfName = 'MRF'  #_identity'
    acName = 'AC'
    regName = 'Reg'
    evalName = 'eval'
    genName = 'gen'

    # ----- Network parameters

    scale = 2
    rescale = 1  # how much to downsize the base image.
    numscale = 3
    pool_scale = 4
    pool_size = 3
    pool_stride = 2
    # sel_sz determines the patch size used for the final decision
    # i.e., patch seen by the fc6 layer
    # ideally as large as possible but limited by
    # a) gpu memory size
    # b) overfitting due to large number of variables.
    sel_sz = old_div(512, 3)
    psz = sel_sz // (scale**(numscale - 1)) // rescale // pool_scale
    dist2pos = 5
    label_blur_rad = 5  #1.5
    fine_label_blur_rad = 1.5
    n_classes = 2
    dropout = 0.5
    nfilt = 128
    nfcfilt = 512
    doBatchNorm = True
    useMRF = True
    useHoldout = False

    # ----- Fine Network parameters
    fine_flt_sz = 5
    fine_nfilt = 48
    fine_sz = 48

    # ----- MRF Network Parameters
    baseIter4MRFTrain = 5000
    baseIter4ACTrain = 5000
    add_loc_info = True

    # ----- Learning parameters

    base_learning_rate = 0.0003
    mrf_learning_rate = 0.00001
    ac_learning_rate = 0.0003
    fine_learning_rate = 0.0003

    batch_size = 8
    mult_fac = old_div(16, batch_size)
    base_training_iters = 10000 * mult_fac
    fine_training_iters = 5000 * mult_fac
    mrf_training_iters = 3000 * mult_fac
    ac_training_iters = 5000
    gamma = 0.1
    step_size = 100000
    display_step = 30
    num_test = 100

    # range for contrast, brightness and rotation adjustment
    horz_flip = False
    vert_flip = False
    brange = [-0.2, 0.2]
    crange = [0.7, 1.3]
    rrange = 30
    imax = 255.
    adjust_contrast = False
    clahe_grid_size = 20
    normalize_mean_img = True

    # ----- Data parameters

    split = True
    view = 0
    l1_cropsz = 0
    imsz = (641, 641)
    map_size = 100000 * imsz[0] * imsz[1] * 3
    cropLoc = {(641, 641): [0, 0]}
    selpts = np.arange(0, 2)
    img_dim = 1

    cachedir = os.path.join(localSetup.bdir, 'cache', 'roianMouse')
    labelfile = os.path.join(localSetup.bdir, 'data', 'roian',
                             'head_tail_20170411.lbl')
    # this label file has more data and includes the correction for vertical flipping
    trainfilename = 'train_TF'
    fulltrainfilename = 'fullTrain_TF'
    valfilename = 'val_TF'
    valdatafilename = 'valdata'
    valratio = 0.3
    holdoutratio = 0.8

    # ----- Save parameters

    save_step = 500
    maxckpt = 20
    baseoutname = expname + baseName
    fineoutname = expname + fineName
    mrfoutname = expname + mrfName
    acoutname = expname + acName
    baseckptname = baseoutname + 'ckpt'
    fineckptname = fineoutname + 'ckpt'
    mrfckptname = mrfoutname + 'ckpt'
    acckptname = acoutname + 'ckpt'
    basedataname = baseoutname + 'traindata'
    finedataname = fineoutname + 'traindata'
    mrfdataname = mrfoutname + 'traindata'
    acdataname = acoutname + 'traindata'

    def getexpname(self, dirname):
        expname = os.path.basename(dirname)
        return expname[:-4]

    def getexplist(self, L):
        # fname = 'vid{:d}files'.format(self.view+1)
        # return L[fname]

        return L['movieFilesAll'][self.view, :]
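With the values above, the derived sizes work out as follows (old_div on integers floors at every step); a quick sanity check:

# sel_sz   = 512 // 3                    = 170
# psz      = 170 // 2**(3 - 1) // 1 // 4 = 10    # sel_sz reduced by scale, rescale and pooling
# mult_fac = 16 // 8                     = 2     # so base_training_iters = 10000 * 2 = 20000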
Example #53
def main():

    progname = os.path.basename(sys.argv[0])
    usage = """prog <references> <particles> <classmx> [options]
	
	** EXPERIMENTAL **
	
	This program classifies a set of particles based on a set of references (usually projections). This program makes use of
	rotational/translational invariants which, aside from computing the invariants, makes the process extremely fast.
	
	"""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)
    parser.add_argument(
        "--sep",
        type=int,
        help=
        "The number of classes a particle can contribute towards (default is 1)",
        default=1)
    parser.add_argument(
        "--align",
        type=str,
        help=
        "specify an aligner to use after classification. Default rotate_translate_tree",
        default="rotate_translate_tree")
    parser.add_argument("--aligncmp",
                        type=str,
                        help="Similarity metric for the aligner",
                        default="ccc")
    parser.add_argument(
        "--ralign",
        type=str,
        help="specify a refine aligner to use after the coarse alignment",
        default=None)
    parser.add_argument("--raligncmp",
                        type=str,
                        help="Similarity metric for the refine aligner",
                        default="ccc")
    parser.add_argument(
        "--cmp",
        type=str,
        help=
        "Default=auto. The name of a 'cmp' to be used in assessing the aligned images",
        default="ccc")
    parser.add_argument("--classmx",
                        type=str,
                        help="Store results in a classmx_xx.hdf style file",
                        default=None)
    parser.add_argument("--classinfo",
                        type=str,
                        help="Store results in a classinfo_xx.json style file",
                        default=None)
    parser.add_argument(
        "--classes",
        type=str,
        help=
        "Generate class-averages directly. No bad particle exclusion or iteration. Specify filename.",
        default=None)
    parser.add_argument("--averager",
                        type=str,
                        help="Averager to use for class-averages",
                        default="ctf.weight")
    parser.add_argument(
        "--invartype",
        choices=["auto", "bispec", "harmonic"],
        help="Which type of invariants to generate: (bispec,harmonic)",
        default="auto")

    parser.add_argument(
        "--threads",
        default=4,
        type=int,
        help="Number of threads to run in parallel on the local computer")
    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        default=0,
        help=
        "verbose level [0-9], higher number means higher level of verboseness")
    parser.add_argument(
        "--ppid",
        type=int,
        help="Set the PID of the parent process, used for cross platform PPID",
        default=-1)

    (options, args) = parser.parse_args()

    if (len(args) < 2): parser.error("Please specify <references> <particles>")

    options.align = parsemodopt(options.align)
    options.aligncmp = parsemodopt(options.aligncmp)
    options.ralign = parsemodopt(options.ralign)
    options.raligncmp = parsemodopt(options.raligncmp)
    options.cmp = parsemodopt(options.cmp)

    if options.invartype == "auto":
        try:
            options.invartype = str(project(["global.invartype"]))
        except:
            print(
                "Warning: no project invariant type spectified, using bispectrum"
            )
            options.invartype = "bispec"

    E2n = E2init(sys.argv, options.ppid)

    options.threads += 1  # one extra thread for storing results

    nref = EMUtil.get_image_count(args[0])
    nptcl = EMUtil.get_image_count(args[1])

    # get refs and invariants
    refs = EMData.read_images(args[0])
    refsbsfs = args[0].rsplit(".")[0] + "_invar.hdf"
    try:
        nrefbs = EMUtil.get_image_count(refsbsfs)
        if nrefbs != len(refs):
            print("Reference invariant file too short :", nrefbs, len(refs))
            raise Exception
    except:
        #		traceback.print_exc()
        #		print("\nError! No good invariants found for refs. Please rerun CTF generate output and set building.")
        #		sys.exit(1)

        if options.invartype == "bispec":
            com = "e2proc2dpar.py {inp} {out} --process filter.highpass.gauss:cutoff_freq=0.01 --process normalize.edgemean --process mask.soft:outer_radius={maskrad}:width={maskw} --process math.bispectrum.slice:size={bssize}:fp={bsdepth} --threads {threads}".format(
                inp=args[0],
                out=refsbsfs,
                maskrad=int(refs[0]["nx"] // 2.2),
                maskw=int(refs[0]["nx"] // 15),
                bssize=bispec_invar_parm[0],
                bsdepth=bispec_invar_parm[1],
                threads=options.threads)
        else:
            com = "e2proc2dpar.py {inp} {out} --process filter.highpass.gauss:cutoff_freq=0.01 --process normalize.edgemean --process mask.soft:outer_radius={maskrad}:width={maskw} --process math.harmonicpow:fp=1 --threads {threads}".format(
                inp=args[0],
                out=refsbsfs,
                maskrad=int(refs[0]["nx"] // 2.2),
                maskw=int(refs[0]["nx"] // 15),
                threads=options.threads)

        run(com)

    refsbs = EMData.read_images(refsbsfs)
    #refsbs=[i.process("filter.highpass.gauss",{"cutoff_freq":0.01}).process("normalize.edgemean").process("math.bispectrum.slice:size=32:fp=6") for i in refs]

    # Find particle invariants
    if "__ctf_flip" in args[1]:
        if "even" in args[1]:
            bsfs = args[1].split("__ctf_flip")[0] + "__ctf_flip_invar_even.lst"
        elif "odd" in args[1]:
            bsfs = args[1].split("__ctf_flip")[0] + "__ctf_flip_invar_odd.lst"
        else:
            bsfs = args[1].split("__ctf_flip")[0] + "__ctf_flip_invar.lst"
        try:
            nptclbs = EMUtil.get_image_count(bsfs)
        except:
            print("Could not get particle count on ", bsfs)
            sys.exit(1)
        if nptclbs != nptcl:
            print(nptclbs, nptcl)
            raise Exception("Particle invariant file has wrong particle count")
    else:
        if "even" in args[1]:
            bsfs = args[1].split("_even")[0] + "_invar_even.lst"
        elif "odd" in args[1]:
            bsfs = args[1].split("_odd")[0] + "_invar_odd.lst"

    ### initialize output files

    # class, weight, dx,dy,dalpha,flip
    clsmx = [EMData(options.sep, nptcl, 1) for i in range(6)]

    # JSON style output, classes keyed by class number
    clsinfo = {}

    # avgs
    if options.classes != None:
        options.averager = parsemodopt(options.averager)
        avgrs = [
            Averagers.get(options.averager[0], options.averager[1])
            for i in range(nref)
        ]

    # Set up threads
    N = nptcl
    npt = max(min(100, old_div(N, (options.threads - 2))), 1)
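    # npt = number of particles per work unit: at most 100 and at least 1, split across the worker threads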

    jsd = queue.Queue(0)
    # these start as arguments, but get replaced with actual threads
    thrds = [(jsd, refs, refsbs, args[1], bsfs, options, i, i * npt,
              min(i * npt + npt, N)) for i in range(old_div(N, npt) + 1)]

    # standard thread execution loop
    thrtolaunch = 0
    while thrtolaunch < len(thrds) or threading.active_count() > 1:
        if thrtolaunch < len(thrds):
            while (threading.active_count() >= options.threads):
                time.sleep(0.1)
            if options.verbose > 0:
                print("\r Starting thread {}/{}      ".format(
                    thrtolaunch, len(thrds)),
                      end=' ')
                sys.stdout.flush()
            thrds[thrtolaunch] = threading.Thread(
                target=clsfn, args=thrds[thrtolaunch])  # replace args
            thrds[thrtolaunch].start()
            thrtolaunch += 1
        else:
            time.sleep(0.1)

        # return is [N,dict] a dict of image# keyed processed images
        while not jsd.empty():
            rd = jsd.get()
            r = rd[0]
            pt = r[0]
            for i, a in enumerate(r[1:]):
                clsmx[0][i, pt] = a[1]
                clsmx[1][i, pt] = 1.0
                clsmx[2][i, pt] = a[3]
                clsmx[3][i, pt] = a[4]
                clsmx[4][i, pt] = a[5]
                clsmx[5][i, pt] = a[6]

                if options.classinfo != None:
                    try:
                        clsinfo[a[1]].append(
                            (pt, a[0], a[2], a[3], a[4], a[5], a[6]))
                    except:
                        clsinfo[a[1]] = [(pt, a[0], a[2], a[3], a[4], a[5],
                                          a[6])]

                if options.classes != None:
                    avgrs[a[1]].add_image(a[7])

            if rd[2]:
                thrds[rd[1]].join()
                thrds[rd[1]] = None

                if options.verbose > 1:
                    print("{} done. ".format(rd[1]), end=' ')

    ### Write output files
    if options.classmx != None:
        if os.path.exists(options.classmx): remove_file(options.classmx)
        for i, m in enumerate(clsmx):
            m.write_image(options.classmx, i)

    if options.classinfo != None:
        if os.path.exists(options.classinfo): remove_file(options.classinfo)
        db = js_open_dict(options.classinfo)
        db["input"] = args[1]
        db["inputbs"] = bsfs
        db["refs"] = args[0]
        db["refsbs"] = refsbsfs
        db["classes"] = clsinfo

    if options.classes != None:
        if os.path.exists(options.classes): remove_file(options.classes)
        empty = EMData(refs[0]["nx"], refs[0]["ny"], 1)
        empty.to_zero()
        empty["ptcl_repr"] = 0
        for i, avgr in enumerate(avgrs):
            if i in clsinfo:
                avg = avgr.finish()
                #				avg.process_inplace("normalize.circlemean",{"radius":avg["ny"]/2-4})
                avg.process_inplace("normalize.toimage", {
                    "to": refs[i],
                    "fourieramp": 1,
                    "ignore_lowsig": 0.3
                })
                avg.process_inplace("mask.soft", {
                    "outer_radius": old_div(avg["ny"], 2) - 4,
                    "width": 3
                })
                #				avg.process_inplace("normalize.toimage",{"to":refs[i],"ignore_lowsig":0.75})
                avg["class_ptcl_idxs"] = [p[0] for p in clsinfo[i]
                                          ]  # particle indices
                quals = array([p[1] for p in clsinfo[i]])
                avg["class_ptcl_qual"] = quals.mean()
                avg["class_ptcl_qual_sigma"] = quals.std()
                #				avg["class_qual"]=avg.cmp("frc",refs[i],{"minres":25,"maxres":10})
                #				avg["class_qual"]=avg.cmp("ccc",refs[i])	# since we are doing SNR below now, frc seems unnecessary, particularly since ccc is used in e2classaverage
                avg["class_qual"] = old_div(
                    avg.cmp("frc", refs[i], {
                        "minres": 30,
                        "maxres": 10
                    }), avg.cmp("frc", refs[i], {
                        "minres": 100,
                        "maxres": 30
                    })
                )  # Trying something new 2/7/18. This ratio seems pretty effective at identifying bad class-averages. A bit slow, should consider writing something specifically for this

                # We compute a smoothed SSNR curve by comparing to the reference. We keep overwriting ssnr to gradually produce what we're after
                ssnr = avg.calc_fourier_shell_correlation(refs[i])
                third = old_div(len(ssnr), 3)
                ssnr = [ssnr[third]] * 4 + ssnr[third:third * 2] + [
                    ssnr[third * 2 - 1]
                ] * 4  # we extend the list by replication to make the running average more natural
                ssnr = [
                    old_div(sum(ssnr[j - 4:j + 5]), 9.0)
                    for j in range(4, third + 4)
                ]  # smoothing by running average
                ssnr = [old_div(v, (1.0 - min(v, .999999)))
                        for v in ssnr]  # convert FSC to pseudo SSNR
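                # (standard relation SSNR = FSC / (1 - FSC); the min() clamp avoids division by zero as FSC -> 1)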
                avg["class_ssnr"] = ssnr

                avg["class_ptcl_src"] = args[1]
                avg["projection_image"] = args[0]
                avg["projection_image_idx"] = i
                try:
                    avg["xform.projection"] = refs[i]["xform.projection"]
                except:
                    pass
                avg.write_image(options.classes, i)
            else:
                empty.write_image(options.classes, i)

    E2end(E2n)

    print("Classification complete, writing classmx")
Example #54
0
def evaluate_components(
        Y: np.ndarray,
        traces: np.ndarray,
        A,
        C,
        b,
        f,
        final_frate,
        remove_baseline: bool = True,
        N: int = 5,
        robust_std: bool = False,
        Athresh: float = 0.1,
        Npeaks: int = 5,
        thresh_C: float = 0.3,
        sigma_factor: float = 3.) -> Tuple[Any, Any, Any, Any, Any, Any]:
    """ Define a metric and order components according to the probability of some "exceptional events" (like a spike).

    Such probability is defined as the likelihood of observing the actual trace values over N samples given an estimated noise distribution.
    The function first estimates the noise distribution by considering the dispersion around the mode.
    This is done only using values lower than the mode.
    The estimation of the noise std is made robust by using the approximation std = iqr / 1.349.
    Then, the probability of having N consecutive events is estimated.
    This probability is used to order the components.
    The algorithm also measures the reliability of the spatial mask by comparing the filters in A
    with the average of the movie over the samples where exceptional events happen, after removing (if possible)
    frames in which neighboring neurons were active.

    Args:
        Y: ndarray
            movie x,y,t

        traces: ndarray
            Fluorescence traces

        A,C,b,f: various types
            outputs of cnmf

        final_frate: float
            frame rate of the movie, in Hz (used to size the temporal window around detected events)

        remove_baseline: bool
            whether to remove the baseline in a rolling fashion (8th percentile)

        N: int
            N number of consecutive events probability multiplied


        Athresh: float
            threshold on overlap of A (between 0 and 1)

        Npeaks: int
            Number of local maxima to consider

        thresh_C: float
            fraction of the maximum of C that is used as minimum peak height

        sigma_factor: float
            multiplicative factor for noise

    Returns:
        fitness_raw: ndarray
            value estimate of the quality of components (the lesser the better) on the raw trace

        fitness_delta: ndarray
            value estimate of the quality of components (the lesser the better) on diff(trace)

        erfc_raw: ndarray
            probability at each time step of observing the N consecutive actual trace values given the distribution of noise on the raw trace

        erfc_delta: ndarray
            probability at each time step of observing the N consecutive actual trace values given the distribution of noise on diff(trace)

        r_values: list
            float values representing correlation between component and spatial mask obtained by averaging important points

        significant_samples: ndarray
            indexes of samples used to obtain the spatial mask by average
    """

    tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
    tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
    logging.debug('tB:' + str(tB) + ',tA:' + str(tA))
    dims, T = np.shape(Y)[:-1], np.shape(Y)[-1]

    Yr = np.reshape(Y, (np.prod(dims), T), order='F')

    logging.info('Computing event exceptionality delta')
    fitness_delta, erfc_delta, _, _ = compute_event_exceptionality(
        np.diff(traces, axis=1),
        robust_std=robust_std,
        N=N,
        sigma_factor=sigma_factor)

    logging.debug('Removing Baseline')
    if remove_baseline:
        num_samps_bl = np.minimum(old_div(np.shape(traces)[-1], 5), 800)
        slow_baseline = False
        if slow_baseline:

            traces = traces - \
                scipy.ndimage.percentile_filter(
                    traces, 8, size=[1, num_samps_bl])

        else:  # fast baseline removal

            downsampfact = num_samps_bl
            elm_missing = int(
                np.ceil(T * 1.0 / downsampfact) * downsampfact - T)
            padbefore = int(np.floor(old_div(elm_missing, 2.0)))
            padafter = int(np.ceil(old_div(elm_missing, 2.0)))
            tr_tmp = np.pad(traces.T, ((padbefore, padafter), (0, 0)),
                            mode='reflect')
            numFramesNew, num_traces = np.shape(tr_tmp)
            #% compute baseline quickly
            logging.debug("binning data ...")
            tr_BL = np.reshape(
                tr_tmp, (downsampfact, int(old_div(numFramesNew,
                                                   downsampfact)), num_traces),
                order='F')
            tr_BL = np.percentile(tr_BL, 8, axis=0)
            logging.info("interpolating data ...")
            logging.info(tr_BL.shape)
            tr_BL = scipy.ndimage.zoom(np.array(tr_BL, dtype=np.float32),
                                       [downsampfact, 1],
                                       order=3,
                                       mode='constant',
                                       cval=0.0,
                                       prefilter=True)
            if padafter == 0:
                traces -= tr_BL.T
            else:
                traces -= tr_BL[padbefore:-padafter].T

    logging.info('Computing event exceptionality')
    fitness_raw, erfc_raw, _, _ = compute_event_exceptionality(
        traces, robust_std=robust_std, N=N, sigma_factor=sigma_factor)

    logging.info('Evaluating spatial footprint')
    # compute the overlap between spatial and movie average across samples with significant events
    r_values, significant_samples = classify_components_ep(Yr,
                                                           A,
                                                           C,
                                                           b,
                                                           f,
                                                           Athresh=Athresh,
                                                           Npeaks=Npeaks,
                                                           tB=tB,
                                                           tA=tA,
                                                           thres=thresh_C)

    return fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples
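
The docstring above describes the core idea behind compute_event_exceptionality: estimate a robust noise sigma from the dispersion of the sub-mode values (std ≈ IQR / 1.349), then score how unlikely N consecutive samples are under that Gaussian noise model. A minimal standalone sketch of that idea follows; it is not the CaImAn implementation (the median stands in for the mode, and scipy's normal survival function supplies the tail probabilities):

import numpy as np
from scipy.stats import norm

def exceptionality_sketch(trace, N=5):
    # Robust noise estimate from the "quiet" part of the trace: use only
    # values at or below the center (median here, as a stand-in for the mode)
    # and take std ~= IQR / 1.349.
    center = np.median(trace)
    below = trace[trace <= center]
    iqr = np.percentile(below, 75) - np.percentile(below, 25)
    sigma = iqr / 1.349 if iqr > 0 else np.std(trace)

    # Tail log-probability of each sample under the Gaussian noise model.
    z = (trace - center) / sigma
    log_p = norm.logsf(z)                      # log P(noise >= observed value)

    # Joint log-probability of N consecutive samples (sliding window sum);
    # the most negative window is the "most exceptional" event.
    log_p_N = np.convolve(log_p, np.ones(N), mode='valid')
    return log_p_N.min()                       # lower = more likely a real transient

# A flat noisy trace vs. the same trace with a short calcium-like burst
rng = np.random.default_rng(0)
flat = rng.normal(0.0, 1.0, 500)
burst = flat.copy()
burst[200:210] += 6.0
print(exceptionality_sketch(flat), exceptionality_sketch(burst))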
Example #55
0
    def evaluate(self, x, F, mu, sigma):

        norm = old_div(self.__norm_const, sigma)

        return F * norm * np.exp(
            old_div(-np.power(x - mu, 2.), (2 * np.power(sigma, 2.))))
Example #56
0
def swift2JD(tswift):
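   # Swift MET (mission elapsed time) is seconds since 2001-01-01 00:00:00 UTC, i.e. JD 2451910.5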
   return old_div(tswift,86400.0)  + 2451910.5
Example #57
0
    def evaluate(self, x, K, x0, gamma):
        norm = old_div(1, (gamma * np.pi))

        gamma2 = gamma * gamma

        return K * norm * gamma2 / ((x - x0) * (x - x0) + gamma2)
Example #58
0
def main(args):

    progname = os.path.basename(sys.argv[0])
    usage = """e2moviealigner.py [options] <ddd_movie_stack> <ddd_movie_stack> ... <ddd_movie_stack>
	
	Determines the optimal whole-frame alignment of a DDD movie. It can be
	used to determine the proper x and y translations for each movie frame and
	can perform the actual alignment according to the translations.
	
	Example: e2moviealigner.py --dark dark.hdf --gain gain.hdf movie.hdf -v 9
	"""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)

    parser.add_argument(
        "--dark",
        type=str,
        default=None,
        help="Perform dark image correction using the specified image file")
    parser.add_argument(
        "--gain",
        type=str,
        default=None,
        help="Perform gain image normalization using the specified image file")
    parser.add_argument(
        "--gaink2",
        type=str,
        default=None,
        help=
        "Perform gain image normalization. Gatan K2 gain images are the reciprocal of DDD gain images."
    )
    parser.add_argument(
        "--boxsize",
        type=int,
        help=
        "Set the boxsize used to compute power spectra across movie frames",
        default=512)
    parser.add_argument(
        "--maxshift",
        type=int,
        help="Set the maximum frame translation distance in pixels.",
        default=24)
    parser.add_argument("--step",
                        type=float,
                        help="Set step size for cross coherence calculation.",
                        default=1.0)
    parser.add_argument(
        "--nnorm",
        type=float,
        help="Set the norm to be used for fixing axes. Default is sqrt(2)",
        default=np.sqrt(2))
    parser.add_argument(
        "--fixaxes",
        action="store_true",
        default=False,
        help=
        "Tries to identify bad pixels and fill them in with sane values instead"
    )
    parser.add_argument(
        "--fixbadlines",
        action="store_true",
        default=False,
        help=
        "If you wish to remove detector-specific bad lines, you must specify this flag and --xybadlines."
    )
    parser.add_argument(
        '--xybadlines',
        help=
        "Specify the list of bad pixel coordinates for your detector. Will only be used if --fixbadlines is also specified.",
        nargs=2,
        default=['3106,3093', '3621,3142', '4719,3494'])
    parser.add_argument(
        "--rotavg",
        action="store_true",
        default=False,
        help="Use a rotationally averaged coherent power spectrum.")
    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        default=0,
        help=
        "verbose level [0-9], higner number means higher level of verboseness")
    parser.add_argument(
        "--ppid",
        type=int,
        help="Set the PID of the parent process, used for cross platform PPID",
        default=-2)

    (options, args) = parser.parse_args()

    if len(args) < 1:
        print("You must specify at least one ddd movie stack.")
        exit(1)

    pid = E2init(sys.argv)

    for fname in args:

        if not os.path.isfile(fname):
            print(("Sorry, {} does not exist".format(fname)))
            continue
        else:
            print(("Processing {}".format(fname)))

        hdr = EMData(fname, 0, True)
        if (old_div(hdr['nx'], options.boxsize) -
                1) < 2 or (old_div(hdr['ny'], options.boxsize) - 1) < 2:
            print("You will need to use a smaller box size with your data.")
            sys.exit(1)

        if options.gain or options.dark or options.gaink2:
            if options.verbose: print("Correcting frames")
            fname = DirectDetectorUtil.correct_frames(options, fname)

        incfile = "{}_incoherent_pws.hdf".format(fname[:-4])
        aligned = "{}_aligned.hdf".format(fname[:-4])
        average = aligned[:-4] + "_average.hdf"

        try:
            os.remove('{}_pairwise_coherent_pws.hdf'.format(fname[:-4]))
        except OSError:
            pass
        try:
            os.remove('{}_ccf.hdf'.format(fname[:-4]))
        except OSError:
            pass
        try:
            os.remove('{}_best_coherent_pws.hdf'.format(fname[:-4]))
        except OSError:
            pass
        try:
            os.remove('{}_pwcc.hdf'.format(fname[:-4]))
        except OSError:
            pass

        n = EMUtil.get_image_count(fname)
        pairs = [(i, j) for i in range(n) for j in range(i + 1, n)]
        npairs = len(pairs)

        if options.verbose: print(("Loading frames from {}".format(fname)))
        all_frames = []
        for i in range(n):
            print2line("\t{}/{}".format(i, n))
            f = EMData(fname, i)
            if options.fixbadlines:
                for line in options.xybadlines:
                    coords = list(map(int, line.split(',')))
                    f.process_inplace('math.xybadline', {
                        'xloc': coords[0],
                        'yloc': coords[1]
                    })
            if options.fixaxes:
                f.process_inplace('filter.xyaxes0', {
                    'neighbor': 1,
                    'neighbornorm': options.nnorm,
                    'x': 1,
                    'y': 1
                })
            all_frames.append(f)

        try:
            ok = EMUtil.is_same_size(EMData(options.boxsize, options.boxsize),
                                     EMData(incfile))
        except:
            ok = False
        if ok:
            if options.verbose: print("Loading incoherent power spectrum")
            if options.rotavg: ips = EMData(incfile, 1)  # 1: ROT-AVG
            else: ips = EMData(incfile, 0)  # 0: NOT ROT-AVG
        else:
            if options.verbose: print("Generating incoherent power spectrum")
            ips = incoherent_pws(all_frames, bs=options.boxsize)
            ips.do_ift().write_image(incfile, 0)
            ips.process_inplace('math.rotationalaverage')
            ips.do_ift().write_image(incfile, 1)
        #ips.process_inplace('filter.highpass.gauss',{'cutoff_pixels':1})

        ips_fit = fit_ctf(ips)

        if options.verbose: print("Generating coefficient matrix")
        A = gen_coeff_matrix(n)
        #print("A: ({} X {})".format(A.shape[0],A.shape[1]))

        if options.verbose: print("Computing ordinate vector components")
        bx = []
        by = []
        ccf_xs = []
        ccf_ys = []
        ctr = 0
        for i, (a, b) in enumerate(pairs):

            # UCSF
            print2line("{}/{}".format(str(i).rjust(3), npairs))
            ccf_x, ccf_y, ccf = ccf_ordinate(all_frames[a], all_frames[b], i,
                                             fname)
            ccf_xs.append(ccf_x)
            ccf_ys.append(ccf_y)
            ccf.write_image('{}_ccf.hdf'.format(fname[:-4]), ctr)

            # ALTERNATE
            pair = [all_frames[a], all_frames[b]]
            pc = PairwiseCoherence(fname, pair, ips_fit, bs=options.boxsize)
            x, y, pwcc, cps = pc.maximize(options.maxshift, options.step)
            pwcc.write_image('{}_pwcc.hdf'.format(fname[:-4]), ctr)

            # NEED TO CHECK: is x,y measured from the center, allowing for negative
            # translations? Or from the bottom left, so all translations are positive (or negative)?
            bx.append(x)
            by.append(y)
            ctr += 1

        b = np.concatenate([bx, by])
        #print("b: ({})".format(b.shape[0]))

        if options.verbose: print("\nOptimizing alignment")
        #results = np.linalg.lstsq(A,b)
        #r=results[0]
        #print("r: ({})".format(r.shape[0]))
        results_x = np.linalg.lstsq(A, bx)
        results_y = np.linalg.lstsq(A, by)
        rx = results_x[0]
        ry = results_y[0]

        print("\nFrame\tX\tY")
        avg = Averagers.get('mean')
        for i, (tx, ty) in enumerate(zip(rx, ry)):
            #tx = round(t[0],2)
            #ty = round(t[1],2)
            print(("{}/{}\t{}\t{}".format(str(i + 1).rjust(2), n, tx, ty)))
            f = EMData(fname, i)
            f.process_inplace(
                'xform',
                {'transform': Transform({
                    'type': 'eman',
                    'tx': tx,
                    'ty': ty
                })})
            avg.add_image(f)
            f.write_image(aligned, i)
        avg = avg.finish()
        avg.write_image(average, 0)

    return
Example #59
0
class Cauchy(with_metaclass(FunctionMeta, Function1D)):
    r"""
    description :

        The Cauchy distribution

    latex : $ K \frac{1}{ \gamma \pi} \left[ \frac{\gamma^2}{(x-x_0)^2 + \gamma^2}  \right] $

    parameters :

        K :

            desc : Integral between -inf and +inf. Fix this to 1 to obtain a Cauchy distribution
            initial value : 1

        x0 :

            desc : Central value
            initial value : 0.0

        gamma :

            desc : scale parameter (half width at half maximum)
            initial value : 1.0
            min : 1e-12

    tests :
        - { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
        - { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}

    """

    # Place this here to avoid recomputing it all the time

    __norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))

    def _setup(self):
        self._is_prior = True

    def _set_units(self, x_unit, y_unit):
        # The normalization is the integral from -inf to +inf, i.e., has dimensions of
        # y_unit * x_unit
        self.K.unit = y_unit * x_unit

        # The mu has the same dimensions as the x
        self.x0.unit = x_unit

        # sigma has the same dimensions as x
        self.gamma.unit = x_unit

    # noinspection PyPep8Naming
    def evaluate(self, x, K, x0, gamma):
        norm = old_div(1, (gamma * np.pi))

        gamma2 = gamma * gamma

        return K * norm * gamma2 / ((x - x0) * (x - x0) + gamma2)

    def from_unit_cube(self, x):
        """
        Used by multinest

        :param x: 0 < x < 1
        :param lower_bound:
        :param upper_bound:
        :return:
        """

        x0 = self.x0.value
        gamma = self.gamma.value

        half_pi = 1.57079632679

        res = np.tan(np.pi * x - half_pi) * gamma + x0

        return res
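
from_unit_cube above is the Cauchy quantile function, x0 + gamma * tan(pi * (x - 1/2)), so uniform samples in (0, 1) map to Cauchy-distributed values. A quick sanity check against scipy's reference quantile function (scipy is used here only for the comparison, not by the class itself):

import numpy as np
from scipy.stats import cauchy

x0, gamma = 2.0, 0.5
p = np.array([0.1, 0.5, 0.9])

# same expression as from_unit_cube: tan(pi*p - pi/2) * gamma + x0
ours = np.tan(np.pi * p - np.pi / 2.0) * gamma + x0

# scipy's Cauchy inverse CDF with the same location/scale
ref = cauchy.ppf(p, loc=x0, scale=gamma)
print(np.allclose(ours, ref))   # True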
Example #60
0
class Log_normal(with_metaclass(FunctionMeta, Function1D)):
    r"""
       description :

           A log normal function

       latex : $ F \frac{1}{ (x/piv)~(\sigma/piv) \sqrt{2 \pi}}\exp{\left(-\frac{(\log x/piv - \mu/piv)^2}{2~(\sigma/piv)^2}\right)} $

       parameters :

           F :
               desc : Integral between 0 and +inf. Fix this to 1 to obtain a log-normal distribution
               initial value : 1

           mu :

               desc : Central value
               initial value : 0.0

           sigma :

               desc : standard deviation
               initial value : 1.0
               min : 1e-12

           piv :
               desc : pivot. Leave this at 1 for a proper log-normal distribution
               initial value : 1.0
               fix : yes

       tests :
           - { x : 0.0, function value: 0.3989422804014327, tolerance: 1e-10}
           - { x : -1.0, function value: 0.24197072451914337, tolerance: 1e-9}

       """

    # Place this here to avoid recomputing it all the time

    __norm_const = old_div(1.0, (math.sqrt(2 * np.pi)))

    def _setup(self):

        self._is_prior = True

    def _set_units(self, x_unit, y_unit):

        # The normalization is the integral from -inf to +inf, i.e., has dimensions of
        # y_unit * x_unit
        self.F.unit = y_unit

        # The mu has the same dimensions as the x
        self.mu.unit = x_unit

        # The pivot has the same units as x
        self.piv.unit = x_unit

        # sigma has the same dimensions as x
        self.sigma.unit = x_unit

    # noinspection PyPep8Naming
    def evaluate(self, x, F, mu, sigma, piv):

        # The value * 0 is to keep the units right

        result = np.zeros(x.shape) * F * 0

        # The log normal is not defined if x < 0. The "0 * x" part is to conserve the units if
        # x has them, because 0 * x will be a Quantity with the same units as x
        idx = (x > 0 * x)

        result[idx] = F * self.__norm_const / (sigma / piv * x / piv) * np.exp(
            old_div(-np.power(np.log(old_div(x, piv)) - old_div(mu, piv), 2.),
                    (2 * np.power(old_div(sigma, piv), 2.))))

        return result

    def from_unit_cube(self, x):
        """
        Used by multinest

        :param x: 0 < x < 1
        :param lower_bound:
        :param upper_bound:
        :return:
        """

        mu = self.mu.value
        sigma = self.sigma.value

        sqrt_two = 1.414213562

        if x < 1e-16 or (1 - x) < 1e-16:

            res = -1e32

        else:

            res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))

        return np.exp(res)
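
Similarly, Log_normal.from_unit_cube is the log-normal inverse CDF written via erfcinv (taking piv = 1, its fixed default). A small check against scipy.stats.lognorm, which parameterizes the distribution with shape s = sigma and scale = exp(mu) (scipy again only serves as the reference here):

import numpy as np
from scipy.special import erfcinv
from scipy.stats import lognorm

mu, sigma = 0.3, 0.8
x = np.array([0.05, 0.5, 0.95])

# same expression as from_unit_cube (piv = 1): exp(mu + sigma*sqrt(2)*erfcinv(2*(1 - x)))
ours = np.exp(mu + sigma * np.sqrt(2.0) * erfcinv(2.0 * (1.0 - x)))

# scipy's log-normal inverse CDF with the matching parameterization
ref = lognorm.ppf(x, s=sigma, scale=np.exp(mu))
print(np.allclose(ours, ref))   # True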