Example #1
def point_inside_polygon(x,y,poly):
    # adapted from http://www.ariel.com.au/a/python-point-int-poly.html
    # by Patrick Jordan

    n = len(poly)
    inside = num.zeros(x.shape,dtype=bool)
    xinters = num.zeros(x.shape)

    p1x,p1y = poly[0]
    idx2 = num.zeros(x.shape,dtype=bool)
    for i in range(n+1):
        p2x,p2y = poly[i % n]
        idx = num.logical_and(y > min(p1y,p2y),
                              num.logical_and(y <= max(p1y,p2y),
                                              x <= max(p1x,p2x)))
        if p1y != p2y:
            xinters[idx] = (y[idx]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
        if p1x == p2x:
            inside[idx] = ~inside[idx]
        else:
            idx2[:] = False
            idx2[idx] = x[idx] <= xinters[idx]
            inside[idx2] = ~inside[idx2]
        p1x,p1y = p2x,p2y

    return inside
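A minimal usage sketch for the example above, on hypothetical data; it assumes num is NumPy imported under that alias and that x and y are equally shaped arrays:

import numpy as num

# Unit square, tested against four points (two inside, two outside).
poly = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
xs = num.array([0.5, 1.5, 0.25, -0.1])
ys = num.array([0.5, 0.5, 0.75, 0.5])
print(point_inside_polygon(xs, ys, poly))  # expected roughly [ True False  True False]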
Example #2
    def _unique_baselines(self):
        """Map of equivalent baseline lengths, and mask of ones to exclude.
        """
        # Construct array of indices
        fshape = [self.nfeed, self.nfeed]
        f_ind = np.indices(fshape)

        # Construct array of baseline separations in complex representation
        bl1 = (self.feedpositions[f_ind[0]] - self.feedpositions[f_ind[1]])
        bl2 = np.around(bl1[..., 0] + 1.0J * bl1[..., 1], self._bl_tol)

        # Flip sign if required to get common direction to correctly find redundant baselines
        #flip_sign = np.logical_or(bl2.real < 0.0, np.logical_and(bl2.real == 0, bl2.imag < 0))
        #bl2 = np.where(flip_sign, -bl2, bl2)

        # Construct array of baseline lengths
        blen = np.sum(bl1**2, axis=-1)**0.5

        # Create mask of included baselines
        mask = np.logical_and(blen >= self.minlength, blen <= self.maxlength)

        # Remove the auto correlated baselines between all polarisations
        if not self.auto_correlations:
            mask = np.logical_and(blen > 0.0, mask)

        return _remap_keyarray(bl2, mask), mask
Example #3
    def test4d(self):
        g = Graph()
        oper = OpThresholdOneLevel(graph=g)
        oper.MinSize.setValue(self.minSize)
        oper.MaxSize.setValue(self.maxSize)
        oper.Threshold.setValue(0.5)
        oper.InputImage.setValue(self.data)

        output = oper.Output[:].wait()
        assert numpy.all(output.shape == self.data.shape)

        clusters = self.generateData((self.nx, self.ny, self.nz))

        cluster1 = numpy.logical_and(output, clusters[0])
        assert numpy.any(cluster1 != 0)

        oper.MinSize.setValue(5)
        output = oper.Output[:].wait()
        cluster1 = numpy.logical_and(output, clusters[0])
        assert numpy.all(cluster1 == 0)

        cluster4 = numpy.logical_and(output.squeeze(), clusters[3])
        assert numpy.all(cluster4 == 0)

        cluster5 = numpy.logical_and(output.squeeze(), clusters[2])
        assert numpy.all(cluster5 == 0)
        oper.Threshold.setValue(0.2)
        output = oper.Output[:].wait()
        cluster5 = numpy.logical_and(output.squeeze(), clusters[2])
        assert numpy.any(cluster5 != 0)
Example #4
def calc_link_dis(base_side, side1, side2):
    # print base_side.shape, side1.shape, side2.shape
    ans = np.zeros_like(base_side, dtype=float)
    mask = np.ones_like(base_side, dtype=bool)

    #point on the link
    mask_on_line = np.logical_and(base_side == side1+side2, mask)
    mask = np.logical_xor(mask, mask_on_line)
    ans[mask_on_line] = 0

    #the adjaceny points on the link is overlapped
    mask_point = np.logical_and(base_side < 1e-10, mask)
    mask = np.logical_xor(mask, mask_point)
    ans[mask_point] = side1[mask_point]

    side1_sqr = side1 * side1
    side2_sqr = side2 * side2
    base_side_sqr = base_side * base_side

    #obtuse case 1
    mask_obtuse1 = np.logical_and(side1_sqr > base_side_sqr + side2_sqr, mask)
    mask = np.logical_xor(mask, mask_obtuse1)
    ans[mask_obtuse1] = side2[mask_obtuse1]

    #obtuse case 2
    mask_obtuse2 = np.logical_and(side2_sqr > base_side_sqr + side1_sqr, mask)
    mask = np.logical_xor(mask, mask_obtuse2)
    ans[mask_obtuse2] = side1[mask_obtuse2]

    #compute height by Heron's formula
    half_p = (base_side[mask] + side1[mask] + side2[mask]) * 0.5 # half perimeter
    area = np.sqrt(half_p * (half_p - side1[mask]) * (half_p - side2[mask]) * (half_p - base_side[mask]))
    ans[mask] = 2 * area / base_side[mask]
    return ans
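A quick sanity check of calc_link_dis on hypothetical side lengths: the point (3, 4) against a segment of length 6 whose endpoints are each 5 away from the point, so the expected point-to-segment distance is 4:

import numpy as np

base = np.array([6.0])   # length of the link (segment)
s1 = np.array([5.0])     # distance from the point to one endpoint
s2 = np.array([5.0])     # distance from the point to the other endpoint
print(calc_link_dis(base, s1, s2))  # expected ~[4.]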
Example #5
def events_in_polygon(locations, source_zone, flag_vector=None,
    upper_depth=None, lower_depth=None):
    '''Function to identify valid events inside a polygon
    :param locations: hypocentre locations as cartesian (x, y, z) coordinates
    :type locations: numpy.ndarray
    :param source_zone: source zone polygon in xyz format
    :type source_zone: numpy.ndarray
    
    '''
    neq = np.shape(locations)[0]
    if isinstance(flag_vector, np.ndarray):
        '''A flag vector is input'''
        if len(flag_vector) != neq:
            raise ValueError(
                'Flag vector length is not equal to number of events')
    else:
        '''A flag vector is needed'''
        flag_vector = np.zeros(neq, dtype = int)

    valid_id = flag_vector == 0
    if upper_depth:
        valid_id = np.logical_and(valid_id, locations[:, -1] <= upper_depth)
    
    if lower_depth:
        valid_id = np.logical_and(valid_id, locations[:, -1] >= lower_depth)
    
    valid_id[valid_id] = points_in_poly(locations[valid_id, :-1], 
                                source_zone)
    
    #if not(np.all(valid_id)):
    flag_vector[np.logical_not(valid_id)] = 1
    return flag_vector
Example #6
def genGraph(S_actual, S_est, S_previous, empCov_set, nodeID, e1, e2, e3, e4, display = False):
    D = np.where(S_est != 0)[0].shape[0]
    T = np.where(S_actual != 0)[0].shape[0]
    TandD = float(np.where(np.logical_and(S_actual,S_est) == True)[0].shape[0])
    P = TandD/D
    R = TandD/T
    offDiagDiff = S_actual - S_est
    offDiagDiff = offDiagDiff - np.diag(np.diag(offDiagDiff))
    S_diff = (S_est - S_previous)  
    S_diff = S_diff - np.diag(np.diag(S_diff))
    ind = (S_diff < 1e-2) & (S_diff > - 1e-2)
    S_diff[ind] = 0    
    K = np.count_nonzero(S_diff)
    e1.append( alg.norm(offDiagDiff, 'fro'))
    e2.append(2* P*R/(P+R))
    
    
    K = float(np.where(np.logical_and((S_est>0) != (S_previous>0), S_est>0) == True)[0].shape[0])
    e3.append(-np.log(alg.det(S_est)) + np.trace(np.dot(S_est, empCov_set[nodeID])) + K)
    e4.append(alg.norm(S_est -  S_previous, 'fro'))
    
    display = False
    if display == True:
        if (nodeID >timeShift -10) and (nodeID < timeShift + 10):
            print 'nodeID = ', nodeID
            print 'S_true = ', S_actual,'\nS_est', S_est
#            print 'S_error = ',S_actual - S_est, '\n its Fro error = ', alg.norm(S_actual - S_est, 'fro')
            print 'D = ',D,'T = ', T,'TandD = ', TandD,'K = ', K,'P = ', P,'R = ', R,'Score = ', 2* P*R/(P+R)
            
    return e1, e2, e3, e4
Example #7
def generate_sky_model_alms(fits_file,lmax=10):
    # http://healpy.readthedocs.org/en/latest/generated/healpy.sphtfunc.map2alm.html#healpy.sphtfunc.map2alm
    healmap = a.map.Map(fromfits=fits_file)
    as_pos = hp.sphtfunc.map2alm(healmap.map.map, lmax=lmax, pol=False)
    alms_pos = n.zeros([as_pos.shape[0],3],dtype='complex')
    #print alms_pos.shape
    kk=0
    for ll in range(lmax+1):
        for mm in range(0,ll+1):
            alms_pos[kk] = n.array([ll,mm,as_pos[kk]])
            kk+=1
    #print alms_pos
    alms = n.zeros([(lmax+1)**2,3],dtype='complex')
    kk=0
    for ll in range(lmax+1):
        for mm in range(-ll,ll+1):
            if mm<0:
                alm = alms_pos[n.where(n.logical_and(alms_pos[:,0]==ll, alms_pos[:,1]==-mm)),2]
                #print 'less',ll,mm,alm
                alms[kk] = n.array([ll,mm,n.conj(alm[0,0])])
            else:
                alm = alms_pos[n.where(n.logical_and(alms_pos[:,0]==ll, alms_pos[:,1]==mm)),2]
                #print 'greater ',ll,mm,alm 
                alms[kk] = n.array([ll,mm,alm[0,0]])
            kk+=1
    return alms 
Example #8
def binary_hit_or_miss(input, structure1 = None, structure2 = None,
                       output = None, origin1 = 0, origin2 = None):
    """Multi-dimensional binary hit-or-miss transform.

    An output array can optionally be provided. The origin parameters
    control the placement of the structuring elements. If the first
    structuring element is not given, one is generated with a squared
    connectivity equal to one. If the second structuring element is
    not provided, it is set equal to the inverse of the first structuring
    element. If the origin for the second structure is None,
    it is set equal to the origin of the first.
    """
    input = numpy.asarray(input)
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = numpy.logical_not(structure1)
    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _ni_support._normalize_sequence(origin2, input.ndim)

    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
                           0, False)
    inplace = isinstance(output, numpy.ndarray)
    result = _binary_erosion(input, structure2, 1, None, output, 0,
                             origin2, 1, False)
    if inplace:
        numpy.logical_not(output, output)
        numpy.logical_and(tmp1, output, output)
    else:
        numpy.logical_not(result, result)
        return numpy.logical_and(tmp1, result)
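The function above mirrors SciPy's internal implementation (it relies on the private _binary_erosion and _ni_support helpers); the public scipy.ndimage.binary_hit_or_miss exposes the same interface. A small sketch, locating isolated foreground pixels:

import numpy as np
from scipy import ndimage

# structure1 matches only the centre pixel; structure2 defaults to its logical
# complement, so a hit additionally requires every 8-neighbour to be background.
img = np.zeros((7, 7), dtype=bool)
img[1, 1] = True               # isolated pixel -> should be reported
img[3, 3] = img[3, 4] = True   # adjacent pixels -> should not

centre_only = np.zeros((3, 3), dtype=bool)
centre_only[1, 1] = True
hits = ndimage.binary_hit_or_miss(img, structure1=centre_only)
print(np.argwhere(hits))       # expected [[1 1]]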
Example #9
 def evaluate(x, y, amplitude, x_0, y_0):
     """Two dimensional delta model function"""
     dx = x - x_0
     dy = y - y_0
     x_mask = np.logical_and(dx > -0.5, dx <= 0.5)
     y_mask = np.logical_and(dy > -0.5, dy <= 0.5)
     return np.select([np.logical_and(x_mask, y_mask)], [amplitude])
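A minimal check of the delta model above, assuming evaluate is reachable as a plain function (in its original context it is a model class's static evaluate method):

import numpy as np

yy, xx = np.indices((5, 5))
out = evaluate(xx, yy, amplitude=3.0, x_0=2.2, y_0=1.8)
print(np.argwhere(out == 3.0))  # expected [[2 2]]: only the cell containing (x_0, y_0)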
Example #10
    def __iter__(self):
        
        MAX_X,MAX_Y = self.dimensions
        MIN_V, MAX_V = self.velocity
        
        wt_min = 0.
        
        if self.init_stationary:

            x, y, x_waypoint, y_waypoint, velocity, wt = \
                init_random_waypoint(self.nr_nodes, MAX_X, MAX_Y, MIN_V, MAX_V, wt_min, 
                             (self.wt_max if self.wt_max is not None else 0.))

        else:

            NODES = np.arange(self.nr_nodes)
            print NODES
            x = U(0, MAX_X, NODES)
            y = U(0, MAX_Y, NODES)
            x_waypoint = U(0, MAX_X, NODES)
            y_waypoint = U(0, MAX_Y, NODES)
            wt = np.zeros(self.nr_nodes)
            velocity = U(MIN_V, MAX_V, NODES)

        theta = np.arctan2(y_waypoint - y, x_waypoint - x)
        costheta = np.cos(theta)
        sintheta = np.sin(theta)
        
        while True:
            # update node position
            x += velocity * costheta
            y += velocity * sintheta
            # calculate distance to waypoint
            d = np.sqrt(np.square(y_waypoint-y) + np.square(x_waypoint-x))
            # update info for arrived nodes
            arrived = np.where(np.logical_and(d<=velocity, wt<=0.))[0]
            
            # step back for nodes that surpassed waypoint
            x[arrived] = x_waypoint[arrived]
            y[arrived] = y_waypoint[arrived]
            
            if self.wt_max:
                velocity[arrived] = 0.
                wt[arrived] = U(0, self.wt_max, arrived)
                # update info for paused nodes
                wt[np.where(velocity==0.)[0]] -= 1.
                # update info for moving nodes
                arrived = np.where(np.logical_and(velocity==0., wt<0.))[0]
            
            if arrived.size > 0:
                x_waypoint[arrived] = U(0, MAX_X, arrived)
                y_waypoint[arrived] = U(0, MAX_Y, arrived)
                velocity[arrived] = U(MIN_V, MAX_V, arrived)
                theta[arrived] = np.arctan2(y_waypoint[arrived] - y[arrived], x_waypoint[arrived] - x[arrived])
                costheta[arrived] = np.cos(theta[arrived])
                sintheta[arrived] = np.sin(theta[arrived])
            
            self.velocity = velocity
            self.wt = wt
            yield np.dstack((x,y))[0]
Example #11
def doCombinations(stats, varname, data, amoeboids, mesenchymals, successful, function=np.mean, axis=None):
    """Apply a given function to all elements of a given array, and do so for all desired combinations.
    The combinations are for example "all agents that are amoeboid and were successful", which would be denoted by `_a_s`.
    Note that some of these results may make little sense because too few agents fit the criteria.
    """
    a_s = np.logical_and(amoeboids, successful)
    m_s = np.logical_and(mesenchymals, successful)
    a_us = np.logical_and(amoeboids, ~successful)
    m_us = np.logical_and(mesenchymals, ~successful)
    combinations = {
                    "" : np.ones_like(data, dtype=np.bool_),
                    "_a" : amoeboids,
                    "_m" : mesenchymals,
                    "_a_s" : a_s,
                    "_m_s" : m_s,
                    "_a_us" : a_us,
                    "_m_us" : m_us,
                    }
    for suffix, selector in combinations.iteritems():
        if axis is None:
            myselector = np.s_[selector]
        elif axis==1:
            myselector = np.s_[:,selector]
        value = function( data[myselector] ) if selector.any() else None
        if value is not None and type(value)!=float:
            value = float(value)
        stats[varname + suffix] = value 
    return stats
Example #12
def plot_subplots(offsets, tag, n, window):
  fig, axarr = plt.subplots(4, sharex=True, **dict(figsize=(12,12)))
  sel1 = np.logical_and(offsets['mchirp'] > 0.0, offsets['mchirp'] <= 5.0)
  sel2 = np.logical_and(offsets['mchirp'] > 5.0, offsets['mchirp'] <= 10.0)
  sel3 = np.logical_and(offsets['mchirp'] > 10.0, offsets['mchirp'] <= 15.0)
  sel4 = np.logical_and(offsets['mchirp'] > 15.0, offsets['mchirp'] <= 100.0)
  
  sel = [sel1, sel2, sel3, sel4]
  labels = [r'$\mathcal{M}\ \leq\ 5M_{\odot}$', r'$5M_{\odot}\ <\ \mathcal{M}\ \leq\ 10M_{\odot}$'\
      ,r'$10M_{\odot}\ <\ \mathcal{M}\ \leq\ 15M_{\odot}$', r'$\mathcal{M}\ >\ 15M_{\odot}$']
  
  hist_min = np.min(offsets['offset'][sel[0]])
  hist_max = np.max(offsets['offset'][sel[0]])
  
  for i in xrange(len(axarr)):
    if not np.any(sel[i]):
      continue
    axarr[i].hist(offsets['offset'][sel[i]], histtype='step', bins=n,\
        range=[hist_min, hist_max], label=labels[i])
    axarr[i].set_yscale('log', nonposy='clip')
    axarr[i].set_xlim(-window/2, window/2)
    axarr[i].set_ylabel(r'N')
    axarr[i].grid(True)
    axarr[i].legend(loc="upper left", bbox_to_anchor=(1,1))

  axarr[3].set_xlabel(r'Offset  [Sec]')
  fig.subplots_adjust(hspace=0.5)
  plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)

  axarr[0].set_title( tag)
  plt.savefig(tag + '_subplotshistogram.png', bbox_inches='tight')
  #, bbox_extra_artists=(lgd,), bbox_inches='tight')
  plt.close()
Example #13
    def set_boundary_pixels(self, value=0.0, n_pixels=1):
        r"""
        Returns a copy of this :map:`MaskedImage` for which n pixels along
        its mask boundary have been set to a particular value. This is
        useful in situations where there is absent data in the image which
        can cause, for example, erroneous computations of gradient or features.

        Parameters
        ----------
        value : float or (n_channels, 1) ndarray
        n_pixels : int, optional
            The number of pixels along the mask boundary that will be set
            to the given value.

        Returns
        -------
         : :map:`MaskedImage`
            The copy of the image for which the n pixels along its mask
            boundary have been set to a particular value.
        """
        global binary_erosion
        if binary_erosion is None:
            from scipy.ndimage import binary_erosion  # expensive
        # Erode the edge of the mask in by one pixel
        eroded_mask = binary_erosion(self.mask.mask, iterations=n_pixels)

        # replace the eroded mask with the diff between the two
        # masks. This is only true in the region we want to nullify.
        np.logical_and(~eroded_mask, self.mask.mask, out=eroded_mask)
        # set all the boundary pixels to a particular value
        self.pixels[..., eroded_mask] = value
Example #14
def computeBinnedRates(
        spk_bins, times, step_size=5, bin_size=50, direction='forward'):

    if direction == 'forward':
        time_windows = np.c_[
            np.arange(times[0], times[-1] - bin_size + 1, step_size),
            np.arange(times[0], times[-1] - bin_size + 1,
                      step_size) + bin_size]
    elif direction == 'backward':
        time_windows = np.c_[
            np.arange(times[-1], times[0] + bin_size - 1,
                      -step_size) - bin_size,
            np.arange(times[-1], times[0] + bin_size - 1, -step_size)]

    n_trials = spk_bins.shape[0]
    n_windows = time_windows.shape[0]

    spk_counts = np.zeros([n_trials, n_windows])
    spk_counts[:] = np.nan

    for win_num in range(n_windows):
        if direction == 'forward':
            spk_counts[:, win_num] = np.sum(
                spk_bins[:, np.logical_and(
                    times >= time_windows[win_num][0],
                    times < time_windows[win_num][1])], axis=1)
            bin_centers = np.mean(time_windows, axis=1)
        elif direction == 'backward':
            spk_counts[:, -(win_num + 1)] = np.sum(
                spk_bins[:, np.logical_and(
                    times >= time_windows[win_num][0],
                    times < time_windows[win_num][1])], axis=1)
            bin_centers = np.mean(time_windows, axis=1)[::-1]
    spk_rates = spk_counts / bin_size * 1000
    return spk_rates, bin_centers
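A small synthetic check of the binning above, using hypothetical spike trains sampled once per millisecond:

import numpy as np

times = np.arange(200)            # 0..199 ms
spk_bins = np.zeros((2, 200))
spk_bins[0, ::10] = 1             # trial 0: a spike every 10 ms -> 100 Hz
spk_bins[1, ::20] = 1             # trial 1: a spike every 20 ms -> 60 Hz in the first window
rates, centers = computeBinnedRates(spk_bins, times, step_size=5, bin_size=50)
print(rates.shape, rates[:, 0], centers[0])  # expected roughly (2, 30) [100. 60.] 25.0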
Example #15
  def check_cross_amplification(self, max_dist):
    pop_set = set()
    for pos in self.pairs_pos:
      p1,p2 = pos

      c1,a1,s1 = zip(*self.alignments_dict[p1])
      c2,a2,s2 = zip(*self.alignments_dict[p2])
     
      chrs_x, chrs_y = na.meshgrid(c1, c2)
      chrs_eq = chrs_x==chrs_y
    
      orient_x,orient_y = na.meshgrid(s1, s2)
      orient_fr = orient_x==na.logical_not(orient_y)
    
      pos_x,pos_y = na.meshgrid(a1, a2)
    
      # Filter by minimum distance.
      # And if orientation is forward/reverse
      # Amps is [Alignments i X Alignments j ]
      amps = na.logical_and(na.logical_and(na.abs(pos_x-pos_y)<max_dist, 
                      na.logical_and(orient_fr, pos_x<pos_y)), # Forward reverse if one position is less than the other and is forward, while the other is reverse              
              chrs_eq)
      number_of_amps = na.sum(amps)
      #assert number_of_amps!=0
      if number_of_amps>1:
        pop_set.add(pos)
        
    self.pairs_pos.difference_update(pop_set)
Example #16
def handle_movement(message, gridProb):
    updateHeadingBelief(message.rotation1, gridProb)
    print 'aftr rot1:',np.where(np.logical_and(gridProb == gridProb.max(), gridProb.max() > 0))
    updatePositionBelief(message.translation, gridProb)
    print 'aftr trans:',np.where(np.logical_and(gridProb == gridProb.max(), gridProb.max() > 0))
    updateHeadingBelief(message.rotation2, gridProb)
    print 'aftr rot2:',np.where(np.logical_and(gridProb == gridProb.max(), gridProb.max() > 0))
Example #17
def count_edges_within_band(a, b, band=3, rising=True):
    '''
    Counts the number of rising (or falling) edges that match, within a sample band
    
    Params
    -------
    @param a, b: Arrays that will be compared
    @type a, b: Boolean array 
    @param band: The number of samples of tolerance
    @type band: float
    @param rising: Specify rising or falling edge
    @type rising: boolean 
    
    Returns
    -------
    @return: Count of matching edges, total true rising (or falling) edges
    @rtype: int
    '''
    if rising:
        a = np.r_[a[0], np.diff(a)]>0
        b = np.r_[b[0], np.diff(b)]>0
    else:
        a = np.r_[a[0], np.diff(a)]<0
        b = np.r_[b[0], np.diff(b)]<0

    total_edges = sum(a)
    result = np.logical_and(a, b)
    for offset in range(1, int(band) + 1):  # honour the band tolerance (was hard-coded to 3 samples)
        posoff = np.r_[[0]*offset, np.logical_and(a[:-offset], b[offset:])]
        negoff = np.r_[np.logical_and(a[offset:], b[:-offset]), [0]*offset]
        result = np.logical_or(result, posoff)
        result = np.logical_or(result, negoff)

    return sum(result), total_edges
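A short usage sketch with hypothetical 0/1 signals; integer arrays are used because np.diff on boolean arrays raises a TypeError on recent NumPy versions:

import numpy as np

a = np.array([0, 0, 1, 1, 0, 0, 1, 1, 0])
b = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0])   # rising edges lead a's by one sample
matched, total = count_edges_within_band(a, b, band=3, rising=True)
print(matched, total)   # expected 2 2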
Example #18
    def cartesian_square_centred_on_point(self, point, distance, **kwargs):
        '''
        Select earthquakes from within a square centered on a point

        :param point:
            Centre point as instance of nhlib.geo.point.Point class

        :param distance:
            Distance (km)

        :returns:
            Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
            class containing only selected events
        '''
        point_surface = Point(point.longitude, point.latitude, 0.)
        # Project the centre point by the given distance towards each cardinal direction
        north_point = point_surface.point_at(distance, 0., 0.)
        east_point = point_surface.point_at(distance, 0., 90.)
        south_point = point_surface.point_at(distance, 0., 180.)
        west_point = point_surface.point_at(distance, 0., 270.)
        is_long = np.logical_and(
            self.catalogue.data['longitude'] >= west_point.longitude,
            self.catalogue.data['longitude'] < east_point.longitude)
        is_surface = np.logical_and(
            is_long,
            np.logical_and(
                self.catalogue.data['latitude'] >= south_point.latitude,
                self.catalogue.data['latitude'] < north_point.latitude))

        upper_depth, lower_depth = _check_depth_limits(kwargs)
        is_valid = np.logical_and(
            is_surface,
            np.logical_and(
                self.catalogue.data['depth'] >= upper_depth,
                self.catalogue.data['depth'] < lower_depth))

        return self.select_catalogue(is_valid)
Example #19
    def within_joyner_boore_distance(self, surface, distance, **kwargs):
        '''
        Select events within a Joyner-Boore distance of a fault

        :param surface:
            Fault surface as instance of
            nhlib.geo.surface.base.SimpleFaultSurface  or as instance of
            nhlib.geo.surface.ComplexFaultSurface

        :param float distance:
            Rupture distance (km)

        :returns:
            Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
            containing only selected events
        '''

        upper_depth, lower_depth = _check_depth_limits(kwargs)

        rjb = surface.get_joyner_boore_distance(
            self.catalogue.hypocentres_as_mesh())
        is_valid = np.logical_and(
            rjb <= distance,
            np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                           self.catalogue.data['depth'] < lower_depth))
        return self.select_catalogue(is_valid)
Example #20
    def close_gripper(self, lr, step_viewer=1, max_vel=.02, close_dist_thresh=0.004, grab_dist_thresh=0.005):
        print 'CLOSING GRIPPER'
        # generate gripper finger trajectory
        joint_ind = self.robot.GetJoint("%s_gripper_l_finger_joint" % lr).GetDOFIndex()
        start_val = self.robot.GetDOFValues([joint_ind])[0]
        print 'start_val: ', start_val
        # execute gripper finger trajectory
        dyn_bt_objs = [bt_obj for sim_obj in self.dyn_sim_objs for bt_obj in sim_obj.get_bullet_objects()]
        next_val = start_val
        while next_val:
            flr2finger_pts_grid = self._get_finger_pts_grid(lr)
            ray_froms, ray_tos = flr2finger_pts_grid['l'], flr2finger_pts_grid['r']

            # stop closing if any ray hits a dynamic object within a distance of close_dist_thresh from both sides
            next_vel = max_vel
            for bt_obj in dyn_bt_objs:
                from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
                to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
                rays_dists = np.inf * np.ones((len(ray_froms), 2))
                for rc in from_to_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_froms - rc.rayFrom))
                    rays_dists[ray_id, 0] = np.linalg.norm(rc.pt - rc.rayFrom)
                for rc in to_from_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_tos - rc.rayFrom))
                    rays_dists[ray_id, 1] = np.linalg.norm(rc.pt - rc.rayFrom)
                colliding_rays_inds = np.logical_and(rays_dists[:, 0] != np.inf, rays_dists[:, 1] != np.inf)
                if np.any(colliding_rays_inds):
                    rays_dists = rays_dists[colliding_rays_inds, :]
                    if np.any(np.logical_and(rays_dists[:, 0] < close_dist_thresh,
                                             rays_dists[:, 1] < close_dist_thresh)):
                        next_vel = 0
                    else:
                        next_vel = np.minimum(next_vel, np.min(rays_dists.sum(axis=1)))
            if next_vel == 0:
                break
            next_val = np.maximum(next_val - next_vel, 0)

            self.robot.SetDOFValues([next_val], [joint_ind])
            self.step()
            if self.viewer and step_viewer:
                self.viewer.Step()
        handles = []
        # add constraints at the points where a ray hits a dynamic link within a distance of grab_dist_thresh
        for bt_obj in dyn_bt_objs:
            from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
            to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
            
            for i in range(ray_froms.shape[0]):
                self.viewer.Step()
            ray_collisions = [rc for rcs in [from_to_ray_collisions, to_from_ray_collisions] for rc in rcs]

            for rc in ray_collisions:
                if rc.link == bt_obj.GetKinBody().GetLink('rope_59'):
                    self.viewer.Step()
                if np.linalg.norm(rc.pt - rc.rayFrom) < grab_dist_thresh:
                    link_tf = rc.link.GetTransform()
                    link_tf[:3, 3] = rc.pt
                    self._add_constraints(lr, rc.link, link_tf)
        if self.viewer and step_viewer:
            self.viewer.Step()
Example #21
    def within_polygon(self, polygon, distance=None, **kwargs):
        '''
        Select earthquakes within polygon

        :param polygon:
            Centre point as instance of nhlib.geo.polygon.Polygon class

        :param float distance:
            Buffer distance (km) (can take negative values)

        :returns:
            Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
            containing only selected events
        '''

        if distance:
            # If a distance is specified then dilate the polygon by that distance
            zone_polygon = polygon.dilate(distance)
        else:
            zone_polygon = polygon

        # Make valid all events inside depth range
        upper_depth, lower_depth = _check_depth_limits(kwargs)
        valid_depth = np.logical_and(
            self.catalogue.data['depth'] >= upper_depth,
            self.catalogue.data['depth'] < lower_depth)

        # Events outside polygon returned to invalid assignment
        catalogue_mesh = Mesh(self.catalogue.data['longitude'],
                              self.catalogue.data['latitude'],
                              self.catalogue.data['depth'])
        valid_id = np.logical_and(valid_depth,
                                  zone_polygon.intersects(catalogue_mesh))

        return self.select_catalogue(valid_id)
Example #22
def filter_params(io, rsh, rs, ee, isc):
    # Function filter_params identifies bad parameter sets. A bad set contains
    # Nan, non-positive or imaginary values for parameters; Rs > Rsh; or data
    # where effective irradiance Ee differs by more than 5% from a linear fit
    # to Isc vs. Ee

    badrsh = np.logical_or(rsh < 0., np.isnan(rsh))
    negrs = rs < 0.
    badrs = np.logical_or(rs > rsh, np.isnan(rs))
    imagrs = ~(np.isreal(rs))
    badio = np.logical_or(~(np.isreal(io)), io <= 0)
    goodr = np.logical_and(~badrsh, ~imagrs)
    goodr = np.logical_and(goodr, ~negrs)
    goodr = np.logical_and(goodr, ~badrs)
    goodr = np.logical_and(goodr, ~badio)

    matrix = np.vstack((ee / 1000., np.zeros(len(ee)))).T
    eff = np.linalg.lstsq(matrix, isc)[0][0]
    pisc = eff * ee / 1000
    pisc_error = np.abs(pisc - isc) / isc
    # check for departure from linear relation between Isc and Ee
    badiph = pisc_error > .05

    u = np.logical_and(goodr, ~badiph)
    return u
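A toy run of the filter above on hypothetical parameter sets: the second set has Rs > Rsh and the third a non-positive Io, so only the first and last should pass:

import numpy as np

io = np.array([1e-9, 1e-9, -1e-9, 1e-9])
rsh = np.array([300., 5., 300., 250.])
rs = np.array([0.5, 10., 0.5, 0.4])
ee = np.array([1000., 800., 600., 400.])
isc = np.array([6.0, 4.8, 3.6, 2.4])   # exactly linear in Ee
print(filter_params(io, rsh, rs, ee, isc))  # expected [ True False False  True]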
Example #23
def calc_India_Burma_througt(Field,I_Year):
    '''
    Compute the Siberian High
    '''
    print(np.shape(Field))
    lons = np.arange(0, 360, 2.5, dtype=float)
    lats = np.arange(90, -90 - 1, -2.5, dtype=float)

    lat1 = np.where(lats <= 20,True,False)
    lat2 = np.where(lats >= 15,True,False)
    lat = np.logical_and(lat1,lat2)

    lon1 = np.where(lons <= 100,True,False)
    lon2 = np.where(lons >= 80,True,False)
    lon = np.logical_and(lon1,lon2)
    print(lat.shape,lon.shape)

    Field2 = Field[:,:,lon]
    Field2 = Field2[:,lat,:]
    n1 = Field2.shape
    Field2 = Field2.reshape(n1[0],-1)
    Field2=np.mean(Field2,axis=1)
    Field2,a =dclim.mapstd(Field2)
    Field2  = Field2.flatten()
    print('c1=',Field2.shape)
    print('c2=',np.array(I_Year).shape)

    return Field2,np.array(I_Year)
Example #24
    def refine_Hessian(self, kpx, kpy, kps):
        """
        Refine the keypoint location based on a 3 point derivative, and delete
        non-coherent keypoints.

        :param kpx: x_pos of keypoint
        :param kpy: y_pos of keypoint
        :param kps: s_pos of keypoint
        :return: arrays of corrected coordinates of keypoints, values and
            locations of keypoints
        """
        curr = self.dogs[(kps, kpy, kpx)]
        nx = self.dogs[(kps, kpy, kpx + 1)]
        px = self.dogs[(kps, kpy, kpx - 1)]
        ny = self.dogs[(kps, kpy + 1, kpx)]
        py = self.dogs[(kps, kpy - 1, kpx)]
        ns = self.dogs[(kps + 1, kpy, kpx)]
        ps = self.dogs[(kps - 1, kpy, kpx)]

        nxny = self.dogs[(kps, kpy + 1, kpx + 1)]
        nxpy = self.dogs[(kps, kpy - 1, kpx + 1)]
        pxny = self.dogs[(kps, kpy + 1, kpx - 1)]
        pxpy = self.dogs[(kps, kpy - 1, kpx - 1)]

        nsny = self.dogs[(kps + 1, kpy + 1, kpx)]
        nspy = self.dogs[(kps + 1, kpy - 1, kpx)]
        psny = self.dogs[(kps - 1, kpy + 1, kpx)]
        pspy = self.dogs[(kps - 1, kpy - 1, kpx)]

        nxns = self.dogs[(kps + 1, kpy, kpx + 1)]
        nxps = self.dogs[(kps - 1, kpy, kpx + 1)]
        pxns = self.dogs[(kps + 1, kpy, kpx - 1)]
        pxps = self.dogs[(kps - 1, kpy, kpx - 1)]

        dx = (nx - px) / 2.0
        dy = (ny - py) / 2.0
        ds = (ns - ps) / 2.0
        dxx = (nx - 2.0 * curr + px)
        dyy = (ny - 2.0 * curr + py)
        dss = (ns - 2.0 * curr + ps)
        dxy = (nxny - nxpy - pxny + pxpy) / 4.0
        dxs = (nxns - nxps - pxns + pxps) / 4.0
        dsy = (nsny - nspy - psny + pspy) / 4.0
        det = -(dxs * dyy * dxs) + dsy * dxy * dxs + dxs * dsy * dxy - dss * dxy * dxy - dsy * dsy * dxx + dss * dyy * dxx
        K00 = dyy * dxx - dxy * dxy
        K01 = dxs * dxy - dsy * dxx
        K02 = dsy * dxy - dxs * dyy
        K10 = dxy * dxs - dsy * dxx
        K11 = dss * dxx - dxs * dxs
        K12 = dxs * dsy - dss * dxy
        K20 = dsy * dxy - dyy * dxs
        K21 = dsy * dxs - dss * dxy
        K22 = dss * dyy - dsy * dsy

        delta_s = -(ds * K00 + dy * K01 + dx * K02) / det
        delta_y = -(ds * K10 + dy * K11 + dx * K12) / det
        delta_x = -(ds * K20 + dy * K21 + dx * K22) / det
        peakval = curr + 0.5 * (delta_s * ds + delta_y * dy + delta_x * dx)
        mask = numpy.logical_and(numpy.logical_and(abs(delta_x) < self.tresh, abs(delta_y) < self.tresh), abs(delta_s) < self.tresh)
        return kpx + delta_x, kpy + delta_y, kps + delta_s, peakval, mask
Example #25
 def _call_joint_genotypes(self, data, genotypes):
     normal = genotypes['normal']
     tumour = genotypes['tumour']
             
     normal_aa = (normal == 0)
     normal_ab = (normal == 1)
     normal_bb = (normal == 2)
         
     normal_var = np.logical_or(normal_ab, normal_bb)
     
     tumour_aa = (tumour == 0)
     tumour_ab = (tumour == 1)
     tumour_bb = (tumour == 2)
     
     tumour_var = np.logical_or(tumour_ab, tumour_bb)
     tumour_hom = np.logical_or(tumour_aa, tumour_bb)  # homozygous means aa or bb
     
     reference = np.logical_and(normal_aa, tumour_aa)
     germline = np.logical_and(normal_var, tumour_var)
     somatic = np.logical_and(normal_aa, tumour_var)
     loh = np.logical_and(normal_ab, tumour_hom)
     
     
     n = normal_aa.size
     joint_genotypes = 4 * np.ones((n,))
     
     joint_genotypes[reference] = 0
     joint_genotypes[germline] = 1
     joint_genotypes[somatic] = 2
     joint_genotypes[loh] = 3
     
     return joint_genotypes
Example #26
  def _compute_health_pill(self, x):
    x_clean = x[np.where(
        np.logical_and(
            np.logical_not(np.isnan(x)), np.logical_not(np.isinf(x))))]
    if np.size(x_clean):
      x_min = np.min(x_clean)
      x_max = np.max(x_clean)
      x_mean = np.mean(x_clean)
      x_var = np.var(x_clean)
    else:
      x_min = np.inf
      x_max = -np.inf
      x_mean = np.nan
      x_var = np.nan

    return np.array([
        1.0,  # Assume is initialized.
        np.size(x),
        np.sum(np.isnan(x)),
        np.sum(x == -np.inf),
        np.sum(np.logical_and(x < 0.0, x != -np.inf)),
        np.sum(x == 0.0),
        np.sum(np.logical_and(x > 0.0, x != np.inf)),
        np.sum(x == np.inf),
        x_min,
        x_max,
        x_mean,
        x_var,
        float(tf.as_dtype(x.dtype).as_datatype_enum),
        float(len(x.shape)),
    ] + list(x.shape))
Example #27
def analyzeResult(x, accuracy, perturbAt=10000, movingAvg=True, smooth=True):
  if movingAvg:
    accuracy = movingAverage(accuracy, min(len(accuracy), 100))

  x = np.array(x)
  accuracy = np.array(accuracy)
  if smooth:
    # perform smoothing convolution
    mask = np.ones(shape=(100,))
    mask = mask/np.sum(mask)
    # extend accuracy vector to eliminate boundary effect of convolution
    accuracy = np.concatenate((accuracy, np.ones((200, ))*accuracy[-1]))
    accuracy = np.convolve(accuracy, mask, 'same')
    accuracy = accuracy[:len(x)]


  perturbAtX = np.where(x > perturbAt)[0][0]

  finalAccuracy = accuracy[perturbAtX-len(mask)/2]
  learnTime = min(np.where(np.logical_and(accuracy > finalAccuracy * 0.99,
                                          x < x[perturbAtX - len(mask)/2-1]))[0])
  learnTime = x[learnTime]

  finalAccuracyAfterPerturbation = accuracy[-1]
  learnTimeAfterPerturbation = min(np.where(
    np.logical_and(accuracy > finalAccuracyAfterPerturbation * 0.99,
                   x > x[perturbAtX + len(mask)]))[0])

  learnTimeAfterPerturbation = x[learnTimeAfterPerturbation] - perturbAt

  result = {"finalAccuracy": finalAccuracy,
            "learnTime": learnTime,
            "finalAccuracyAfterPerturbation": finalAccuracyAfterPerturbation,
            "learnTimeAfterPerturbation": learnTimeAfterPerturbation}
  return result
Example #28
def makeValueGridzWithMask(x, y, values, prj_path):
    from .parameter import get_param_value
    xi, yi = np.linspace(min(x), max(x), 200), np.linspace(min(y), max(y), 200)
    grid_x, grid_y = np.meshgrid(xi, yi)
    grid_z = scipy.interpolate.griddata((x, y), values, (grid_x, grid_y), method='linear')

    # structure = getParamValue('structure', prj_path)
    tunnel_thick = float(get_param_value('tc.tunnel.thick', prj_path))
    trap_thick = float(get_param_value('tc.trap.thick', prj_path))
    block_thick = float(get_param_value('tc.block.thick', prj_path))
    iso1_width = float(get_param_value('tc.iso1.width', prj_path))
    gate1_width = float(get_param_value('tc.gate1.width', prj_path))
    iso2_width = float(get_param_value('tc.iso2.width', prj_path))
    gate2_width = float(get_param_value('tc.gate2.width', prj_path))
    iso3_width = float(get_param_value('tc.iso3.width', prj_path))
    gate3_width = float(get_param_value('tc.gate3.width', prj_path))
    iso4_width = float(get_param_value('tc.iso4.width', prj_path))
    main_thick = tunnel_thick + trap_thick + block_thick

    mask_y = np.array(grid_y > main_thick)
    mask_x_gate1 = np.logical_and(grid_x > iso1_width, grid_x < iso1_width + gate1_width)
    mask_x_gate2 = np.logical_and(grid_x > iso1_width + gate1_width + iso2_width,
                                  grid_x < iso1_width + gate1_width + iso2_width + gate2_width)
    mask_x_gate3 = np.logical_and(grid_x > iso1_width + gate1_width + iso2_width + gate2_width + iso3_width,
                                  grid_x < iso1_width + gate1_width + iso2_width + gate2_width + iso3_width
                                  + gate3_width)
    mask_z = mask_y & (mask_x_gate1 | mask_x_gate2 | mask_x_gate3)
    grid_z_masked = np.ma.array(grid_z, mask=mask_z)
    return grid_z_masked
Example #29
def res2_true_and_false(hs, res, SV):
    'Organizes results into true positive and false positive sets'
    if not 'SV' in vars():
        SV = True
    #if not 'res' in vars():
        #res = qcx2_res[qcx]
    indx_samp = hs.indexed_sample_cx
    qcx = res.qcx
    cx2_score = res.cx2_score if SV else res.cx2_score
    unfilt_top_cx = np.argsort(cx2_score)[::-1]
    # Get top chip indexes and scores
    top_cx    = np.array(helpers.intersect_ordered(unfilt_top_cx, indx_samp))
    top_score = cx2_score[top_cx]
    # Get the true and false ground truth ranks
    qnx         = hs.tables.cx2_nx[qcx]
    if qnx <= 1:
        qnx = -1  # disallow uniden animals from being marked as true
    top_nx      = hs.tables.cx2_nx[top_cx]
    true_ranks  = np.where(np.logical_and(top_nx == qnx, top_cx != qcx))[0]
    false_ranks = np.where(np.logical_and(top_nx != qnx, top_cx != qcx))[0]
    # Construct the true positive tuple
    true_scores  = top_score[true_ranks]
    true_cxs     = top_cx[true_ranks]
    true_tup     = (true_cxs, true_scores, true_ranks)
    # Construct the false positive tuple
    false_scores = top_score[false_ranks]
    false_cxs    = top_cx[false_ranks]
    false_tup    = (false_cxs, false_scores, false_ranks)
    # Return tuples
    return true_tup, false_tup
Example #30
 def compare(self, result, t , noiseType = 'Z', sum = True, full=True):
     """Compares the discretization of this case with the one of an algorithm whose results are given in otherdisc. timeparam variable contains the variables for the discretization. Returns a dictionnary with the number of True positives, True negatives, False positives and False negatives"""
     #restrict comparison to between Tb and Te
     
     otherdisc=result
     try:
         fulldisc=self.case[noiseType].discretize(t)
         intdisc=[int(b) for b in fulldisc]
         assert( len(otherdisc) == len(fulldisc) )
         #assert(not any([i==None for i in disc]))
     except AssertionError:
         print('something wrong in function of ', self)
     if full:
         disc=fulldisc
     else: 
         mask = np.logical_and(t >= self.case['Tb'], t <= self.case['Te'])
         otherdisc = result[mask]
         disc = fulldisc[mask]
         t=t[mask]
     retTF={}
     retTF['TP'] = np.logical_and(otherdisc,disc)
     retTF['TN'] = np.logical_and(np.logical_not(otherdisc), np.logical_not(disc))
     retTF['FP'] = np.logical_and(otherdisc, np.logical_not(disc))
     retTF['FN'] = np.logical_and(np.logical_not(otherdisc),  disc)
     if sum:
         for k, v in retTF.items():
             retTF[k]= int(v.sum())
     else:
         retTF['t'] = t
         retTF['disc'] = disc
     return retTF, intdisc
Example #31
    def test_overall(self):
        self.eval()

        correct_pos = 0
        total_pos = 0

        correct_neg = 0
        total_neg = 0

        total = 0
        correct = 0

        running_reg_loss = 0
        running_answer_loss = 0
        running_answer_reg_loss = 0
        count = 0

        with torch.no_grad():

            for data in self.validloader:

                if len(data) == 2:
                    inputs, labels = data
                    tasks = None
                else:
                    inputs, tasks, labels = data

                if self.cuda is not None:
                    inputs = inputs.cuda(self.cuda)
                    labels = labels.cuda(self.cuda)
                    if tasks is not None:
                        tasks = tasks.cuda(self.cuda)

                tasks = tasks[:, None]
                task_labels = torch.stack([
                    torch.tensor(self.tasks[tasks[l]].label_to_task(labels[l]),
                                 dtype=torch.long) for l in range(len(labels))
                ])
                task_labels = task_labels[:, None]

                if self.cuda is not None:
                    task_labels = task_labels.cuda(self.cuda)

                _, answer_loss, reg_loss = self.get_losses(
                    inputs, tasks, task_labels)
                answer_reg_loss = answer_loss + reg_loss
                running_answer_loss += answer_loss
                running_reg_loss += reg_loss
                running_answer_reg_loss += answer_reg_loss
                count += 1

                outputs, _, _ = self.forward(inputs, tasks, None)
                predicted = torch.argmax(outputs, dim=1)
                task_labels = task_labels.reshape(-1)

                # True Positive (TP): we predict a label of 1 (positive), and the true label is 1.
                TP = np.logical_and(task_labels.cpu() == 1,
                                    predicted.cpu() == 1).sum()

                # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.
                TN = np.logical_and(task_labels.cpu() == 0,
                                    predicted.cpu() == 0).sum()

                correct_pos += float(TP)
                correct_neg += float(TN)
                total_pos += float(sum(task_labels == 1))
                total_neg += float(sum(task_labels == 0))

                correct += float(sum(task_labels == predicted))
                total += len(task_labels)

            if self.log:
                print(
                    'overall accuracy of the network on the 10000 valid images: %.2f %%'
                    % (100 * float(correct) / float(total)),
                    file=open(self.log, "a"))
                print(
                    'overall valid loss of the network: pred: %.2f, gate: %.2f, pred+gate: %.2f'
                    % (running_answer_loss / count, running_reg_loss / count,
                       running_answer_reg_loss / count),
                    file=open(self.log, "a"))

            print(
                'overall accuracy of the network on the 10000 valid images: %.2f %%'
                % (100 * float(correct) / float(total)))
            print(
                'overall valid loss of the network: pred: %.2f, gate: %.2f, pred+gate: %.2f'
                % (running_answer_loss / count, running_reg_loss / count,
                   running_answer_reg_loss / count))

            correct_pos = 0
            total_pos = 0

            correct_neg = 0
            total_neg = 0

            total = 0
            correct = 0

            running_reg_loss = 0
            running_answer_loss = 0
            running_answer_reg_loss = 0
            count = 0

            for data in self.testloader:

                if len(data) == 2:
                    inputs, labels = data
                    tasks = None
                else:
                    inputs, tasks, labels = data

                if self.cuda is not None:
                    inputs = inputs.cuda(self.cuda)
                    labels = labels.cuda(self.cuda)
                    if tasks is not None:
                        tasks = tasks.cuda(self.cuda)

                tasks = tasks[:, None]
                task_labels = torch.stack([
                    torch.tensor(self.tasks[tasks[l]].label_to_task(labels[l]),
                                 dtype=torch.long) for l in range(len(labels))
                ])
                task_labels = task_labels[:, None]
                if self.cuda is not None:
                    task_labels = task_labels.cuda(self.cuda)

                _, answer_loss, reg_loss = self.get_losses(
                    inputs, tasks, task_labels)
                answer_reg_loss = answer_loss + reg_loss
                running_answer_loss += answer_loss
                running_reg_loss += reg_loss
                running_answer_reg_loss += answer_reg_loss

                outputs, _, _ = self.forward(inputs, tasks, None)
                predicted = torch.argmax(outputs, dim=1)
                task_labels = task_labels.reshape(-1)

                # True Positive (TP): we predict a label of 1 (positive), and the true label is 1.
                TP = np.logical_and(task_labels.cpu() == 1,
                                    predicted.cpu() == 1).sum()

                # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.
                TN = np.logical_and(task_labels.cpu() == 0,
                                    predicted.cpu() == 0).sum()

                correct_pos += float(TP)
                correct_neg += float(TN)
                total_pos += float(sum(task_labels == 1))
                total_neg += float(sum(task_labels == 0))
                count += 1

                correct += float(sum(task_labels == predicted))
                total += len(task_labels)

            if self.log:
                print(
                    'overall accuracy of the network on the 10000 test images: %.2f %%'
                    % (100 * float(correct) / float(total)),
                    file=open(self.log, "a"))
                print(
                    'overall test loss of the network: pred: %.2f, gate: %.2f, pred+gate: %.2f'
                    % (running_answer_loss / count, running_reg_loss / count,
                       running_answer_reg_loss / count),
                    file=open(self.log, "a"))

            print(
                'overall accuracy of the network on the 10000 test images: %.2f %%'
                % (100 * float(correct) / float(total)))
            print(
                'overall test loss of the network: pred: %.2f, gate: %.2f, pred+gate: %.2f'
                % (running_answer_loss / count, running_reg_loss / count,
                   running_answer_reg_loss / count))

        self.train()
Example #32
def getHigherPoints(blurred, udc):
    """"
    STRUCTURE OF HIGHER_PTS:
    ['Distance to next highest point, index of higher point, value of current point']
    """
    blur_thresh = udc['blur_thresh']
    time_factor = udc['time_factor']
    nCores = g.settings['nCores']
    idxs = np.where(blurred > blur_thresh)
    densities = blurred[idxs]
    densities_jittered = densities + np.arange(len(densities)) / (
        2 * np.float(len(densities))
    )  #I do this so no two densities are the same, so each cluster has a peak.
    C = np.zeros(blurred.shape)
    C_idx = np.zeros(blurred.shape, dtype=np.int)
    idxs = np.vstack((idxs[0], idxs[1], idxs[2])).T
    C[idxs[:, 0], idxs[:, 1], idxs[:, 2]] = densities_jittered
    C_idx[idxs[:, 0], idxs[:, 1], idxs[:, 2]] = np.arange(len(idxs))
    print("Number of pixels to analyze: {}".format(len(idxs)))
    remander = np.arange(len(idxs))
    nTotal_pts = len(idxs)
    block_ends = np.linspace(0, len(remander), nCores + 1, dtype=np.int)
    data_blocks = [
        remander[block_ends[i]:block_ends[i + 1]] for i in np.arange(nCores)
    ]

    if g.settings['multiprocessing']:
        # create the ProgressBar object
        args = (nTotal_pts, C, idxs, densities_jittered, C_idx, time_factor)
        progress = ProgressBar(getHigherPoint,
                               data_blocks,
                               args,
                               nCores,
                               msg='Getting Higher Points')
        if progress.results is None or any(r is None
                                           for r in progress.results):
            higher_pts = None
        else:
            higher_pts = np.sum(progress.results, 0)
    else:
        args = (nTotal_pts, C, idxs, densities_jittered, C_idx, time_factor)
        remander = np.arange(len(idxs))
        higher_pts = getHigherPointSingleProcess(args, remander)

    mt, mx, my = blurred.shape
    maxDistance = np.sqrt((mt / time_factor)**2 + mx**2 + my**2)
    remander = np.argwhere(higher_pts[:, 0] == 0)
    remander = remander.T[0]
    if len(remander) == 1:
        ii = remander[0]
        higher_pts[ii] = [maxDistance, ii, densities_jittered[ii]]
    elif len(remander) > 1:
        if True:
            dens2 = densities_jittered[remander]
            possible_higher_pts = np.where(
                densities_jittered > np.min(dens2))[0]
            dens3 = densities_jittered[possible_higher_pts]
            pos1 = idxs[remander].astype(np.float)
            pos1[:, 0] = pos1[:, 0] / time_factor
            pos2 = idxs[possible_higher_pts].astype(np.float)
            pos2[:, 0] = pos2[:, 0] / time_factor
            try:
                print('Constructing {}x{} distance matrix'.format(
                    len(pos1), len(pos2)))
                D = spatial.distance_matrix(
                    pos1, pos2
                )  # If this matrix is large, this will fail with a MemoryError.
                for i, pt in enumerate(remander):
                    density = densities_jittered[pt]
                    idxs2 = dens3 > density
                    pts_of_higher_density = possible_higher_pts[idxs2]
                    if len(pts_of_higher_density
                           ) == 0:  # This is the most dense point
                        higher_pts[pt] = [maxDistance, pt, density]
                    else:
                        distances_to_pts_of_higher_density = D[i, :][idxs2]
                        higher_pt = pts_of_higher_density[np.argmin(
                            distances_to_pts_of_higher_density)]
                        distance_to_nearest_pt_with_higher_density = np.min(
                            distances_to_pts_of_higher_density)
                        higher_pts[pt] = [
                            distance_to_nearest_pt_with_higher_density,
                            higher_pt, density
                        ]
            except MemoryError:
                most_dense_point = None
                for i, pt in enumerate(remander):
                    # Finding most dense point
                    density = densities_jittered[pt]
                    idxs2 = dens3 > density
                    pts_of_higher_density = possible_higher_pts[idxs2]
                    if len(pts_of_higher_density
                           ) == 0:  # This is the most dense point
                        most_dense_point = pt
                for i, pt in enumerate(remander):
                    density = densities_jittered[pt]
                    higher_pts[pt] = [maxDistance, most_dense_point, density]

        elif False:
            idxs_time_adjusted = idxs[:].astype(np.float)
            idxs_time_adjusted[:, 0] = idxs_time_adjusted[:, 0] / time_factor
            while len(remander) > 1:
                print(len(remander))
                dens2 = densities_jittered[remander]
                density = np.min(dens2)
                current_pt = remander[np.argmin(dens2)]
                possible_higher_pts = np.where(densities_jittered > density)[0]
                pos1 = idxs_time_adjusted[current_pt]
                pos2 = idxs_time_adjusted[possible_higher_pts]
                Dsq = np.sum((pos2 - pos1)**2, 1)
                higher_pt = possible_higher_pts[np.argmin(Dsq)]
                d = np.sqrt(np.min(Dsq))
                higher_pts[current_pt] = [d, higher_pt, density]
                remander = remander[remander != current_pt]
            highest_pt = remander[0]
            density = densities_jittered[highest_pt]
            higher_pts[highest_pt] = [maxDistance, highest_pt, density]
        else:
            blockFrames = 200
            block_ends = np.arange(0, mt, blockFrames).astype(np.int)
            block_ends = np.append(block_ends, mt)
            times_remander = idxs[remander][:, 0]
            times_remander_blocks = [
                np.where(
                    np.logical_and(times_remander > block_ends[i],
                                   times_remander <= block_ends[i + 1]))[0]
                for i in np.arange(nCores)
            ]
            idxs_time_adjusted = idxs[:].astype(np.float)
            idxs_time_adjusted[:, 0] = idxs_time_adjusted[:, 0] / time_factor
            for times_remander_block in times_remander_blocks:  # times_remander_block is the
                density_block = densities_jittered[times_remander_block]
                possible_higher_pts = np.where(
                    densities_jittered > np.min(density_block))[0]

    return higher_pts, idxs
Example #33
def _update_valid_indices_by_removing_high_iou_boxes(
    selected_indices, is_index_valid, intersect_over_union, threshold
):
    max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
    return np.logical_and(is_index_valid, max_iou <= threshold)
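A tiny illustration of the helper above, with a hypothetical 3x3 IoU matrix in which box 0 has just been selected:

import numpy as np

iou = np.array([[1.0, 0.8, 0.1],
                [0.8, 1.0, 0.2],
                [0.1, 0.2, 1.0]])
is_valid = np.array([False, True, True])   # box 0 already consumed
print(_update_valid_indices_by_removing_high_iou_boxes([0], is_valid, iou, 0.5))
# expected [False False  True]: box 1 overlaps box 0 too much, box 2 survives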
Example #34
def non_max_suppression(
    boxlist, max_output_size=10000, iou_threshold=1.0, score_threshold=-10.0
):
    """Non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes. In each iteration, the detected bounding box with
  highest score in the available pool is selected.

  Args:
    boxlist: BoxList holding N boxes.  Must contain a 'scores' field
      representing detection scores. All scores belong to the same class.
    max_output_size: maximum number of retained boxes
    iou_threshold: intersection over union threshold.
    score_threshold: minimum score threshold. Remove the boxes with scores
                     less than this value. Default value is set to -10. A very
                     low threshold to pass pretty much all the boxes, unless
                     the user sets a different score threshold.

  Returns:
    a BoxList holding M boxes where M <= max_output_size
  Raises:
    ValueError: if 'scores' field does not exist
    ValueError: if threshold is not in [0, 1]
    ValueError: if max_output_size < 0
  """
    if not boxlist.has_field("scores"):
        raise ValueError("Field scores does not exist")
    if iou_threshold < 0.0 or iou_threshold > 1.0:
        raise ValueError("IOU threshold must be in [0, 1]")
    if max_output_size < 0:
        raise ValueError("max_output_size must be bigger than 0.")

    boxlist = filter_scores_greater_than(boxlist, score_threshold)
    if boxlist.num_boxes() == 0:
        return boxlist

    boxlist = sort_by_field(boxlist, "scores")

    # Prevent further computation if NMS is disabled.
    if iou_threshold == 1.0:
        if boxlist.num_boxes() > max_output_size:
            selected_indices = np.arange(max_output_size)
            return gather(boxlist, selected_indices)
        else:
            return boxlist

    boxes = boxlist.get()
    num_boxes = boxlist.num_boxes()
    # is_index_valid is True only for all remaining valid boxes,
    is_index_valid = np.full(num_boxes, 1, dtype=bool)
    selected_indices = []
    num_output = 0
    for i in range(num_boxes):
        if num_output < max_output_size:
            if is_index_valid[i]:
                num_output += 1
                selected_indices.append(i)
                is_index_valid[i] = False
                valid_indices = np.where(is_index_valid)[0]
                if valid_indices.size == 0:
                    break

                intersect_over_union = np_box_ops.iou(
                    np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :]
                )
                intersect_over_union = np.squeeze(intersect_over_union, axis=0)
                is_index_valid[valid_indices] = np.logical_and(
                    is_index_valid[valid_indices],
                    intersect_over_union <= iou_threshold,
                )
    return gather(boxlist, np.array(selected_indices))
Example #35
    batch_sessions_df = p.map(feature_df_generator.batch_file_to_df,
                              glob.glob(csv_path))
logger.info("{}".format(pendulum.now().diff_for_humans(start)))

batch_sessions_df = [df for df in batch_sessions_df if not df.empty]
logger.info(len(batch_sessions_df))

session_ids = [df['session_name'][0] for df in batch_sessions_df]
session_map = {
    df['session_name'][0]: idx
    for idx, df in enumerate(batch_sessions_df)
}

asleep_sessions_df = list()
for session in batch_sessions_df:
    asleep_mask = np.where(np.logical_and(session["y"] < 4,
                                          session["y"] > 0))[0]
    asleep_df = session.iloc[asleep_mask]
    asleep_df.loc[:, "y"] -= 1
    asleep_sessions_df.append(asleep_df)

file_batch_config = {
    "directory": "test",
    "feature_set": sorted([
        'active.log_int',
        'active.range',
        'active.var',
    ]),
    "look_back": 120,
    "look_forward": 60,
    "batch_size": 4096,
    "batch_seconds": 5,
Example #36
0
def get_boxcar_spectra(ion_data, distribution, agc_target, max_it, nBoxes,
                       nScans):
    '''
    Create centroids for boxcar spectra using parameters below.
    
    Parameters
    ----------
    ion_data : DataFrame, contains ion currents for all ions
    distribution : str, one of 'equal', 'lognormal', 'lognormal-major'
    agc_target: float, number of ions to sample per scan
    max_it: float, maximal injection time in milliseconds per scan
    nBoxes: int, number of boxes per scan
    nScans: int, number of scans
    
    Returns
    -------
        one tuple of six elements for each boxcar scan
        1. 2D array of mz and intensities
                [[mz0, intensity0],
                 [mz1, intensity1],
                 ....
                 [mzN, intensityN]]
        2. required scan time in milliseconds
        3. acquired number of ions
        4. set of observed peptide sequences
        5. maximum observed ion intensity under the distribution
        6. minimum observed ion intensity under the distribution
    '''
    BCscans = []
    for scan in range(nScans):
        scan_mz = []
        scan_counts = []
        scan_time = 0
        agc = 0
        for box in range(nBoxes):
            selector = np.logical_and(ion_data['box'] == box,
                                      ion_data['scan'] == scan)
            if selector.sum() > 0:
                intensities, box_time, box_agc = sample_ions(
                    ion_data[selector], distribution, agc_target / nBoxes,
                    max_it / nBoxes)
                scan_time += box_time
                agc += box_agc
                scan_mz.append(ion_data.loc[selector, 'mz'].values)
                scan_counts.append(intensities)
            else:
                scan_time += 1e-3 * max_it / nBoxes

        scan_mz = np.concatenate(scan_mz)
        scan_counts = np.concatenate(scan_counts)
        dyn_range_filter = scan_counts > max(scan_counts.max() * 1e-4, 10)
        mzdata = np.stack((scan_mz, scan_counts), axis=-1)
        scan_ion_data = ion_data[ion_data['scan'] == scan][dyn_range_filter]

        if scan_ion_data.shape[0] > 0:  #non-empty
            peptides = set(scan_ion_data['sequence'])
            max_int = scan_ion_data['ic_' + distribution].max()
            min_int = scan_ion_data['ic_' + distribution].min()
        else:
            peptides, max_int, min_int = set(), -1, -1

        BCscans.append((mzdata[dyn_range_filter, :], scan_time * 1000, agc,
                        peptides, max_int, min_int))

    return np.array(BCscans)
Example #37
0
def normalize_ion_currents(ion_data, low, high):
    '''
    Restrict m/z to the [low, high] mass range (rows outside it are dropped in place)
    '''
    in_mass = np.logical_and(ion_data['mz'] >= low, ion_data['mz'] <= high)
    ion_data.drop(ion_data.index[~in_mass], axis='index', inplace=True)
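A minimal usage sketch follows (not part of the original example; the DataFrame is invented for illustration, and pandas/numpy are assumed to be importable):

import numpy as np
import pandas as pd

ions = pd.DataFrame({'mz': [150.2, 420.7, 980.3, 1650.9]})
normalize_ion_currents(ions, low=300, high=1200)
print(ions['mz'].tolist())    # -> [420.7, 980.3]; rows outside [300, 1200] were dropped in place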
Example #38
0
 def g(a, b):
     return np.logical_and(a, b)
Example #39
0
    def accumulate(self, p=None):
        '''
        Accumulate per image evaluation results and store the result in self.eval
        :param p: input params for evaluation
        :return: None
        '''
        print('Accumulating evaluation results...')
        tic = time.time()
        if not self.evalImgs:
            print('Please run evaluate() first')
        # allows input customized parameters
        if p is None:
            p = self.params
        p.catIds = p.catIds if p.useCats == 1 else [-1]
        T = len(p.iouThrs)
        R = len(p.recThrs)
        K = len(p.catIds) if p.useCats else 1
        A = len(p.areaRng)
        M = len(p.maxDets)
        # -1 for the precision of absent categories
        precision = -np.ones((T, R, K, A, M))
        recall = -np.ones((T, K, A, M))
        scores = -np.ones((T, R, K, A, M))

        # create dictionary for future indexing
        _pe = self._paramsEval
        catIds = _pe.catIds if _pe.useCats else [-1]
        setK = set(catIds)
        setA = set(map(tuple, _pe.areaRng))
        setM = set(_pe.maxDets)
        setI = set(_pe.imgIds)
        # get inds to evaluate
        k_list = [n for n, k in enumerate(p.catIds) if k in setK]
        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
        a_list = [
            n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng))
            if a in setA
        ]
        i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
        I0 = len(_pe.imgIds)
        A0 = len(_pe.areaRng)
        # retrieve E at each category, area range, and max number of detections
        for k, k0 in enumerate(k_list):
            Nk = k0 * A0 * I0
            for a, a0 in enumerate(a_list):
                Na = a0 * I0
                for m, maxDet in enumerate(m_list):
                    E = [self.evalImgs[Nk + Na + i] for i in i_list]
                    E = [e for e in E if e is not None]
                    if len(E) == 0:
                        continue
                    dtScores = np.concatenate(
                        [e['dtScores'][0:maxDet] for e in E])

                    # Different sorting methods generate slightly different results;
                    # mergesort is used to be consistent with the Matlab
                    # implementation.
                    inds = np.argsort(-dtScores, kind='mergesort')
                    dtScoresSorted = dtScores[inds]

                    dtm = np.concatenate(
                        [e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:,
                                                                          inds]
                    dtIg = np.concatenate(
                        [e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:,
                                                                         inds]
                    gtIg = np.concatenate([e['gtIgnore'] for e in E])
                    npig = np.count_nonzero(gtIg == 0)
                    if npig == 0:
                        continue
                    tps = np.logical_and(dtm, np.logical_not(dtIg))
                    fps = np.logical_and(np.logical_not(dtm),
                                         np.logical_not(dtIg))

                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
                    for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                        tp = np.array(tp)
                        fp = np.array(fp)
                        nd = len(tp)
                        rc = tp / npig
                        pr = tp / (fp + tp + np.spacing(1))
                        q = np.zeros((R, ))
                        ss = np.zeros((R, ))

                        if nd:
                            recall[t, k, a, m] = rc[-1]
                        else:
                            recall[t, k, a, m] = 0

                        # numpy is slow when accessing individual elements without
                        # cython optimization; using python lists here gives a
                        # significant speed improvement
                        pr = pr.tolist()
                        q = q.tolist()

                        for i in range(nd - 1, 0, -1):
                            if pr[i] > pr[i - 1]:
                                pr[i - 1] = pr[i]

                        inds = np.searchsorted(rc, p.recThrs, side='left')
                        try:
                            for ri, pi in enumerate(inds):
                                q[ri] = pr[pi]
                                ss[ri] = dtScoresSorted[pi]
                        except BaseException:
                            pass
                        precision[t, :, k, a, m] = np.array(q)
                        scores[t, :, k, a, m] = np.array(ss)
        self.eval = {
            'params': p,
            'counts': [T, R, K, A, M],
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'precision': precision,
            'recall': recall,
            'scores': scores,
        }
        toc = time.time()
        print('DONE (t={:0.2f}s).'.format(toc - tic))
Example #40
0
def Scatter2D(Data, title, folder, **kwargs):

    # def density_scatter(x, y, xlabel, ylabel, sort = True, bins = [20,20], **kwargs):

    # 	"""
    # 	Scatter plot colored by 2d histogram
    # 	"""

    # 	fig , ax = plt.subplots()
    # 	data , x_e, y_e = np.histogram2d( x, y, bins = bins, density = True)
    # 	z = interpn((0.5*(x_e[1:] + x_e[:-1]),0.5*(y_e[1:]+y_e[:-1])) ,data ,np.vstack([x,y]).T ,method = "splinef2d", bounds_error = False)

    # 	#To be sure to plot all data
    # 	z[np.where(np.isnan(z))] = 0.0

    # 	# Sort the points by density, so that the densest points are plotted last
    # 	if sort :
    # 		idx = z.argsort()
    # 		x, y, z = x[idx], y[idx], z[idx]

    # 	ax.scatter(x, y, s = 5, c=z, **kwargs)
    # 	ax.set_xlabel(xlabel)
    # 	ax.set_ylabel(ylabel)

    # 	norm = Normalize(vmin = np.min(z), vmax = np.max(z))
    # 	cbar = fig.colorbar(cm.ScalarMappable(norm = norm), ax=ax)
    # 	cbar.ax.set_ylabel('Density')

    # 	return fig, ax

    # out = "Scatter"
    # if not os.path.exists(Path(folder,'Scatter')):
    #     os.mkdir(Path(folder,'Scatter'))

    Nodes = []
    for node in open('sclcnetwork.ids').readlines():
        Nodes.append(str(node.split('\t')[0].strip()))
    Nodes = sorted(Nodes)

    Remove = []
    for node in Nodes:
        if not node in list(Data.index):
            Remove.append(node)

    for node in Remove:
        Nodes.remove(node)

    if len(Remove) > 0:
        with open(Path(folder, 'Nodes_not_found.txt'), 'w') as f:
            f.write(
                "The nodes of SCLC which are not found in dataset are \n\n")
            for node in Remove:
                f.write(node + '\n')

    data = Data.loc[Nodes].copy()
    data = data.astype('float64')
    scaled_data = pd.DataFrame(data=preprocessing.scale(data.T).T, index=Nodes)

    ## append the list of node pairs that need to be scatter plotted

    plot_nodes = []
    plot_nodes.append(["ASCL1",
                       'NEUROD1'])  ######################################

    for pnodes in plot_nodes:
        X = np.array(scaled_data.loc[pnodes[0]])
        Y = np.array(scaled_data.loc[pnodes[1]])
        Z = np.array(scaled_data.loc["ASCL1"])
        clr = np.array([0 for i in range(len(X))])
        clr[np.logical_and(Y > 0, Z < 0)] = 1
        clr[np.logical_and(Y < 0, Z > 0)] = 2
        clr[np.logical_and(Y > 0, Z > 0)] = 3

        # fig, ax = density_scatter(X, Y, pnodes[0], pnodes[1], bins = [200,200])
        fig, ax = plt.subplots()
        plt.suptitle(
            title +
            ": Scatter plot of {} and {}".format(pnodes[0], pnodes[1]))
        ax.scatter(X, Y, s=25, c=clr)
        ax.set_xlabel(pnodes[0])
        ax.set_ylabel(pnodes[1])
        plt.savefig(str(folder) + "/Scatter" + pnodes[0] + "_" + pnodes[1] +
                    ".png",
                    format="png")
        plt.close()
Example #41
0
def run_fitting(args):
    logging.info("Parsed args: %s" % args)
        
    indata = args.input   
    output_path = args.output   
    verbose = args.verbose or args.debug
    f = args.switching_fraction
    assert f>=0 and f<=1.0
    
    process_constructor_args = {}
    if args.switch_time is not None: process_constructor_args["switch_time"] = args.switch_time
    if args.hypothesis0: process_constructor_args["switch_time"] = INF #force estimation of a1 over all data points
    if args.max_time is not None: process_constructor_args["max_time"] = args.max_time
    if args.start_time is not None: process_constructor_args["start_time"] = args.start_time 
    if not args.verbose: process_constructor_args["verbose"] = 0 
    if args.debug: process_constructor_args["verbose"] = 3
    logging.info("process constructor args: %s" % process_constructor_args)

    ###########################################################################
    
    logging.log(LOG_DBGINFO, "[%s] reading data from %s" % (str(datetime.now()), indata))    
    df = VAR[indata[7: ]] if indata.startswith("shared_") else pd.read_csv(indata, sep="\t")  
    ids = sorted(pd.unique(df["id"]))
    
    if verbose:
        logging.log(LOG_DBGINFO, " #users=%s" % len(ids))    
        max_times, switch_times, start_times, execution_times = extract_processes_times(df)
        if args.switch_time is not None:
            switch_times = args.switch_time
        logging.log(LOG_DBGINFO, " #not executed actions=%s" % sum(execution_times>max_times))
        logging.log(LOG_DBGINFO, " #executed before switching=%s" % sum(execution_times<=switch_times))
        logging.log(LOG_DBGINFO, " #executed after switching=%s" % sum(numpy.logical_and(execution_times>switch_times, execution_times<=max_times)))
        logging.log(LOG_DBGINFO, " #started before switching=%s" % sum(start_times<switch_times)) #start_times==switch_times means that started after switching 
        logging.log(LOG_DBGINFO, " #started after switching=%s" % sum(numpy.logical_and(start_times>=switch_times, start_times<=max_times)))
        logging.log(LOG_DBGINFO, " start_times:\n  "+str(pd.Series(start_times).describe()).replace("\n", "\n  "))
        logging.log(LOG_DBGINFO, " execution_times:\n  "+str(pd.Series(execution_times).describe()).replace("\n", "\n  "))

    ###########################################################################
    
    logging.log(LOG_DBGINFO, "[%s] calculating ll" % str(datetime.now()))
    id2p = build_processes(df, **process_constructor_args)

    logging.log(LOG_DBGINFO, "[%s] summing weights and estimating alphas" % str(datetime.now()))
    a1, a2, ll = fit_hazard_rates(id2p, f)
            
    #a1 is estimated over all the data points anyways by forcing switching_time=inf above
    #therefore we do not need to do this here and we can simply overwrite a2 (that would otherwise be 0)
    if args.hypothesis0: a2 = a1 
    
    ###########################################################################
    
    if output_path is not None:
        logging.info("saving params to %s" % output_path)    
        df = pd.DataFrame()
        df["id"] = ids 
        df["a1"] = a1
        df["a2"] = a2
        df.to_csv(output_path, sep="\t", index=False)

    ###########################################################################    
                
    logging.info("a0=%f a1=%f ll=%f" % (a1, a2, ll))
    
    return a1, a2, None, ll 
Example #42
0
            test_dir = fig_dir + 'group_' + test_name + os.path.sep
            zeta.util.mkdir(test_dir)

            """
            TODO:
            describe more or make a pandas framework for queries
            """

            # find subjects in test group
            group_names = type_inhib.loc[type_inhib['type_inhib'].isin(group)]['patient'].tolist()

            # get corresponding indexes
            keep = dfs.isin(group_names)[0].tolist()

            # keep subjects that respect different criteria
            subjects_to_keep = np.logical_and(~np.array(bad_subjects), np.array(keep))
            nb_sub = np.count_nonzero(subjects_to_keep == True)

            if nb_sub < 5:
                print("SKIP stats for group (%s), not enough subjects (%i/%i))" % (test_name, nb_sub, len(keep)))
            else:
                print("Perform stats for group (%s), included (%i/%i))" % (test_name, nb_sub, len(keep)))
                for ch_name in ['t7']:  # tfr_av0[0].info['ch_names']:  #
                    plt.close("all")
                    ch_ind = mne.pick_channels(tfr_av0[0].info['ch_names'], [ch_name])

                    # compute TF chunks electrode by electrode
                    epochs_power_0 = tfr0_data[subjects_to_keep, ch_ind, :, :]  # only ch_name channel and remove NANs
                    epochs_power_1 = tfr1_data[subjects_to_keep, ch_ind, :, :]  # only ch_name channel and remove NANs

                    # compute permutation test (cluster-based)
Example #43
0
def find_extrema(x, Fs, f_range, boundary=None, first_extrema='peak',
                 filter_fn=None, filter_kwargs=None):
    """
    Identify peaks and troughs in a time series.

    Parameters
    ----------
    x : array-like 1d
        voltage time series
    Fs : float
        sampling rate
    f_range : (low, high), Hz
        frequency range for narrowband signal of interest,
        used to find zerocrossings of the oscillation
    boundary : int
        number of samples from edge of recording to ignore
    first_extrema : str or None
        if 'peak', then force the output to begin with a peak and end in a trough
        if 'trough', then force the output to begin with a trough and end in a peak
        if None, force nothing
    filter_fn : filter function, `filterfn(x, Fs, pass_type, f_lo, f_hi, remove_edge_artifacts=True)`
        Must have the same API as neurodsp.filter
    filter_kwargs : dict
        keyword arguments for filter_fn

    Returns
    -------
    Ps : array-like 1d
        indices at which oscillatory peaks occur in the input signal x
    Ts : array-like 1d
        indices at which oscillatory troughs occur in the input signal x

    Notes
    -----
    This function assures that there are the same number of peaks and troughs
    if the first extrema is forced to be either peak or trough.
    """

    # Set default filtering parameters
    if filter_fn is None:
        filter_fn = neurodsp.filter
    if filter_kwargs is None:
        filter_kwargs = {}

    # Default boundary value as 1 cycle length
    if boundary is None:
        boundary = int(np.ceil(Fs / float(f_range[0])))

    # Filter signal
    x_filt = filter_fn(x, Fs, 'bandpass', f_lo=f_range[0], f_hi=f_range[1], remove_edge_artifacts=False, **filter_kwargs)

    # Find rising and falling zerocrossings
    zeroriseN = _fzerorise(x_filt)
    zerofallN = _fzerofall(x_filt)

    # Compute number of peaks and troughs
    if zeroriseN[-1] > zerofallN[-1]:
        P = len(zeroriseN) - 1
        T = len(zerofallN)
    else:
        P = len(zeroriseN)
        T = len(zerofallN) - 1

    # Calculate peak samples
    Ps = np.zeros(P, dtype=int)
    for p in range(P):
        # Calculate the sample range between the most recent zero rise
        # and the next zero fall
        mrzerorise = zeroriseN[p]
        nfzerofall = zerofallN[zerofallN > mrzerorise][0]
        # Identify time of peak
        Ps[p] = np.argmax(x[mrzerorise:nfzerofall]) + mrzerorise

    # Calculate trough samples
    Ts = np.zeros(T, dtype=int)
    for tr in range(T):
        # Calculate the sample range between the most recent zero fall
        # and the next zero rise
        mrzerofall = zerofallN[tr]
        nfzerorise = zeroriseN[zeroriseN > mrzerofall][0]
        # Identify time of trough
        Ts[tr] = np.argmin(x[mrzerofall:nfzerorise]) + mrzerofall

    # Remove peaks and troughs within the boundary limit
    Ps = Ps[np.logical_and(Ps > boundary, Ps < len(x) - boundary)]
    Ts = Ts[np.logical_and(Ts > boundary, Ts < len(x) - boundary)]

    # Force the first extrema to be as desired
    # Assure equal # of peaks and troughs
    if first_extrema == 'peak':
        if Ps[0] > Ts[0]:
            Ts = Ts[1:]
        if Ps[-1] > Ts[-1]:
            Ps = Ps[:-1]
    elif first_extrema == 'trough':
        if Ts[0] > Ps[0]:
            Ps = Ps[1:]
        if Ts[-1] > Ps[-1]:
            Ts = Ts[:-1]
    elif first_extrema is None:
        pass
    else:
        raise ValueError('Parameter first_extrema is invalid')

    return Ps, Ts
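As a rough, self-contained illustration of the zero-crossing idea above (independent of neurodsp; the signal and all names here are invented), peaks can be located between rising and falling zero-crossings and troughs between falling and rising ones:

import numpy as np

Fs = 1000.0
t = np.arange(0, 2.0, 1 / Fs)
sig = np.sin(2 * np.pi * 5 * t)                       # clean 5 Hz oscillation

sign_change = np.diff(np.signbit(sig))                # True where the sign flips
rises = np.where(np.logical_and(sign_change, sig[1:] >= 0))[0]
falls = np.where(np.logical_and(sign_change, sig[1:] < 0))[0]

# one peak per rise->fall interval, one trough per fall->rise interval
peaks = [np.argmax(sig[r:f]) + r for r, f in zip(rises, falls[falls > rises[0]])]
troughs = [np.argmin(sig[f:r]) + f for f, r in zip(falls, rises[rises > falls[0]])]
print(len(peaks), len(troughs))                       # -> 9 9 (interior half-cycles only)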
Example #44
0
    def evaluateImg(self, imgId, catId, aRng, maxDet):
        '''
        perform evaluation for single category and image
        :return: dict (single image results)
        '''
        p = self.params
        if p.useCats:
            gt = self._gts[imgId, catId]
            dt = self._dts[imgId, catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
        if len(gt) == 0 and len(dt) == 0:
            return None

        for g in gt:
            if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]):
                g['_ignore'] = 1
            else:
                g['_ignore'] = 0

        # sort dt highest score first, sort gt ignore last
        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
        gt = [gt[i] for i in gtind]
        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in dtind[0:maxDet]]
        iscrowd = [int(o['iscrowd']) for o in gt]
        # load computed ious
        ious = self.ious[imgId, catId][:, gtind] if len(
            self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]

        T = len(p.iouThrs)
        G = len(gt)
        D = len(dt)
        gtm = np.zeros((T, G))
        dtm = np.zeros((T, D))
        gtIg = np.array([g['_ignore'] for g in gt])
        dtIg = np.zeros((T, D))
        if not len(ious) == 0:
            for tind, t in enumerate(p.iouThrs):
                for dind, d in enumerate(dt):
                    # information about best match so far (m=-1 -> unmatched)
                    iou = min([t, 1 - 1e-10])
                    m = -1
                    for gind, g in enumerate(gt):
                        # if this gt already matched, and not a crowd, continue
                        if gtm[tind, gind] > 0 and not iscrowd[gind]:
                            continue
                        # if dt matched to reg gt, and on ignore gt, stop
                        if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
                            break
                        # continue to next gt unless better match made
                        if ious[dind, gind] < iou:
                            continue
                        # if match successful and best so far, store
                        # appropriately
                        iou = ious[dind, gind]
                        m = gind
                    # if match made store id of match for both dt and gt
                    if m == -1:
                        continue
                    dtIg[tind, dind] = gtIg[m]
                    dtm[tind, dind] = gt[m]['id']
                    gtm[tind, m] = d['id']
        # set unmatched detections outside of area range to ignore
        a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1]
                      for d in dt]).reshape((1, len(dt)))
        dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T,
                                                                      0)))
        # store results for given image and category
        return {
            'image_id': imgId,
            'category_id': catId,
            'aRng': aRng,
            'maxDet': maxDet,
            'dtIds': [d['id'] for d in dt],
            'gtIds': [g['id'] for g in gt],
            'dtMatches': dtm,
            'gtMatches': gtm,
            'dtScores': [d['score'] for d in dt],
            'gtIgnore': gtIg,
            'dtIgnore': dtIg,
        }
Example #45
0
def split_by_vals(vec,cuts=0,group = None,tol=0):
    """
    Aggregates the indices of a vector based on specified
    values at which to cut the sorted array. Assumes the
    right-continuity of the cumulative distribution function.

    Arguments
    ---------
    vec :   A one-dimensional array of values.

    Keyword Arguments
    -----------------
    cuts :  A single value or list/array of values which will be used to divide the vector components.

    group : The group which labels the indices of vec, and which will be the item set of the returned Aggregation.

    Output
    ------
    Aggregation of the indices of vec
    """
    if group is None:
        group = _Group(_np.arange(len(vec)))

    if not(isinstance(cuts,_np.ndarray)):
        if isinstance(cuts,list):
            cuts = _np.array(cuts)
        else:
            cuts = _np.array([cuts])
    
    cuts = cuts[
        _np.where(
            _np.logical_and(
                cuts>=_np.amin(vec),
                cuts<_np.amax(vec)
            )
        )[0]
    ]

    if len(cuts)==0:
        return _Aggregation(
            group,
            _Group(_np.array([0])),
            {0:_np.arange(len(vec))}
        )
    else:
        agg_dict = {
            k+1:_np.where(
                _np.logical_and(
                    vec > cuts[k],
                    vec <= cuts[k+1]
                )
            )[0]
            for k in range(0,len(cuts)-1)
        }
        agg_dict.update({
            0:_np.where(vec <= cuts[0])[0],
            len(cuts):_np.where(vec > cuts[len(cuts)-1])[0]
        })

        return _Aggregation(
            group,
            _Group(_np.arange(len(cuts)+1)),
            agg_dict
        )
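As a point of comparison (not part of the original example; the arrays are invented), plain numpy can produce the same right-closed bucket labels with np.digitize; the _Group/_Aggregation wrappers used above belong to the source library and are omitted here.

import numpy as np

vec = np.array([0.2, 1.5, 3.0, 3.0, 7.8])
cuts = np.array([1.0, 3.0])
labels = np.digitize(vec, cuts, right=True)   # 0: <= 1.0, 1: (1.0, 3.0], 2: > 3.0
print(labels)                                 # -> [0 1 1 1 2]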
Example #46
0
#%% data manipulation and cleanup
pipelines = r['pipeline'].unique()
threshold_nans_percent = 10
for p in pipelines:
    asd = r.loc[r['pipeline'] == p]
    nan_perc = 100*asd['score'].isna().sum() / len(asd)
    print(f'{p}: {nan_perc:1.2f}% NaNs')
    if nan_perc >= threshold_nans_percent:
        print(f'{p} exceeds nan_threshold of {threshold_nans_percent}. Removing from analysis.')
        r = r.loc[~(r['pipeline'] == p)]
r = r.sort_values(by=['pipeline'])

#%%
# Split EPFL dataset into two datasets: healthy vs. patients
r.loc[np.logical_and(r['dataset'] == 'EPFL', r['subject'] <= 4), 'dataset'] = 'EPFL_disabled'
r.loc[np.logical_and(r['dataset'] == 'EPFL', r['subject'] > 4), 'dataset'] = 'EPFL_healthy'

# Split Brain invaders into two datasets: single-session and multi-session
r.loc[np.logical_and(r['dataset'] == 'Brain_invaders', r['subject'] <= 7), 'dataset'] = 'Brain_invaders_multisession'
r.loc[np.logical_and(r['dataset'] == 'Brain_invaders', r['subject'] > 7), 'dataset'] = 'Brain_invaders_singlesession'

for ds in r['dataset'].unique():
    r = r.replace({
        ds: _ds_pretty(ds)
    })
# # %%
# cp = sns.color_palette()
#

# %%
Example #47
0
B_hat = np.zeros([rows, cols, tot_frames])
for t in range(0, tot_frames):
    print('Processing frame ', t)
    for k in range(0, K):
        f_t_gray = cv2.cvtColor(frame_list.frames[t], cv2.COLOR_BGR2GRAY)
        dk_sq = np.power((f_t_gray - mu[..., k]), 2)
        dk_sq = dk_sq / variance[..., k]
        #dk = np.sqrt(dk_sq)
        is_matched = np.less(dk_sq, lambda_thr)

        # matched
        '''if match == 0: # if it was 'matched' in the previous stage?
            m = k'''
        # TODO test this part

        target_pixels = np.logical_and(
            is_matched, np.logical_not(match))  # ~match: 0 becomes true
        target_pixels_to_zeros = m * np.logical_not(
            target_pixels)  # only target pixels become zero
        m = target_pixels_to_zeros + k * target_pixels  # now add k to target pixels which are zero

        temp1 = m.reshape((rows * cols))
        temp2 = np.arange(rows * cols)
        temp3 = np.array([temp2, temp1], dtype=int).T
        temp4 = temp3.tolist()
        temp_rows_cols, temp_m = zip(*temp4)

        w_temp = w.reshape((rows * cols, K))
        w_m = w_temp[temp_rows_cols, temp_m].reshape((rows, cols))

        var_temp = variance.reshape((rows * cols, K))
        variance_m = var_temp[temp_rows_cols, temp_m].reshape((rows, cols))
Example #48
0
def merge(sources, bounds=None, res=None, nodata=None, precision=7):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Parameters
    ----------
    sources: list of source datasets
        Open rasterio RasterReader objects to be merged.
    bounds: tuple, optional
        Bounds of the output image (left, bottom, right, top).
        If not set, bounds are determined from bounds of input rasters.
    res: tuple, optional
        Output resolution in units of coordinate reference system. If not set,
        the resolution of the first raster is used. If a single value is passed,
        output pixels will be square.
    nodata: float, optional
        nodata value to use in output file. If not set, uses the nodata value
        in the first input raster.
    precision: int, optional
        Decimal precision used when computing the source and destination
        windows.

    Returns
    -------
    tuple

        Two elements:

            dest: numpy ndarray
                Contents of all input rasters in single array.

            out_transform: affine.Affine()
                Information for mapping pixel coordinates in `dest` to another
                coordinate system
    """
    first = sources[0]
    first_res = first.res
    nodataval = first.nodatavals[0]
    dtype = first.dtypes[0]

    # Extent from option or extent of all inputs.
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files.
        xs = []
        ys = []
        for src in sources:
            left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
    output_transform = Affine.translation(dst_w, dst_n)
    logger.debug("Output transform, before scaling: %r", output_transform)

    # Resolution/pixel size.
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])
    output_transform *= Affine.scale(res[0], -res[1])
    logger.debug("Output transform, after scaling: %r", output_transform)

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely.
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit.
    dst_e, dst_s = output_transform * (output_width, output_height)
    logger.debug("Output width: %d, height: %d", output_width, output_height)
    logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))

    # create destination array
    dest = np.zeros((first.count, output_height, output_width), dtype=dtype)

    if nodata is not None:
        nodataval = nodata
        logger.debug("Set nodataval: %r", nodataval)

    if nodataval is not None:
        # Only fill if the nodataval is within dtype's range.
        inrange = False
        if np.dtype(dtype).kind in ('i', 'u'):
            info = np.iinfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        elif np.dtype(dtype).kind == 'f':
            info = np.finfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        if inrange:
            dest.fill(nodataval)
        else:
            warnings.warn("Input file's nodata value, %s, is beyond the valid "
                          "range of its data type, %s. Consider overriding it "
                          "using the --nodata option for better results." %
                          (nodataval, dtype))
    else:
        nodataval = 0

    for src in sources:
        # Real World (tm) use of boundless reads.
        # This approach uses the maximum amount of memory to solve the problem.
        # Making it more efficient is a TODO.

        # 1. Compute spatial intersection of destination and source.
        src_w, src_s, src_e, src_n = src.bounds

        int_w = src_w if src_w > dst_w else dst_w
        int_s = src_s if src_s > dst_s else dst_s
        int_e = src_e if src_e < dst_e else dst_e
        int_n = src_n if src_n < dst_n else dst_n

        # 2. Compute the source window.
        src_window = windows.from_bounds(int_w,
                                         int_s,
                                         int_e,
                                         int_n,
                                         src.transform,
                                         boundless=True,
                                         precision=precision)
        logger.debug("Src %s window: %r", src.name, src_window)

        # 3. Compute the destination window.
        dst_window = windows.from_bounds(int_w,
                                         int_s,
                                         int_e,
                                         int_n,
                                         output_transform,
                                         boundless=True,
                                         precision=precision)

        logger.debug("Dst window: %r", dst_window)

        # 4. Initialize temp array.
        tcount = first.count
        trows, tcols = tuple(b - a for a, b in dst_window)

        temp_shape = (tcount, trows, tcols)
        logger.debug("Temp shape: %r", temp_shape)

        temp = np.zeros(temp_shape, dtype=dtype)
        temp = src.read(out=temp,
                        window=src_window,
                        boundless=False,
                        masked=True)

        # 5. Copy elements of temp into dest.
        roff, coff = dst_window[0][0], dst_window[1][0]

        region = dest[:, roff:roff + trows, coff:coff + tcols]
        np.copyto(region,
                  temp,
                  where=np.logical_and(region == nodataval,
                                       temp.mask == False))

    return dest, output_transform
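The nodata-aware copy in step 5 is the core of the reverse painter's algorithm. Here is a small, self-contained sketch of that single step with invented arrays (it does not require rasterio): destination pixels are overwritten only where they still hold the nodata value and the incoming pixel is not masked.

import numpy as np

nodataval = -9999.0
dest = np.full((1, 3, 3), nodataval)
dest[0, 0, 0] = 5.0                           # already painted by an earlier source

temp = np.ma.masked_array(np.arange(9, dtype=float).reshape(1, 3, 3),
                          mask=np.zeros((1, 3, 3), dtype=bool))
temp.mask[0, 2, 2] = True                     # this source pixel is invalid

np.copyto(dest, temp,
          where=np.logical_and(dest == nodataval, temp.mask == False))
print(dest[0])   # [0, 0] keeps 5.0, [2, 2] keeps -9999.0, the rest take the source values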
Example #49
0
def filter_camera_angle(places):
    """Filter camera angles for KiTTI Datasets"""
    bool_in = np.logical_and((places[:, 1] < places[:, 0] - 0.27),
                             (-places[:, 1] < places[:, 0] - 0.27))
    # bool_in = np.logical_and((places[:, 1] < places[:, 0]), (-places[:, 1] < places[:, 0]))
    return places[bool_in]
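A minimal usage sketch (the points below are invented; it only assumes numpy and the function defined above): a point is kept only when it lies inside the forward-facing wedge |y| < x - 0.27.

import numpy as np

places = np.array([[10.0, 2.0, 0.0],    # well inside the wedge -> kept
                   [ 5.0, 6.0, 0.0],    # too far to the side   -> dropped
                   [-3.0, 0.0, 0.0]])   # behind the camera     -> dropped
print(filter_camera_angle(places))      # -> [[10.  2.  0.]]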
Example #50
0
import numpy as np
import xarray as xr
import pandas as pd
import plots

RUTA='~/data/'
# open the geopotential file and combine the 'year' and 'number' coordinates
ds = xr.open_dataset(RUTA + 'monthly_hgt200_aug_feb.nc')
ninio34 = xr.open_dataset(RUTA + 'ninio34_index.nc')
month = ['Aug', 'Sep', 'Oct', 'Nov','Dec', 'Jan', 'Feb']
seas = ['ASO', 'SON', 'OND', 'NDJ', 'DJF']

index_monthly_upper = ninio34.ninio34_mon >= ninio34.ninio34_mon.quantile(0.75, dim='dim_0', interpolation='linear')
index_monthly_lower = ninio34.ninio34_mon <= ninio34.ninio34_mon.quantile(0.25, dim='dim_0', interpolation='linear')

index_monthly_normal = np.logical_and(ninio34.ninio34_mon < ninio34.ninio34_mon.quantile(0.75, dim='dim_0', interpolation='linear'), ninio34.ninio34_mon > ninio34.ninio34_mon.quantile(0.25, dim='dim_0', interpolation='linear'))

for i in np.arange(0,7):
	var = np.mean(ds.z.values[i, ~index_monthly_normal.values, :, :], axis=0) - np.mean(ds.z.values[i, index_monthly_normal.values, :, :], axis=0)
	tit = 'Composites differences EN+LN Years - ' + month[i]
	filename = './figures/hgt_200_composites_sum_NINIO_' + month[i] +'.png'
	plots.PlotCompDiff(var, ds.latitude, ds.longitude, tit, filename)

for i in np.arange(0,5):
	var = ds.isel(month=range(i, i + 3)).mean(dim='month')
	var = np.mean(var.z[~index_monthly_normal.values, :, :], axis=0) - np.mean(var.z[index_monthly_normal.values, :, :], axis=0)
	tit = 'Composites differences EN+LN Years - ' + seas[i]
	filename = './figures/hgt_200_composites_sum_NINIO_' + seas[i] +'.png'
	plots.PlotCompDiff(var, ds.latitude, ds.longitude, tit, filename)

Example #51
0
    'tf.math.log1p',
    lambda x, name=None: np.log1p(_convert_to_tensor(x)))

log_sigmoid = utils.copy_docstring(
    'tf.math.log_sigmoid',
    lambda x, name=None: -_softplus(-_convert_to_tensor(x)))

log_softmax = utils.copy_docstring(
    'tf.math.log_softmax',
    lambda logits, axis=None, name=None: (np.subtract(  # pylint: disable=g-long-lambda
        logits,
        reduce_logsumexp(logits, -1 if axis is None else axis, keepdims=True))))

logical_and = utils.copy_docstring(
    'tf.math.logical_and',
    lambda x, y, name=None: np.logical_and(x, y))

logical_not = utils.copy_docstring(
    'tf.math.logical_not',
    lambda x, name=None: np.logical_not(x))

logical_or = utils.copy_docstring(
    'tf.math.logical_or',
    lambda x, y, name=None: np.logical_or(x, y))

logical_xor = utils.copy_docstring(
    'tf.math.logical_xor',
    lambda x, y, name=None: np.logical_xor(x, y))


if JAX_MODE:
Example #52
0
                        idx_pred = idx_pred.data.cpu().numpy()

                        permu_edge_idx = None
                        permu_edge_acc = 0.
                        permu_edge_cor = 0.

                        if permu_edge_idx is None:
                            permu = list(itertools.permutations(np.arange(args.edge_type_num)))

                            edge_attr_np = to_np(edge_attr)
                            edge_attr_gt_np = to_np(edge_attr_gt)

                            for ii in permu:
                                p = np.array(ii)
                                idx_mapped = p[idx_gt]
                                acc = np.logical_and(idx_mapped == idx_pred, np.logical_not(np.eye(n_kp)))
                                acc = np.sum(acc) / (B * n_kp * (n_kp - 1))

                                if acc > permu_edge_acc:
                                    permu_edge_acc = acc
                                    permu_edge_idx = p

                        else:
                            idx_mapped = permu_edge_idx[idx_gt]
                            permu_edge_acc = np.logical_and(idx_mapped == idx_pred, np.logical_not(np.eye(n_kp)))
                            permu_edge_acc = np.sum(permu_edge_acc) / (B * n_kp * (n_kp - 1))

                        permu_edge_cor = np.corrcoef(
                            edge_attr_np.reshape(-1),
                            edge_attr_gt_np.reshape(-1))[0, 1]
Example #53
0
def Construction_A(Nx, Ny, dx, Neuf_points, k2_eau, k2_bois, gamma_eau, gamma_bois, rho_eau, v_eau, p_source,
                   SourceCylindrique, Source_Lineaire, Source_Ponctuelle, Map, \
                   N_PML, Source_Map, Q_map, coeff, centre_bois_x, centre_bois_y, Nx_Bois, Ny_Bois, alpha_Map, omega,
                   B_eau, PML_mode=1, TF_SF=True):
    h = dx
    # **********************Construction of matrix A************************

    # The order of the coefficients is always
    # [p(i-2,j),p(i-1,j) ,p(i,j-2),p(i,j-1),p(i,j),p(i+1,j),p(i+2,j),p(i,j+1),p(i,j+2)]

    # Case 1:
    if Neuf_points == True:
        Coeff1 = [0, 1, 0, 1, -(4 - k2_eau * h**2), 1, 0, 1, 0]
    else:
        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff1 = [1, 4, 1, 4, -11 + 6 * h**2 * k2_eau, 4, 1, 4, 1]

    # Case 2:
    if Neuf_points == True:
        Coeff2 = [0, 1, 0, 1, -(4 - k2_bois * h**2), 1, 0, 1, 0]
    else:

        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff2 = [1, 4, 1, 4, -11 + 6 * h**2 * k2_bois, 4, 1, 4, 1]

    # Cases 3 to 10:

    Coeff3 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2),
                             -1 / np.sqrt(2))
    Coeff4 = Coeff_Frontiere(gamma_eau, gamma_bois, 0, -1)
    Coeff5 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2),
                             -1 / np.sqrt(2))  # -ny
    Coeff6 = Coeff_Frontiere(gamma_bois, gamma_eau, 1, 0)
    Coeff7 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2),
                             1 / np.sqrt(2))
    Coeff8 = Coeff_Frontiere(gamma_bois, gamma_eau, 0, 1)
    Coeff9 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2),
                             1 / np.sqrt(2))
    Coeff10 = Coeff_Frontiere(gamma_eau, gamma_bois, -1, 0)

    # Cases 11 and 12 (triangle)
    # Case 11
    Nx11 = -np.cos(coeff / 2)  # -
    Ny11 = -np.sin(coeff / 2)  # -
    Coeff11 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx11, Ny11)
    # Case 12
    Nx12 = np.cos(coeff / 2)
    Ny12 = -np.sin(coeff / 2)
    Coeff12 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx12, Ny12)

    # Case 13 (circle)
    # See the loop further down

    # Cases 14 to 21 (PML): handled in the functions below

    # Case 22 (source): Option 2
    # Coeff22 = [0, 1, 0, 1, -(4 - k2_eau * h ** 2), 1, 0, 1, 0]

    Dict_Coeff = {
        1: Coeff1,
        2: Coeff2,
        3: Coeff3,
        4: Coeff4,
        5: Coeff5,
        6: Coeff6,
        7: Coeff7,
        8: Coeff8,
        9: Coeff9,
        10: Coeff10,
        11: Coeff11,
        12: Coeff12
    }

    # A = np.zeros([Nx * Ny, Nx * Ny], dtype=complex)
    b = np.zeros([Nx * Ny], dtype=complex)
    b_TFSF = np.zeros([Nx * Ny], dtype=complex)

    data_A = []
    ligne_A = []
    colonne_A = []

    # Matrix without the wood
    data_Q = []
    ligne_Q = []
    colonne_Q = []

    # Q = np.zeros([Nx * Ny, Nx * Ny], dtype=int)

    if PML_mode == 2:
        PML_Range = 22
    elif PML_mode == 1:
        PML_Range = 21

    Source_mask = np.ones([Ny, Nx], dtype=complex) * np.finfo(float).eps
    Source_mask[1:-1, 1:-1] = 0
    Source_mask[N_PML + 2:Nx - N_PML - 2, N_PML + 2:Nx - N_PML - 2] = 1
    #    Source_mask[N_PML-1,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML,N_PML-1] = 0
    #    Source_mask[Nx-N_PML,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML+1,Nx-N_PML] = 0

    for i in range(Nx):
        for j in range(Ny):
            L = p(i, j, Nx)

            Type = int(Map[i, j])

            if np.logical_and(Type >= 14, Type <= PML_Range):
                if PML_mode == 1:
                    Coefficient = Coeff_PML(Type, i, j, h, Nx, Ny, k2_eau,
                                            v_eau, N_PML)
                if PML_mode == 2:
                    alpha = alpha_Map[i, j]
                    Coefficient = Coeff_PML2(Type, h, Nx, Ny, omega, B_eau,
                                             alpha, rho_eau)

            elif Type == 13:
                Nx13 = (i - centre_bois_x) / coeff
                # y-coordinate of the circle centre
                centre_y = centre_bois_y - Ny_Bois / 2 + np.sqrt(coeff**2 -
                                                                 (Nx_Bois /
                                                                  2)**2)
                Ny13 = (j - centre_y) / coeff
                Coefficient = Coeff_Frontiere(gamma_eau, gamma_bois, Nx13,
                                              Ny13)
            else:
                if Type != 0:
                    Coefficient = Dict_Coeff[Type]

            if np.logical_and(np.logical_or(Type == 1, Type == 2),
                              Neuf_points == True):
                Position = [
                    p(i - 1, j - 1, Nx),
                    p(i - 1, j, Nx),
                    p(i - 1, j + 1, Nx),
                    p(i, j - 1, Nx),
                    p(i, j, Nx),
                    p(i, j + 1, Nx),
                    p(i + 1, j - 1, Nx),
                    p(i + 1, j, Nx),
                    p(i + 1, j + 1, Nx)
                ]
            else:
                Position = [
                    p(i - 2, j, Nx),
                    p(i - 1, j, Nx),
                    p(i, j - 2, Nx),
                    p(i, j - 1, Nx),
                    p(i, j, Nx),
                    p(i + 1, j, Nx),
                    p(i + 2, j, Nx),
                    p(i, j + 1, Nx),
                    p(i, j + 2, Nx)
                ]
            if TF_SF == True:
                data_Q.append(Q_map[i, j])
                ligne_Q.append(L)
                colonne_Q.append(L)

            for k, pos in enumerate(Position):
                # if np.logical_and(pos >= 0, pos < (Nx * Ny)):
                if Coefficient[k] != 0:
                    data_A.append(Coefficient[k])
                    ligne_A.append(L)
                    colonne_A.append(pos)
                    # A[L, int(pos)] = Coefficient[k]
            b[L] = Source_Map[i,
                              j] * Source_mask[i,
                                               j] * h**2 * rho_eau * p_source

    A_sp = scipy.sparse.coo_matrix((data_A, (ligne_A, colonne_A)),
                                   shape=(Nx**2, Nx**2),
                                   dtype=complex)
    A_sp = A_sp.tocsc()  # scipy.sparse.csc_matrix(A)
    if TF_SF == True:
        Q_sp = scipy.sparse.coo_matrix((data_Q, (ligne_Q, colonne_Q)),
                                       shape=(Nx**2, Nx**2),
                                       dtype=complex)
        Q_sp = Q_sp.tocsc()  # scipy.sparse.csc_matrix(A)
        b_TFSF = (Q_sp.dot(A_sp) - A_sp.dot(Q_sp)).dot(b)
    else:
        b_TFSF = b

    return A_sp, b_TFSF
Example #54
0
        self.items = items

    def __getitem__(self,i):
        try:
            return self.items[i]
        except KeyError as e:
            return i

with open('../data/all_confirmed_cases_with_population.json','r') as f:
    data = json.load(f)
with open('../data/all_confirmed_csse_cases_with_population.json','r') as f:
    datafeb = json.load(f)

tuplelist = [ (p, d)  for p, d in data.items()\
                               if max(d['cases']) >= 20\
                               and len(np.where(np.logical_and(np.array(d['times'])<=12,
                                                               np.array(d['cases'])>0))[0])\
                                   >= 8
                             ]

tuplelist = sorted([ t for t in tuplelist ],key=lambda x: -max(x[1]['cases']))

loaded_fits = len(sys.argv) > 1
if loaded_fits:
    pickle_filename = sys.argv[1]


n_fits = len(tuplelist)
n_col = int(np.ceil(np.sqrt(n_fits)))
n_row = n_col-1
#ax = ax.flatten()
Example #55
0
 def define_aspect_node_subset_local(self,
                                     dist_tolerance=4.,
                                     angle_tolerance=15.,
                                     dip_dir='E'):
     """
     """
     grid = self.grid
     try:
         print('using subset')
         # remember, steep_nodes is already core_nodes.size long
         subset = np.where(self.steep_nodes)[0]
     except NameError:
         print('using all nodes')
         subset = np.arange(grid.core_nodes.size)
     closest_ft_node = np.empty(subset.size, dtype=int)
     angle_to_ft = np.empty_like(closest_ft_node, dtype=float)
     new_angle_to_ft = np.empty_like(closest_ft_node, dtype=float)
     distance_to_ft = np.empty_like(closest_ft_node, dtype=float)
     distance_to_ft.fill(sys.float_info.max)
     new_distance_to_ft = np.empty_like(closest_ft_node, dtype=float)
     for i in self.ft_trace_node_ids:
         grid.calc_distances_of_nodes_to_point(
             (grid.node_x[i], grid.node_y[i]),
             node_subset=grid.core_nodes[subset],
             get_az='angles',
             out_distance=new_distance_to_ft,
             out_azimuth=new_angle_to_ft)
         closer_nodes = new_distance_to_ft < distance_to_ft
         distance_to_ft[closer_nodes] = new_distance_to_ft[closer_nodes]
         angle_to_ft[closer_nodes] = new_angle_to_ft[closer_nodes]
         closest_ft_node[closer_nodes] = i
     self.closest_ft_node = -np.ones(grid.core_nodes.size)
     self.distance_to_ft = -np.ones(grid.core_nodes.size)
     self.angle_to_ft = -np.ones(grid.core_nodes.size)
     self.closest_ft_node[subset] = closest_ft_node
     self.distance_to_ft[subset] = distance_to_ft
     # angle_to_ft is actually the angle_from_ft! So we need to adjust.
     # a second problem is that pts downslope (opposite az) can also be on the line.
     # solution - take a dip_dir input...
     angle_to_ft = (angle_to_ft + np.pi) % (2. * np.pi)
     self.angle_to_ft[subset] = angle_to_ft
     #gridshow.imshow_grid_at_node(self.grid, self.distance_to_ft)
     # show()
     #gridshow.imshow_grid_at_node(self.grid, self.angle_to_ft)
     # show()
     # the relevant condition is now that the local aspect and angle to fault
     # are the same...
     # We need to bias the five degrees against distant points, as it's easier
     # to have similar angles in the far field. Rule should be in px - the
     # two angles should be within *angle_tol* px of each other at the ft
     # trace.
     divergence_at_ft = distance_to_ft * \
         np.tan((angle_to_ft - self.aspect[subset]) % np.pi)
     # might be *too* forgiving for close-in nodes
     condition = np.less(np.fabs(divergence_at_ft),
                         grid.dx * dist_tolerance)
     #...so add another tester; must be w/i 15 degrees of each other:
     diff_angles = np.min([
         np.fabs(angle_to_ft - self.aspect[subset]),
         np.fabs(np.fabs(angle_to_ft - self.aspect[subset]) - 2. * np.pi)
     ],
                          axis=0)
     self.diff_angles = np.empty(grid.core_nodes.size, dtype=float)
     self.diff_angles.fill(sys.float_info.max)
     self.diff_angles[subset] = diff_angles
     #gridshow.imshow_grid_at_node(self.grid, self.angle_to_ft)
     # show()
     figure(6)
     gridshow.imshow_grid_at_node(
         self.grid,
         np.where(self.diff_angles < 100000., self.diff_angles, -1.))
     condition2 = np.less(diff_angles, angle_tolerance * np.pi / 180.)
     condition = np.logical_and(condition, condition2)
     core_nodes_size_condition = np.zeros(grid.core_nodes.size, dtype=bool)
     core_nodes_size_condition[subset] = condition
     #gridshow.imshow_grid_at_node(self.grid, core_nodes_size_condition)
     # show()
     #core_nodes_size_condition = np.zeros(grid.core_nodes.size, dtype=bool)
     #core_nodes_size_condition[subset] = condition2
     #gridshow.imshow_grid_at_node(self.grid, core_nodes_size_condition)
     # show()
     self.aspect_close_nodes = core_nodes_size_condition
     print(
         'Calculated and stored nodes with aspects compatible with fault trace...'
     )
     return self.aspect_close_nodes
Example #56
0
splitter.segment_cells(488, verbose=True)  # segment cells using the 488 wl
print('Cells segmented.')
# initialize a Foci instance from splitter
foci_obj = foci.Foci(splitter, verbose=True)
print('Foci instance created.')
foci_obj.segment(seg_channels=(640,), min_cutoff={640: 5}, rm_nuclear=False,
                 thresholds={640: (4000, 2000)})
print('Foci segmented.')
cell_im = foci_obj.segmented_cells[0]
all_foci_mask = foci_obj.foci[640][0] > 0
cell_golgi_vals = pd.DataFrame(index=np.unique(cell_im),
                               columns=['intensity_mean', 'volume'])
for c in np.unique(cell_im):
    if c == 0:
        continue
    cell_foci_mask = np.logical_and(all_foci_mask, cell_im == c)
    golgi_volume = np.sum(cell_foci_mask)
    golgi_mean = np.sum(foci_obj.imgs[488][0][cell_foci_mask])/np.sum(cell_foci_mask)
    cell_golgi_vals.at[c, 'intensity_mean'] = golgi_mean
    cell_golgi_vals.at[c, 'volume'] = golgi_volume
cell_golgi_vals.to_csv(output_dir + 'golgi.csv')
# output images to check quality of segmentation later
print('outputting images...')
im_fname = foci_obj.filenames.split('/')[-1]
im_output_dir = output_dir + '/' + im_fname[:-4]
if not os.path.isdir(im_output_dir):
    os.makedirs(im_output_dir)
os.chdir(im_output_dir)
for i in range(0, len(foci_obj.segmented_nuclei)):
    skimage.io.imsave(str(i)+'_nuclei.tif', foci_obj.segmented_nuclei[i].astype('uint16'))
    skimage.io.imsave(str(i)+'_cells.tif', foci_obj.segmented_cells[i].astype('uint16'))
Example #57
0
    for i in range(n):
        firing_position_e.append(np.zeros((1, int(z_e[i]))))
    for i in range(n):
        firing_position_c.append(np.zeros((1, int(z_c[i]))))

    f_p_x_t = firing_position_x_t.ravel()
    f_p_y_t = firing_position_y_t.ravel()
    f_p_x_e = firing_position_x_e.ravel()
    f_p_y_e = firing_position_y_e.ravel()
    f_p_x_c = firing_position_x_c.ravel()
    f_p_y_c = firing_position_y_c.ravel()

    f_p_t = np.column_stack((f_p_x_t, f_p_y_t))
    f_p_e = np.column_stack((f_p_x_e, f_p_y_e))
    f_p_c = np.column_stack((f_p_x_c, f_p_y_c))
    f_p_t = f_p_t[np.logical_not(np.logical_and(f_p_t[:, 0] == 0, f_p_t[:, 1] == 0))]
    f_p_e = f_p_e[np.logical_not(np.logical_and(f_p_e[:, 0] == 0, f_p_e[:, 1] == 0))]
    f_p_c = f_p_c[np.logical_not(np.logical_and(f_p_c[:, 0] == 0, f_p_c[:, 1] == 0))]

    # ==============================================================================#

    # density

    density_t = z_t / (np.pi * 100 * radius ** 2)
    density_e = z_e / (np.pi * 100 * radius ** 2)
    density_c = z_c / (np.pi * 100 * radius ** 2)

    density_d = nsteps / (400 * length ** 2) * firing_prob_2

    z_map_t = 0
    z_map_e = 0
Example #58
0
    def find_slope_lines(self, tolerance=1.):
        """
        This method attempts to find slope-consistent line profiles up facets,
        perpendicular to the fault.
        Assumes you used define_aspect_node_subset_local().
        """
        grid = self.grid
        self.possible_core_nodes = np.where(
            np.logical_and(self.steep_nodes, self.aspect_close_nodes))[0]
        pcn = self.possible_core_nodes
        unique_starting_pts = np.unique(
            self.closest_ft_node[pcn])  # in real node nos
        # set up places to store the profile data:
        profile_ft_node_id = []
        profile_ft_node_x = []
        profile_ft_node_y = []
        profile_ft_node_z = []
        profile_ft_node_dist = []
        profile_x_facet_pts = []
        profile_z_facet_pts = []
        profile_S_facet_pts = []
        count = 0
        for i in unique_starting_pts:
            count += 1
            print("Running ", count, " of ", unique_starting_pts.size)
            # set the local angle of the ft trace:
            ft_pt_distances_to_node = self.grid.calc_distances_of_nodes_to_point(
                (grid.node_x[i], grid.node_y[i]),
                node_subset=self.ft_trace_node_ids)
            close_ft_nodes = np.less(ft_pt_distances_to_node, 5. * grid.dx)
            x = grid.node_x[self.ft_trace_node_ids[close_ft_nodes]]
            y = grid.node_y[self.ft_trace_node_ids[close_ft_nodes]]
            (grad, offset) = np.polyfit(x, y, 1)
            condition = np.equal(self.closest_ft_node[pcn], i)
            nodes_possible = pcn[condition]
            print(nodes_possible.size, " nodes")
            if nodes_possible.size > 10.:
                #their_az = self.angle_to_ft[nodes_possible]
                #their_diff_angles = self.diff_angles[nodes_possible]
                their_elevs = self.elevs[grid.core_nodes][nodes_possible]
                #their_distances = self.distance_to_ft[nodes_possible]
                # need to make new distances so we remove the ambiguity of angle around the ft point (i.e., dists from a far-field pt on the ft normal)
                # now make a multiplier to make sure the reference point for
                # new distances is far from the actual pts:
                multiplier = 10. * \
                    np.ptp(grid.node_y[grid.core_nodes[nodes_possible]])
                # derive the position:
                x_ref = grid.node_x[i] + cmp(
                    grid.node_x[i],
                    np.mean(grid.node_x[grid.core_nodes[nodes_possible]])
                ) * multiplier * abs(grad)
                y_ref = grid.node_y[i] + cmp(
                    grid.node_y[i],
                    np.mean(grid.node_y[
                        grid.core_nodes[nodes_possible]])) * multiplier
                # get new absolute distances
                dist_to_ft = self.grid.calc_distances_of_nodes_to_point(
                    (x_ref, y_ref), node_subset=np.array([i]))
                dists_along_profile = self.grid.calc_distances_of_nodes_to_point(
                    (x_ref, y_ref),
                    node_subset=grid.core_nodes[nodes_possible]) - dist_to_ft
                # note the ft is now the origin, but pts might be back-to-front (consistently, though)
                # sort the distances. Remove any pts that aren't in a "cluster".
                # We assume there will be one big "bunched" plane, then a load
                # of outliers
                dist_order = np.argsort(dists_along_profile)
                dist_diffs = np.diff(dists_along_profile[dist_order])
                print("dists along profile sorted: ",
                      dists_along_profile[dist_order])
                print("dist diffs: ", dist_diffs)
                # max_diff = 3.*np.median(dist_diffs) #######this might need
                # attention if there's a heavy tail on the distances
                if grad < 1:
                    mod = np.sqrt(1. + grad**2.)
                else:
                    mod = np.sqrt(1. + (1. / grad)**2.)
                max_diff = 1.9 * mod * grid.dx
                locs_of_large_diffs = np.where(dist_diffs > max_diff)[0]
                # there should only be 1 place on the line where there's a cluster, i.e., one large value in pts_betw_large_diffs.
                # This is what we're seeking.
                # ...this can be empty quite easily
                pts_betw_large_diffs = np.diff(locs_of_large_diffs)
                # need to be careful here in case the where call gives an empty
                # array
                if locs_of_large_diffs.size > 1:
                    biggest_interval_loc = np.argmax(pts_betw_large_diffs)
                elif locs_of_large_diffs.size == 1:
                    # one side or the other must be bigger:
                    if 2. * locs_of_large_diffs[
                            0] < dists_along_profile.size - 1:
                        locs_of_large_diffs = np.array([
                            locs_of_large_diffs[0],
                            (dists_along_profile.size - 1)
                        ])
                    else:
                        locs_of_large_diffs = np.array(
                            [0, locs_of_large_diffs[0]])
                    biggest_interval_loc = np.array([0])
                    # here we assume that the single large diff must be further
                    # from the ft than the plane
                else:
                    locs_of_large_diffs = np.array(
                        [0, (dists_along_profile.size - 1)])
                    biggest_interval_loc = np.array([0])
                    #...all the pts in the line are one cluster
                # apply a test to ensure we only save "big" patches; a
                # threshold of 10 pts on the line
                try:
                    patch_size = pts_betw_large_diffs[biggest_interval_loc]
                except IndexError:  # pts_betw_large_diffs is empty
                    patch_size = locs_of_large_diffs[1] - locs_of_large_diffs[0]
                if patch_size > 10.:
                    start_pt_of_cluster = locs_of_large_diffs[
                        biggest_interval_loc] + 1
                    end_pt_of_cluster = locs_of_large_diffs[
                        biggest_interval_loc +
                        1] + 1  # both referring to the sorted list
                    # both +1s account for the required frame-of-reference change - the indices refer to where the big gaps start, not where they end
                    # so:
                    dists_to_sorted_pts = dists_along_profile[dist_order][
                        start_pt_of_cluster:end_pt_of_cluster]
                    elevs_of_sorted_pts = their_elevs[dist_order][
                        start_pt_of_cluster:end_pt_of_cluster]
                    slopes_of_sorted_pts = self.slopes[nodes_possible][
                        dist_order][start_pt_of_cluster:end_pt_of_cluster]
                    profile_ft_node_id.append(i.copy())
                    profile_ft_node_x.append(grid.node_x[i].copy())
                    profile_ft_node_y.append(grid.node_y[i].copy())
                    profile_ft_node_z.append(self.elevs[i].copy())
                    profile_ft_node_dist.append(dist_to_ft.copy())
                    profile_x_facet_pts.append(dists_to_sorted_pts.copy())
                    profile_z_facet_pts.append(elevs_of_sorted_pts.copy())
                    profile_S_facet_pts.append(slopes_of_sorted_pts.copy())
                    figure(5)
                    plot(dists_to_sorted_pts, elevs_of_sorted_pts)
                    # dirty, but effective code!

        self.profile_ft_node_id = profile_ft_node_id
        self.profile_ft_node_x = profile_ft_node_x
        self.profile_ft_node_y = profile_ft_node_y
        self.profile_ft_node_z = profile_ft_node_z
        self.profile_ft_node_dist = profile_ft_node_dist
        self.profile_x_facet_pts = profile_x_facet_pts
        self.profile_z_facet_pts = profile_z_facet_pts
        self.profile_S_facet_pts = profile_S_facet_pts
Example #59
0
fig = plt.figure(figsize=(7.2,3))
grid = plt.GridSpec(20, 17, wspace=0.25, hspace=0)

nb = 4 + 1

x_axis = np.arange(0,17*nb)

r_list = [1,2,3,4,5,6,7,8,9,10,11,12,21,16,17,18,19]

ax = fig.add_subplot(grid[:-3, :10])

shift_x=0
for i in range(len(r_list)):

    df_g = df[np.logical_and(df.tag=='gard',df.reg==r_list[i])]

    ax.fill_between([shift_x+x_axis[nb*i]-0.5,shift_x+x_axis[nb*i]+0.5],[df_ext.gar[i]+df_ext.gar_err[i]]*2,[df_ext.gar[i]-df_ext.gar_err[i]]*2,color='tab:red',alpha=0.45,linewidth=0.25)
    ax.plot([shift_x+x_axis[nb*i]-0.5,shift_x+x_axis[nb*i]+0.5],[df_ext.gar[i]]*2,color='tab:red',linewidth=0.75)
    ax.errorbar(shift_x+x_axis[nb*i],df_g.dmdtda.values[0],2*df_g.err_dmdtda.values[0],fmt='o',color=plt.cm.Greys(0.8),capsize=1.2,zorder=30,ms=2.5,elinewidth=0.6,mew=0.6)

    df_z = df[np.logical_and(df.tag=='zemp',df.reg==r_list[i])]

    ax.fill_between([shift_x+x_axis[nb*i+3]-0.5,shift_x+x_axis[nb*i+3]+0.5],[df_ext.zemp[i]+df_ext.zemp_err[i]]*2,[df_ext.zemp[i]-df_ext.zemp_err[i]]*2,color='tab:orange',alpha=0.45,linewidth=0.25)
    ax.plot([shift_x+x_axis[nb*i+3]-0.5,shift_x+x_axis[nb*i+3]+0.5],[df_ext.zemp[i]]*2,color='tab:orange',linewidth=0.75)
    ax.errorbar(shift_x+x_axis[nb*i+3],df_z.dmdtda.values[0],2*df_z.err_dmdtda.values[0],fmt='o',color=plt.cm.Greys(0.8),capsize=1.2,zorder=30,ms=2.5,elinewidth=0.6,mew=0.6)

    df_w = df[np.logical_and(df.tag=='wout',df.reg==r_list[i])]

    ax.fill_between([shift_x+x_axis[nb*i+1]-0.5,shift_x+x_axis[nb*i+1]+0.5],[df_ext.wout[i]+df_ext.wout_err[i]]*2,[df_ext.wout[i]-df_ext.wout_err[i]]*2,color='tab:cyan',alpha=0.45,linewidth=0.25)
    ax.plot([shift_x+x_axis[nb*i+1]-0.5,shift_x+x_axis[nb*i+1]+0.5],[df_ext.wout[i]]*2,color='tab:cyan',linewidth=0.75)
Example #60
0
def plot(obs_parameters='', n=0, m=0, f_rest=0, slope_correction=False, dB=False, vlsr=False, meta=False, avg_ylim=[0,0], cal_ylim=[0,0], rfi=[], xlim=[0,0], ylim=[0,0], dm=0,
	 obs_file='observation.dat', cal_file='', waterfall_fits='', spectra_csv='', power_csv='', plot_file='plot.png'):
	'''
	Process, analyze and plot data.

	Args:
		obs_parameters: dict. Observation parameters (identical to parameters used to acquire data)
			dev_args: string. Device arguments (gr-osmosdr)
			rf_gain: float. RF gain
			if_gain: float. IF gain
			bb_gain: float. Baseband gain
			frequency: float. Center frequency [Hz]
			bandwidth: float. Instantaneous bandwidth [Hz]
			channels: int. Number of frequency channels (FFT size)
			t_sample: float. Integration time per FFT sample
			duration: float. Total observing duration [sec]
			loc: string. Latitude, longitude, and elevation of observation (float, separated by spaces)
			ra_dec: string. Right ascension and declination of observation target (float, separated by space)
			az_alt: string. Azimuth and altitude of observation target (float, separated by space; takes precedence over ra_dec)
		n: int. Median filter factor (spectrum)
		m: int. Median filter factor (time series)
		f_rest: float. Spectral line reference frequency used for radial velocity (Doppler shift) calculations [Hz]
		slope_correction: bool. Correct slope in poorly-calibrated spectra using linear regression
		dB: bool. Display data in decibel scaling
		vlsr: bool. Display graph in VLSR frame of reference
		meta: bool. Display header with date, time, and target
		rfi: list. Blank frequency channels contaminated with RFI ([low_frequency, high_frequency]) [Hz]
		avg_ylim: list. Averaged plot y-axis limits ([low, high])
		cal_ylim: list. Calibrated plot y-axis limits ([low, high])
		xlim: list. x-axis limits ([low_frequency, high_frequency]) [Hz]
		ylim: list. Dynamic spectrum y-axis (time) limits ([start_time, end_time]) [s]
		dm: float. Dispersion measure for dedispersion [pc/cm^3]
		obs_file: string. Input observation filename (generated with virgo.observe)
		cal_file: string. Input calibration filename (generated with virgo.observe)
		waterfall_fits: string. Output FITS filename
		spectra_csv: string. Output CSV filename (spectra)
		power_csv: string. Output CSV filename (time series)
		plot_file: string. Output plot filename
	'''
	import matplotlib
	matplotlib.use('Agg') # Try commenting this line if you run into display/rendering errors
	import matplotlib.pyplot as plt
	from matplotlib.gridspec import GridSpec
	# Module-level imports of the original library, repeated here so this snippet is self-contained
	import numpy as np
	import warnings
	import os
	import time

	plt.rcParams['legend.fontsize'] = 14
	plt.rcParams['axes.labelsize'] = 14
	plt.rcParams['axes.titlesize'] = 18
	plt.rcParams['xtick.labelsize'] = 12
	plt.rcParams['ytick.labelsize'] = 12

	def decibel(x):
		if dB: return 10.0*np.log10(x)
		return x

	def shift(phase_num, n_rows):
		waterfall[:, phase_num] = np.roll(waterfall[:, phase_num], -n_rows)

	def SNR(spectrum, mask=np.array([])):
		'''Signal-to-Noise Ratio estimator, with optional masking.
		If mask not given, then all channels will be used to estimate noise
		(will drastically underestimate S:N - not robust to outliers!)'''

		if mask.size == 0:
			mask = np.zeros_like(spectrum)

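		# Noise is estimated from lag-2 channel differences, which cancels the
		# smooth spectral baseline; the sqrt(2) accounts for differencing two
		# independent noisy samples.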
		noise = np.nanstd((spectrum[2:]-spectrum[:-2])[mask[1:-1] == 0])/np.sqrt(2)
		background = np.nanmean(spectrum[mask == 0])

		return (spectrum-background)/noise

	def best_fit(power):
		'''Compute best Gaussian fit'''
		avg = np.nanmean(power)
		var = np.var(power)

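		# "Best fit" here is the analytic normal PDF built from the sample mean
		# and variance (method of moments), not a least-squares fit.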
		gaussian_fit_x = np.linspace(np.min(power),np.max(power),100)
		gaussian_fit_y = 1.0/np.sqrt(2*np.pi*var)*np.exp(-0.5*(gaussian_fit_x-avg)**2/var)

		return [gaussian_fit_x, gaussian_fit_y]

	# Load observation parameters from dictionary argument/header file
	if obs_parameters != '':
		frequency = obs_parameters['frequency']
		bandwidth = obs_parameters['bandwidth']
		channels = obs_parameters['channels']
		t_sample = obs_parameters['t_sample']
		loc = obs_parameters['loc']
		ra_dec = obs_parameters['ra_dec']
		az_alt = obs_parameters['az_alt']
	else:
		header_file = '.'.join(obs_file.split('.')[:-1])+'.header'

		warnings.warn('No observation parameters passed. Attempting to load from header file ('+header_file+')...')

		with open(header_file, 'r') as f:
			headers = [parameter.rstrip('\n') for parameter in f.readlines()]

		for i in range(len(headers)):
			if 'mjd' in headers[i]:
				mjd = float(headers[i].strip().split('=')[1])
			elif 'frequency' in headers[i]:
				frequency = float(headers[i].strip().split('=')[1])
			elif 'bandwidth' in headers[i]:
				bandwidth = float(headers[i].strip().split('=')[1])
			elif 'channels' in headers[i]:
				channels = int(headers[i].strip().split('=')[1])
			elif 't_sample' in headers[i]:
				t_sample = float(headers[i].strip().split('=')[1])
			elif 'loc' in headers[i]:
				loc = tuple(map(float, headers[i].strip().split('=')[1].split(' ')))
			elif 'ra_dec' in headers[i]:
				ra_dec = tuple(map(str, headers[i].split('=')[1].split(' ')))
			elif 'az_alt' in headers[i]:
				az_alt = tuple(map(float, headers[i].split('=')[1].split(' ')))



	# Transform frequency axis limits to MHz
	xlim = [x / 1e6 for x in xlim]

	# Transform to VLSR
	if vlsr:

		from astropy import units as u
		from astropy.coordinates import SpectralCoord, EarthLocation, SkyCoord
		from astropy.time import Time

		obs_location = EarthLocation.from_geodetic(loc[0], loc[1], loc[2])
		obs_time = obs_location.get_itrs(obstime=Time(str(mjd), format='mjd', scale='utc'))

		if az_alt!='':
				obs_coord = SkyCoord(az=az_alt[0]*u.degree, alt=az_alt[1]*u.degree, frame='altaz', location=obs_location, obstime=Time(str(mjd), format='mjd', scale='utc'))
				obs_coord = obs_coord.icrs
				print (obs_coord)
		else:
				obs_coord = SkyCoord(ra=ra_dec[0]*u.degree, dec=ra_dec[1]*u.degree, frame='icrs')


		#Transform center frequency
		frequency = SpectralCoord(frequency * u.MHz, observer=obs_time, target=obs_coord)
		frequency = frequency.with_observer_stationary_relative_to('lsrk')
		frequency = frequency.quantity.value

	# Define Radial Velocity axis limits
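	# These follow from the Doppler relation v = c*(f_rest - f)/f evaluated at
	# the band edges f = frequency -/+ bandwidth/2, with c = 299792.458 km/s.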
	left_velocity_edge = -299792.458*(bandwidth-2*frequency+2*f_rest)/(bandwidth-2*frequency)
	right_velocity_edge = 299792.458*(-bandwidth-2*frequency+2*f_rest)/(bandwidth+2*frequency)

	# Transform sampling time to number of bins
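	# Each FFT frame spans channels/bandwidth seconds, so t_sample seconds of
	# integration corresponds to t_sample*bandwidth/channels averaged frames.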
	bins = int(t_sample*bandwidth/channels)

	# Load observation & calibration data
	offset = 1
	waterfall = offset*np.fromfile(obs_file, dtype='float32').reshape(-1, channels)/bins

	# Delete first 3 rows (potentially containing outlier samples)
	waterfall = waterfall[3:, :]

	# Mask RFI-contaminated channels
	if rfi != []:

		for j in range(len(rfi)):

			# Frequency to channel transformation
			current_rfi = rfi[j]
			rfi_lo = channels*(current_rfi[0] - (frequency - bandwidth/2))/bandwidth
			rfi_hi = channels*(current_rfi[1] - (frequency - bandwidth/2))/bandwidth

			# Blank channels
			for i in range(int(rfi_lo), int(rfi_hi)):
				waterfall[:, i] = np.nan

	if cal_file != '':
		waterfall_cal = offset*np.fromfile(cal_file, dtype='float32').reshape(-1, channels)/bins

		# Delete first 3 rows (potentially containing outlier samples)
		waterfall_cal = waterfall_cal[3:, :]

		# Mask RFI-contaminated channels
		if rfi != []:

			for j in range(len(rfi)):

				# Frequency to channel transformation
				current_rfi = rfi[j]
				rfi_lo = channels*(current_rfi[0] - (frequency - bandwidth/2))/bandwidth
				rfi_hi = channels*(current_rfi[1] - (frequency - bandwidth/2))/bandwidth

				# Blank channels
				for i in range(int(rfi_lo), int(rfi_hi)):
					waterfall_cal[:, i] = np.nan

	# Compute average spectra
	with warnings.catch_warnings():
		warnings.filterwarnings(action='ignore', message='Mean of empty slice')
		avg_spectrum = decibel(np.nanmean(waterfall, axis=0))
		if cal_file != '':
			avg_spectrum_cal = decibel(np.nanmean(waterfall_cal, axis=0))

	# Number of sub-integrations
	subs = waterfall.shape[0]

	# Compute Time axis
	t = t_sample*np.arange(subs)

	# Compute Frequency axis; convert Hz to MHz
	frequency = np.linspace(frequency-0.5*bandwidth, frequency+0.5*bandwidth,
	                        channels, endpoint=False)*1e-6

	# Perform de-dispersion
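	# Cold-plasma dispersion delay relative to the top of the band:
	# deltaT [s] ~= 4149 * DM [pc/cm^3] * (f_chan^-2 - f_max^-2), with f in MHz.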
	if dm != 0:
		deltaF = float(np.max(frequency)-np.min(frequency))/subs
		f_start = np.min(frequency)
		for t_bin in range(subs):
			f_chan = f_start+t_bin*deltaF
			deltaT = 4149*dm*((1/(f_chan**2))-(1/(np.max(frequency)**2)))
			# Use a distinct name so the median-filter parameter n is not overwritten
			n_roll = int(float(deltaT)/(float(1)/channels))
			shift(t_bin, n_roll)

	# Define array for Time Series plot
	power = decibel(np.nanmean(waterfall, axis=1))

	# Apply Mask
	mask = np.zeros_like(avg_spectrum)
	mask[np.logical_and(frequency > f_rest*1e-6-0.2, frequency < f_rest*1e-6+0.8)] = 1 # Margins OK for galactic HI

	# Define text offset for axvline text label
	text_offset = 0

	# Calibrate Spectrum
	if cal_file != '':
		if dB:
			spectrum = 10**((avg_spectrum-avg_spectrum_cal)/10)
		else:
			spectrum = avg_spectrum/avg_spectrum_cal

		spectrum = SNR(spectrum, mask)
		if slope_correction:
			idx = np.isfinite(frequency) & np.isfinite(spectrum)
			fit = np.polyfit(frequency[idx], spectrum[idx], 1)
			ang_coeff = fit[0]
			intercept = fit[1]
			fit_eq = ang_coeff*frequency + intercept
			spectrum = SNR(spectrum-fit_eq, mask)

		# Mitigate RFI (Frequency Domain)
		if n != 0:
			spectrum_clean = SNR(spectrum.copy(), mask)
			for i in range(0, int(channels)):
				spectrum_clean[i] = np.nanmedian(spectrum_clean[i:i+n])

		# Apply position offset for Spectral Line label
		text_offset = 60

	# Mitigate RFI (Time Domain)
	if m != 0:
		power_clean = power.copy()
		for i in range(0, int(subs)):
			power_clean[i] = np.nanmedian(power_clean[i:i+m])


	# Write Waterfall to file (FITS)
	if waterfall_fits != '':
		from astropy.io import fits

		# Load data
		hdu = fits.PrimaryHDU(waterfall)

		# Prepare FITS headers
		hdu.header['NAXIS'] = 2
		hdu.header['NAXIS1'] = channels
		hdu.header['NAXIS2'] = subs
		hdu.header['CRPIX1'] = channels/2
		hdu.header['CRPIX2'] = subs/2
		hdu.header['CRVAL1'] = frequency[int(channels/2)]
		hdu.header['CRVAL2'] = t[int(subs/2)]
		hdu.header['CDELT1'] = bandwidth*1e-6/channels
		hdu.header['CDELT2'] = t_sample
		hdu.header['CTYPE1'] = 'Frequency (MHz)'
		hdu.header['CTYPE2'] = 'Relative Time (s)'
		try:
			hdu.header['MJD-OBS'] = mjd
		except NameError:
			warnings.warn('Observation MJD could not be found and will not be part of the FITS header.')
			pass

		# Delete pre-existing FITS file
		try:
			os.remove(waterfall_fits)
		except OSError:
			pass

		# Write to file
		hdu.writeto(waterfall_fits)

	# Write Spectra to file (csv)
	if spectra_csv != '':
		if cal_file != '':
			np.savetxt(spectra_csv, np.concatenate((frequency.reshape(channels, 1),
                       avg_spectrum.reshape(channels, 1), avg_spectrum_cal.reshape(channels, 1),
                       spectrum.reshape(channels, 1)), axis=1), delimiter=',', fmt='%1.6f')
		else:
			np.savetxt(spectra_csv, np.concatenate((frequency.reshape(channels, 1),
                       avg_spectrum.reshape(channels, 1)), axis=1), delimiter=',', fmt='%1.6f')

	# Write Time Series to file (csv)
	if power_csv != '':
		np.savetxt(power_csv, np.concatenate((t.reshape(subs, 1), power.reshape(subs, 1)),
                   axis=1), delimiter=',', fmt='%1.6f')

	# Initialize plot
	if cal_file != '':
		fig = plt.figure(figsize=(27, 15))
		gs = GridSpec(2, 3)
	else:
		fig = plt.figure(figsize=(21, 15))
		gs = GridSpec(2, 2)

	if meta:
		from astropy.coordinates import get_constellation

		epoch = (mjd - 40587) * 86400.0
		meta_title = 'Date and Time: ' + time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(epoch)) + '       '
		meta_title += 'Target: ' + obs_coord.to_string('hmsdms', precision=0) + ' in ' + get_constellation(obs_coord) + '\n'
		plt.suptitle(meta_title, fontsize=18)

	# Plot Average Spectrum
	ax1 = fig.add_subplot(gs[0, 0])
	ax1.plot(frequency, avg_spectrum)
	if xlim == [0,0]:
		ax1.set_xlim(np.min(frequency), np.max(frequency))
	else:
		ax1.set_xlim(xlim[0], xlim[1])
	ax1.ticklabel_format(useOffset=False)
	ax1.set_xlabel('Frequency (MHz)')
	if avg_ylim != [0,0]:
		ax1.set_ylim(avg_ylim[0], avg_ylim[1])
	if dB:
		ax1.set_ylabel('Relative Power (dB)')
	else:
		ax1.set_ylabel('Relative Power')

	if vlsr:
		cal_title = r'$Average\ Spectrum\ (V_{LSR})$'
	else:
		cal_title = 'Average Spectrum'

	if f_rest != 0:
		cal_title += '\n'

	ax1.set_title(cal_title)
	ax1.grid()

	if xlim == [0,0] and f_rest != 0:
		# Add secondary axis for Radial Velocity
		ax1_secondary = ax1.twiny()
		ax1_secondary.set_xlabel('Radial Velocity (km/s)', labelpad=5)
		ax1_secondary.axvline(x=0, color='brown', linestyle='--', linewidth=2, zorder=0)
		ax1_secondary.annotate('Spectral Line\nRest Frequency', xy=(460-text_offset, 5),
                               xycoords='axes points', size=14, ha='left', va='bottom', color='brown')
		ax1_secondary.set_xlim(left_velocity_edge, right_velocity_edge)
		ax1_secondary.tick_params(axis='x', direction='in', pad=-22)

	#Plot Calibrated Spectrum
	if cal_file != '':
		ax2 = fig.add_subplot(gs[0, 1])
		ax2.plot(frequency, spectrum, label='Raw Spectrum')
		if n != 0:
			ax2.plot(frequency, spectrum_clean, color='orangered', label='Median (n = '+str(n)+')')

		if cal_ylim !=[0,0]:
			ax2.set_ylim(cal_ylim[0],cal_ylim[1])
		else:
			ax2.set_ylim()

		if xlim == [0,0]:
			ax2.set_xlim(np.min(frequency), np.max(frequency))
		else:
			ax2.set_xlim(xlim[0], xlim[1])
		ax2.ticklabel_format(useOffset=False)
		ax2.set_xlabel('Frequency (MHz)')
		ax2.set_ylabel('Signal-to-Noise Ratio (S/N)')

		if vlsr:
			cal_title = r'$Calibrated\ Spectrum\ (V_{LSR})$' + '\n'
		else:
			cal_title = 'Calibrated Spectrum\n'

		if f_rest != 0:
			ax2.set_title(cal_title)
		else:
			ax2.set_title('Calibrated Spectrum')
		if n != 0:
			if f_rest != 0:
				ax2.legend(bbox_to_anchor=(0.002, 0.96), loc='upper left')
			else:
				ax2.legend(loc='upper left')

		if xlim == [0,0] and f_rest != 0:
			# Add secondary axis for Radial Velocity
			ax2_secondary = ax2.twiny()
			ax2_secondary.set_xlabel('Radial Velocity (km/s)', labelpad=5)
			ax2_secondary.axvline(x=0, color='brown', linestyle='--', linewidth=2, zorder=0)
			ax2_secondary.annotate('Spectral Line\nRest Frequency', xy=(400, 5),
                                   xycoords='axes points', size=14, ha='left', va='bottom', color='brown')
			ax2_secondary.set_xlim(left_velocity_edge, right_velocity_edge)
			ax2_secondary.tick_params(axis='x', direction='in', pad=-22)
		ax2.grid()

	# Plot Dynamic Spectrum
	if cal_file != '':
		ax3 = fig.add_subplot(gs[0, 2])
	else:
		ax3 = fig.add_subplot(gs[0, 1])

	ax3.imshow(decibel(waterfall), origin='lower', interpolation='None', aspect='auto',
		   extent=[np.min(frequency), np.max(frequency), np.min(t), np.max(t)])
	if xlim == [0,0] and ylim != [0,0]:
		ax3.set_ylim(ylim[0], ylim[1])
	elif xlim != [0,0] and ylim == [0,0]:
		ax3.set_xlim(xlim[0], xlim[1])
	elif xlim != [0,0] and ylim != [0,0]:
		ax3.set_xlim(xlim[0], xlim[1])
		ax3.set_ylim(ylim[0], ylim[1])

	ax3.ticklabel_format(useOffset=False)
	ax3.set_xlabel('Frequency (MHz)')
	ax3.set_ylabel('Relative Time (s)')
	ax3.set_title('Dynamic Spectrum (Waterfall)')

	# Adjust Subplot Width Ratio
	if cal_file != '':
		gs = GridSpec(2, 3, width_ratios=[16.5, 1, 1])
	else:
		gs = GridSpec(2, 2, width_ratios=[7.6, 1])

	# Plot Time Series (Power vs Time)
	ax4 = fig.add_subplot(gs[1, 0])
	ax4.plot(t, power, label='Raw Time Series')
	if m != 0:
		ax4.plot(t, power_clean, color='orangered', label='Median (n = '+str(m)+')')
		ax4.set_ylim()
	if ylim == [0,0]:
		ax4.set_xlim(0, np.max(t))
	else:
		ax4.set_xlim(ylim[0], ylim[1])
	ax4.set_xlabel('Relative Time (s)')
	if dB:
		ax4.set_ylabel('Relative Power (dB)')
	else:
		ax4.set_ylabel('Relative Power')
	ax4.set_title('Average Power vs Time')
	if m != 0:
		ax4.legend(bbox_to_anchor=(1, 1), loc='upper right')
	ax4.grid()

	# Plot Total Power Distribution
	if cal_file != '':
		gs = GridSpec(2, 3, width_ratios=[7.83, 1.5, -0.325])
	else:
		gs = GridSpec(2, 2, width_ratios=[8.8, 1.5])

	ax5 = fig.add_subplot(gs[1, 1])

	ax5.hist(power, np.max([int(np.size(power)/50),10]), density=1, alpha=0.5, color='royalblue', orientation='horizontal', zorder=10)
	ax5.plot(best_fit(power)[1], best_fit(power)[0], '--', color='blue', label='Best fit (Raw)', zorder=20)
	if m != 0:
		ax5.hist(power_clean, np.max([int(np.size(power_clean)/50),10]), density=1, alpha=0.5, color='orangered', orientation='horizontal', zorder=10)
		ax5.plot(best_fit(power_clean)[1], best_fit(power_clean)[0], '--', color='red', label='Best fit (Median)', zorder=20)
	ax5.set_xlim()
	ax5.set_ylim()
	ax5.get_shared_x_axes().join(ax5, ax4)
	ax5.set_yticklabels([])
	ax5.set_xlabel('Probability Density')
	ax5.set_title('Total Power Distribution')
	ax5.legend(bbox_to_anchor=(1, 1), loc='upper right')
	ax5.grid()

	# Save plots to file
	plt.tight_layout()
	plt.savefig(plot_file)
	plt.clf()
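
A minimal usage sketch for the plot() function above (all filenames and parameter values below are hypothetical, chosen only to illustrate the call signature):

# Hypothetical observation parameters; adjust to your own recording
obs = {
    'dev_args': '',
    'rf_gain': 30, 'if_gain': 25, 'bb_gain': 18,
    'frequency': 1420e6,   # Hz
    'bandwidth': 2.4e6,    # Hz
    'channels': 2048,
    't_sample': 1,         # s
    'duration': 600,       # s
    'loc': '', 'ra_dec': '', 'az_alt': ''
}

# Hypothetical input/output filenames
plot(obs_parameters=obs, n=20, m=35, f_rest=1420.4057517667e6,
     rfi=[[1419.2e6, 1419.3e6]], dB=True,
     obs_file='observation.dat', cal_file='calibration.dat',
     plot_file='plot.png')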