Example #1
def count_lower_neighbors(data):

  from numpy import zeros, greater, add, subtract, int8
  # shape of the interior region (the 1-voxel border is excluded)
  size_minus_2 = tuple(s - 2 for s in data.shape)
  compare = zeros(size_minus_2, int)
  count = zeros(size_minus_2, int)

  offsets = ((-1,-1,-1), (-1,-1,0), (-1,-1,1),
             (-1,0,-1), (-1,0,0), (-1,0,1),
             (-1,1,-1), (-1,1,0), (-1,1,1),
             (0,-1,-1), (0,-1,0), (0,-1,1),
             (0,0,-1), (0,0,1),
             (0,1,-1), (0,1,0), (0,1,1),
             (1,-1,-1), (1,-1,0), (1,-1,1),
             (1,0,-1), (1,0,0), (1,0,1),
             (1,1,-1), (1,1,0), (1,1,1))
             
  xsize, ysize, zsize = data.shape
  for xo, yo, zo in offsets:
    greater(data[1:-1,1:-1,1:-1],
            data[xo+1:xsize-1+xo,yo+1:ysize-1+yo,zo+1:zsize-1+zo],
            compare)
    add(compare, count, count)

  subtract(count, 13, count)
  
  return count.astype(int8)
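A minimal self-contained sketch of the shifted-slice comparison used above; the 4x4x4 array and the single (-1, 0, 0) offset are my own invented illustration, not code from the example's source:
import numpy as np

data = np.arange(64).reshape(4, 4, 4)
core = data[1:-1, 1:-1, 1:-1]              # interior voxels
shifted = data[0:-2, 1:-1, 1:-1]           # the (-1, 0, 0) neighbour of each interior voxel
out = np.zeros(core.shape, dtype=int)
np.greater(core, shifted, out)             # boolean result written into the integer buffer
print(out)                                 # all ones: every interior voxel exceeds that neighbour here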
Example #2
    def _get_plottable(self):
        # If log scale is set, only pos data will be returned

        x, y = self._x, self._y

        try: logx = self.get_transform().get_funcx().get_type()==LOG10
        except RuntimeError: logx = False  # non-separable

        try: logy = self.get_transform().get_funcy().get_type()==LOG10
        except RuntimeError: logy = False  # non-separable

        if not logx and not logy:
            return x, y

        if self._logcache is not None:
            waslogx, waslogy, xcache, ycache = self._logcache
            if logx==waslogx and waslogy==logy:
                return xcache, ycache

        Nx = len(x)
        Ny = len(y)

        if logx: indx = npy.greater(x, 0)
        else:    indx = npy.ones(len(x))

        if logy: indy = npy.greater(y, 0)
        else:    indy = npy.ones(len(y))

        ind, = npy.nonzero(npy.logical_and(indx, indy))
        x = npy.take(x, ind)
        y = npy.take(y, ind)

        self._logcache = logx, logy, x, y
        return x, y
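The log-scale filtering above amounts to keeping only points whose coordinates are strictly positive; a small illustrative sketch with invented values:
import numpy as np

x = np.array([-1.0, 0.5, 2.0, 0.0, 3.0])
y = np.array([1.0, 2.0, -3.0, 4.0, 5.0])
ind, = np.nonzero(np.logical_and(np.greater(x, 0), np.greater(y, 0)))
print(np.take(x, ind), np.take(y, ind))    # only the points with both coordinates > 0 survive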
Example #3
def evaluate_MI(fname, threshold = 0.95):
    CUT = slice(0,1000)
    # version = 3
    with open(fname, 'rb') as f:
        result = cPickle.load(f)

    phase_phase_coherence = result['phase x phase data']
    phase_phase_CMI = result['phase CMI data']
    surrCoherence = result['phase x phase surrs'][CUT, ...]
    surrCMI = result['phase CMI surrs'][CUT, ...]
    phase_amp_condMI = result['phase amp CMI data']
    surrPhaseAmpCMI = result['phase amp CMI surrs'][CUT, ...]

    res_phase_coh = np.zeros_like(phase_phase_coherence)
    res_phase_cmi = np.zeros_like(res_phase_coh)
    res_phase_amp_CMI = np.zeros_like(res_phase_coh)

    for i in range(res_phase_coh.shape[0]):
        for j in range(res_phase_coh.shape[1]):
            res_phase_coh[i, j] = np.sum(np.greater(phase_phase_coherence[i, j], surrCoherence[:, i, j])) / float(surrCoherence.shape[0])
            res_phase_cmi[i, j] = np.sum(np.greater(phase_phase_CMI[i, j], surrCMI[:, i, j])) / float(surrCMI.shape[0])
            res_phase_amp_CMI[i, j] = np.sum(np.greater(phase_amp_condMI[i, j], surrPhaseAmpCMI[:, i, j])) / float(surrPhaseAmpCMI.shape[0])

    res_phase_coh_thr = np.zeros_like(res_phase_coh, dtype=int)
    res_phase_coh_thr[np.where(res_phase_coh > threshold)] = 1
    res_phase_cmi_thr = np.zeros_like(res_phase_cmi, dtype=int)
    res_phase_cmi_thr[np.where(res_phase_cmi > threshold)] = 1
    res_phase_amp_CMI_thr = np.zeros_like(res_phase_amp_CMI, dtype=int)
    res_phase_amp_CMI_thr[np.where(res_phase_amp_CMI > threshold)] = 1

    return res_phase_coh_thr, res_phase_cmi_thr, res_phase_amp_CMI_thr
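The double loop above computes, per entry, the fraction of surrogates that the data value exceeds. A vectorised sketch of the same idea on assumed random inputs (broadcasting over the surrogate axis stands in for the loops):
import numpy as np

rng = np.random.default_rng(0)
data = rng.random((4, 4))
surrs = rng.random((1000, 4, 4))
p = np.sum(np.greater(data, surrs), axis=0) / float(surrs.shape[0])
significant = np.zeros_like(p, dtype=int)
significant[np.where(p > 0.95)] = 1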
Example #4
def computeSTA(spike_file,tdt_signal,channel,t_start,t_stop):
	'''
	Compute the spike-triggered average (STA) for a specific channel over a designated time window
	[t_start,t_stop].

	spike_file should be the results of plx = plexfile.openFile('filename.plx') and spike_file = plx.spikes[:].data
	tdt_signal should be the array of time-stamped values just for this channel
	'''
	channel_spikes = [entry for entry in spike_file if (t_start <= entry[0] <= t_stop)&(entry[1]==channel)]
	units = [spike[2] for spike in channel_spikes]
	unit_vals = set(units)  # number of units
	unit_vals.remove(0) 	# value 0 are units marked as noise events
	unit_sta = dict()

	tdt_times = np.ravel(tdt_signal.times)
	tdt_data = np.ravel(tdt_signal)

	for unit in unit_vals:
		
		spike_times = [spike[0] for spike in channel_spikes if (spike[2]==unit)]
		start_avg = [(time - 1) for time in spike_times] 	# look 1 s back in time until 1 s forward in time from spike
		stop_avg = [(time + 1) for time in spike_times]
		epoch = np.logical_and(np.greater(tdt_times,start_avg[0]),np.less(tdt_times,stop_avg[0]))
		epoch_inds = np.ravel(np.nonzero(epoch))
		len_epoch = len(epoch_inds)
		sta = np.zeros(len_epoch)
		num_spikes = len(spike_times)
		for i in range(0,num_spikes):
			epoch = np.logical_and(np.greater(tdt_times,start_avg[i]),np.less(tdt_times,stop_avg[i]))
			epoch_inds = np.ravel(np.nonzero(epoch))
			if (len(epoch_inds) == len_epoch):
				sta += tdt_data[epoch_inds]
		unit_sta[unit] = sta/float(num_spikes)

	return unit_sta
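The per-spike epoch selection combines two comparisons into one boolean window; a toy sketch with invented times and a +/- 1 s window:
import numpy as np

tdt_times = np.linspace(0.0, 10.0, 101)
spike_time = 4.3
epoch = np.logical_and(np.greater(tdt_times, spike_time - 1), np.less(tdt_times, spike_time + 1))
epoch_inds = np.ravel(np.nonzero(epoch))
print(epoch_inds.size)    # number of samples falling strictly inside the window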
Example #5
    def date_start_surcote(self, data, trimesters_tot, trim_maj_tot, age_min_retirement):
        ''' Determine the individual date from which the surcote (pension increment) is reached
        (the person has reached the legal retirement age and has contributed the target number of quarters).
        Note: for now we could return only the year.'''
        agem = data.info_ind['agem']
        # TODO: do something better with datesim
        datesim = self.dateleg.liam
        P = reduce(getattr, self.param_name.split('.'), self.P)
        if P.surcote.exist == 0:
            # If there is no surcote scheme
            return [2100*100 + 1]*len(trim_maj_tot)
        else:
            # 1. Build the boolean matrix indicating whether each year
            # qualifies for surcote according to the quarters criterion
            n_trim = array(P.plein.n_trim)
            cumul_trim = trimesters_tot.cumsum(axis=1)
            trim_limit = array((n_trim - nan_to_num(trim_maj_tot)))
            years_surcote_trim = greater(cumul_trim.T, trim_limit).T
            nb_years = years_surcote_trim.shape[1]

            # 2. Build the boolean matrix indicating whether each year
            # qualifies for surcote according to the age criterion
            age_by_year = array([array(agem) - 12*i for i in reversed(range(nb_years))])
            years_surcote_age = greater(age_by_year, array(age_min_retirement)).T

            # 3. Count the number of years meeting both criteria
            years_surcote = years_surcote_trim*years_surcote_age
            nb_years_surcote = years_surcote.sum(axis=1)
            start_surcote = [datesim - nb_year*100
                             if nb_year > 0 else 2100*100 + 1
                             for nb_year in nb_years_surcote]
            return start_surcote
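The quarters criterion compares a cumulative sum against a per-person target through a transpose for broadcasting; a hedged toy version with invented numbers:
import numpy as np

trimesters = np.array([[4, 4, 4], [2, 2, 2]])   # quarters credited per year, one row per person
target = np.array([8, 5])                       # quarters required per person
cumul = trimesters.cumsum(axis=1)
years_over_target = np.greater(cumul.T, target).T
print(years_over_target.sum(axis=1))            # number of qualifying years for each person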
Example #6
def prune_outside_window(boxlist, window):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also ClipToWindow which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
            of the window.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """

  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                     np.less(x_min, win_x_min),
                                     np.greater(y_max, win_y_max),
                                     np.greater(x_max, win_x_max)])
  valid_indices = np.reshape(
      np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
  return gather(boxlist, valid_indices), valid_indices
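A self-contained sketch of the pruning logic above, using two invented boxes and a unit window rather than the library's BoxList:
import numpy as np

boxes = np.array([[0.1, 0.1, 0.4, 0.4],
                  [-0.2, 0.0, 0.5, 0.5]])
window = np.array([0.0, 0.0, 1.0, 1.0])
y_min, x_min, y_max, x_max = np.array_split(boxes, 4, axis=1)
violations = np.hstack([np.less(y_min, window[0]),
                        np.less(x_min, window[1]),
                        np.greater(y_max, window[2]),
                        np.greater(x_max, window[3])])
valid = np.reshape(np.where(np.logical_not(np.max(violations, axis=1))), [-1])
print(valid)    # only the first box lies entirely inside the window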
Example #7
    def _read_particles(self):
        if not os.path.exists(self.particle_filename): return
        with open(self.particle_filename, 'r') as f:
            lines = f.readlines()
            self.num_stars = int(lines[0].strip().split(' ')[0])
            for num, line in enumerate(lines[1:]):
                particle_position_x = float(line.split(' ')[1])
                particle_position_y = float(line.split(' ')[2])
                particle_position_z = float(line.split(' ')[3])
                coord = [particle_position_x, particle_position_y, particle_position_z]
                # for each particle, determine which grids contain it
                # copied from object_finding_mixin.py
                mask = np.ones(self.num_grids)
                for i in range(len(coord)):
                    np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
                    np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
                ind = np.where(mask == 1)
                selected_grids = self.grids[ind]
                # in orion, particles always live on the finest level.
                # so, we want to assign the particle to the finest of
                # the grids we just found
                if len(selected_grids) != 0:
                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
                    ind = np.where(self.grids == grid)[0][0]
                    self.grid_particle_count[ind] += 1
                    self.grids[ind].NumberOfParticles += 1

                    # store the position in the *.sink file for fast access.
                    try:
                        self.grids[ind]._particle_line_numbers.append(num + 1)
                    except AttributeError:
                        self.grids[ind]._particle_line_numbers = [num + 1]
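The paired np.choose calls zero out grids whose edges exclude the point, writing back into mask in place; a minimal illustration with three invented 1-D grid edges:
import numpy as np

left = np.array([0.0, 1.0, 2.0])     # grid left edges along one axis
right = np.array([1.0, 2.0, 3.0])    # grid right edges along the same axis
coord = 1.5
mask = np.ones(3)
np.choose(np.greater(left, coord), (mask, 0), mask)    # drop grids that start beyond the point
np.choose(np.greater(right, coord), (0, mask), mask)   # drop grids that end before the point
print(mask)    # [0. 1. 0.] -- only the middle grid contains coord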
Example #8
def analyzeFrame(bgrFrame):
    mutex.acquire()
    if lowerBound and upperBound:

        hsvFrame = cv2.cvtColor(bgrFrame, cv2.COLOR_BGR2HSV)
        centeredBox = hsvFrame[topLeft[1]:bottomLeft[1], topLeft[0]:topRight[0], :]
        boxFlat = centeredBox.reshape([-1, 3])
        numBroken = 0
        # Doing it this way removes the worry of checkInBounds changing while analyzing an individual frame
        # i.e., it won't take effect until the next frame.
        if boundType == 'in':
            for i in xrange(0, (boxFlat.shape)[0]):
                isGreaterLower = numpy.all(numpy.greater(boxFlat[i], lowerBound))
                isLessUpper = numpy.all(numpy.less(boxFlat[i], upperBound))
                if isGreaterLower and isLessUpper:
                    numBroken = numBroken + 1
        else:
            for i in xrange(0, (boxFlat.shape)[0]):
                isLessLower = numpy.all(numpy.less(boxFlat[i], lowerBound))
                isGreaterUpper = numpy.all(numpy.greater(boxFlat[i], upperBound))
                if isLessLower and isGreaterUpper:
                    numBroken = numBroken + 1

        if (numBroken/area) >= threshold:
            sys.stderr.write('Exceeded\n')
            sys.stderr.flush()


    mutex.release()
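The per-pixel loop above can also be written as a single vectorised bound check; this is my own sketch of that idea with made-up HSV bounds, not the script's code:
import numpy as np

boxFlat = np.array([[10, 200, 30], [90, 90, 90], [20, 150, 40]])
lowerBound = np.array([0, 100, 0])
upperBound = np.array([50, 255, 60])
inBounds = np.logical_and(np.all(np.greater(boxFlat, lowerBound), axis=1),
                          np.all(np.less(boxFlat, upperBound), axis=1))
print(int(inBounds.sum()))    # number of pixels strictly inside the bounds, no Python loop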
Example #9
def pickBreakpointV2(response, x1, predictor):
    #print int(min(predictor))*10, int(max(predictor)+1)*10, int(max(predictor) - min(predictor) + 1)/2
    #bpChoices = geneBpChoices(min(predictor), max(predictor), 20)
    results = np.zeros((len(bpChoices)-1, 2))
    print bpChoices
    
    for i in range(len(bpChoices)-1):
        print i
        x2star = (predictor - bpChoices[i]) * np.greater(predictor, bpChoices[i])
        x1star = x1 * np.greater(predictor, bpChoices[i]) 
        tempPredictor = np.array(zip(x1, x1star, predictor, x2star))
        #fileLoc = filePath + 'temp.csv'
        #np.savetxt(fileLoc, tempPredictor, delimiter=',', fmt = '%s')
        #print tempPredictor
        tempmodel = ols.ols(response, tempPredictor,'y',['F1F2', 'F1F2star', 'dist', 'diststar'])
        results[i,0] = i
        #results[i,1] = tempmodel.sse
        results[i,1] = tempmodel.R2

    optBP = int(results[np.argmax(results, axis = 0)[1],0])
    print 'Optimal Index:', optBP
    print 'Optimal changepoint: ', bpChoices[optBP], ' exp value: ', np.exp(bpChoices[optBP]), ' with R2 = ', results[optBP, 1]

    #x2star = (predictor - bpChoices[optBP]) * np.greater(predictor, bpChoices[optBP])
    #optPredictor = np.array(zip(predictor, x2star))
    #optmodel = ols.ols(response, optPredictor,'y',['x1', 'x2'])
    x1star = x1 * np.greater(predictor, bpChoices[optBP])
    x2star = (predictor - bpChoices[optBP]) * np.greater(predictor, bpChoices[optBP])
    optPredictor = np.array(zip(x1, x1star, predictor, x2star))
    optmodel = ols.ols(response, optPredictor,'y',['F1F2', 'F1F2star', 'dist', 'diststar'])
    
    #return bpChoices[optBP], results, optmodel, optmodel.b[0]+optmodel.b[1]*predictor+optmodel.b[2]*x2star
    print results, optmodel.b
    print optmodel.summary()
    return results
Example #10
 def _getinvisible(self):
     if self.invisible is not None:
         inv = self.invisible
     else:
         inv = np.zeros(len(self.atoms))
     if self.invisibilityfunction:
         inv = np.logical_or(inv, self.invisibilityfunction(self.atoms))
     r = self._getpositions()
     if len(r) > len(inv):
         # This will happen in parallel simulations due to ghost atoms.
         # They are invisible.  Hmm, this may cause trouble.
         i2 = np.ones(len(r))
         i2[:len(inv)] = inv
         inv = i2
         del i2
     if self.cut["xmin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,0], self.cut["xmin"]))
     if self.cut["xmax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,0], self.cut["xmax"]))
     if self.cut["ymin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,1], self.cut["ymin"]))
     if self.cut["ymax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,1], self.cut["ymax"]))
     if self.cut["zmin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,2], self.cut["zmin"]))
     if self.cut["zmax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,2], self.cut["zmax"]))
     return inv
Example #11
    def sample_3d_pdf(self, pdf, points, xlim, ylim, zlim):
        logger.info("Sampling FD distribution for {0} particles.".format(points.shape[0]))
        # Create CDF in axis 0 direction by summing in axis 1, then cumsum:
        F = pdf.sum(2).sum(1).cumsum()
        F /= F.max()

        x = np.interp(points[:, 0], F, np.arange(F.shape[0]))
        xi = np.around(x).astype(int)        # For indexing

        F2 = pdf.sum(2).cumsum(axis=1)
        F2 /= F2.max(axis=1).reshape((-1, 1)).repeat(F2.shape[1], axis=1)

        yi = np.greater(F2[xi, :], points[:, 1].reshape((-1, 1))).argmax(axis=1)
        y = yi-(F2[xi, yi]-points[:, 1])/(F2[xi, yi]-F2[xi, yi-1])          # Interpolation

        F3 = pdf.cumsum(axis=2)
        F3 /= F3.max(axis=2).reshape((F3.shape[0], F3.shape[1], 1)).repeat(F3.shape[2], axis=2)

        zi = np.greater(F3[xi, yi, :], points[:, 2].reshape((-1, 1))).argmax(axis=1)
        z = zi-(F3[xi, yi, zi]-points[:, 2])/(F3[xi, yi, zi]-F3[xi, yi, zi-1])          # Interpolation

        px = xlim[0] + x * (xlim[1] - xlim[0]) / pdf.shape[0]
        py = ylim[0] + y * (ylim[1] - ylim[0]) / pdf.shape[1]
        pz = zlim[0] + z * (zlim[1] - zlim[0]) / pdf.shape[2]
        p = np.hstack((px.reshape((-1, 1)), py.reshape((-1, 1)), pz.reshape((-1, 1))))

        return p
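The greater(...).argmax pattern above is an inverse-CDF lookup: the first bin whose cumulative value exceeds the uniform draw. A 1-D hedged sketch:
import numpy as np

rng = np.random.default_rng(1)
pdf = np.array([0.1, 0.2, 0.4, 0.3])
cdf = pdf.cumsum()
cdf /= cdf[-1]
u = rng.random(5).reshape(-1, 1)
bins = np.greater(cdf, u).argmax(axis=1)   # first bin where the CDF exceeds each draw
print(bins)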
Example #12
def get_rt_change_deriv(kin_sig, bins, d_vel_thres = 0., fs = 60):
    '''
    input:
        kin_sig: trials x time array corresponding to velocity of the cursor
        
        start_tm: time from beginning of 'bins' of which to ignore any motion (e.g. if hold 
            time is 200 ms, and your kin_sig starts at the beginning of the hold time, set 
            start_tm = 0.2 to prevent micromovements in the hold time from being captured)

    output: 
        kin_feat : a trl x 3 array:
            column1 = RT in units of "bins" indices
            column2 = RT in units of time (bins[column1])
            column3 = index of max of kin_sig

    '''
    ntrials= kin_sig.shape[0]
    kin_feat = np.zeros((ntrials, 2))
    
    #Iterate through trials
    for trl in range(ntrials):   
        spd = kin_sig[trl,:]

        dt = 1./fs
        d_spd = np.diff(spd,axis=0)/dt
        
        if len(np.ravel(np.nonzero(np.greater(d_spd,d_vel_thres))))==0:
            bin_rt = 0
        else:
            bin_rt = np.ravel(np.nonzero(np.greater(d_spd,d_vel_thres)))[0]
        
        kin_feat[trl, 0] = bin_rt + 1 #Index of 'RT'
        kin_feat[trl, 1] = bins[int(kin_feat[trl, 0])] #Actual time of 'RT'
    return kin_feat
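Reaction time here is just the first sample where the speed derivative crosses the threshold; a toy sketch with an invented speed trace and threshold:
import numpy as np

spd = np.array([0.0, 0.1, 0.05, 0.8, 1.2])
fs = 60
d_spd = np.diff(spd, axis=0) / (1. / fs)
crossings = np.ravel(np.nonzero(np.greater(d_spd, 10.0)))
bin_rt = crossings[0] if crossings.size else 0
print(bin_rt)    # index of the first super-threshold acceleration sample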
Example #13
def chkoverlap(par0,par1,nphi=100):
    """
    Check for overlap between two ellipses
    """
    phiLIST=np.linspace(0.,2*np.pi,nphi)
    x0,y0=phi2xy_ellipse(phiLIST,**par0) ; r0=np.sqrt(x0**2+y0**2)
    x1,y1=phi2xy_ellipse(phiLIST,**par1) ; r1=np.sqrt(x1**2+y1**2)
    return not (np.all(np.greater(r0,r1)) or np.all(np.greater(r1,r0)))
Example #14
 def __gt__(a, b):
     try:
         return np.greater(a.v, b.v)
     except AttributeError:
         if isinstance(a, Measurement):
             return np.greater(a.v, b)
         else:
             return np.greater(a, b.v)
Example #15
def valid_na_data(ij):
    "pull out the k-values of an ijk array that are positive and have indices in the vicinity of north america"
    x = ij_to_ll(ij)
    imask = np.logical_and(np.greater(x[:,0], -150), np.greater(-50, x[:,0]))
    jmask = np.logical_and(np.greater(x[:,1], 20), np.greater(70, x[:,1]))
    kmask = np.greater(x[:,2], 0)
    xmask = np.logical_and(np.logical_and(imask, jmask), kmask)
    return x[:,2][xmask]
Example #16
 def _subset(self, z):
     """
     Hampel's function is defined piecewise over the range of z
     """
     z = np.fabs(np.asarray(z))
     t1 = np.less_equal(z, self.a)
     t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
     t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
     return t1, t2, t3
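Note the idiom above: multiplying boolean arrays acts as an elementwise logical AND. A quick check with arbitrary Hampel constants a=1, b=2, c=4 (my own values):
import numpy as np

z = np.fabs(np.array([-0.5, 1.5, 3.0, 10.0]))
a, b, c = 1.0, 2.0, 4.0
t1 = np.less_equal(z, a)
t2 = np.less_equal(z, b) * np.greater(z, a)
t3 = np.less_equal(z, c) * np.greater(z, b)
print(t1, t2, t3)    # each |z| falls in at most one of the three pieces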
Example #17
def _seg_by_structure_feature(oracle, delta=0.05, width=9, hier=False, connectivity='rsfx'):
    self_sim = create_selfsim(oracle, method=connectivity)
    lag_sim = librosa.segment.recurrence_to_lag(self_sim, pad=False)
    sf = scipy.ndimage.filters.gaussian_filter(lag_sim, [0.5, width], 0, mode='reflect')
    novelty_curve = np.sqrt(np.mean(np.diff(sf, axis=1) ** 2, axis=0))
    novelty_curve -= np.min(novelty_curve)
    novelty_curve /= np.max(novelty_curve)
    novelty_curve = np.insert(novelty_curve,0,0)

    bound_width=9
    offset = int((bound_width - 1) / 2)
    tmp_novelty = np.pad(novelty_curve, [offset], mode='reflect')
    boundaries = [0]
    for i in range(len(novelty_curve)):
        if (np.greater(tmp_novelty[i + offset], tmp_novelty[i:i + offset]).all() and
                np.greater(tmp_novelty[i + offset], tmp_novelty[i + offset + 1:i + bound_width]).all() and
                    tmp_novelty[i + offset] > delta):
            boundaries.append(i)
    boundaries.append(oracle.n_states-2)

    seg_sim_mat = np.zeros((len(boundaries) - 1, len(boundaries) - 1))
    intervals = list(zip(boundaries[:-1], boundaries[1:]))
    self_sim[self_sim > 1.0] = 1.0
    for i in range(len(boundaries) - 1):
        for j in range(len(boundaries) - 1):
            seg_sim_mat[i, j] = _segment_sim(self_sim[intervals[i][0]:intervals[i][1],
                                             intervals[j][0]:intervals[j][1]])

    seg_sim_mat = (seg_sim_mat + seg_sim_mat.T) / 2
    seg_sim_mat[seg_sim_mat < (np.mean(seg_sim_mat) + np.std(seg_sim_mat))] = 0.0

    new_seg_mat = seg_sim_mat
    while True:
        new_seg_mat = np.dot(new_seg_mat, new_seg_mat)
        thresh_seg_mat = new_seg_mat
        new_seg_mat[new_seg_mat < 1.0] = 0.0
        new_seg_mat[new_seg_mat >= 1.0] = 1.0
        if np.array_equal(new_seg_mat, thresh_seg_mat):
            break

    labels = np.zeros(len(boundaries) - 1)
    for i in range(thresh_seg_mat.shape[0]):
        ind = np.nonzero(thresh_seg_mat[i, :])
        label_ind = 0
        for idx in ind[0]:
            if labels[idx]:
                if label_ind:
                    labels[idx] = label_ind
                else:
                    label_ind = labels[idx]
            else:
                if label_ind:
                    labels[idx] = label_ind
                else:
                    labels[idx] = i + 1
                    label_ind = i + 1
    return np.array(boundaries), labels
Example #18
def _getCliques(seq_list, num_needed, min_hd=2, cutoff=1):
    '''Helper function for finding sequence groups w/ min inter-seq hd
    '''
    hg = hammingGraph(seq_list)
    f = np.vectorize(lambda x: x[0])
    hg = f(hg)
    hd_thresh = np.zeros_like(hg)
    np.greater(hg, np.full_like(hg, min_hd-1), hd_thresh)
    return find_cliques(hd_thresh.astype(np.uint8), num_needed, cutoff)
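The thresholding above writes the comparison result straight into a preallocated array via the out argument; a standalone sketch with an invented distance matrix:
import numpy as np

hg = np.array([[0, 1, 3],
               [1, 0, 2],
               [3, 2, 0]])
min_hd = 2
hd_thresh = np.zeros_like(hg)
np.greater(hg, np.full_like(hg, min_hd - 1), hd_thresh)   # 1 where the distance is >= min_hd
print(hd_thresh.astype(np.uint8))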
Example #19
    def __call__(self, variations):

        vars_for_stat = self._filter_samples_for_stats(variations)

        assert len(vars_for_stat.samples) == self.sample_dp_means.shape[0]

        dps = vars_for_stat[DP_FIELD]
        if is_dataset(dps):
            dps = dps[:]
        num_no_miss_calls = numpy.sum(dps > 0, axis=1)

        high_dp_calls = dps > self._too_high_dps

        num_high_dp_calls = numpy.sum(high_dp_calls, axis=1)

        with numpy.errstate(all='ignore'):
            # This is the stat
            freq_high_dp = num_high_dp_calls / num_no_miss_calls

        result = {}

        if self.do_histogram:
            counts, edges = histogram(freq_high_dp, n_bins=self.n_bins,
                                      range_=self.range)
            result[COUNTS] = counts
            result[EDGES] = edges

        if self.do_filtering or self.report_selection:
            het_call = call_is_het(vars_for_stat[GT_FIELD])
            with numpy.errstate(all='ignore'):
                obs_het = numpy.sum(het_call, axis=1) / num_no_miss_calls
            with numpy.errstate(all='ignore'):
                too_much_het = numpy.greater(obs_het, self.max_obs_het)

            with numpy.errstate(all='ignore'):
                snps_too_high = numpy.greater(freq_high_dp,
                                              self.max_high_dp_freq)
            to_remove = numpy.logical_and(too_much_het, snps_too_high)
            selected_snps = numpy.logical_not(to_remove)

        if self.report_selection:
            result[SELECTED_VARS] = selected_snps

        if self.do_filtering:
            flt_vars = variations.get_chunk(selected_snps)

            n_kept = numpy.count_nonzero(selected_snps)
            tot = selected_snps.shape[0]
            n_filtered_out = tot - n_kept

            result[FLT_VARS] = flt_vars
            result[FLT_STATS] = {N_KEPT: n_kept,
                                 N_FILTERED_OUT: n_filtered_out,
                                 TOT: tot}

        return result
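A compact sketch of the frequency computation and threshold comparison above; the values and the 0.5 cut-off are placeholders, not the library's defaults:
import numpy as np

dps = np.array([[10, 0, 3], [50, 60, 2]])      # depth per call, one row per variant
max_high_dp_freq = 0.5
num_no_miss_calls = np.sum(dps > 0, axis=1)
num_high_dp_calls = np.sum(dps > 40, axis=1)
with np.errstate(all='ignore'):
    freq_high_dp = num_high_dp_calls / num_no_miss_calls
snps_too_high = np.greater(freq_high_dp, max_high_dp_freq)
print(snps_too_high)    # [False  True]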
Example #20
def input_mask(ain, type,  mask, missing = None):

    """    #-------------------------------------------------------------------
    #                                      
    #     purpose: set up the input mask including missing from ain
    #
    #     usage:    
    #
    #     passed : 
    #
    #     returned:  
    #
    #
    #------------------------------------------------------------------------"""
    if type != 'h' and type != 'v':
        raise ValueError, 'Mask type must be h or v'
        return 

    if missing == None:
        try:
            omit = ain.missing_value
        except AttributeError:
            omit = 1.0e20
    else:
        omit = missing

    # ----- insert 0.0 in mask where array has missing data -------

    mask_size = len(mask.shape)
    data_size = len(ain.shape)

    if mask_size ==  2 and data_size > 2:             # make reduced array with first lat_lon section from a

        if data_size == 3:                            # caution: assuming standard order lat-lon varying the fastest
            if type == 'h':
                reduced = ain[0,:,:] 
            elif type == 'v':
                reduced = ain[:,:,0]                  # removes lats dummy latitude
        elif data_size == 4:                       
            if type == 'h':
                reduced = ain[0,0,:,:] 
            elif type == 'v':
                reduced = ain[0,:,:,0]                # removes lats dummy latitude
        else:
            raise IndexError, 'Data size is out of range'
            return 
         
        amskin = numpy.where( numpy.greater(reduced, 0.9*omit),  0.0, mask)
        amskin = amskin.astype(numpy.float32)

    else:                                                    # 0.0 -> missing in passed mask

        amskin = numpy.where( numpy.greater(ain, 0.9*omit),  0.0, mask)
        amskin = amskin.astype(numpy.float32)

    return omit, amskin  
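The core of the masking above is an np.where over a 'greater than 0.9 * missing' test; a tiny illustrative sketch:
import numpy as np

omit = 1.0e20
field = np.array([[1.0, omit], [2.0, 3.0]])
mask = np.ones_like(field)
amskin = np.where(np.greater(field, 0.9 * omit), 0.0, mask).astype(np.float32)
print(amskin)    # the mask is zeroed wherever the data carries the missing value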
Example #21
    def detect_obstacle(self, sonarPing, depth):
        """
        calculate distance at which obstacle is detected
        based on deprecated sonar_detectobstacle.py

        Parameters that exist but are currently not used:
        # GlitchCount = rospy.get_param("/GlitchCount")
        This might be interesting in connection with the capability of the sonar to repeat a ping,
        though I am not sure if the micron is one of the sonars that can do repeats, and of course this loses time
        """
        if sonarPing.hasBins:
            BinLength = sonarPing.ADInterval / float(self.SoundspeedInWater)/2.    # Make sure the division is by float
            start_bin = int(np.ceil(self.BlankDist/BinLength)) # Index of first bin after blanking distance
            thresholds = self.get_thresholds(sonarPing, start_bin) 


            # return indices of bins with value above threshold:
            ReturnIndexes = np.flatnonzero(np.greater(sonarPing.pingPower, thresholds))
            detections = np.greater(sonarPing.pingPower, thresholds)
            detections = np.multiply(detections, thresholds)

            target_range = -1
            # calculate target range in metres
            if ReturnIndexes.size > 0:
                # remove indices that are continuous
                ReturnIndexes = remove_continued(ReturnIndexes)
 
                idx = 0
                while ((idx < len(ReturnIndexes))
                       and (ReturnIndexes[idx] < start_bin)):
                    idx += 1
                if idx < len(ReturnIndexes):
                    target_range = ReturnIndexes[idx] * BinLength * self.RangeFudgeFactor

                    # check if this is just a reflection of the surface
                    if abs(target_range - depth) < 0.1:
                        idx += 1
                        #print("depth ???")
                        if len(ReturnIndexes) > idx:
                            target_range = ReturnIndexes[idx] * BinLength * self.RangeFudgeFactor
                        else:
                            target_range = -1
                    
            # mean intensity of bins beyond the blanking distance
            # may be of interest in identifying areas of interest
            meanIntensity = np.mean(sonarPing.pingPower[start_bin:-1])

            # apply offsets so the bearing angle can be turned into rotation around a given axis
            # in the Delphin2 Coordinate system
            bearing_in_delphin2ks = (sonarPing.transducerBearing - self.rotationOffset) * self.rotationDirection
            results = [bearing_in_delphin2ks, target_range, meanIntensity]
        else:
            results = [0, 0, 0]
            detections = []
        return results, detections
Example #22
def find_targ_acquired_time(d,threshold,stimon,runsize=4,sample_period=5):

    """Finds time when distance values in d array are below threshold for
    at least runsize values in a row.  Returns time (in ms)."""

    regions = contiguous_regions(np.less(d,threshold))
    longruns = np.greater(regions[:,1]-regions[:,0],runsize)
    after_stimon = np.greater(np.multiply(regions[:,1],sample_period),stimon)
    longruns_after_stimon = np.nonzero(np.logical_and(longruns,after_stimon))
    return regions[longruns_after_stimon][0][0]*sample_period
Example #23
 def find_point(self, coord):
     """
     Returns the (objects, indices) of grids containing an (x,y,z) point
     """
     mask = np.ones(self.num_grids)
     for i in range(len(coord)):
         np.choose(np.greater(self.grid_left_edge[:, i], coord[i]), (mask, 0), mask)
         np.choose(np.greater(self.grid_right_edge[:, i], coord[i]), (0, mask), mask)
     ind = np.where(mask == 1)
     return self.grids[ind], ind
Example #24
def unfold_boundary_crossings(Input, all_positions, positions, i):
    test1 = all_positions[i-1] - positions
    test2 = -1.0*(all_positions[i-1] - positions)
    test_i1 = np.nonzero(np.greater(test1, 0.80*Input['BOXsize']))
    test_i2 = np.nonzero(np.greater(test2, 0.80*Input['BOXsize']))
    positions[test_i1[0],test_i1[1]] = (all_positions[i-1,test_i1[0],test_i1[1]] + 
                                        (Input['BOXsize'] - test1[test_i1[0],test_i1[1]]))
    positions[test_i2[0],test_i2[1]] = (all_positions[i-1,test_i2[0],test_i2[1]] - 
                                        (Input['BOXsize'] - test2[test_i2[0],test_i2[1]]))
    return positions
Example #25
def main(argv):

#  plt.ion()

  side_padding = 15
  sep_energy = 2109
  dep_energy = 1597
  binwidth = 0.5
  
  file_names = ["ms_event_set_runs11510-11530_mcmcfit.npz", "ms_event_set_runs11530-11560_mcmcfit.npz", "ms_event_set_runs11560-11570_mcmcfit.npz"]
  all_wfs = []
  for file_name in file_names:
    if os.path.isfile(file_name):
      data = np.load(file_name)
      all_wfs.append(  data['wfs'][:])
    else:
      print "no wf file named %s" % file_name
      exit(0)

  all_wfs = np.concatenate(all_wfs[:])
  energy_arr = np.zeros(all_wfs.size)
  like_arr = np.zeros(all_wfs.size)
  
  for (idx, wf) in enumerate(all_wfs):
    energy_arr[idx] = wf.energy
    like_arr[idx] = -1*wf.lnprob / wf.wfLength

  like_arr[ np.where( np.isnan(like_arr) == 1) ] = np.inf

  dep_idxs =  np.where(np.logical_and(np.less(energy_arr, 1800), np.isfinite(like_arr)))[0]
  r_arr = np.empty(len(dep_idxs))
  z_arr = np.empty(len(dep_idxs))
  like_arr_dep = like_arr[dep_idxs]

  for (new_idx, all_wf_idx) in enumerate(dep_idxs):
    samples = all_wfs[all_wf_idx].samples
    r_hist, r_bins = np.histogram(samples[:,0], bins=np.linspace(0, 33.8, 339 ))
    z_hist, z_bins = np.histogram(samples[:,2], bins=np.linspace(0, 39.3, 394 ))
    
    r_arr[new_idx] = r_bins[np.argmax(r_hist)]
    z_arr[new_idx] = z_bins[np.argmax(z_hist)]

  best_dep_idxs = np.where( np.less(like_arr_dep, 2) )[0]
  ok_dep_idxs = np.where(np.logical_and( np.greater(like_arr_dep, 2),np.less(like_arr_dep, 3) ))[0]
  bad_dep_idxs = np.where(np.greater(like_arr_dep, 3) )[0]

  plt.figure()
  plt.scatter(r_arr[best_dep_idxs], z_arr[best_dep_idxs], color="g")
  plt.scatter(r_arr[ok_dep_idxs], z_arr[ok_dep_idxs], color="b")
  plt.scatter(r_arr[bad_dep_idxs], z_arr[bad_dep_idxs], color="r")

  plt.xlim(0, 34)
  plt.ylim(0,38)

  plt.show()
Example #26
def limits(desspd, lspd, vmin, vmo, mmo, M, ama, alt, hmaxact, desalt, lalt, maxthr, Thr, lvs, D, tas, mass, ESF):
    # minimum speed
    vmincomp = np.less(desspd, vmin) * 1
    if vmincomp.any() == 1:
        lspd = (vmincomp == 1) * (vmin + 1.0) + (vmincomp == 0) * 0.0
        # limit for above crossover
        ama = (vmincomp == 1) * vcas2mach(lspd, alt) + (vmincomp == 0) * ama
        # print "below minimum speed", lspd, ama

    # maximum speed

    # CAS
    vcomp = np.greater(vmo, desspd) * 1

    # set speed to max. possible speed if CAS>VMO
    if vcomp.all() == 0:
        lspd = (vcomp == 0) * (vmo - 1.0) + (vcomp != 0) * 0.0
        # limit for above crossover
        ama = (vcomp == 0) * vcas2mach(lspd, alt) + (vcomp != 0) * ama
        # print "above max CAS speed", lspd

    # Mach
    macomp = np.greater(mmo, M) * 1
    # set speed to max. possible speed if Mach>MMO
    if macomp.all() == 0:
        lspd = (macomp == 0) * vmach2cas((mmo - 0.001), alt) + (macomp != 0) * 0.0
        # limit for above crossover
        ama = (macomp == 0) * vcas2mach(lspd, alt) + (macomp != 0) * ama
        # print "above max Mach speed", lspd

    # remove non-needed speed limit
    ls = np.array(desspd == lspd)
    if ls.any() == 1:
        lspd = (ls == 1) * 0.0

    # maximum altitude
    hcomp = np.greater(hmaxact, desalt) * 1

    # set altitude to max. possible altitude if alt>Hmax
    if hcomp.all() == 0:
        lalt = (hcomp == 0) * (hmaxact - 1.0) + (hcomp != 0) * 0.0
        #  print "above max alt"

    # remove non-needed altitude limit
    la = np.array(desalt == lalt)
    if la.any() == 1:
        lalt = (la == 1) * 0.0

    # thrust
    thrcomp = np.greater(maxthr, Thr) * 1
    if thrcomp.all() == 0:
        Thr = (thrcomp == 0) * (maxthr - 1.0) + (thrcomp != 0) * Thr
        lvs = (thrcomp == 0) * (((Thr - D) * tas) / (mass * g0)) * ESF + (thrcomp == 0) * 0.0

    return lspd, lalt, lvs, ama
Example #27
def main():
    spacecraft = np.array([[1000.,0.,0.,0.,0.,0.]])
    beacon     = np.array([[1200,1000,450]])

    ##################################################################################
    #
    # Camera/P&L Parameters
    #
    ##################################################################################
    
    extras = {}
    ## \var extras
    # Focal Length (mm)
    # 
    extras['FoL'] = 100.

    # Camera resolution (pixels)
    extras['resolution'] = [2048., 512.]
    
    # width and height of pixels in camera
    extras['pixel_width']  = 2.5
    extras['pixel_height'] = 10.
 
    # direction coefficient of pixel and line axes
    extras['pixel_direction'] = 1.
    extras['line_direction']  = 1.

    extras['obs_beacons'] = [1.]

    angles = np.array([[0,np.pi / 4,np.pi / 2.]])
    angles = np.array([0,        0,np.pi / 2.])

    args = ( spacecraft, beacon, angles, extras )
    pdb.set_trace()
    G = fncG( args )

    H = fncH( args )

    symbolic_results = pixel_and_line_symbolics.main()
    
    diffG = G - np.array(symbolic_results[0:2])

    diffH = H[:,0:3] - np.array([symbolic_results[2:5],symbolic_results[5:8]])

    if np.any( np.greater( np.abs( diffG ), 10**(-10) ) ):
       print 'P&L G Function did not pass unit test :('
    else:
       print 'P&L G Function passed unit test!'

    if np.any( np.greater( np.abs( diffH ), 10**(-10) ) ):
       print 'P&L H Function did not pass unit test :('
    else:
       print 'P&L H Function passed unit test!'

    return
Example #28
def get_residual_stats(config_dict, Phi_0, coadd_img, med_img, xycent=None):
    if xycent == None:
        xycent = ((fr_width - 1)/2., (fr_width - 1)/2.)
    fr_shape = config_dict['fr_shape']
    parang_seq = config_dict['parang_seq']
    op_rad = config_dict['op_rad']
    op_az = config_dict['op_az']
    rad_vec = np.sqrt(get_radius_sqrd(fr_shape, xycent)).ravel()
    
    Phi_0_derot = (Phi_0 + parang_seq[0]) % 360.
    coadd_annular_rms = list()
    zonal_rms = [[None]*N_az[r] for r in range(N_rad)]
    print "RMS counts in KLIP results:"
    for rad_ind in op_rad:
        R2 = R_out[rad_ind]
        if rad_ind == 0:
            R1 = R_inner
        else:
            R1 = R_out[rad_ind-1]
        annular_mask_logic = np.vstack([np.less_equal(rad_vec, R2),\
                                        np.greater(rad_vec, R1),\
                                        np.isfinite(coadd_img.ravel())])
        annular_mask = np.nonzero( np.all(annular_mask_logic, axis=0) )[0]
        coadd_annular_rms.append( np.sqrt( np.mean( np.ravel(coadd_img)[annular_mask]**2 ) ) )
        print "\tannulus %d/%d: %.3f in KLIP sub'd, derotated, coadded annlus" % (rad_ind+1, len(op_rad), coadd_annular_rms[-1])
        if len(op_az[rad_ind]) > 1:
            Phi_beg = (Phi_0_derot - DPhi[rad_ind]/2.) % 360.
            Phi_end = [ (Phi_beg + i * DPhi[rad_ind]) % 360. for i in range(1, len(op_az[rad_ind])) ]
            Phi_end.append(Phi_beg)
            for az_ind in op_az[rad_ind]:
                Phi2 = Phi_end[az_ind]
                if az_ind == 0:
                    Phi1 = Phi_beg
                else:
                    Phi1 = Phi_end[az_ind-1]
                if Phi1 < Phi2:
                    mask_logic = np.vstack((np.less_equal(rad_vec, R2),\
                                            np.greater(rad_vec, R1),\
                                            np.less_equal(angle_vec, Phi2),\
                                            np.greater(angle_vec, Phi1)))
                else: # azimuthal region spans phi = 0
                    rad_mask_logic = np.vstack((np.less_equal(rad_vec, R2),\
                                                np.greater(rad_vec, R1)))
                    az_mask_logic = np.vstack((np.less_equal(angle_vec, Phi2),\
                                               np.greater(angle_vec, Phi1)))
                    mask_logic = np.vstack((np.any(az_mask_logic, axis=0),\
                                            np.all(rad_mask_logic, axis=0)))
                derot_zonemask = np.nonzero( np.all(mask_logic, axis = 0) )[0]
                zonal_rms[rad_ind][az_ind] = np.sqrt( np.mean( np.ravel(coadd_img)[derot_zonemask]**2 ) )
            delimiter = ', '
            print "\tby zone: %s" % delimiter.join(["%.3f" % zonal_rms[rad_ind][a] for a in op_az[rad_ind]])
    print "Peak, min values in final co-added image: %0.3f, %0.3f" % (np.nanmax(coadd_img), np.nanmin(coadd_img))
    print "Peak, min values in median of de-rotated images: %0.3f, %0.3f" % (np.nanmax(med_img), np.nanmin(med_img))
    return coadd_annular_rms, zonal_rms
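The annulus selection stacks several elementwise tests and requires all of them to hold; a miniature version with invented radii and values:
import numpy as np

rad_vec = np.array([0.5, 1.5, 2.5, 3.5])
vals = np.array([1.0, np.nan, 2.0, 3.0])
mask_logic = np.vstack([np.less_equal(rad_vec, 3.0),
                        np.greater(rad_vec, 1.0),
                        np.isfinite(vals)])
annulus = np.nonzero(np.all(mask_logic, axis=0))[0]
print(np.sqrt(np.mean(vals[annulus] ** 2)))    # RMS over the pixels inside the annulus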
Example #29
def sfilter(spectral,center, width, exponent=6, taupass=1.0,  \
            taustop=0.0, filtertype = 'bandpass' ):
    """ Calculate a symmetrical filter response of shape exp(-x^n)


    Given a number of parameters, calculates maximally flat,
    symmetrical transmittance.  The function parameters controls
    the width, pass-band and stop-band transmittance and sharpness
    of cutoff. This function is not meant to replace the use of
    properly measured filter responses, but rather serves as a
    starting point if no other information is available.
    This function does not calculate ripple in the pass-band
    or cut-off band.

    Filter types supported include band pass, high (long) pass and
    low (short) pass filters. High pass filters have maximal
    transmittance for all spectral values higher than the central
    value. Low pass filters have maximal transmittance for all
    spectral values lower than the central value.

    Args:
        | spectral (np.array[N,] or [N,1]): spectral vector in  [um] or [cm-1].
        | center (float): central value for filter passband
        | width (float): proportional to width of filter passband
        | exponent (float): even integer, define the sharpness of cutoff.
        |                     If exponent=2        then gaussian
        |                     If exponent=infinity then square
        | taupass (float): the transmittance in the pass band (assumed constant)
        | taustop (float): peak transmittance in the stop band (assumed constant)
        | filtertype (string): filter type, one of 'bandpass', 'lowpass' or 'highpass'

    Returns:
        | transmittance (np.array[N,] or [N,1]):  transmittances at "spectral" intervals.

    Raises:
        | No exception is raised.
        | If an invalid filter type is specified, return None.
    """

    tau = taustop+(taupass-taustop)*np.exp(-(2*(spectral-center)/width)**exponent)
    maxtau=np.max(tau)
    if filtertype == 'bandpass':
        pass
    elif filtertype == 'lowpass':
        tau = tau * np.greater(spectral,center) + \
                maxtau * np.ones(spectral.shape) * np.less(spectral,center)
    elif filtertype == 'highpass':
        tau = tau * np.less(spectral,center) + \
                maxtau * np.ones(spectral.shape) * np.greater(spectral,center)
    else:
        return None

    return tau
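A quick illustration of how the greater/less masks turn the symmetric response into a low-pass filter; the spectral grid and parameters are arbitrary:
import numpy as np

spectral = np.linspace(1.0, 5.0, 8)
center, width, exponent = 3.0, 1.0, 6
tau = np.exp(-(2 * (spectral - center) / width) ** exponent)
lowpass = tau * np.greater(spectral, center) + \
          tau.max() * np.ones(spectral.shape) * np.less(spectral, center)
print(lowpass)    # maximal transmittance below the centre, exp(-x^n) roll-off above it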
Example #30
def solve_newton(A, x, alpha, beta, maxiter, epsilon):
    T = np.array([])
    for i in range(1, maxiter):
        y = (- np.sum(np.log(1 - np.dot(A, x)))
             - np.sum(np.log(1 + x))
             - np.sum(np.log(1 - x)));
        grad = (np.dot(np.transpose(A), np.true_divide(1, (1 - np.dot(A, x))))
                - np.true_divide(1, 1 + x)
                + np.true_divide(1, 1 - x));
        if i % 4 == 1:
            # JESUS That was hard to organize
            hess = (np.dot(A.transpose(),
                           np.dot(
                               np.diagflat(
                                   np.square(np.true_divide(1, 1 - np.dot(A, x)))),
                               A))
                    + np.diagflat(np.true_divide(1, np.square(1 + x)))
                    + np.diagflat(np.true_divide(1, np.square(1 - x))))

        # delX = A\b
        delX = np.linalg.solve(-hess, grad)
        lambdaSq = (np.dot(grad.transpose(),
                           np.dot(np.linalg.pinv(hess), grad)))
        # Cutoff point, checked after step 1
        if lambdaSq / 2 <= epsilon:
            print "Iterations"
            print i;
            # Graph this later
            # plt.scatter(T);
            return x;

        # Else we keep going!
        # 2. Line Search
        t = 1
        # Have to do some preprocessing here, or else we get NaNs
        # from the log() term
        dotChange = np.max(np.dot(A, (x + t * delX)));
        squareChange = np.max(np.abs(x + t * delX));
        while np.greater(dotChange, 1) | np.greater(squareChange, 1):
            t = t * beta;
            dotChange = np.max(np.dot(A, (x + t * delX)));
            squareChange = np.max(np.abs(x + t * delX));

        yStep = (- np.sum(np.log(1 - np.dot(A, (x + t * delX))))
                 - np.sum(np.log(1 - np.square(x + t * delX))))
        while yStep > y + alpha * t * np.dot(np.transpose(grad), delX):
            t = t * beta;
            yStep = (- np.sum(np.log(1 - np.dot(A, (x + t * delX))))
                     - np.sum(np.log(1 - np.square(x + t * delX))))

        # 3. Update to next step
        np.append(T, t);
        x = x + t * delX;
Example #31
def plot_network(input_data):
	y_pred_numpy = sess.run(nn_out,feed_dict={model_input:input_data})
	out = np.greater(y_pred_numpy,0.5).astype(np.float32)
	return np.squeeze(out)
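Thresholding the network output is a single strict comparison; a sketch of just that step with fabricated predictions:
import numpy as np

y_pred = np.array([[0.2], [0.7], [0.5]])
out = np.greater(y_pred, 0.5).astype(np.float32)
print(np.squeeze(out))    # [0. 1. 0.] -- exactly 0.5 maps to 0 because the comparison is strict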
Example #32
def _round(x):
    if N.greater(x, 0.):
        return N.floor(x)
    else:
        return N.ceil(x)
Example #33
    def fit_transform(self, X, y=None):
        """Fits the imputer on X and return the transformed X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data, where "n_samples" is the number of samples and
            "n_features" is the number of features.

        y : ignored.

        Returns
        -------
        Xt : array-like, shape (n_samples, n_features)
            The imputed input data.
        """
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))

        if self.max_iter < 0:
            raise ValueError(
                "'max_iter' should be a positive integer. Got {} instead."
                .format(self.max_iter))

        if self.tol < 0:
            raise ValueError(
                "'tol' should be a non-negative float. Got {} instead."
                .format(self.tol)
            )

        if self.estimator is None:
            from ..linear_model import BayesianRidge
            self._estimator = BayesianRidge()
        else:
            self._estimator = clone(self.estimator)

        self.imputation_sequence_ = []

        self.initial_imputer_ = None

        X, Xt, mask_missing_values, complete_mask = (
            self._initial_imputation(X, in_fit=True))

        super()._fit_indicator(complete_mask)
        X_indicator = super()._transform_indicator(complete_mask)

        if self.max_iter == 0 or np.all(mask_missing_values):
            self.n_iter_ = 0
            return super()._concatenate_indicator(Xt, X_indicator)

        # Edge case: a single feature. We return the initial ...
        if Xt.shape[1] == 1:
            self.n_iter_ = 0
            return super()._concatenate_indicator(Xt, X_indicator)

        self._min_value = self._validate_limit(
            self.min_value, "min", X.shape[1])
        self._max_value = self._validate_limit(
            self.max_value, "max", X.shape[1])

        if not np.all(np.greater(self._max_value, self._min_value)):
            raise ValueError(
                "One (or more) features have min_value >= max_value.")

        # order in which to impute
        # note this is probably too slow for large feature data (d > 100000)
        # and a better way would be good.
        # see: https://goo.gl/KyCNwj and subsequent comments
        ordered_idx = self._get_ordered_idx(mask_missing_values)
        self.n_features_with_missing_ = len(ordered_idx)

        abs_corr_mat = self._get_abs_corr_mat(Xt)

        n_samples, n_features = Xt.shape
        if self.verbose > 0:
            print("[IterativeImputer] Completing matrix with shape %s"
                  % (X.shape,))
        start_t = time()
        if not self.sample_posterior:
            Xt_previous = Xt.copy()
            normalized_tol = self.tol * np.max(
                np.abs(X[~mask_missing_values])
            )
        for self.n_iter_ in range(1, self.max_iter + 1):
            if self.imputation_order == 'random':
                ordered_idx = self._get_ordered_idx(mask_missing_values)

            for feat_idx in ordered_idx:
                neighbor_feat_idx = self._get_neighbor_feat_idx(n_features,
                                                                feat_idx,
                                                                abs_corr_mat)
                Xt, estimator = self._impute_one_feature(
                    Xt, mask_missing_values, feat_idx, neighbor_feat_idx,
                    estimator=None, fit_mode=True)
                estimator_triplet = _ImputerTriplet(feat_idx,
                                                    neighbor_feat_idx,
                                                    estimator)
                self.imputation_sequence_.append(estimator_triplet)

            if self.verbose > 1:
                print('[IterativeImputer] Ending imputation round '
                      '%d/%d, elapsed time %0.2f'
                      % (self.n_iter_, self.max_iter, time() - start_t))

            if not self.sample_posterior:
                inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf,
                                          axis=None)
                if self.verbose > 0:
                    print('[IterativeImputer] '
                          'Change: {}, scaled tolerance: {} '.format(
                              inf_norm, normalized_tol))
                if inf_norm < normalized_tol:
                    if self.verbose > 0:
                        print('[IterativeImputer] Early stopping criterion '
                              'reached.')
                    break
                Xt_previous = Xt.copy()
        else:
            if not self.sample_posterior:
                warnings.warn("[IterativeImputer] Early stopping criterion not"
                              " reached.", ConvergenceWarning)
        Xt[~mask_missing_values] = X[~mask_missing_values]
        return super()._concatenate_indicator(Xt, X_indicator)
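The min/max validation above rejects any feature whose upper bound does not strictly exceed its lower bound; a standalone check with invented bounds:
import numpy as np

min_value = np.array([0.0, -1.0, 0.0])
max_value = np.array([1.0, 1.0, 0.0])
print(np.all(np.greater(max_value, min_value)))    # False: the third feature has max_value == min_value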
Example #34
    def apply(self, page):
        """Detects thick section barlines from the connected components.

    These should be tall components that start and end near the start and end
    of two (possibly different) staves. We use the standard barlines logic to
    assign components to the nearest start and end staff. We filter for
    candidate barlines, whose start and end are sufficiently close to the
    expected values. We then filter again by whether the component width is
    within the expected values for section barlines.

    For each staff system, we take the section barlines that match exactly that
    system's staves. Any standard barlines that are too close to a new section
    barline are removed, and we merge the existing standard barlines with the
    new section barlines.

    Args:
      page: A Page message.

    Returns:
      The same Page message, with new section barlines added.
    """
        component_center_x = np.mean(self.components[:,
                                                     [COLUMNS.X0, COLUMNS.X1]],
                                     axis=1).astype(int)
        # Take section barline candidates, whose start and end y values are close
        # enough to the staff start and end ys.
        component_is_candidate, candidate_start_staff, candidate_end_staff = (
            barlines.assign_barlines_to_staves(
                barline_x=component_center_x,
                barline_y0=self.components[:, COLUMNS.Y0],
                barline_y1=self.components[:, COLUMNS.Y1],
                staff_detector=self.staff_detector))
        candidates = self.components[component_is_candidate]
        candidate_center_x = component_center_x[component_is_candidate]
        del component_center_x

        # Filter again by the expected section barline width.
        component_width = candidates[:, COLUMNS.X1] - candidates[:, COLUMNS.X0]
        component_width_ok = np.logical_and(
            self._section_min_width() <= component_width,
            component_width <= self._section_max_width(candidate_start_staff))
        candidates = candidates[component_width_ok]
        candidate_center_x = candidate_center_x[component_width_ok]
        candidate_start_staff = candidate_start_staff[component_width_ok]
        candidate_end_staff = candidate_end_staff[component_width_ok]

        # For each existing staff system, consider only the candidates that match
        # exactly the system's start and end staves.
        start_staff = 0
        for system in page.system:
            staffline_distance = np.median([
                staff.staffline_distance for staff in system.staff
            ]).astype(int)
            candidate_covers_staff_system = np.logical_and(
                candidate_start_staff == start_staff,
                candidate_end_staff + 1 == start_staff + len(system.staff))
            # Calculate the x coordinates of all section barlines to keep.
            section_bar_x = candidate_center_x[candidate_covers_staff_system]
            # Extract the existing bar x coordinates and types for merging.
            existing_bar_type = {bar.x: bar.type for bar in system.bar}
            existing_bars = np.asarray([bar.x for bar in system.bar])
            # Merge the existing barlines and section barlines.
            if existing_bars.size and section_bar_x.size:
                # Filter the existing bars by whether they are far enough from a new
                # section barline. Section barlines override the existing standard
                # barlines.
                existing_bars_ok = np.greater(
                    np.min(np.abs(existing_bars[:, None] -
                                  section_bar_x[None, :]),
                           axis=1), staffline_distance * 4)
                existing_bars = existing_bars[existing_bars_ok]

            # Merge the existing barlines which we kept, and the new section barlines
            # (which are assumed to be type END_BAR), in sorted order.
            bars = sorted(
                [Bar(x=x, type=existing_bar_type[x]) for x in existing_bars] +
                [Bar(x=x, type=Bar.END_BAR) for x in section_bar_x],
                key=lambda bar: bar.x)
            # Update the staff system.
            system.ClearField('bar')
            system.bar.extend(bars)

            start_staff += len(system.staff)
        return page
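The distance filter keeps an existing barline only if its nearest section barline is more than four staffline distances away; a toy version with invented coordinates:
import numpy as np

existing_bars = np.array([10, 55, 120])
section_bar_x = np.array([50, 200])
staffline_distance = 8
existing_bars_ok = np.greater(
    np.min(np.abs(existing_bars[:, None] - section_bar_x[None, :]), axis=1),
    staffline_distance * 4)
print(existing_bars[existing_bars_ok])    # [ 10 120] -- the bar at x=55 sits within 32 px of a section barline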
Example #35
    def getJointMove(self,
                     q_goal,
                     q0,
                     base_steps=1000,
                     steps_per_meter=1000,
                     steps_per_radians=4,
                     time_multiplier=1,
                     percent_acc=1,
                     use_joint_move=False,
                     table_frame=None):

        if q0 is None:
            rospy.logerr("Invalid initial joint position in getJointMove")
            return JointTrajectory()
        elif np.all(np.isclose(q0, q_goal, atol=0.0001)):
            rospy.logwarn("Robot is already in the goal position.")
            return JointTrajectory()

        if np.any(np.greater(np.absolute(q_goal[:2] - np.array(q0[:2])), np.pi/2)) \
          or np.absolute(q_goal[3] - q0[3]) > np.pi:

            # TODO: these thresholds should not be set manually here.
            rospy.logerr("Dangerous IK solution, abort getJointMove")

            return JointTrajectory()
        delta_q = np.array(q_goal) - np.array(q0)
        # steps = base_steps + int(np.sum(np.absolute(delta_q)) * steps_per_radians)
        steps, t_v_const_step, t_v_setting_max, steps_to_max_speed, const_velocity_max_step = self.calculateAccelerationProfileParameters(
            delta_q, base_steps, 0, steps_per_radians, 0, time_multiplier,
            self.acceleration_magnification * percent_acc)

        traj = JointTrajectory()
        traj.points.append(
            JointTrajectoryPoint(positions=q0,
                                 velocities=[0] * len(q0),
                                 accelerations=[0] * len(q0)))
        # compute IK
        for i in range(1, steps + 1):
            xyz = None
            rpy = None
            q = None

            q = np.array(q0) + (float(i) / steps) * delta_q
            q = q.tolist()

            if self.verbose:
                print "%d -- %s %s = %s" % (i, str(xyz), str(rpy), str(q))

            if q is not None:
                dq_i = np.array(q) - np.array(traj.points[i - 1].positions)
                if np.sum(dq_i) < 0.0001:
                    rospy.logwarn(
                        "Joint trajectory point %d is repeating previous trajectory point. "
                        % i)
                    # continue

                total_time = self.calculateTimeOfTrajectoryStep(
                    i, steps_to_max_speed, const_velocity_max_step,
                    t_v_const_step, t_v_setting_max)
                traj.points[i - 1].velocities = dq_i / (
                    total_time - traj.points[i - 1].time_from_start.to_sec())

                pt = JointTrajectoryPoint(positions=q,
                                          velocities=[0] * len(q),
                                          accelerations=[0] * len(q))
                pt.time_from_start = rospy.Duration(total_time)
                traj.points.append(pt)
            else:
                rospy.logwarn(
                    "No IK solution on one of the trajectory point to cartesian move target"
                )

        if len(traj.points) < base_steps:
            print rospy.logerr("Planning failure with " \
                    + str(len(traj.points)) \
                    + " / " + str(base_steps) \
                    + " points.")
            return JointTrajectory()

        traj.joint_names = self.joint_names
        return traj
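The safety check at the top of getJointMove flags goals that would require a large jump on the first two joints or the wrist; a standalone version with made-up joint values:
import numpy as np

q0 = np.array([0.1, 0.2, 0.3, 0.4])
q_goal = np.array([0.2, 2.1, 0.3, 0.5])
unsafe = np.any(np.greater(np.absolute(q_goal[:2] - q0[:2]), np.pi / 2)) \
    or np.absolute(q_goal[3] - q0[3]) > np.pi
print(unsafe)    # True: the second joint would have to move by more than pi/2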
Example #36
def funcFindPrfGpu(
        idxPrc,
        varNumX,
        varNumY,
        varNumPrfSizes,
        vecMdlXpos,  #noqa
        vecMdlYpos,
        vecMdlSd,
        aryFunc,
        aryPrfTc,
        strVersion,
        queOut):
    """
    Find the best pRF model for voxel time course.
    
    This version uses ```feed_dict``` to put model time courses on the
    computational graph. This is slow, i.e. slower than multi-threaded cython
    on CPU or GPU version using a queue.
    """
    # Number of voxels to be fitted:
    varNumVoxChnk = aryFunc.shape[0]

    # Number of volumes:
    varNumVol = aryFunc.shape[1]

    # Vectors for pRF finding results [number-of-voxels times one]:
    vecBstXpos = np.zeros(varNumVoxChnk)
    vecBstYpos = np.zeros(varNumVoxChnk)
    vecBstSd = np.zeros(varNumVoxChnk)

    # Vector for best R-square value. For each model fit, the R-square value is
    # compared to this, and updated if it is lower than the best-fitting
    # solution so far. We initialise with an arbitrary, high value
    vecBstRes = np.add(np.zeros(varNumVoxChnk), 100000000.0).astype(np.float32)

    # Vector that will hold the temporary residuals from the model fitting:
    vecTmpRes = np.zeros(varNumVoxChnk).astype(np.float32)

    # We reshape the voxel time courses, so that time goes down the column,
    # i.e. from top to bottom.
    aryFunc = aryFunc.T

    #    # Reshape pRF model time courses:
    #    aryPrfTc = np.reshape(aryPrfTc,
    #                          ((aryPrfTc.shape[0]
    #                            * aryPrfTc.shape[1]
    #                            * aryPrfTc.shape[2]),
    #                           aryPrfTc.shape[3]))

    #    # Reshape back to original shape:
    #    aryPrfTc = np.reshape(aryPrfTc,
    #                          (cfg.varNumX,
    #                           cfg.varNumY,
    #                           cfg.varNumPrfSizes,
    #                           cfg.varNumVol))

    # Constant term for the model:
    vecConst = np.ones((varNumVol), dtype=np.float32)

    # Change type to float 32:
    aryFunc = aryFunc.astype(np.float32)
    aryPrfTc = aryPrfTc.astype(np.float32)

    # Prepare status indicator if this is the first of the parallel processes:
    if idxPrc == 0:

        # We create a status indicator for the time consuming pRF model finding
        # algorithm. Number of steps of the status indicator:
        varStsStpSze = 20

        # Number of pRF models to fit:
        varNumMdls = (varNumX * varNumY * varNumPrfSizes)

        # Vector with pRF values at which to give status feedback:
        vecStatPrf = np.linspace(0,
                                 varNumMdls,
                                 num=(varStsStpSze + 1),
                                 endpoint=True)
        vecStatPrf = np.ceil(vecStatPrf)
        vecStatPrf = vecStatPrf.astype(int)

        # Vector with corresponding percentage values at which to give status
        # feedback:
        vecStatPrc = np.linspace(0, 100, num=(varStsStpSze + 1), endpoint=True)
        vecStatPrc = np.ceil(vecStatPrc)
        vecStatPrc = vecStatPrc.astype(int)

        # Counter for status indicator:
        varCntSts01 = 0
        varCntSts02 = 0

    # There can be pRF model time courses with a variance of zero (i.e. pRF
    # models that are not actually responsive to the stimuli). For time
    # efficiency, and in order to avoid division by zero, we ignore these
    # model time courses.
    aryPrfTcVar = np.var(aryPrfTc, axis=3)

    # Zero with float32 precision for comparison:
    varZero32 = np.array(([0.0])).astype(np.float32)[0]

    # L2 regularization factor for regression:
    varL2reg = 0.0

    # Definition of computational graph:
    objGrph = tf.Graph()
    with objGrph.as_default():
        # Design matrix with two columns (graph input). The design matrix is
        # different on every iteration, so we define a placeholder object.
        objDsng = tf.placeholder(tf.float32, shape=(varNumVol, 2))  # !

        # Functional data. Because the functional data does not change, we
        # put the entire data on the graph. This may become a problem for
        # large datasets.
        objFunc = tf.Variable(aryFunc)

        # The matrix solving operation.
        # objMatSlve = tf.matrix_solve_ls(objDsng, objFunc, varL2reg, fast=True)

        # Operation that solves matrix (in the least squares sense), and
        # calculates residuals along time dimension:
        objMatSlve = tf.reduce_sum(
                                   tf.abs(
                                          tf.subtract(
                                                      tf.matmul(
                                                                objDsng,
                                                                tf.matrix_solve_ls( \
                                                                    objDsng, objFunc,
                                                                    varL2reg,
                                                                    fast=True)
                                                                ),
                                                      objFunc),
                                          ),
                                   axis=0
                                   )

    # Create session with graph:
    with tf.Session(graph=objGrph) as objSess:

        # Initialise variables.
        tf.global_variables_initializer().run()

        # Loop through pRF models:
        for idxX in range(0, varNumX):

            for idxY in range(0, varNumY):

                for idxSd in range(0, varNumPrfSizes):

                    # Status indicator (only used in the first of the parallel
                    # processes):
                    if idxPrc == 0:

                        # Status indicator:
                        if varCntSts02 == vecStatPrf[varCntSts01]:

                            # Prepare status message:
                            strStsMsg = ('------------Progress: ' +
                                         str(vecStatPrc[varCntSts01]) +
                                         ' % --- ' +
                                         str(vecStatPrf[varCntSts01]) +
                                         ' pRF models out of ' +
                                         str(varNumMdls))

                            print(strStsMsg)

                            # Only increment counter if the last value has not
                            # been reached yet:
                            if varCntSts01 < varStsStpSze:
                                varCntSts01 = varCntSts01 + int(1)

                    # Only fit pRF model if variance is not zero:
                    if np.greater(aryPrfTcVar[idxX, idxY, idxSd], varZero32):

                        # Current pRF time course model:
                        vecMdlTc = aryPrfTc[idxX, idxY, idxSd, :].flatten()

                        # We create a design matrix including the current pRF
                        # time course model, and a constant term:
                        aryDsgn = np.vstack([vecMdlTc, vecConst]).T

                        # Change type to float32:
                        aryDsgn = aryDsgn.astype(np.float32)

                        # Design matrix to nested list:
                        lstDsng = aryDsgn.tolist()

                        # Run the graph with current design matrix, returning
                        # parameter estimates (betas):
                        vecTmpRes = objSess.run(objMatSlve,
                                                feed_dict={objDsng: lstDsng})

                        #print(type(aryTmpCoef))
                        #print(aryTmpCoef.shape)

                    # Check whether current residuals are lower than previously
                    # calculated ones:
                    vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)

                    # Replace best x and y position values, and SD values.
                    vecBstXpos[vecLgcTmpRes] = vecMdlXpos[idxX]
                    vecBstYpos[vecLgcTmpRes] = vecMdlYpos[idxY]
                    vecBstSd[vecLgcTmpRes] = vecMdlSd[idxSd]

                    # Replace best residual values:
                    vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]

                    # Status indicator (only used in the first of the parallel
                    # processes):
                    if idxPrc == 0:

                        # Increment status indicator counter:
                        varCntSts02 = varCntSts02 + 1

    # After finding the best fitting model for each voxel, we still have to
    # calculate the coefficient of determination (R-squared) for each voxel. We
    # start by calculating the total sum of squares (i.e. the deviation of the
    # data from the mean). The mean of each time course:
    vecFuncMean = np.mean(aryFunc, axis=0)
    # Deviation from the mean for each datapoint:
    vecFuncDev = np.subtract(aryFunc, vecFuncMean[None, :])
    # Sum of squares:
    vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0)
    # Coefficient of determination:
    vecBstR2 = np.subtract(1.0, np.divide(vecBstRes, vecSsTot))

    # Output list:
    lstOut = [idxPrc, vecBstXpos, vecBstYpos, vecBstSd, vecBstR2]

    queOut.put(lstOut)
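As a cross-check of the graph above, here is a rough NumPy equivalent of what objMatSlve computes, using random stand-ins for aryFunc and the model time course; with varL2reg = 0 a plain least-squares solve matches:

import numpy as np

rng = np.random.default_rng(0)
varNumVol, varNumVox = 100, 5                     # assumed sizes
vecMdlTc = rng.standard_normal(varNumVol).astype(np.float32)
aryFunc = rng.standard_normal((varNumVol, varNumVox)).astype(np.float32)

# Design matrix: pRF model time course plus a constant term, as in aryDsgn.
aryDsgn = np.vstack([vecMdlTc, np.ones(varNumVol, dtype=np.float32)]).T

# Least-squares fit, then the sum of absolute residuals per voxel -- the
# quantity that objMatSlve returns and that vecTmpRes holds.
beta = np.linalg.lstsq(aryDsgn, aryFunc, rcond=None)[0]
vecTmpRes = np.sum(np.abs(aryDsgn @ beta - aryFunc), axis=0)
print(vecTmpRes.shape)  # (5,)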
Beispiel #37
0
 def _upright_constraint(self, safety_vars):
   """Pelvis orientation should remain upright."""
   z_up = safety_vars['z_up']
   return np.greater(z_up, self.limits['_upright_constraint'])
Beispiel #38
0
            column = np.genfromtxt(filename, dtype='str', delimiter="\t")[2:]
        except:
            print filename, "broken"
            pass
        if (len_first == -1):
            len_first = len(column)
        elif (len_first != len(column)):
            continue

        columns.append(column)

    columns = np.array(columns).T
    c_calc = 100 - 100 * np.array(columns.T[1:, 1:], dtype=np.float)
    columns.T[1:, 1:] = c_calc
    mean = ["Mean Err (%)"] + list(c_calc.mean(axis=1))
    higher = np.greater(c_calc, 5)

    higher = np.sum(higher, axis=1)
    print higher
    greater = ["Failed(err. > 5%)"] + list(higher)
    print greater
    print mean

    columns = np.vstack([columns, mean, greater])
    #if(column[0])

    print tabulate(columns,
                   headers="firstrow",
                   tablefmt="latex_booktabs",
                   floatfmt=".1f")
    #print tabulate(columns,headers="firstrow", tablefmt= "html")
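A tiny illustration of the failure count above: np.greater yields a boolean matrix, and summing along an axis counts how many entries exceed the 5% error threshold.

import numpy as np

c_calc = np.array([[1.2, 6.3, 0.4],
                   [5.1, 2.0, 7.7]])
higher = np.greater(c_calc, 5)        # boolean matrix of entries above 5%
print(np.sum(higher, axis=1))         # [1 2] failures per row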
Beispiel #39
0
 def step_size(self, s):
     assert isinstance(s, (float, int, bool, np.ndarray))
     assert np.greater(s, 0.0)
     if self._step_type == 'relative':
         assert np.less_equal(s, 1.0)
     self._step_size = float(s)
Beispiel #40
0
    def get_batch(self, data_set, feed_previous_rate=1, pos=-1):
        encoder_inputs, decoder_inputs = [], []
        encoder_input_length = []
        references = []

        for i in xrange(0, 64):  # self.batch_size + 1):
            if pos is None:
                # random samples.
                encoder_input, decoder_input, pair_id = random.choice(data_set)
            else:
                pos += 1
                if pos != 0 and pos % len(data_set) == 0:
                    random.shuffle(data_set)
                    break
                encoder_input, decoder_input, pair_id = data_set[pos %
                                                                 len(data_set)]

            # Check if Unknown tokens are in the input
            has_ukn = data_utils.UNK_ID in encoder_input

            # Encoder inputs are padded.
            encoder_pad = [data_utils.PAD_ID
                           ] * (self.encoder_length - len(encoder_input))
            encoder_inputs.append(list(encoder_input + encoder_pad))

            # Record the meaningful encoder input length.
            encoder_input_length.append(len(encoder_input))

            # Decoder inputs get a starting symbol "GO", and are padded.
            decoder_pad = [data_utils.PAD_ID
                           ] * (self.decoder_length - len(decoder_input) - 1)
            decoder_inputs.append([data_utils.GO_ID] + decoder_input +
                                  decoder_pad)

            # Save references for evaluation.
            references.append([pair_id, decoder_input, has_ukn])

        encoder_input_length = np.array(encoder_input_length, dtype=np.int32)

        # batch_size is not necessarily equal to self.batch_size.
        batch_size = len(encoder_inputs)

        # Create the list of masks.
        list_of_mask = []
        full_matrix = np.full((batch_size), int(feed_previous_rate * 100))
        for length_idx in xrange(self.decoder_length):
            mask = np.greater(full_matrix,
                              np.random.randint(
                                  100, size=(batch_size))).astype(np.float32)
            list_of_mask.append(mask)

        # Now create time-major vectors from the data selected above.
        batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []

        for length_idx in xrange(self.encoder_length):
            batch_encoder_inputs.append(
                np.array([
                    encoder_inputs[batch_idx][length_idx]
                    for batch_idx in xrange(batch_size)
                ],
                         dtype=np.int32))

        for length_idx in xrange(self.decoder_length):
            batch_decoder_inputs.append(
                np.array([
                    decoder_inputs[batch_idx][length_idx]
                    for batch_idx in xrange(batch_size)
                ],
                         dtype=np.int32))
            # Create target_weights to be 0 for targets that are padding.
            batch_weight = np.ones(batch_size, dtype=np.float32)
            for batch_idx in xrange(batch_size):
                if length_idx < self.decoder_length - 1:
                    target = decoder_inputs[batch_idx][length_idx + 1]
                if length_idx == self.decoder_length - 1 or target == data_utils.PAD_ID:
                    batch_weight[batch_idx] = 0.0
            batch_weights.append(batch_weight)

        return (batch_encoder_inputs, batch_decoder_inputs, batch_weights,
                encoder_input_length, list_of_mask, batch_size, references,
                pos)
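The list_of_mask construction above is a per-position Bernoulli gate that is 1 with probability feed_previous_rate. A stand-alone sketch of that trick, with a hypothetical helper name:

import numpy as np

def feed_previous_mask(batch_size, feed_previous_rate, rng):
    # 1.0 with probability feed_previous_rate, 0.0 otherwise, built exactly
    # like the masks above: a constant rate*100 compared against random
    # integers drawn from [0, 100).
    full_matrix = np.full((batch_size,), int(feed_previous_rate * 100))
    return np.greater(full_matrix, rng.integers(100, size=batch_size)).astype(np.float32)

print(feed_previous_mask(8, 0.75, np.random.default_rng(0)))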
Beispiel #41
0
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Beispiel #42
0
def k2p2FixFromSum(SumImage,
                   thresh=1,
                   output_folder=None,
                   plot_folder=None,
                   show_plot=True,
                   min_no_pixels_in_mask=8,
                   min_for_cluster=4,
                   cluster_radius=np.sqrt(2),
                   segmentation=True,
                   ws_alg='flux',
                   ws_blur=0.5,
                   ws_thres=0.05,
                   ws_footprint=3,
                   extend_overflow=True,
                   catalog=None):
    """
	Create pixel masks from Sum-image.

	Parameters:
		SumImage (ndarray): Sum-image.
		thresh (float, optional): Threshold for significant flux. The threshold is calculated as MODE+thresh*MAD. Default=1.
		output_folder (string, optional): Path to directory where output should be saved. Default=None.
		plot_folder (string, optional): Path to directory where plots should be saved. Default=None.
		show_plot (boolean, optional): Should plots be shown to the user? Default=True.
		min_no_pixels_in_mask (integer, optional): Minimum number of pixels to constitute a mask.
		min_for_cluster (integer, optional): Minimum number of pixels to be considered a cluster in DBSCAN clustering.
		cluster_radius (float, optional): Radius around points to consider cluster in DBSCAN clustering.
		segmentation (boolean, optional): Perform segmentation of clusters using Watershed segmentation.
		ws_alg (string, optional): Watershed method to use. Default='flux'.
		ws_thres (float, optional): Threshold for watershed segmentation.
		ws_footprint (integer, optional): Footprint to use in watershed segmentation.
		extend_overflow (boolean, optional): Enable extension of overflow columns for bright stars.
		catalog (ndarray, optional): Catalog of stars as an array with three columns (column, row and magnitude). If this is provided
			the results will only allow masks to be returned for stars in the catalog and the information is
			also used in the extension of overflow columns.

	Returns:
		tuple: Tuple with two elements: A 3D boolean ndarray of masks and a float indicating the bandwidth used for the estimation background-levels.

	.. codeauthor:: Rasmus Handberg <*****@*****.**>
	.. codeauthor:: Mikkel Lund <*****@*****.**>
	"""

    # Get logger for printing messages:
    logger = logging.getLogger(__name__)
    logger.info("Creating masks from sum-image...")

    NY, NX = np.shape(SumImage)
    ori_mask = ~np.isnan(SumImage)
    X, Y = np.meshgrid(np.arange(NX), np.arange(NY))

    # Cut out pixels from sum image which were collected and contains flux
    # and flatten the 2D image to 1D array:
    Flux = SumImage[ori_mask].flatten()
    Flux = Flux[Flux > 0]

    # Check if there was actually any flux measured:
    if len(Flux) == 0:
        raise K2P2NoFlux("No measured flux in sum-image")

    # Cut away the top 15% of the fluxes:
    flux_cut = stats.trim1(np.sort(Flux), 0.15)
    # Also do a cut on the absolute values of pixel - This helps in cases where
    # the image is dominated by saturated pixels. The exact value is of course
    # in principle dependent on the CCD, but we have found this value to be
    # reasonable in TESS simulated data:
    flux_cut = flux_cut[flux_cut < 70000]

    # Estimate the bandwidth we are going to use for the background:
    background_bandwidth = select_bandwidth(flux_cut, bw='scott', kernel='gau')
    logger.debug("  Sum-image KDE bandwidth: %f", background_bandwidth)

    # Make the Kernel Density Estimation of the fluxes:
    kernel = KDE(flux_cut)
    kernel.fit(kernel='gau', bw=background_bandwidth, fft=True, gridsize=100)

    # MODE
    def kernel_opt(x):
        return -1 * kernel.evaluate(x)

    max_guess = kernel.support[np.argmax(kernel.density)]
    MODE = minimize(kernel_opt, max_guess, method='Powell').x

    # MAD (around mode)
    MAD1 = mad_to_sigma * nanmedian(np.abs(Flux[(Flux < MODE)] - MODE))

    # Define the cutoff above which pixels are regarded significant:
    CUT = MODE + thresh * MAD1

    logger.debug("  Threshold used: %f", thresh)
    logger.debug("  Flux cut is: %f", CUT)
    if logger.isEnabledFor(logging.DEBUG) and plot_folder is not None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.fill_between(kernel.support, kernel.density, alpha=0.3)
        ax.axvline(MODE, color='k')
        ax.axvline(CUT, color='r')
        ax.set_xlabel('Flux')
        ax.set_ylabel('Distribution')
        save_figure(os.path.join(plot_folder, 'flux_distribution'))
        plt.close(fig)

    #==========================================================================
    # Find and separate clusters of pixels
    #==========================================================================

    # Cut out pixels of sum image with flux above the cut-off:
    # The following two lines are identical to "idx = (SumImage > CUT)",
    # but in this way we avoid a RuntimeWarning when SumImage contains NaNs.
    idx = np.zeros_like(SumImage, dtype='bool')
    np.greater(SumImage, CUT, out=idx, where=~np.isnan(SumImage))
    X2 = X[idx]
    Y2 = Y[idx]

    if np.all(~idx):
        raise K2P2NoStars("No flux above threshold")

    logger.debug("  Min for cluster is: %f", min_for_cluster)
    logger.debug("  Cluster radius is: %f", cluster_radius)

    # Run clustering algorithm
    XX, labels_ini, core_samples_mask = run_DBSCAN(X2, Y2, cluster_radius,
                                                   min_for_cluster)

    # Run watershed segmentation algorithm:
    # Require that at least one non-noise cluster was found.
    if segmentation and any(labels_ini != -1):
        # Create a set of dummy-masks that are made up of the clusters
        # that were found by DBSCAN, meaning that there could be masks
        # with several stars in them:
        DUMMY_MASKS = np.zeros((0, NY, NX), dtype='bool')
        DUMMY_MASKS_LABELS = []
        m = np.zeros_like(SumImage, dtype='bool')
        for lab in set(labels_ini):
            if lab == -1: continue
            # Create "image" of this mask:
            m[:, :] = False
            for x, y in XX[labels_ini == lab]:
                m[y, x] = True
            # Append them to lists:
            DUMMY_MASKS = np.append(DUMMY_MASKS, [m], axis=0)
            DUMMY_MASKS_LABELS.append(lab)

        # Run the dummy masks through the detection of saturated columns:
        logger.debug("Detecting saturated columns in non-segmentated masks...")
        smask, _ = k2p2_saturated(SumImage, DUMMY_MASKS, idx)

        # Create dictionary that will map a label to the mask of saturated pixels:
        if np.any(smask):
            saturated_masks = {}
            for u, sm in enumerate(smask):
                saturated_masks[DUMMY_MASKS_LABELS[u]] = sm
        else:
            saturated_masks = None

        # Run the mask segmentation algorithm on the found clusters:
        labels, unique_labels, NoCluster = k2p2WS(
            X,
            Y,
            X2,
            Y2,
            SumImage,
            XX,
            labels_ini,
            core_samples_mask,
            saturated_masks=saturated_masks,
            ws_thres=ws_thres,
            ws_footprint=ws_footprint,
            ws_blur=ws_blur,
            ws_alg=ws_alg,
            output_folder=plot_folder,
            catalog=catalog)
    else:
        labels = labels_ini
        unique_labels = set(labels)
        #NoCluster = len(unique_labels) - (1 if -1 in labels else 0)

    # Make sure it is a tuple and not a set - much easier to work with:
    unique_labels = tuple(unique_labels)

    # Create list of clusters and their number of pixels:
    No_pix_sort = np.zeros([len(unique_labels), 2])
    for u, lab in enumerate(unique_labels):
        No_pix_sort[u, 0] = np.sum(labels == lab)
        No_pix_sort[u, 1] = lab

    # Only select the clusters that have enough pixels and are not noise:
    cluster_select = (No_pix_sort[:, 0] >=
                      min_no_pixels_in_mask) & (No_pix_sort[:, 1] != -1)
    no_masks = sum(cluster_select)
    No_pix_sort = No_pix_sort[cluster_select, :]

    # No masks were found, so return None:
    if no_masks == 0:
        MASKS = None

    else:
        # Sort the clusters by the number of pixels:
        cluster_sort = np.argsort(No_pix_sort[:, 0])
        No_pix_sort = No_pix_sort[cluster_sort[::-1], :]

        # Create 3D array that will hold masks for each target:
        MASKS = np.zeros((no_masks, NY, NX))
        for u in range(no_masks):
            lab = No_pix_sort[u, 1]
            class_member_mask = (labels == lab)
            xy = XX[class_member_mask, :]
            MASKS[u, xy[:, 1], xy[:, 0]] = 1

        #==========================================================================
        # Fill holes in masks
        #==========================================================================
        pattern = np.array([[[0, 0.25, 0], [0.25, 0, 0.25],
                             [0, 0.25, 0]]])  # 3D array - shape=(1, 3, 3)
        mask_holes_indx = ndimage.convolve(MASKS,
                                           pattern,
                                           mode='constant',
                                           cval=0.0)
        mask_holes_indx = (mask_holes_indx > 0.95) & (
            MASKS == 0
        )  # Should be exactly 1.0, but let's assume some round-off errors
        if np.any(mask_holes_indx):
            logger.info("Filling %d holes in the masks",
                        np.sum(mask_holes_indx))
            MASKS[mask_holes_indx] = 1

            if plot_folder is not None:
                # Create image showing all masks at different levels:
                img = np.zeros((NY, NX))
                for r in np.transpose(np.where(MASKS > 0)):
                    img[r[1], r[2]] = r[0] + 1

                # Plot everything together:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                plot_image(img,
                           ax=ax,
                           scale='linear',
                           percentile=100,
                           cmap='nipy_spectral',
                           title='Holes in mask filled')

                # Create outline of filled holes:
                for hole in np.transpose(np.where(mask_holes_indx)):
                    cen = (hole[2] - 0.5, hole[1] - 0.5)
                    ax.add_patch(
                        mpl.patches.Rectangle(cen,
                                              1,
                                              1,
                                              color='k',
                                              lw=2,
                                              fill=False,
                                              hatch='//'))

                #fig.savefig(os.path.join(plot_folder, 'mask_filled_holes.png'), format='png', bbox_inches='tight')
                save_figure(os.path.join(plot_folder, 'mask_filled_holes'))
                plt.close(fig)

        #==========================================================================
        # Extend overflow lanes
        #==========================================================================
        if extend_overflow:
            logger.debug("Detecting saturated columns in masks...")

            # Find pixels that are saturated in each mask and find out if they should
            # be added to the mask:
            saturated_mask, pixels_added = k2p2_saturated(SumImage, MASKS, idx)
            logger.info("Overflow will add %d pixels in total to the masks.",
                        pixels_added)

            # If we have a catalog of stars, we will only allow stars above the saturation
            # limit to get their masks extended:
            if catalog is not None:
                # Filter the catalog, only keeping stars actually inside the current image:
                c = np.asarray(np.round(catalog[:, 0]), dtype='int32')
                r = np.asarray(np.round(catalog[:, 1]), dtype='int32')
                tmag = catalog[:, 2]
                indx = (c >= 0) & (c < SumImage.shape[1]) & (r >= 0) & (
                    r < SumImage.shape[0])
                c = c[indx]
                r = r[indx]
                tmag = tmag[indx]
                # Loop through the masks:
                for u in range(no_masks):
                    if np.any(saturated_mask[u, :, :]):
                        # Find out which stars fall inside this mask:
                        which_stars = np.asarray(MASKS[u, :, :][r, c],
                                                 dtype='bool')
                        if np.any(which_stars):
                            # Only allow extension of columns if the combined light of
                            # the targets in the mask exceeds the saturation limit:
                            mags_in_mask = tmag[which_stars]
                            mags_total = -2.5 * np.log10(
                                np.nansum(10**(-0.4 * mags_in_mask)))
                            if mags_total > saturation_limit:
                                # The combined magnitude of the targets is
                                # fainter than the saturation limit, so do not
                                # extend the mask:
                                saturated_mask[u, :, :] = False
                        else:
                            # Do not add saturation columns if no stars were found:
                            saturated_mask[u, :, :] = False

            # If we are going to plot later on, make a note
            # of how the outline of the masks looked before
            # changing anything:
            if plot_folder is not None and logger.isEnabledFor(logging.DEBUG):
                outline_before = []
                for u in range(no_masks):
                    outline_before.append(k2p2maks(MASKS[u, :, :], 1, 0.5))

            # Add the saturated pixels to the masks:
            MASKS[saturated_mask] = 1

            # If we are running as DEBUG, output some plots as well:
            if plot_folder is not None and logger.isEnabledFor(logging.DEBUG):
                logger.debug("Plotting overflow figures...")
                Ypixel = np.arange(NY)
                for u in range(no_masks):
                    mask = np.asarray(MASKS[u, :, :], dtype='bool')
                    mask_rows, mask_columns = np.where(mask)
                    mask_max = np.nanmax(SumImage[mask])

                    # The outline of the mask after saturated columns have been
                    # corrected for:
                    outline = k2p2maks(mask, 1, 0.5)

                    with PdfPages(
                            os.path.join(plot_folder, 'overflow_mask' +
                                         str(u) + '.pdf')) as pdf:
                        for c in sorted(set(mask_columns)):

                            column_rows = mask_rows[mask_columns == c]

                            title = "Mask %d - Column %d" % (u, c)
                            if np.any(saturated_mask[u, :, c]):
                                title += " - Saturated"

                            fig = plt.figure(figsize=(14, 6))
                            ax1 = fig.add_subplot(121)
                            ax1.axvspan(np.min(column_rows) - 0.5,
                                        np.max(column_rows) + 0.5,
                                        color='0.7')
                            ax1.plot(Ypixel,
                                     SumImage[:, c],
                                     'ro-',
                                     drawstyle='steps-mid')
                            ax1.set_title(title)
                            ax1.set_xlabel('Y pixels')
                            ax1.set_ylabel('Sum-image counts')
                            ax1.set_ylim(0, mask_max)
                            ax1.set_xlim(-0.5, NY - 0.5)

                            ax2 = fig.add_subplot(122)
                            plot_image(SumImage, ax=ax2, scale='log')
                            ax2.plot(outline_before[u][:, 0],
                                     outline_before[u][:, 1], 'r:')
                            ax2.plot(outline[:, 0], outline[:, 1], 'r-')
                            ax2.axvline(c, color='r', ls='--')

                            pdf.savefig(fig)
                            plt.close(fig)

    #==============================================================================
    # Create plots
    #==============================================================================
    if plot_folder is not None:
        # Colors to use for each cluster label:
        colors = plt.cm.gist_rainbow(np.linspace(0, 1, len(unique_labels)))

        # Colormap to use for clusters:
        # https://stackoverflow.com/questions/9707676/defining-a-discrete-colormap-for-imshow-in-matplotlib/9708079#9708079
        #cmap = mpl.colors.ListedColormap(np.append([[1, 1, 1, 1]], colors, axis=0))
        #cmap_norm = mpl.colors.BoundaryNorm(np.arange(-1, len(unique_labels)-1)+0.5, cmap.N)

        # Set up figure to hold subplots:
        if NY / NX > 5:
            aspect = 0.5
        else:
            aspect = 0.2

        fig0 = plt.figure(figsize=(2 * plt.figaspect(aspect)))
        fig0.subplots_adjust(wspace=0.12)

        # ---------------
        # PLOT 1
        ax0 = fig0.add_subplot(151)
        plot_image(SumImage, ax=ax0, scale='log', title='Sum-image')

        # ---------------
        # PLOT 2
        idx = np.zeros_like(SumImage, dtype='bool')
        np.greater(SumImage, CUT, out=idx, where=~np.isnan(SumImage))
        Flux_mat2 = np.zeros_like(SumImage)
        Flux_mat2[~idx] = 1
        Flux_mat2[idx] = 2
        Flux_mat2[ori_mask == 0] = 0

        ax2 = fig0.add_subplot(152)
        plot_image(Flux_mat2,
                   ax=ax2,
                   scale='linear',
                   percentile=100,
                   cmap='nipy_spectral',
                   title='Significant flux')

        # ---------------
        # PLOT 3
        ax2 = fig0.add_subplot(153)

        Flux_mat4 = np.zeros_like(SumImage)
        for u, lab in enumerate(unique_labels):
            class_member_mask = (labels == lab)
            xy = XX[class_member_mask, :]
            if lab == -1:
                # Black used for noise.
                ax2.plot(xy[:, 0],
                         xy[:, 1],
                         '+',
                         markerfacecolor='k',
                         markeredgecolor='k',
                         markersize=5)

            else:
                Flux_mat4[xy[:, 1], xy[:, 0]] = u + 1
                ax2.plot(xy[:, 0],
                         xy[:, 1],
                         'o',
                         markerfacecolor=tuple(colors[u]),
                         markeredgecolor='k',
                         markersize=5)

        ax2.set_title("Clustering + Watershed")
        ax2.set_xlim([-0.5, SumImage.shape[1] - 0.5])
        ax2.set_ylim([-0.5, SumImage.shape[0] - 0.5])
        ax2.set_aspect('equal')

        # ---------------
        # PLOT 4
        ax4 = fig0.add_subplot(154)
        plot_image(Flux_mat4,
                   ax=ax4,
                   scale='linear',
                   percentile=100,
                   cmap='nipy_spectral',
                   title='Extracted clusters')

        # ---------------
        # PLOT 5
        ax5 = fig0.add_subplot(155)
        plot_image(SumImage, ax=ax5, scale='log', title='Final masks')

        # Plot outlines of selected masks:
        for u in range(no_masks):
            # Get the color associated with this label:
            col = colors[int(np.where(unique_labels == No_pix_sort[u, 1])[0])]
            # Make mask outline:
            outline = k2p2maks(MASKS[u, :, :], 1, threshold=0.5)
            # Plot outlines:
            ax5.plot(outline[:, 0],
                     outline[:, 1],
                     color=col,
                     zorder=10,
                     lw=2.5)
            ax4.plot(outline[:, 0],
                     outline[:, 1],
                     color='k',
                     zorder=10,
                     lw=1.5)

        # Save the figure and close it:
        save_figure(os.path.join(plot_folder, 'masks_' + ws_alg))
        if show_plot:
            plt.show()
        else:
            plt.close('all')

    return MASKS, background_bandwidth
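The NaN-safe thresholding used twice in the function above (np.greater with out= and where=) can be isolated into a minimal sketch:

import numpy as np

SumImage = np.array([[1.0, np.nan, 5.0],
                     [0.2, 7.0, np.nan]])
CUT = 2.0

# Equivalent to "idx = (SumImage > CUT)" except that the comparison is only
# evaluated where the image is finite; NaN positions keep the initial False.
idx = np.zeros_like(SumImage, dtype='bool')
np.greater(SumImage, CUT, out=idx, where=~np.isnan(SumImage))
print(idx)
# [[False False  True]
#  [False  True False]]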
Beispiel #43
0
 r = SRD_generate_paths(x_disc, r0, kappa_r, theta_r, sigma_r,
                        T, M, I, rand, 0, cho_matrix)
 # volatility process paths
 v = SRD_generate_paths(x_disc, v0, kappa_v, theta_v, sigma_v,
                        T, M, I, rand, 2, cho_matrix)
 # index level process paths
 S = H93_index_paths(S0, r, v, 1, cho_matrix)
 for K in k_list:  # strikes
     # inner value matrix
     h = np.maximum(K - S, 0)
     # value/cash flow matrix
     V = np.maximum(K - S, 0)
     for t in xrange(M - 1, 0, -1):
         df = np.exp(-(r[t] + r[t + 1]) / 2 * dt)
         # select only ITM paths
         itm = np.greater(h[t], 0)
         relevant = np.nonzero(itm)
         rel_S = np.compress(itm, S[t])
         no_itm = len(rel_S)
         if no_itm == 0:
             cv = np.zeros((I), dtype=np.float)
         else:
             rel_v = np.compress(itm, v[t])
             rel_r = np.compress(itm, r[t])
             rel_V = (np.compress(itm, V[t + 1]) *
                      np.compress(itm, df))
             matrix = np.zeros((D + 1, no_itm), dtype=np.float)
             matrix[10] = rel_S * rel_v * rel_r
             matrix[9] = rel_S * rel_v
             matrix[8] = rel_S * rel_r
             matrix[7] = rel_v * rel_r
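The truncated fragment above is the core of a Longstaff-Schwartz step: keep only the in-the-money paths, regress their discounted continuation values on basis functions of the state, and compare the result with immediate exercise. A stripped-down sketch with made-up numbers and a simple polynomial basis (the original also uses v- and r-dependent terms):

import numpy as np

rng = np.random.default_rng(1)
K = 100.0
S_t = rng.uniform(80, 120, size=1000)                        # index level at time t
V_next = np.maximum(K - rng.uniform(80, 120, size=1000), 0)  # stand-in for the t+1 cash flows
df = 0.999                                                   # one-step discount factor

h_t = np.maximum(K - S_t, 0)                  # immediate exercise value
itm = np.greater(h_t, 0)                      # only ITM paths enter the regression
rel_S = np.compress(itm, S_t)
rel_V = np.compress(itm, V_next) * df

basis = np.vstack([np.ones_like(rel_S), rel_S, rel_S ** 2])
coef = np.linalg.lstsq(basis.T, rel_V, rcond=None)[0]
cont_value = coef @ basis                     # fitted continuation values
exercise = h_t[itm] > cont_value              # exercise where intrinsic beats continuation
print(exercise.mean())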
Beispiel #44
0
def trial(x,t,s, name):
    '''TRIAL Display data and calculated solution together

       Syntax:
           trial(x, t, s, name )          
   
          name = name of the solution
          x    = vector of parameters
          t,s  = data set

       Description:
           The function trial allows one to produce a graph that superposes
           data and a model. This can be used to graphically test the quality
           of a fit, or to manually adjust the parameters of a model until a
           satisfactory fit is obtained.

       Example:
           trial(p,t,s,'ths')
           trial([0.1,1e-3],t,s, 'cls')


       See also: ldf, diagnostic, fit, ths_dmo'''
    t,s = hyclean(t,s)
    td,sd = ldiffs(t,s, npoints=40)
    
    tplot = np.logspace(np.log10(t[0]), np.log10(t[-1]), endpoint=True, base=10.0, dtype=np.float64)
    
    if name == 'ths' :
        sc = hp.ths.dim(x,tplot)
    if name == 'Del' :
        sc = hp.Del.dim(x,tplot)
    
    
    tdc,dsc = ldiff(tplot,sc)

    
    if np.mean(sd) < 0 :
        sd = [ -x for x in sd]
        dsc = [ -x for x in dsc]
    condition = np.greater(sd,0)
    td = np.extract(condition,td)
    sd = np.extract(condition,sd)
    
    condition2 = np.greater(dsc,0)
    tdc = np.extract(condition2,tdc)
    dsc = np.extract(condition2,dsc)    
    
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel('t')
    ax1.set_ylabel('s')
    ax1.set_title('Log Log diagnostic plot')
    ax1.loglog(t, s, c='r', marker = 'o', linestyle = '')
    ax1.loglog(td,sd, c = 'b', marker = 'x', linestyle = '')
    ax1.loglog(tplot,sc, c = 'g', linestyle = ':')
    ax1.loglog(tdc,dsc, c = 'y', linestyle = '-.')
    
    ax1.grid(True)

    plt.show()
    

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel('t')
    ax1.set_ylabel('s')
    ax1.set_title('Semi Log diagnostic plot')
    ax1.semilogx(t, s, c='r', marker = 'o', linestyle = '')
    ax1.semilogx(td,sd, c = 'b', marker = 'x', linestyle = '')
    ax1.semilogx(tplot,sc, c = 'g', linestyle = ':')
    ax1.semilogx(tdc,dsc, c = 'y', linestyle = '-.')
    
    ax1.grid(True)
    
    plt.show()        
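A small illustration of the positive-value filtering used above, which keeps the derivative data plottable on logarithmic axes:

import numpy as np

td = np.array([0.1, 0.5, 1.0, 2.0])
sd = np.array([-0.02, 0.0, 0.3, 1.1])

# Keep only the strictly positive values, exactly as done with np.extract above.
condition = np.greater(sd, 0)
td_pos = np.extract(condition, td)
sd_pos = np.extract(condition, sd)
print(td_pos, sd_pos)   # [1. 2.] [0.3 1.1]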
Beispiel #45
0
def detect_blobs_large_image(filename_base, image5d, offset, size,
                             verify=False, save_dfs=True, full_roi=False):
    """Detect blobs within a large image through parallel processing of 
    smaller chunks.
    
    Args:
        filename_base: Base path to use file output.
        image5d: Large image to process as a Numpy array of t,z,y,x,[c]
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        verify: True to verify detections against truth database; defaults 
            to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI; defaults
            to False.
    """
    time_start = time()
    if size is None or offset is None:
        # uses the entire stack if no size or offset specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # change base filename for ROI-based partial stack
        filename_base = make_subimage_name(filename_base, offset, size)
    filename_subimg = libmag.combine_paths(filename_base, config.SUFFIX_SUBIMG)
    filename_blobs = libmag.combine_paths(filename_base, config.SUFFIX_BLOBS)
    
    # get ROI for given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, size, offset)
    _, channels = plot_3d.setup_channels(roi, config.channel, 3)
    
    # prep chunking ROI into sub-ROIs with size based on segment_size, scaling
    # by physical units to make more independent of resolution
    time_detection_start = time()
    settings = config.roi_profile  # use default settings
    scaling_factor = detector.calc_scaling_factor()
    print("microsope scaling factor based on resolutions: {}"
          .format(scaling_factor))
    denoise_size = config.roi_profile["denoise_size"]
    denoise_max_shape = None
    if denoise_size:
        # further subdivide each sub-ROI for local preprocessing
        denoise_max_shape = np.ceil(
            np.multiply(scaling_factor, denoise_size)).astype(int)

    # overlap sub-ROIs to minimize edge effects
    overlap_base = chunking.calc_overlap()
    tol = np.multiply(overlap_base, settings["prune_tol_factor"]).astype(int)
    overlap_padding = np.copy(tol)
    overlap = np.copy(overlap_base)
    exclude_border = config.roi_profile["exclude_border"]
    if exclude_border is not None:
        # exclude border to avoid blob detector edge effects, where blobs
        # often collect at the faces of the sub-ROI;
        # ensure that overlap is greater than twice the border exclusion per
        # axis so that no plane will be excluded from both overlapping sub-ROIs
        exclude_border_thresh = np.multiply(2, exclude_border)
        overlap_less = np.less(overlap, exclude_border_thresh)
        overlap[overlap_less] = exclude_border_thresh[overlap_less]
        excluded = np.greater(exclude_border, 0)
        overlap[excluded] += 1  # additional padding
        overlap_padding[excluded] = 0  # no need to prune past excluded border
    print("sub-ROI overlap: {}, pruning tolerance: {}, padding beyond "
          "overlap for pruning: {}, exclude borders: {}"
          .format(overlap, tol, overlap_padding, exclude_border))
    max_pixels = np.ceil(np.multiply(
        scaling_factor, 
        config.roi_profile["segment_size"])).astype(int)
    print("preprocessing max shape: {}, detection max pixels: {}"
          .format(denoise_max_shape, max_pixels))
    sub_roi_slices, sub_rois_offsets = chunking.stack_splitter(
        roi.shape, max_pixels, overlap)
    # TODO: option to distribute groups of sub-ROIs to different servers 
    # for blob detection
    seg_rois = detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = _prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets, channels,
        overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned)) 
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            db_path_base = None
            exp_name = os.path.splitext(os.path.basename(config.filename))[0]
            try:
                if config.truth_db is None:
                    # find and load truth DB based on filename and subimage
                    db_path_base = os.path.basename(filename_base)
                    print("about to verify with truth db from {}"
                          .format(db_path_base))
                    sqlite.load_truth_db(db_path_base)
                if config.truth_db is not None:
                    # truth DB may contain multiple experiments for different
                    # subimages; series not included in exp name since in ROI
                    rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        # exp may have been named by ROI
                        print("{} experiment name not found, will try with"
                              "ROI offset/size".format(exp_name))
                        exp_name = make_subimage_name(exp_name, offset, size)
                        rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        raise LookupError(
                            "No truth set ROIs found for experiment {}, will "
                            "skip detection verification".format(exp_name))
                    print("load ROIs from exp: {}".format(exp_name))
                    exp_id = sqlite.insert_experiment(
                        config.verified_db.conn, config.verified_db.cur, 
                        exp_name, None)
                    verify_tol = np.multiply(
                        overlap_base, settings["verify_tol_factor"])
                    stats_detection, fdbk = detector.verify_rois(
                        rois, segments_all, config.truth_db.blobs_truth, 
                        verify_tol, config.verified_db, exp_id, config.channel)
            except FileNotFoundError:
                libmag.warn("Could not load truth DB from {}; "
                            "will not verify ROIs".format(db_path_base))
            except LookupError as e:
                libmag.warn(str(e))
    
    file_time_start = time()
    if config.save_subimg:
        if (isinstance(config.image5d, np.memmap) and 
                config.image5d.filename == os.path.abspath(filename_subimg)):
            # file at sub-image save path may have been opened as a memmap
            # file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(filename_subimg))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(filename_subimg, "wb") as f:
                np.save(f, roi)

    # save blobs
    # TODO: only segments used; consider removing the rest except ver
    outfile_blobs = open(filename_blobs, "wb")
    np.savez(outfile_blobs, ver=BLOBS_NP_VER, segments=segments_all,
             resolutions=config.resolutions,
             basename=os.path.basename(config.filename),  # only save name
             offset=offset, roi_size=size)  # None unless explicitly set
    outfile_blobs.close()
    file_save_time = time() - file_time_start
    
    # whole image benchmarking time
    times = (
        [detection_time], 
        [pruning_time], 
        time() - time_start)
    times_dict = {}
    for key, val in zip(StackTimes, times):
        times_dict[key] = val
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("file save time:", file_save_time)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, segments_all
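The overlap/border bookkeeping near the top of the function above is pure element-wise NumPy; a short sketch with made-up per-axis values:

import numpy as np

overlap = np.array([8, 8, 8])
exclude_border = np.array([2, 5, 0])

# Ensure the sub-ROI overlap is larger than twice the excluded border on each
# axis, mirroring the adjustment in detect_blobs_large_image above.
exclude_border_thresh = np.multiply(2, exclude_border)
overlap_less = np.less(overlap, exclude_border_thresh)
overlap[overlap_less] = exclude_border_thresh[overlap_less]
excluded = np.greater(exclude_border, 0)
overlap[excluded] += 1
print(overlap)  # [ 9 11  8]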
Beispiel #46
0
    def _calculate_anchors_info(self, all_anchor_boxes_3d, empty_anchor_filter,
                                gt_labels):
        """Calculates the list of anchor information in the format:
            N x 8 [max_gt_2d_iou, max_gt_3d_iou, (6 x offsets), class_index]
                max_gt_iou - highest 3D iou with any ground truth box
                offsets - encoded offsets [dx, dy, dz, d_dimx, d_dimy, d_dimz]
                class_index - the anchor's class as an index
                    (e.g. 0 or 1, for "Background" or "Car")

        Args:
            all_anchor_boxes_3d: list of anchors in box_3d format
                N x [x, y, z, l, w, h, ry]
                Anchors are ordered with x increasing and z decreasing; y is
                the voxel coordinate in the ground plane corresponding to (x, z).
                (l, w, h) are the clustered size centres of the current class
                and ry is the rotation angle (0 or pi/2).
            empty_anchor_filter: boolean mask of which anchors are non empty
            gt_labels: list of Object Label data format containing ground truth
                labels to generate positives/negatives from.

        Returns:
            list of anchor info
        """
        # Check for ground truth objects
        if len(gt_labels) == 0:
            raise Warning("No valid ground truth label to generate anchors.")

        kitti_utils = self._dataset.kitti_utils

        # Filter empty anchors
        anchor_indices = np.where(empty_anchor_filter)[0]
        anchor_boxes_3d = all_anchor_boxes_3d[
            empty_anchor_filter]  # keep only anchors whose voxels contain point-cloud points
        # Convert anchor_boxes_3d to anchor format
        anchors = box_3d_encoder.box_3d_to_anchor(anchor_boxes_3d)

        # Convert ground truth to boxes_3d -> anchors -> iou format
        gt_boxes_3d = np.asarray([
            box_3d_encoder.object_label_to_box_3d(gt_obj)
            for gt_obj in gt_labels
        ])
        gt_anchors = box_3d_encoder.box_3d_to_anchor(gt_boxes_3d,
                                                     ortho_rotate=True)

        rpn_iou_type = self.mini_batch_utils.rpn_iou_type
        if rpn_iou_type == '2d':
            # Convert anchors to 2d iou format
            anchors_for_2d_iou, _ = np.asarray(
                anchor_projector.project_to_bev(anchors,
                                                kitti_utils.bev_extents)
            )  #anchors_for_2d_iou:[x1,z1,x2,z2]

            gt_boxes_for_2d_iou, _ = anchor_projector.project_to_bev(
                gt_anchors, kitti_utils.bev_extents)

        elif rpn_iou_type == '3d':
            # Convert anchors to 3d iou format for calculation
            anchors_for_3d_iou = box_3d_encoder.box_3d_to_3d_iou_format(
                anchor_boxes_3d)

            gt_boxes_for_3d_iou = \
                box_3d_encoder.box_3d_to_3d_iou_format(gt_boxes_3d)
        else:
            raise ValueError('Invalid rpn_iou_type {}', rpn_iou_type)

        # Initialize sample and offset lists
        num_anchors = len(anchor_boxes_3d)
        all_info = np.zeros((num_anchors, self.mini_batch_utils.col_length))

        # Update anchor indices (indices of voxels that contain point-cloud points)
        all_info[:, self.mini_batch_utils.col_anchor_indices] = anchor_indices

        # For each of the labels, generate samples (one per line of the label file)
        for gt_idx in range(len(gt_labels)):

            gt_obj = gt_labels[gt_idx]
            gt_box_3d = gt_boxes_3d[gt_idx]

            # Get 2D or 3D IoU for every anchor
            if self.mini_batch_utils.rpn_iou_type == '2d':
                gt_box_for_2d_iou = gt_boxes_for_2d_iou[gt_idx]
                ious = evaluation.two_d_iou(gt_box_for_2d_iou,
                                            anchors_for_2d_iou)  # compute IoU
            elif self.mini_batch_utils.rpn_iou_type == '3d':
                gt_box_for_3d_iou = gt_boxes_for_3d_iou[gt_idx]
                ious = evaluation.three_d_iou(gt_box_for_3d_iou,
                                              anchors_for_3d_iou)

            # Only update indices with a higher iou than before
            update_indices = np.greater(
                ious, all_info[:, self.mini_batch_utils.col_ious])

            # Get ious to update
            ious_to_update = ious[update_indices]

            # Calculate offsets, use 3D iou to get highest iou
            anchors_to_update = anchors[update_indices]
            gt_anchor = box_3d_encoder.box_3d_to_anchor(gt_box_3d,
                                                        ortho_rotate=True)
            offsets = anchor_encoder.anchor_to_offset(anchors_to_update,
                                                      gt_anchor)

            # Convert gt type to index
            class_idx = kitti_utils.class_str_to_index(gt_obj.type)

            # Update anchors info (indices already updated)
            # [index, iou, (offsets), class_index]
            all_info[update_indices, self.mini_batch_utils.
                     col_ious] = ious_to_update  # keep, per anchor, the highest IoU over all gt objects

            all_info[
                update_indices,
                self.mini_batch_utils.col_offsets_lo:self.mini_batch_utils.
                col_offsets_hi] = offsets  # offsets normalised by anchor size, log(gt size / anchor size)
            all_info[
                update_indices, self.mini_batch_utils.
                col_class_idx] = class_idx  # class of the highest-IoU gt object for each anchor

        return all_info
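The masked assignment above is a running-best pattern: compare the new IoUs against the best seen so far and overwrite only where they improve. A minimal sketch with made-up numbers:

import numpy as np

best_iou = np.array([0.1, 0.6, 0.0, 0.4])
best_cls = np.array([1, 2, -1, 1])

new_iou = np.array([0.3, 0.5, 0.2, 0.7])   # IoU of every anchor with one new gt box
new_cls = 3

update = np.greater(new_iou, best_iou)      # boolean mask of improved anchors
best_iou[update] = new_iou[update]
best_cls[update] = new_cls
print(best_iou, best_cls)  # best_iou -> [0.3 0.6 0.2 0.7], best_cls -> [3 2 3 3]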
Beispiel #47
0
def _warp_sun_coordinates(xy, smap, dt: u.s, **diffrot_kwargs):
    """
    Function that returns a new list of coordinates for each input coord.
    This is an inverse function needed by the scikit-image `transform.warp`
    function.

    Parameters
    ----------
    xy : `numpy.ndarray`
        Array from `transform.warp`
    smap : `~sunpy.map`
        Original map that we want to transform
    dt : `~astropy.units.Quantity`
        Desired interval to rotate the input map by solar differential rotation.

    Returns
    -------
    xy2 : `~numpy.ndarray`
        Array with the inverse transformation
    """
    # NOTE: The time is being subtracted - this is because this function
    # calculates the inverse of the transformation.
    rotated_time = smap.date - dt

    # Calculate the hpc coords
    x = np.arange(0, smap.dimensions.x.value)
    y = np.arange(0, smap.dimensions.y.value)
    xx, yy = np.meshgrid(x, y)
    # the xy input array would have the following shape
    # xy = np.dstack([xx.T.flat, yy.T.flat])[0]

    # We start by converting the pixel to world
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        hpc_coords = smap.pixel_to_world(xx * u.pix, yy * u.pix)

        # then diff-rotate the hpc coordinates to the desired time
        rotated_coord = solar_rotate_coordinate(hpc_coords, rotated_time,
                                                **diffrot_kwargs)

        # To find the values that are behind the sun we need to convert them
        # to HeliographicStonyhurst
        findOccult = rotated_coord.transform_to(HeliographicStonyhurst)

        with np.errstate(invalid='ignore'):
            # and find which ones are outside the [-90, 90] range.
            occult = np.logical_or(np.less(findOccult.lon, -90 * u.deg),
                                   np.greater(findOccult.lon, 90 * u.deg))

        # NaN-ing values that move to the other side of the sun
        rotated_coord.data.lon[occult] = np.nan * u.deg
        rotated_coord.data.lat[occult] = np.nan * u.deg
        rotated_coord.cache.clear()

        # Go back to pixel co-ordinates
        x2, y2 = smap.world_to_pixel(rotated_coord)

    # Re-stack the data into the correct output form
    xy2 = np.dstack([x2.T.value.flat, y2.T.value.flat])[0]
    # Return a masked array with the non-finite entries masked.
    xy2 = np.ma.array(xy2, mask=np.isnan(xy2))
    return xy2
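For context, a function with this signature is meant to be handed to `skimage.transform.warp` as its `inverse_map`: it receives an (M, 2) array of output (col, row) coordinates and must return the input coordinates to sample from. A generic, non-solar sketch of that contract (the shift mapping and its parameters are invented for illustration):

import numpy as np
from skimage import data, transform

def shift_inverse_map(xy, dx=0.0, dy=0.0):
    # xy is an (M, 2) array of output (col, row) coordinates; return the
    # input-image coordinates that each output pixel should sample from.
    return xy + np.array([dx, dy])

image = data.camera()
# Sample from 10 px to the left and 5 px up, i.e. shift the content right/down.
shifted = transform.warp(image, shift_inverse_map,
                         map_args={'dx': -10.0, 'dy': -5.0})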
Beispiel #48
0
    def recomputeExptime(self):
        """Compute the exposure time and update the EXPTIME keyword.

        Returns
        -------
        gti: list of two-element lists
            Each element of gti is a two-element list, the start and stop
            times (in seconds since EXPSTART) for a "good time interval."
        """

        if self.events_time is None:
            events_hdu = self.fd[self.events_hdunum]
            self.events_time = cosutil.getColCopy(data=events_hdu.data,
                                                  column="time")
        nevents = len(self.events_time)
        zero = np.zeros(1, dtype=np.int8)
        one = np.ones(1, dtype=np.int8)
        # flag1 and flag2 are boolean arrays.  An element will be True if
        # the corresponding element of the DQ column (self.dq) is flagged as
        # being within a bad time interval (flag1) or as a burst (flag2).
        flag1 = np.greater(np.bitwise_and(self.dq, calcosparam.DQ_BAD_TIME), 0)
        flag2 = np.greater(np.bitwise_and(self.dq, calcosparam.DQ_BURST), 0)
        flag = np.logical_or(flag1, flag2)
        # iflag is an array of 8-bit signed integer flags, 1 where self.dq
        # includes either the burst flag or the bad-time flag, 0 elsewhere.
        iflag = np.where(flag, one, zero)
        del (flag, flag1, flag2)

        # dflag is non-zero (+1 or -1) at elements where iflag changes
        # from 0 to 1 or from 1 to 0.
        dflag = iflag[1:] - iflag[0:-1]
        # non_zero will be something like:  (array([ 2,  7, 11, 13]),)
        # For each value i in non_zero[0], iflag[i+1] differs from iflag[i].
        non_zero = np.where(dflag != 0)
        dflag_nz = dflag[non_zero]
        nz = non_zero[0]  # extract the array of indices
        n_indices = len(nz)

        gti_indices = []  # list of good time intervals
        # it_begin and it_end are the indices in events_time of the
        # beginning and end respectively of a good time interval.
        it_begin = None
        it_end = None
        if iflag[0] == 0:
            it_begin = 0
        for i in range(n_indices):
            if dflag[nz[i]] > 0:  # end of a good time interval
                it_end = nz[i]
                gti_indices.append([it_begin, it_end])
            elif dflag[nz[i]] < 0:  # end of a bad time interval
                it_begin = nz[i] + 1
                it_end = None
            else:
                print("internal error:  dflag = %d" % dflag[nz[i]])
        if it_end is None and it_begin is not None:
            gti_indices.append([it_begin, nevents - 1])

        # Add up the good time intervals, and create a GTI list.
        gti = []
        for (it_begin, it_end) in gti_indices:
            gti.append([self.events_time[it_begin], self.events_time[it_end]])

        # The original GTI table (self.first_gti) may exclude some region or
        # regions (e.g. if data are ignored when the buffer is full), and
        # these would not show up in the DQ column if there were no events
        # during those time intervals.  To account for this, use the original
        # GTI table as a mask for the gti that we just found.
        gti = self.mergeGTI(self.first_gti, gti)
        # Round off the start and stop times to three decimals.
        gti = self.roundGTI(gti, precision=3)

        exptime = 0.
        for (t0, t1) in gti:
            exptime += (t1 - t0)

        # Update the EXPTIME keyword, and also EXPTIMEA or EXPTIMEB for FUV.
        detector = self.fd[0].header.get("detector", default="missing")
        if detector == "FUV":
            segment = self.fd[0].header.get("segment", default="missing")
            exptime_key = "exptime" + segment[-1].lower()
        else:
            exptime_key = "exptime"
        old_exptime = self.fd[self.events_hdunum].header.get(exptime_key, 0.)
        self.fd[self.events_hdunum].header[exptime_key] = exptime
        if detector == "FUV":
            self.fd[self.events_hdunum].header["exptime"] = exptime
        if self.verbose and abs(exptime - old_exptime) > 0.032:
            print("EXPTIME changed from %.8g to %.8g" % (old_exptime, exptime))

        return gti
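The interval-finding trick above (flag bad events, difference the flag array, and read interval boundaries off the sign changes) can be sketched on its own with toy data. This is not the calcos implementation, just the same idea using np.diff:

import numpy as np

times = np.arange(10, dtype=float)   # event times in seconds
iflag = np.array([0, 0, 1, 1, 0, 0, 0, 1, 0, 0], dtype=np.int8)   # 1 = bad

dflag = np.diff(iflag)                        # +1: good->bad, -1: bad->good
starts = list(np.where(dflag == -1)[0] + 1)   # first good index after a bad run
ends = list(np.where(dflag == 1)[0])          # last good index before a bad run
if iflag[0] == 0:
    starts.insert(0, 0)
if iflag[-1] == 0:
    ends.append(len(iflag) - 1)

gti = [[times[b], times[e]] for b, e in zip(starts, ends)]
print(gti)   # [[0.0, 1.0], [4.0, 6.0], [8.0, 9.0]]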
Beispiel #49
0
    def test_greater(self):
        arr1 = (1, 1) * pq.m
        arr2 = (1.0, 2.0) * pq.m
        self.assertTrue(np.all(np.greater(arr2 * 1.01, arr1)))
        self.assertFalse(np.all(np.greater(arr2, arr1)))
Beispiel #50
0
    def getCartesianMove(self,
                         frame,
                         q0,
                         base_steps=1000,
                         steps_per_meter=1000,
                         steps_per_radians=4,
                         time_multiplier=1,
                         percent_acc=1,
                         use_joint_move=False,
                         table_frame=None):

        if table_frame is not None:
            if frame.p[2] < table_frame[0][2]:
                rospy.logerr(
                    "Ignoring move to waypoint due to relative z: %f < %f" %
                    (frame.p[2], table_frame[0][2]))
                return JointTrajectory()

        if q0 is None:
            rospy.logerr("Invalid initial joint position in getCartesianMove")
            return JointTrajectory()

        # interpolate between start and goal
        pose = pm.fromMatrix(self.kdl_kin.forward(q0))

        cur_rpy = np.array(pose.M.GetRPY())
        cur_xyz = np.array(pose.p)

        goal_rpy = np.array(frame.M.GetRPY())
        goal_xyz = np.array(frame.p)
        delta_rpy = np.linalg.norm(goal_rpy - cur_rpy)
        delta_translation = (pose.p - frame.p).Norm()
        if delta_rpy < 0.001 and delta_translation < 0.001:
            rospy.logwarn("Robot is already in the goal position.")
            return JointTrajectory(points=[
                JointTrajectoryPoint(positions=q0,
                                     velocities=[0] * len(q0),
                                     accelerations=[0] * len(q0),
                                     time_from_start=rospy.Duration(0.0))
            ],
                                   joint_names=self.joint_names)

        q_target = self.ik(pm.toMatrix(frame), q0)
        if q_target is None:
            rospy.logerr("No IK solution on cartesian move target")
            return JointTrajectory()
        else:
            if np.any(
              np.greater(
                np.absolute(q_target[:2] - np.array(q0[:2])), np.pi/2)) \
              or np.absolute(q_target[3] - q0[3]) > np.pi:

                rospy.logerr("Dangerous IK solution, abort getCartesianMove")
                return JointTrajectory()

        dq_target = q_target - np.array(q0)
        if np.sum(np.absolute(dq_target)) < 0.0001:
            rospy.logwarn("Robot is already in the goal position.")
            return JointTrajectory(points=[
                JointTrajectoryPoint(positions=q0,
                                     velocities=[0] * len(q0),
                                     accelerations=[0] * len(q0),
                                     time_from_start=rospy.Duration(0.0))
            ],
                                   joint_names=self.joint_names)

        steps, t_v_const_step, t_v_setting_max, steps_to_max_speed, const_velocity_max_step = self.calculateAccelerationProfileParameters(
            dq_target, base_steps, steps_per_meter, steps_per_radians,
            delta_translation, time_multiplier,
            self.acceleration_magnification * percent_acc)

        traj = JointTrajectory()
        traj.points.append(
            JointTrajectoryPoint(positions=q0,
                                 velocities=[0] * len(q0),
                                 accelerations=[0] * len(q0)))
        # Compute a smooth trajectory.
        for i in range(1, steps + 1):
            xyz = None
            rpy = None
            q = None

            if not use_joint_move:
                xyz = cur_xyz + ((float(i) / steps) * (goal_xyz - cur_xyz))
                rpy = cur_rpy + ((float(i) / steps) * (goal_rpy - cur_rpy))

                # Create transform for goal frame
                frame = pm.toMatrix(
                    kdl.Frame(kdl.Rotation.RPY(rpy[0], rpy[1], rpy[2]),
                              kdl.Vector(xyz[0], xyz[1], xyz[2])))

                # Use current inverse kinematics solver with current position
                q = self.ik(frame, q0)
            else:
                q = np.array(q0) + (float(i) / steps) * dq_target
                q = q.tolist()

                #q = self.kdl_kin.inverse(frame,q0)
            if self.verbose:
                print("%d -- %s %s = %s" % (i, str(xyz), str(rpy), str(q)))

            if q is not None:
                total_time = self.calculateTimeOfTrajectoryStep(
                    i, steps_to_max_speed, const_velocity_max_step,
                    t_v_const_step, t_v_setting_max)
                # Compute the distance to the last point for each joint. We use this to compute our joint velocities.
                dq_i = np.array(q) - np.array(traj.points[-1].positions)
                if np.sum(np.abs(dq_i)) < self.skip_tol:
                    rospy.logwarn(
                        "Joint trajectory point %d is repeating previous trajectory point. "
                        % i)
                    continue

                traj.points[i - 1].velocities = (dq_i) / (
                    total_time - traj.points[i - 1].time_from_start.to_sec())
                pt = JointTrajectoryPoint(positions=q,
                                          velocities=[0] * len(q),
                                          accelerations=[0] * len(q))

                pt.time_from_start = rospy.Duration(total_time)
                # pt.time_from_start = rospy.Duration(i * ts)
                traj.points.append(pt)
            else:
                rospy.logwarn(
                    "No IK solution for one of the trajectory points on the "
                    "cartesian move to the target")

        if len(traj.points) < base_steps:
            rospy.logerr("Planning failure with "
                         + str(len(traj.points))
                         + " / " + str(base_steps)
                         + " points.")
            return JointTrajectory()

        traj.joint_names = self.joint_names
        return traj
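A standalone sketch of the IK sanity check used above — reject a solution whose base joints jump by more than pi/2, or whose fourth joint flips by more than pi. The joint vectors and the helper name are made up for illustration:

import numpy as np

def looks_dangerous(q_target, q0, limit=np.pi / 2):
    # True if either of the first two joints would jump by more than `limit`,
    # or the fourth joint by more than pi (mirrors the check in the example).
    q_target = np.asarray(q_target)
    q0 = np.asarray(q0)
    big_base_jump = np.any(np.greater(np.absolute(q_target[:2] - q0[:2]), limit))
    big_wrist_flip = np.absolute(q_target[3] - q0[3]) > np.pi
    return big_base_jump or big_wrist_flip

q0 = [0.0, 0.1, 0.0, 0.0, 0.0, 0.0]
print(looks_dangerous([0.2, 0.2, 0.0, 0.0, 0.0, 0.0], q0))   # False
print(looks_dangerous([2.0, 0.2, 0.0, 0.0, 0.0, 0.0], q0))   # True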
Beispiel #51
0
def export_hydro_model(hydro_rs_xml, topo_rs_xml, out_path):

    log = Logger("Hydro GIS Export")

    # 1 todo Read project.rs.xml
    rs_hydro = Project(hydro_rs_xml)
    rs_topo = TopoProject(topo_rs_xml)
    hydro_results_folder = os.path.dirname(hydro_rs_xml)
    csvfile_hydro = os.path.join(
        hydro_results_folder,
        "dem_grid_results.csv")  # todo get this from hydro project xml

    if "Visit" not in rs_hydro.ProjectMetadata:
        raise MissingException("Cannot Find Visit ID")
    visit_id = rs_hydro.ProjectMetadata['Visit']

    df_csv = pandas.read_csv(csvfile_hydro)
    log.info("Read hydro results csv as data frame")

    # Get DEM Props
    with rasterio.open(rs_topo.getpath("DEM")) as rio_dem:
        dem_crs = rio_dem.crs
        dem_bounds = rio_dem.bounds
        dem_nodata = rio_dem.nodata
    out_transform = rasterio.transform.from_origin(dem_bounds.left,
                                                   dem_bounds.top, 0.1, 0.1)

    pad_top = int((dem_bounds.top - max(df_csv['Y'])) / 0.1)
    pad_bottom = int((min(df_csv['Y']) - dem_bounds.bottom) / 0.1)
    pad_right = int((dem_bounds.right - max(df_csv['X'])) / 0.1)
    pad_left = int((min(df_csv['X']) - dem_bounds.left) / 0.1)
    log.info("Read DEM properties")

    # generate shp
    geom = [Point(xy) for xy in zip(df_csv.X, df_csv.Y)]
    df_output = df_csv.drop(
        ["X", "Y", "Depth.Error", "WSE", "BedLevel"],
        axis="columns")  #, inplace=True) # save a bit of space
    gdf_hydro = geopandas.GeoDataFrame(df_output, geometry=geom)
    gdf_hydro.crs = dem_crs
    gdf_hydro.columns = gdf_hydro.columns.str.replace(".", "_")
    gdf_hydro["VelDir"] = numpy.subtract(
        90,
        numpy.degrees(
            numpy.arctan2(gdf_hydro["Y_Velocity"], gdf_hydro["X_Velocity"])))
    gdf_hydro["VelBearing"] = numpy.where(gdf_hydro['VelDir'] < 0,
                                          360 + gdf_hydro["VelDir"],
                                          gdf_hydro["VelDir"])
    gdf_hydro.drop("VelDir", axis="columns", inplace=True)
    #gdf_hydro.to_file(os.path.join(out_path, "HydroResults.shp"))
    del df_output, gdf_hydro
    log.info("Generated HydroResults.shp")

    for col in [
            col for col in df_csv.columns
            if col not in ["X", "Y", "X.Velocity", "Y.Velocity"]
    ]:
        df_pivot = df_csv.pivot(index="Y", columns="X", values=col)
        np_raw = df_pivot.iloc[::-1].as_matrix()

        np_output = numpy.pad(np_raw,
                              ((pad_top, pad_bottom), (pad_left, pad_right)),
                              mode="constant",
                              constant_values=numpy.nan)

        with rasterio.open(os.path.join(out_path, "{}.tif".format(col)),
                           'w',
                           driver='GTiff',
                           height=np_output.shape[0],
                           width=np_output.shape[1],
                           count=1,
                           dtype=np_output.dtype,
                           crs=dem_crs,
                           transform=out_transform,
                           nodata=dem_nodata) as rio_output:
            rio_output.write(np_output, 1)
        log.info("Generated output Raster for {}".format(col))

        if col == "Depth":
            # Generate water extent polygon
            np_extent = numpy.greater(np_output, 0)
            mask = numpy.isfinite(np_output)
            shapes = features.shapes(np_extent.astype('float32'),
                                     mask,
                                     transform=out_transform)
            gdf_extent_raw = geopandas.GeoDataFrame.from_features(
                geopandas.GeoSeries([asShape(s) for s, v in shapes]))
            gdf_extent = geopandas.GeoDataFrame.from_features(
                gdf_extent_raw.geometry.simplify(0.5))
            gdf_extent.crs = dem_crs

            gdf_extent['Area'] = gdf_extent.geometry.area
            gdf_extent['Extent'] = ""
            gdf_extent.set_value(
                gdf_extent.index[gdf_extent['Area'].idxmax()], "Extent",
                "Channel")  # Set largest Polygon as Main Channel
            gdf_extent.to_file(os.path.join(out_path, "StageExtent.shp"))
            log.info("Generated Water Extent Polygons")

            # Generate islands and spatial join existing islands attributes
            gdf_exterior = geopandas.GeoDataFrame.from_features(
                geopandas.GeoSeries([
                    Polygon(shape) for shape in gdf_extent.geometry.exterior
                ]))
            gs_diff = gdf_exterior.geometry.difference(gdf_extent.geometry)
            if not all(g.is_empty for g in gs_diff):
                gdf_islands_raw = geopandas.GeoDataFrame.from_features(
                    geopandas.GeoSeries(
                        [shape for shape in gs_diff if not shape.is_empty]))
                gdf_islands_explode = geopandas.GeoDataFrame.from_features(
                    gdf_islands_raw.geometry.explode())
                gdf_islands_clean = geopandas.GeoDataFrame.from_features(
                    gdf_islands_explode.buffer(0))
                gdf_islands_clean.crs = dem_crs
                # Creating a GeoDataFrame raises an exception if the topo
                # islands shapefile is an empty feature class.
                if len(fiona.open(rs_topo.getpath("WettedIslands"))) > 0:
                    gdf_topo_islands = geopandas.GeoDataFrame.from_file(
                        rs_topo.getpath("WettedIslands"))
                    gdf_islands_sj = geopandas.sjoin(gdf_islands_clean,
                                                     gdf_topo_islands,
                                                     how="left",
                                                     op="intersects")
                    gdf_islands_sj.drop(["index_right", "OBJECTID"],
                                        axis="columns",
                                        inplace=True)
                    gdf_islands_sj.crs = dem_crs
                    gdf_islands_sj.to_file(
                        os.path.join(out_path, "StageIslands.shp"))

    #todo: Generate Lyr file and copy
    #todo: Generate readme

    return 0
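A trimmed sketch of the Depth-raster step above: build a wet/dry mask with np.greater and polygonize it with rasterio.features.shapes. The 3x3 depth grid and transform are toy values; a real run would reuse the DEM transform and CRS:

import numpy as np
import rasterio
from rasterio import features
from shapely.geometry import shape

# Hypothetical 0.1 m depth grid; NaN marks cells outside the survey.
depth = np.array([[0.0, 0.2, 0.3],
                  [0.0, 0.1, np.nan],
                  [0.0, 0.0, 0.0]], dtype='float32')

wet = np.greater(depth, 0).astype('float32')   # 1 = wet, 0 = dry
valid = np.isfinite(depth)                     # polygonize only valid cells
transform = rasterio.transform.from_origin(0.0, 0.3, 0.1, 0.1)

polygons = [shape(geom)
            for geom, value in features.shapes(wet, mask=valid,
                                               transform=transform)
            if value == 1]
print(len(polygons))   # one connected wet polygon in this toy grid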
Beispiel #52
0
print('Test MAE: %.3f' % error)


# %% plot
#==============================================================================
pyplot.show()
  
timeline = numpy.arange(0, len(test))
baseline = numpy.zeros(len(test))
residual = test - forecast
residual_1 = forecast-test

lower = bound[:, 0]
upper = bound[:, 1]
lower_1 = forecast-lower
overflow = numpy.greater(residual, upper) * residual
overflow[overflow == 0] = numpy.nan
underflow = numpy.greater(residual_1, lower_1) * residual
underflow[underflow == 0] = numpy.nan
  
pyplot.subplot(211)
pyplot.plot(test, 'g', label='groundtruth')
pyplot.plot(forecast, 'b', label='forecast')
#==============================================================================
  
pyplot.fill_between(timeline, lower, upper, alpha=0.5, label='confidence')
pyplot.legend()
pyplot.xlabel('Days')
pyplot.ylabel('Traffic Volume')
pyplot.xticks(numpy.arange(0, len(test), 24), numpy.arange(8, 31))
pyplot.gca().xaxis.grid(True)
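The overflow/underflow trick above works because pyplot skips NaN points; a compact, self-contained version of that confidence-band check on synthetic series (not the traffic data):

import numpy as np

rng = np.random.default_rng(0)
truth = rng.normal(size=50)
forecast = truth + rng.normal(scale=0.2, size=50)
lower, upper = forecast - 0.3, forecast + 0.3

# Keep only the points that fall outside the band; NaN elsewhere so that
# pyplot.plot would skip them.
above = np.where(np.greater(truth, upper), truth, np.nan)
below = np.where(np.less(truth, lower), truth, np.nan)
print(np.count_nonzero(~np.isnan(above)), np.count_nonzero(~np.isnan(below)))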
Beispiel #53
0
    def process_data(self, input_id, input_data, output_data):
        """
        Process one inference and return data to visualize
        """
        # assume the only output is a CHW image where C is the number
        # of classes, H and W are the height and width of the image
        class_data = output_data[output_data.keys()[0]].astype('float32')
        # retain only the top class for each pixel
        class_data = np.argmax(class_data, axis=0).astype('uint8')

        # remember the classes we found
        found_classes = np.unique(class_data)

        # convert using color map (assume 8-bit output)
        if self.map:
            fill_data = (self.map.to_rgba(class_data) * 255).astype('uint8')
        else:
            fill_data = np.ndarray(
                (class_data.shape[0], class_data.shape[1], 4), dtype='uint8')
            for x in xrange(3):
                fill_data[:, :, x] = class_data.copy()

        # Assuming that class 0 is the background
        mask = np.greater(class_data, 0)
        fill_data[:, :, 3] = mask * 255

        # Black mask of non-segmented pixels
        mask_data = np.zeros(fill_data.shape, dtype='uint8')
        mask_data[:, :, 3] = (1 - mask) * 255

        # Generate outlines around segmented classes
        if len(found_classes) > 1:
            # Assuming that class 0 is the background.
            line_mask = np.zeros(class_data.shape, dtype=bool)
            for c in (x for x in found_classes if x != 0):
                c_mask = np.equal(class_data, c)
                # Find the signed distance from the zero contour
                distance = skfmm.distance(c_mask.astype('float32') - 0.5)
                # Accumulate the mask for all classes
                line_width = 3
                line_mask |= c_mask & np.less(distance, line_width)

            # add the outlines to the input image
            for x in xrange(3):
                input_data[:, :, x] = (input_data[:, :, x] * (1 - line_mask) +
                                       fill_data[:, :, x] * line_mask)

        # Input image with outlines
        input_max = input_data.max()
        input_min = input_data.min()
        input_range = input_max - input_min
        if input_range > 255:
            input_data = (input_data - input_min) * 255.0 / input_range
        elif input_min < 0:
            input_data -= input_min
        input_image = PIL.Image.fromarray(input_data.astype('uint8'))
        input_image.format = 'png'

        # Fill image
        fill_image = PIL.Image.fromarray(fill_data)
        fill_image.format = 'png'

        # Mask image
        mask_image = PIL.Image.fromarray(mask_data)
        mask_image.format = 'png'

        # legend for this instance
        legend = self.get_legend_for(found_classes, skip_classes=[0])

        return {
            'input_id': input_id,
            'input_image': input_image,
            'fill_image': fill_image,
            'mask_image': mask_image,
            'legend': legend,
            'class_data': class_data,
        }
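The alpha-channel step above (class 0 treated as background, every other class made opaque) in isolation, on a tiny fake class map:

import numpy as np

class_data = np.array([[0, 0, 1],
                       [0, 2, 2],
                       [0, 0, 0]], dtype='uint8')

fill_data = np.zeros(class_data.shape + (4,), dtype='uint8')
mask = np.greater(class_data, 0)      # True where a real class was predicted
fill_data[..., 3] = mask * 255        # opaque over segmented pixels
background_alpha = (~mask) * 255      # opaque over background pixels
print(fill_data[..., 3])
print(background_alpha)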
Beispiel #54
0
def upright_constraint(env, safety_vars):
    """Pelvis orientantion should remain upright."""
    z_up = safety_vars['z_up']
    return np.greater(z_up, env.limits['torso_upright_constraint'])
Beispiel #55
0
def sigma(theta, E = 2):
    a = 1 / 4 * (kZZq_e2 / (2 * E))**2 * (1 / sin(theta / 2))** 4
    b = np.ones_like(a)
    c = less_equal(a, b)
    d = greater(a, b)
    return a * c + (2 - 1 / a) * d
Beispiel #56
0
def plot_logistic(x_data):
    y_pred_numpy = sess.run(logistic_output, feed_dict={model_input: x_data})
    # threshold the logistic output at 0.5 (np.greater is the elementwise >)
    out = np.greater(y_pred_numpy, 0.5).astype(np.float32)
    return np.squeeze(out)
Beispiel #57
0
   def response(self, specwave, flux=None, z=0, zeropad=0, photons=1):
      '''Get the response of this filter over the specified spectrum.  This
      spectrum can be defined as a spectrum instance, in which case you simply
      need to specify [specwave]  Or, you can specify a wavelength
      and flux vector, in which case, you need to specify both [specwave] (which
      is now taken to be the wavelength vector) and the flux as [flux].  If z is
      supplied, first redshift the spectrum by this amount.  If zeropad is true,
      then the spectrum is assumed to be zero where the filter extends beyond
      its definition, otherwise -1 is returned if the filter extends beyond the
      spectrum's definition.  If photons=1, the integrand is multiplied by the
      wavelength vector and divided by c*h, i.e., the photon flux is
      computed.'''

      # Handle the input parameters
      if flux is None:
         # We must have a spectrum object:
         if not isinstance(specwave, spectrum):
            raise TypeError, \
                  "If specifying just specwave, it must be a spectrum object"
         wave = specwave.wave
         spec = specwave.flux
      else:
         if type(specwave) is not num.ndarray or type(flux) is not num.ndarray:
            raise TypeError, \
                  "If specifying both specwave and flux, they must be arrays"
         if len(num.shape(specwave)) != 1 or len(num.shape(flux)) != 1:
            raise TypeError, \
                  "specwave and flux must be 1D arrays"
         wave = specwave
         spec = flux

      if z > 0:
         swave = wave*(1.+z)
      elif z < 0:
         swave = wave/(1.+z)
      else:
         swave = wave
      if (self.wavemin < swave[0] or self.wavemax > swave[-1]) and not zeropad:
            return(-1.0)

      # Now figure out the limits of the integration:
      x_min = num.minimum.reduce(self.wave)
      x_max = num.maximum.reduce(self.wave)
      try:
         i_min = num.nonzero(num.greater(swave - x_min, 0))[0][0]
      except:
         i_min = 0
      try:
         i_max = num.nonzero(num.greater(swave - x_max, 0))[0][0]
      except:
         i_max = len(swave)-1
   
      if i_min >= 5:
         i_min -= 5
      else:
         i_min = 0
      if i_max <= len(swave)-6:
         i_max += 5
      else:
         i_max = len(swave) - 1

      trim_spec = spec[i_min:i_max+1:subsample]
      trim_wave = swave[i_min:i_max+1:subsample]
      # Now, we need to resample the response wavelengths to the spectrum:
      if interp_method == "spline":
         if self.tck is None:
            self.tck = scipy.interpolate.splrep(self.wave, self.resp, k=1, s=0)
         fresp_int = scipy.interpolate.splev(trim_wave, self.tck)
      else:
         if self.mint is None:
            self.mint = scipy.interpolate.interp1d(self.wave, self.resp, 
                  kind=interp_method)
         fresp_int = self.mint(trim_wave)
      # Zero out any places beyond the definition of the filter:
      fresp_int = num.where(num.less(trim_wave, x_min), 0, fresp_int)
      fresp_int = num.where(num.greater(trim_wave, x_max), 0, fresp_int)

      integrand = fresp_int*trim_spec
      if photons:
         integrand = integrand*trim_wave/ch

      if integ_method=='simpsons':
         result = scipy.integrate.simps(integrand, x=trim_wave, even='avg')
      elif integ_method=='trapz':
         result = scipy.integrate.trapz(integrand, x=trim_wave)
      else:
         result = (trim_wave[-1] - trim_wave[0])/(len(trim_wave)-1)*sum(integrand)

      return(result)
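Stripped of the resampling and edge handling, the core of response() is an integral of response times spectrum, weighted by wavelength/(hc) when a photon flux is requested. A rough sketch with a synthetic top-hat filter; the constants and units here are illustrative, not the module's own `ch`:

import numpy as np
from scipy.integrate import simpson

wave = np.linspace(4000.0, 7000.0, 3001)          # wavelength in Angstroms
flux = np.full_like(wave, 1e-16)                  # flat F_lambda spectrum
resp = np.where((wave > 5000) & (wave < 6000), 0.8, 0.0)   # top-hat filter

hc = 2.998e10 * 6.626e-27                         # c [cm/s] * h [erg s]
photon_integrand = resp * flux * wave * 1e-8 / hc # wavelength converted to cm
photon_flux = simpson(photon_integrand, x=wave)
energy_flux = simpson(resp * flux, x=wave)
print(photon_flux, energy_flux)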
Beispiel #58
0
def included(qid, data_split):
    return np.any(np.greater(data_split.query_labels(qid), 0))
Beispiel #59
0
    def dataprocessor(self):
        dfdemoLoan = pd.merge(self.dataDict['LoanFinSheet'],
                              self.dataDict['CustomerDemo'],
                              how='left',
                              on='CUSTOMERID')
        dictMail = self.analyseMails()
        dfdailymailcnt = pd.merge(dfdemoLoan,
                                  dictMail['freqMailer>5'],
                                  how='left',
                                  on='CUSTOMERID')
        dfdailymailcnt = dfdailymailcnt.apply(self.addBaseRate, axis=1)
        dfdailymailcnt.to_csv(self.filePath + 'mailsbaserate.csv', index=False)
        # adding 2 new features here
        dfdailymailcnt['PAID_PRINCIPALpct'] = dfdailymailcnt.groupby([
            'AGREEMENTID'
        ])['PAID_PRINCIPAL'].transform(lambda x: x.pct_change())
        dfdailymailcnt.PAID_PRINCIPALpct.replace([np.inf, -np.inf],
                                                 np.nan,
                                                 inplace=True)
        dfdailymailcnt['PAID_PRINCIPALpct'].fillna(0, inplace=True)
        dfdailymailcnt['TENURECHANGE'] = dfdailymailcnt[
            'CURRENT_TENOR'] - dfdailymailcnt['ORIGNAL_TENOR']
        # A loan is flagged as prepaid when the tenure was shortened, or when
        # the paid principal jumped by more than 5% while an excess amount was
        # adjusted and nothing is left receivable.
        tenure_reduced = dfdailymailcnt['TENURECHANGE'] < 0
        principal_jump = dfdailymailcnt['PAID_PRINCIPALpct'] > 0.05
        excess_adjusted = np.greater(dfdailymailcnt['EXCESS_ADJUSTED_AMT'], 0)
        nothing_receivable = np.less_equal(dfdailymailcnt['NET_RECEIVABLE'], 0)
        dfdailymailcnt['loanprepay'] = np.where(
            np.logical_or(tenure_reduced,
                          np.logical_and(principal_jump,
                                         np.logical_and(excess_adjusted,
                                                        nothing_receivable))),
            1, 0)
        dfdailymailcnt['loanrefinance'] = np.where(
            dfdailymailcnt['PAID_PRINCIPALpct'] < 0, 1, 0)

        dfnewFeatures = self.featureEngg(dfdailymailcnt)
        dfDatawithTarget = pd.merge(dfnewFeatures,
                                    self.dataDict['train_foreclosure'],
                                    how='left',
                                    on='AGREEMENTID')
        dfDatawithTarget.to_csv(self.filePath + 'allDataNewFeatures.csv',
                                index=False)
        dfgrpd = dfDatawithTarget.groupby(['AGREEMENTID']).mean().reset_index()
        dfUniqueData = (dfDatawithTarget.merge(
            dfgrpd, on=['AGREEMENTID'],
            suffixes=('',
                      '_mean')).drop(list(dfgrpd.columns[1:]),
                                     axis=1).groupby(['AGREEMENTID'
                                                      ]).first().reset_index())
        dfUniqueData.columns = dfUniqueData.columns.str.replace('_mean', '')
        dfUniqueData.to_csv(self.filePath + 'dfUnique.csv', index=False)
        return dfDatawithTarget
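The pct_change / inf-cleanup step near the top of dataprocessor, shown on a toy frame (the values are invented; the column names follow the code above):

import numpy as np
import pandas as pd

df = pd.DataFrame({'AGREEMENTID': [1, 1, 1, 2, 2],
                   'PAID_PRINCIPAL': [0.0, 100.0, 150.0, 50.0, 50.0]})

df['PAID_PRINCIPALpct'] = (df.groupby('AGREEMENTID')['PAID_PRINCIPAL']
                             .transform(lambda x: x.pct_change()))
# pct_change from 0 gives inf; the first row of each group gives NaN.
df['PAID_PRINCIPALpct'] = df['PAID_PRINCIPALpct'].replace([np.inf, -np.inf],
                                                          np.nan)
df['PAID_PRINCIPALpct'] = df['PAID_PRINCIPALpct'].fillna(0)
print(df)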
Beispiel #60
0
    def __num_of_blacks(self, line):
        boundary = numpy.ndarray(len(line))
        boundary.fill(127)
        return numpy.add.reduce(numpy.greater(boundary, line))
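For reference, the same count can be written more directly; a one-line equivalent of __num_of_blacks, assuming `line` is a 1-D array of pixel values:

import numpy as np

def num_of_blacks(line):
    # Count pixels strictly darker than the 127 threshold.
    return np.count_nonzero(np.less(np.asarray(line), 127))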