Example #1
 def estimate_path_flow(self, init_scale = 0.1, step_size = 0.1, max_epoch = 100, adagrad = False):
   # print "Init"
   (f_u, f_sigma) = self.init_path_distribution(init_scale)
   # print "Start loop"
   for i in range(max_epoch):
     if adagrad:
       sum_g_u_square = 1e-6
       sum_g_sigma_square = 1e-6
     seq = np.random.permutation(self.num_data)
     loss = np.float(0)
     for j in seq:
       # print j
       stdnorm = self.generate_standard_normal()
       f_e_one = self.dod_forward(stdnorm, f_u, f_sigma)
       f_e_one = np.maximum(f_e_one, 1e-3)
       one_data_dict = self._get_one_data(j)
       grad, tmp_loss = self.compute_path_flow_grad_and_loss(one_data_dict, f_e_one)
       (g_u, g_sigma) = self.dod_backward(stdnorm, f_u, f_sigma, grad)
       if adagrad:
         sum_g_u_square = sum_g_u_square + np.power(g_u, 2)
         sum_g_sigma_square = sum_g_sigma_square + np.power(g_sigma, 2)
         f_u -= step_size * g_u / np.sqrt(sum_g_u_square)
         f_sigma -= step_size * g_sigma / np.sqrt(sum_g_sigma_square)
       else:
         f_u -= g_u * step_size / np.sqrt(i+1)
         f_sigma -= g_sigma * step_size / np.sqrt(i+1)
       f_u = np.maximum(f_u, 1e-3)
       f_sigma = np.maximum(f_sigma, 1e-3)
       loss += tmp_loss
     print "Epoch:", i, "Loss:", loss / np.float(self.num_data)
   return f_u, f_sigma
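A minimal AdaGrad sketch on a hypothetical 1-D problem (not part of the class above), showing the same accumulate-squared-gradients update used in the adagrad branch:

import numpy as np

# minimize f(u) = (u - 3)^2 with an AdaGrad-style step
u, step_size, sum_g_sq = 0.0, 0.5, 1e-6
for _ in range(500):
    g = 2.0 * (u - 3.0)                 # gradient of (u - 3)^2
    sum_g_sq += g ** 2                  # running sum of squared gradients
    u -= step_size * g / np.sqrt(sum_g_sq)
print(u)                                # moves toward the minimizer at u = 3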
Example #2
 def add_phase(self, filename=None):
     if filename is None:
         filenames = QtGui.QFileDialog.getOpenFileNames(
             self.view, "Load Phase(s).", self.working_dir['phase'])
         if len(filenames):
             self.working_dir['phase'] = os.path.dirname(str(filenames[0]))
             for filename in filenames:
                 filename = str(filename)
                 self.phase_data.add_phase(filename)
                 self.phase_lw_items.append(
                     self.view.phase_lw.addItem(get_base_name(filename)))
                 if self.view.phase_apply_to_all_cb.isChecked():
                     self.phase_data.phases[-1].compute_d(
                         pressure=np.float(
                             self.view.phase_pressure_sb.value()),
                         temperature=np.float(
                             self.view.phase_temperature_sb.value()))
                 self.phase_data.get_lines_d(-1)
                 self.view.phase_lw.setCurrentRow(
                     len(self.phase_data.phases) - 1)
                 self.add_phase_plot()
     else:
         self.phase_data.add_phase(filename)
         self.phase_lw_items.append(
             self.view.phase_lw.addItem(get_base_name(filename)))
         if self.view.phase_apply_to_all_cb.isChecked():
             self.phase_data.phases[-1].compute_d(
                 pressure=np.float(self.view.phase_pressure_sb.value()),
                 temperature=np.float(
                     self.view.phase_temperature_sb.value()))
         self.phase_data.get_lines_d(-1)
         self.view.phase_lw.setCurrentRow(len(self.phase_data.phases) - 1)
         self.add_phase_plot()
         self.working_dir['phase'] = os.path.dirname(str(filename))
    def _vertex_current_flow_betweenness_python(self, i):
        """Python version of VCFB
        """
        # get required matrices
        admittance = self.get_admittance()
        R = self.get_R()

        # set params
        Is = It = np.float(1.0)

        # alloc output
        VCFB = np.float(0)

        for t in range(self.N):
            for s in range(t):
                I = 0.0
                if i == t or i == s:
                    pass
                else:
                    for j in range(self.N):
                        I += admittance[i][j] * np.abs(
                            Is*(R[i][s]-R[j][s]) + It*(R[j][t]-R[i][t]))/2.
                VCFB += 2.*I/(self.N*(self.N-1))

        return VCFB
Example #4
def update_batch_cd1(para, data_v, layer=1):
    eta = para['eta']
    max_bsize = data_v.shape[0]
    if layer == 0: # input layer, otherwise they are binary
        data_h, gibbs_v, gibbs_h = sampling_nb(para, data_v)
    else:
        data_h, gibbs_v, gibbs_h = sampling(para, data_v)
    
    pos_delta_w = np.zeros((para['v_num'], para['h_num']))
    neg_delta_w = np.zeros((para['v_num'], para['h_num']))
    for i in range(max_bsize):
        pos_delta_w += matu.matrix_times(data_v[i], data_h[i])
        neg_delta_w += matu.matrix_times(gibbs_v[i], gibbs_h[i])    
    delta_w_pos = eta * pos_delta_w/np.float(max_bsize)
    delta_w_neg = eta * neg_delta_w/np.float(max_bsize)
    para['w'] += delta_w_pos
    para['w'] -= delta_w_neg
    delta_a = data_v - gibbs_v
    delta_b = data_h - gibbs_h
    delta_a = eta * np.average(delta_a,0)
    delta_b = eta * np.average(delta_b,0)
    para['a'] += delta_a
    para['b'] += delta_b
    #print delta_w_pos.max(), delta_w_neg.max()
    return para
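A hedged sketch of the weight-update term above, assuming matu.matrix_times is an outer product; np.einsum batches the per-sample outer products that the loop accumulates (all arrays here are synthetic):

import numpy as np

rng = np.random.default_rng(0)
max_bsize, v_num, h_num, eta = 8, 6, 4, 0.1
data_v, data_h = rng.random((max_bsize, v_num)), rng.random((max_bsize, h_num))
gibbs_v, gibbs_h = rng.random((max_bsize, v_num)), rng.random((max_bsize, h_num))

# batched outer products, averaged over the mini-batch
pos = np.einsum('bi,bj->ij', data_v, data_h) / float(max_bsize)
neg = np.einsum('bi,bj->ij', gibbs_v, gibbs_h) / float(max_bsize)
delta_w = eta * (pos - neg)
print(delta_w.shape)                    # (v_num, h_num)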
    def _compute_pvalue(obs_val, sim):
        """
        Compute the p-value given an observed value of a test statistic
        and some simulations of that same test statistic.

        Parameters
        ----------
        obs_val : float
            The observed value of the test statistic in question

        sim: iterable
            A list or array of simulated values for the test statistic

        Returns
        -------
        pval : float [0, 1]
            The p-value for the test statistic given the simulations.

        """

        # cast the simulations as a numpy array
        sim = np.array(sim)

        # find all simulations that are larger than
        # the observed value
        ntail = sim[sim > obs_val].shape[0]

        # divide by the total number of simulations
        pval = np.float(ntail) / np.float(sim.shape[0])

        return pval
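A minimal usage sketch (hypothetical numbers): the p-value is simply the fraction of simulated statistics that exceed the observed one.

import numpy as np

rng = np.random.default_rng(42)
sim = rng.normal(size=10000)            # simulated test statistics
obs_val = 1.64                          # observed value
ntail = sim[sim > obs_val].shape[0]
pval = float(ntail) / float(sim.shape[0])
print(pval)                             # roughly 0.05 for a standard normal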
def getData(filename):
    line = filename.readline()
    i = 0
    data_loss = []
    data_acc = []
    Epoch_data_loss = []
    Epoch_data_acc = []
    data_loss = []
    data_acc = []
    while line:
        # print line.split("  ",)[1:]
        # print i
        # Get the loss and accuracy for each weight update
        line_data = line.split(" ",)
        # print line_data[0]

        # if 'loss:' in line_data:
        # 	#print  i
        # 	data_loss.append(np.float(line_data[line_data.index('loss:')+1]))
        # 	data_acc.append(np.float(line_data[line_data.index('loss:')+4][:6]))

        # Get the loss and accuracy for each epoch
        if 'val_loss:' in line_data:
            # print  i
            Epoch_data_loss.append(
                np.float(line_data[line_data.index('val_loss:') + 1]))
            Epoch_data_acc.append(
                np.float(line_data[line_data.index('val_loss:') + 4][:6]))

        line = filename.readline()
        i = i + 1
    return Epoch_data_loss, Epoch_data_acc, data_loss, data_acc
Example #7
    def fit(self, X, y):
        n_samples = X.shape[0]
        n_features = X.shape[1]
        n_classes = 2
        n_fvalues = 2

        if n_samples != len(y):
            raise ValueError('Mismatched number of samples.')

        nY = np.zeros(n_classes, dtype=np.int)
        for i in range(n_samples):
            nY[y[i]] += 1

        self.pY_ = np.empty(n_classes, dtype=np.float)
        for i in range(n_classes):
            self.pY_[i] = nY[i] / np.float(n_samples)

        nXY = np.zeros((n_features, n_fvalues, n_classes), dtype=np.int)
        for i in range(n_samples):
            for j in range(n_features):
                nXY[j, X[i, j], y[i]] += 1

        self.pXgY_ = np.empty((n_features, n_fvalues, n_classes), dtype=np.float)
        for j in range(n_features):
            for xi in range(n_fvalues):
                for yi in range(n_classes):
                    self.pXgY_[j, xi, yi] = nXY[j, xi, yi] / np.float(nY[yi])
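A hedged sketch of the same counting on a tiny hand-made binary dataset (hypothetical values), computing the class priors pY_ and the per-feature conditionals pXgY_ directly with numpy:

import numpy as np

X = np.array([[0, 1], [1, 1], [0, 0], [1, 0], [1, 1]])
y = np.array([0, 1, 0, 1, 1])

pY = np.bincount(y, minlength=2) / float(len(y))      # class priors
pXgY = np.empty((X.shape[1], 2, 2))
for j in range(X.shape[1]):
    for xi in range(2):
        for yi in range(2):
            pXgY[j, xi, yi] = np.sum((X[:, j] == xi) & (y == yi)) / float(np.sum(y == yi))
print(pY)                               # P(y)
print(pXgY[0])                          # P(x_0 = xi | y = yi)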
def get_f1(truth, predict):
    """
    truth and predict are arrays in which values are True or False
    """
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    truth = truth[:]
    predict = predict[:]

    tp = predict[truth==True].sum()
    fp = predict[truth==False].sum()
    fn = len(predict[truth==True]) - tp
    tn = len(predict[truth==False]) - fp

    if tp + fp != 0:
        precision = np.float(tp)/np.float(tp + fp)
    else:
        precision = 0
    recall = np.float(tp)/np.float(tp + fn)

    if precision == 0 and recall == 0:
        return 0, 0, 0

    f1 = 2.*precision*recall / (precision + recall)
    return f1, precision, recall
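A quick numeric sketch with hypothetical boolean arrays: precision = tp/(tp+fp), recall = tp/(tp+fn), and F1 is their harmonic mean.

import numpy as np

truth   = np.array([True, True, True, False, False, False])
predict = np.array([True, False, True, True, False, False])
tp = predict[truth].sum()               # 2 true positives
fp = predict[~truth].sum()              # 1 false positive
fn = len(predict[truth]) - tp           # 1 false negative
precision, recall = tp / float(tp + fp), tp / float(tp + fn)
print(2. * precision * recall / (precision + recall), precision, recall)  # all 0.666...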
 def __init__(self, coefficients, p1=None, p2=None, p3=None):
     """
     Initializes a plane from the 4 coefficients a, b, c and d of ax + by + cz + d = 0
     :param coefficients: abcd coefficients of the plane
     """
     #Initializes the normal vector
     self.normal_vector = np.array([coefficients[0], coefficients[1], coefficients[2]], np.float)
     normv = np.linalg.norm(self.normal_vector)
     self.normal_vector /= normv
     nonzeros = np.argwhere(self.normal_vector != 0.0).flatten()
     zeros = list(set(range(3))-set(nonzeros))
     if len(nonzeros) == 0:
         raise ValueError("Normal vector is equal to 0.0")
     if self.normal_vector[nonzeros[0]] < 0.0:
         self.normal_vector = -self.normal_vector
         dd = -np.float(coefficients[3]) / normv
     else:
         dd = np.float(coefficients[3]) / normv
     self._coefficients = np.array([self.normal_vector[0],
                                   self.normal_vector[1],
                                   self.normal_vector[2],
                                   dd], np.float)
     self._crosses_origin = np.isclose(dd, 0.0, atol=1e-7, rtol=0.0)
     self.p1 = p1
     self.p2 = p2
     self.p3 = p3
     #Initializes 3 points belonging to the plane (useful for some methods)
     if self.p1 is None:
         self.init_3points(nonzeros, zeros)
     self.vector_to_origin = dd * self.normal_vector
Example #10
def gauss_kern(xsize, ysize=None):
    """ Returns a normalized 2D gauss kernel for convolutions """
    xsize = int(xsize)
    ysize = ysize and int(ysize) or xsize
    x, y = mgrid[-xsize:xsize+1, -ysize:ysize+1]
    g = np.exp(-(x**2/float(xsize) + y**2/float(ysize)))
    return g / g.sum()
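A hedged usage sketch, assuming the gauss_kern above is in scope (with mgrid taken from numpy) and using scipy.signal.convolve2d, which is not part of the original snippet:

import numpy as np
from scipy.signal import convolve2d

kernel = gauss_kern(3)                  # 7x7 normalized Gaussian
image = np.random.default_rng(0).random((32, 32))
smoothed = convolve2d(image, kernel, mode='same', boundary='symm')
print(kernel.shape, smoothed.shape)     # (7, 7) (32, 32)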
	def getTimeseriesNemData(self, state, startDate, endDate):
		# AEMO data is in AEST - GMT + 10
		tz = timezone.SydneyTimezone()
		startDate = startDate.astimezone(tz)
		endDate = endDate.astimezone(tz)

		folderPath = "./nemData"
		
		data = np.loadtxt(folderPath+"/"+state+".csv", delimiter=',',dtype='string',skiprows=1, usecols=None, unpack=False)
		timeseries = None
		for i in np.arange(data.shape[0]):
			date = datetime.datetime(year=int(data[i][0]), month = int(data[i][1]), day = int(data[i][2]), hour = int(data[i][3]), minute=int(data[i][4]), tzinfo=timezone.SydneyTimezone())
			if date >= startDate and date <= endDate:
				timePeriod = np.zeros(shape=(7))
				timePeriod[0] = int(data[i][0])
				timePeriod[1] = int(data[i][1])
				timePeriod[2] = int(data[i][2])
				timePeriod[3] = int(data[i][3])
				timePeriod[4] = int(data[i][4])
				timePeriod[5] = np.float(data[i][5])
				timePeriod[6] = np.float(data[i][6])
				if timeseries is None:
					timeseries = timePeriod
				else:
					timeseries = np.vstack((timeseries, timePeriod))

		return timeseries
Example #12
 def count(self):
     '''
     Estimate the cardinality count.
     See: http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694 
     '''
     k = self.hashvalues.size
     return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0
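A small simulation sketch of the estimator k / sum(h_i / H) - 1, where the h_i are the per-permutation minima and H is the top of the hash range (the 32-bit _max_hash here is an assumption):

import numpy as np

_max_hash = (1 << 32) - 1
k, n = 256, 10000                       # k permutations, n distinct items
rng = np.random.default_rng(0)
hashvalues = rng.integers(0, _max_hash, size=(k, n)).min(axis=1)  # min hash per permutation
estimate = float(k) / np.sum(hashvalues / float(_max_hash)) - 1.0
print(estimate)                         # close to n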
def incentive(array, weight_factor):
    """Calculate the incentivization factor, for encouraging Tor exit relay
    operators in countries with less exit relays to run more nodes.

    :param array: A two-dimensional 3xN array of country codes, exit
        probabilities, and factors.
    :param float weight_factor: Should be winsorized standard deviation of
        exit probabilities, or trimmed standard deviation of exit
        probabilities.
    """
    array_copy  = numpy.asarray(array[:,1], dtype=numpy.float)
    main_stddev = numpy.float(array_copy.std())

    incentivized = list()
    for ccname, pexit, _ in array[::]:
        ccname = numpy.string_(ccname)  ## oh, Python2.x, how i despise you…
        pexit  = numpy.float(pexit)

        weighted = main_stddev - weight_factor + pexit
        inverted = 1. / (abs(weighted)**2)
        shifted  = inverted * 10.
        factor   = shifted

        incentivized.append({'cc': ccname,
                             'p_exit': pexit,
                             'incentive_factor': factor})
    return incentivized
Example #14
def make_tag_cloud(data, can_be_noun_arg, process_option='freqs'):
    stop_words = sw.words()
    process_f = {
            'concatenate': lambda : concatenate(data, can_be_noun_arg, stop_words),
            'freqs': lambda : freq_weight(data, can_be_noun_arg, stop_words),
            'race' : lambda : race_tfidf(data, can_be_noun_arg, stop_words)
    }
    freqs = process_f[process_option]()
    if type(freqs) == type([]):
        freqs = freqs[:30]
        # normalize freqs in case they are counts
        sum_freqs = sum(x for _, x in freqs)
        freqs = [(w, np.float(f)/sum_freqs) for w,f in freqs]
        #pprint(freqs)
        #return
        tags = make_tags(freqs, maxsize=80)
        fname = 'noun_last_words_{}.png'.format(process_option)
        if not can_be_noun_arg:
            fname = 'not_'+fname
        create_tag_image(tags, fname, size=(900, 600), fontname='Lobster')
    elif type(freqs)==type({}):
        for k in freqs:
            top_freqs = freqs[k][:30]
            # normalize    
            sum_freqs = sum(x for _, x in top_freqs)
            top_freqs = [(w, np.float(f)/sum_freqs) for w,f in top_freqs]
            print(top_freqs)
            tags = make_tags(top_freqs, maxsize=15)
            fname = 'noun_last_words_{}_{}.png'.format(process_option,k)
            create_tag_image(tags, fname, size=(900, 600), fontname='Lobster')
Example #15
def jaccard(*mhs):
    '''
    Compute Jaccard similarity measure for multiple of MinHash objects.
    '''
    if len(mhs) < 2:
        raise ValueError("Less than 2 MinHash objects were given")
    seed = mhs[0].seed
    if any(seed != m.seed for m in mhs):
        raise ValueError("Cannot compare MinHash objects with\
                different seeds")
    num_perm = mhs[0].hashvalues.size
    if any(num_perm != m.hashvalues.size for m in mhs):
        raise ValueError("Cannot compare MinHash objects with\
                different numbers of permutation functions")
    if len(mhs) == 2:
        m1, m2 = mhs
        return np.float(np.count_nonzero(m1.hashvalues == m2.hashvalues)) /\
                np.float(m1.hashvalues.size)
    # TODO: find a way to compute intersection for more than 2 using numpy
    intersection = 0
    for i in range(num_perm):
        phv = mhs[0].hashvalues[i]
        if all(phv == m.hashvalues[i] for m in mhs):
            intersection += 1
    return float(intersection) / float(num_perm)
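A hedged two-object sketch of the matched-slot ratio used above, on synthetic hash arrays rather than real MinHash objects:

import numpy as np

rng = np.random.default_rng(1)
h1 = rng.integers(0, 1 << 32, size=128)
h2 = h1.copy()
h2[:64] = rng.integers(0, 1 << 32, size=64)   # disagree in half of the slots
print(np.count_nonzero(h1 == h2) / float(h1.size))   # approximately 0.5 by construction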
Example #16
    def integrate_single(self, integration_time, sample_rate):
        """
        :param integration_time:
        :param sample_rate: Must be a power of 2
        :return:
        """

        # Set the sample rate
        self.write("SRAT %d"%(np.log2(sample_rate)+4))

        # Deletes the data buffers
        self.write("REST")
        # Starts or resumes the data storage
        self.write("STRT")

        t0 = time.time()

        while time.time() - t0 < integration_time:
            time.sleep(0.01)

        # Pause data storage
        self.write("PAUS")

        # Get number of points in buffer:
        noof_points = np.int(self.query("SPTS?"))

        # Read out buffer
        raw_ch1 = [_f for _f in self.query("TRCA? 1,0,%d"%(noof_points-1)).split(',') if _f]
        raw_ch2 = [_f for _f in self.query("TRCA? 2,0,%d"%(noof_points-1)).split(',') if _f]

        ch1 = [np.float(raw_ch1[i]) for i in range(len(raw_ch1))]
        ch2 = [np.float(raw_ch2[i]) for i in range(len(raw_ch2))]

        return ch1, ch2
    def _compute_rdiff_stats(self, var1, var2):
        """Compute the relative difference statistics of var1 and var2.

        vars_differ must already be set for self."""
        if (not self.vars_differ() or len(var1) == 0):
            rdiff_max = np.float('nan')
            rdiff_maxloc = -1
            rdiff_logavg = np.float('nan')
        else:
            differences = self._compute_diffs(var1, var2) != 0
            diff_vals = self._compute_diffs(var1, var2)[differences]
            maxvals = np.maximum(np.abs(var1), np.abs(var2))[differences]
            rdiff = np.abs(diff_vals) / maxvals.astype(np.float)
            rdiff_max = np.max(rdiff)
            rdiff_maxloc = self._compute_max_loc(rdiff, differences)
            numDiffs = np.sum(differences)
            if numDiffs > 0:
                # Compute the sum of logs by taking the products of the logands; +1 if the logand is 0
                # Then take the log of the result
                # Since the log(1) is 0, this does not affect the final sum
                rdiff_prod = np.prod(rdiff)
                if rdiff_prod != np.float('inf') and rdiff_prod > 0.0:
                    rdiff_logsum = -math.log10(rdiff_prod)
                else:
                    # We need to use a different (slower, less accurate) method of computing this,
                    # the product either overflowed or underflowed due to the small exponent
                    rdiff_logs = np.log10(rdiff)
                    rdiff_logsum = -np.sum(rdiff_logs)
                rdiff_logavg = rdiff_logsum / np.sum(differences)
            else:
                rdiff_logavg = np.float('nan')
        return rdiff_max, rdiff_maxloc, rdiff_logavg
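A tiny numeric check of the product-of-logands shortcut used above: -log10(prod(rdiff)) equals -sum(log10(rdiff)) as long as the product neither underflows nor overflows (values are hypothetical):

import math
import numpy as np

rdiff = np.array([1e-3, 5e-4, 2e-2])
fast = -math.log10(np.prod(rdiff))      # one log call
slow = -np.sum(np.log10(rdiff))         # one log per element
print(fast, slow)                       # both 8.0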
def get_raw_data(utmin,utmax,freq='10'):
    '''
    function to turn the UT range into filenames and read in the raw data
    '''
    ltmin=utmin-7.0 #convert to local time first
    ltmax=utmax-7.0
    data_dir='/cofe/flight_data/'+str(freq)+'GHz/'
    if (ltmin<24) & (ltmax <24):
        data_dir=data_dir+'20110917/'     
    elif (ltmin>24) & (ltmax >24):
        data_dir=data_dir+'20110917/'
        ltmin=ltmin-24.
        ltmax=ltmax-24.
    
    #list the available files
    fl=glob(data_dir+'*.dat')
    ltfl=[]
    for file in fl:
        ltfl.append(file[-12:-4])
    ltflhours=np.zeros(len(ltfl))
    for i,lt in enumerate(ltfl):
        ltflhours[i]=np.float(lt[0:2])+np.float(lt[2:4])/60.+np.float(lt[4:6])/3600.
    fl=np.array(fl)
    ltflhours=np.array(ltflhours,dtype=float)
    files2read=fl[(ltflhours>ltmin) & (ltflhours<ltmax)]
    len(files2read)
    d=datparsing.read_raw(files2read)
    return(d)
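A quick sketch of the filename-fragment-to-hours conversion used in the loop above (the fragment is hypothetical):

lt = "134530"                           # HHMMSS pulled from a file name
hours = float(lt[0:2]) + float(lt[2:4]) / 60. + float(lt[4:6]) / 3600.
print(hours)                            # 13.7583...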
Example #19
def cover_fraction(d12, ls1, ls2, dist, bsz1=1, bsz2=1):
    '''
    show what fraction of the tracks in one tractography 'belong' 
    to a skeleton track which is within mdf distance 'dist' of the other tractography  
    '''

    #find bundles of sizes bigger than bsz
    small1=[i for (i,l) in enumerate(ls1) if ls1[i]<bsz1]
    small2=[i for (i,l) in enumerate(ls2) if ls2[i]<bsz2]
    
    #print 'sh',d12.shape
    m12=d12.copy()
    if small2!=[]:
        m12[:,small2]=np.Inf
    if small1!=[]:
        m12[small1,:]=np.Inf
    #print 'sh',m12.shape    
    #find how many skeleton-B neighbours are nearer than dist
    near12 = np.sum(m12<=dist,axis=1)
    near21 = np.sum(m12<=dist,axis=0)

    #find their sizes    
    #sizes1 = [ls1[t] for t in np.where(np.sum(d12<=dist,axis=1)>0)[0]]
    sizes1 = [ls1[i] for i in range(len(near12)) if near12[i] > 0]
    #sizes2 = [ls2[t] for t in np.where(np.sum(d12<=dist,axis=0)>0)[0]]
    sizes2 = [ls2[i] for i in range(len(near21)) if near21[i] > 0]

    #calculate their coverage
    #total1 = np.float(np.sum(ls1))
    #total2 = np.float(np.sum(ls2))
    total1 = np.sum([ls1[t] for t in range(len(ls1)) if t not in small1])
    #total2 = np.float(np.sum(ls2))
    total2 = np.sum([ls2[t] for t in range(len(ls2)) if t not in small2])
    return np.sum(sizes1)/np.float(total1),np.sum(sizes2)/np.float(total2)
Example #20
    def __init__(self, central_longitude=0.0, satellite_height=35785831,
                 false_easting=0, false_northing=0, globe=None):
        proj4_params = [('proj', 'geos'), ('lon_0', central_longitude),
                        ('lat_0', 0), ('h', satellite_height),
                        ('x_0', false_easting), ('y_0', false_northing),
                        ('units', 'm')]
        super(Geostationary, self).__init__(proj4_params, globe=globe)

        # TODO: Factor this out, particularly if there are other places using
        # it (currently: Stereographic & Geostationary). (#340)
        def ellipse(semimajor=2, semiminor=1, easting=0, northing=0, n=200):
            t = np.linspace(0, 2 * np.pi, n)
            coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
            coords += ([easting], [northing])
            return coords

        # TODO: Let the globe return the semimajor axis always.
        a = np.float(self.globe.semimajor_axis or 6378137.0)
        b = np.float(self.globe.semiminor_axis or 6378137.0)
        h = np.float(satellite_height)
        max_x = h * math.atan(a / (a + h))
        max_y = h * math.atan(b / (b + h))

        coords = ellipse(max_x, max_y,
                         false_easting, false_northing, 60)
        coords = tuple(tuple(pair) for pair in coords.T)
        self._boundary = sgeom.polygon.LinearRing(coords)
        self._xlim = self._boundary.bounds[::2]
        self._ylim = self._boundary.bounds[1::2]
        self._threshold = np.diff(self._xlim)[0] * 0.02
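A quick numeric check of the boundary extent formula above, max_x = h * atan(a / (a + h)), using the constructor defaults (a = b = 6378137 m, h = 35785831 m); the printed values are approximate:

import math

a = b = 6378137.0
h = 35785831.0
print(h * math.atan(a / (a + h)), h * math.atan(b / (b + h)))   # roughly 5.4e6 m each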
Example #21
    def _add_phase(self, filename):
        try:
            if filename.endswith("jcpds"):
                self.phase_model.add_jcpds(filename)
            elif filename.endswith(".cif"):
                self.cif_conversion_dialog.exec_()
                self.phase_model.add_cif(filename,
                                         self.cif_conversion_dialog.int_cutoff,
                                         self.cif_conversion_dialog.min_d_spacing)

            if self.widget.phase_apply_to_all_cb.isChecked():
                pressure = np.float(self.widget.phase_pressure_sb.value())
                temperature = np.float(self.widget.phase_temperature_sb.value())
                self.phase_model.phases[-1].compute_d(pressure=pressure,
                                                      temperature=temperature)
            else:
                pressure = 0
                temperature = 298

            self.phase_model.get_lines_d(-1)
            color = self.add_phase_plot()
            self.widget.add_phase(get_base_name(filename), '#%02x%02x%02x' % (color[0], color[1], color[2]))

            self.widget.set_phase_pressure(len(self.phase_model.phases) - 1, pressure)
            self.update_phase_temperature(len(self.phase_model.phases) - 1, temperature)
            if self.jcpds_editor_controller.active:
                self.jcpds_editor_controller.show_phase(self.phase_model.phases[-1])
        except PhaseLoadError as e:
            self.widget.show_error_msg(
                'Could not load:\n\n{}.\n\nPlease check if the format of the input file is correct.'. \
                format(e.filename))
Example #22
    def readPulsar(self, psr, psrname):
        print("WARNING: readPulsar has been deprecated!")
        psr.name = psrname

        # Read the content of the par/tim files in a string
        psr.parfile_content = str(self.getData(psrname, 'parfile', required=False))
        psr.timfile_content = str(self.getData(psrname, 'timfile', required=False))

        # Read the timing model parameter descriptions
        psr.ptmdescription = list(map(str, self.getData(psrname, 'tmp_name')))
        psr.ptmpars = np.array(self.getData(psrname, 'tmp_valpre'))
        psr.ptmparerrs = np.array(self.getData(psrname, 'tmp_errpre'))
        psr.flags = list(map(str, self.getData(psrname, 'efacequad', 'Flags')))

        # Read the position of the pulsar
        if self.hasField(psrname, 'raj'):
            psr.raj = np.float(self.getData(psrname, 'raj'))
        else:
            rajind = np.flatnonzero(np.array(psr.ptmdescription) == 'RAJ')
            psr.raj = np.array(self.getData(psrname, 'tmp_valpre'))[rajind]

        if self.hasField(psrname, 'decj'):
            psr.decj = np.float(self.getData(psrname, 'decj'))
        else:
            decjind = np.flatnonzero(np.array(psr.ptmdescription) == 'DECJ')
            psr.decj = np.array(self.getData(psrname, 'tmp_valpre'))[decjind]

        # Obtain residuals, TOAs, etc.
        psr.toas = np.array(self.getData(psrname, 'TOAs'))
        psr.toaerrs = np.array(self.getData(psrname, 'toaErr'))
        psr.prefitresiduals = np.array(self.getData(psrname, 'prefitRes'))
        psr.residuals = np.array(self.getData(psrname, 'postfitRes'))
        psr.detresiduals = np.array(self.getData(psrname, 'prefitRes'))
        psr.freqs = np.array(self.getData(psrname, 'freq'))
        psr.Mmat = np.array(self.getData(psrname, 'designmatrix'))
    def define_power_sweep_vec(self, pow_vec, cwfrequency, BW, time, set_time='dwell'):
        '''
        Define a sweep in power with a power vector.

        Input:
            pow_vec [dBm] : define the power vector
            cwfrequency [GHz]: constant wave frequency of the VNA
            time [s]: if set_time==dwell it is a delay for each partial measurement in the segment
                      if set_time==sweeptime, we define the duration of the sweep in the segment
            BW [Hz]: define the Bandwidth

        Output:
            None
        '''
        logging.debug(__name__ + ' : making a sweep in power')

        #Delete all the remaining segments from previous measurement
        self._visainstrument.write('SEGM:DEL:ALL')

        if np.float(self._visainstrument.query('SEGM:COUNT?')) != 0:
            print('Error: segments not deleted')

        point = len(pow_vec)
        for i in np.arange(point):
            self.define_segment(i+1, cwfrequency, cwfrequency,1, pow_vec[i],time, BW, set_time )

        if np.float(self._visainstrument.query('SEGM:COUNT?')) != point:
            print('Error: not the number of segments wanted')
Example #24
def _percentage_distance(canny_in, canny_out, r):
    diamond = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    E_1 = scipy.ndimage.morphology.binary_dilation(canny_in, structure=diamond, iterations=r)
    E_2 = scipy.ndimage.morphology.binary_dilation(canny_out, structure=diamond, iterations=r)

    return 1.0 - np.float(np.sum(E_1 & E_2))/np.float(np.sum(E_1))
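A hedged sketch of the overlap measure above on two tiny synthetic edge maps; scipy.ndimage.binary_dilation is the current spelling of the deprecated scipy.ndimage.morphology call used above:

import numpy as np
import scipy.ndimage

diamond = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
canny_in = np.zeros((9, 9), dtype=bool)
canny_in[4, 2:7] = True
canny_out = np.zeros((9, 9), dtype=bool)
canny_out[5, 2:7] = True
E_1 = scipy.ndimage.binary_dilation(canny_in, structure=diamond, iterations=1)
E_2 = scipy.ndimage.binary_dilation(canny_out, structure=diamond, iterations=1)
print(1.0 - float(np.sum(E_1 & E_2)) / float(np.sum(E_1)))     # fraction of E_1 not covered by E_2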
    def define_power_sweep(self, startpow, stoppow, steppow, cwfrequency, BW, time, set_time='dwell'):
        '''
        Make a sweep in power where startpow can be greater than stoppow

        Input:
            startpow [dBm] : define the power at which begin the sweep
            stoppow [dBm]: define the power at which finish the sweep
            steppow [dBm]: define the step of the sweep
            cwfrequency [GHz]: constant wave frequency of the VNA
            time [s]: if set_time==dwell it is a delay for each partial measurement in the segment
                      if set_time==sweeptime, we define the duration of the sweep in the segment
            BW [Hz]: define the Bandwidth

        Output:
            None
        '''
        logging.debug(__name__ + ' : making a sweep in power from %s to %s with a step of %s' % (startpow, stoppow, steppow))

        #Destroy all the remaining segments from previous measurement
        self._visainstrument.write('SEGM:DEL:ALL')

        if np.float(self._visainstrument.query('SEGM:COUNT?'))!=0:
            print('Error: segments not deleted')

        pow_vec=np.arange(startpow, stoppow + steppow, steppow)
        point=len(pow_vec)
        for i in np.arange(point):
            self.define_segment(i+1, cwfrequency, cwfrequency,1, pow_vec[i],time, BW, set_time )

        if np.float(self._visainstrument.query('SEGM:COUNT?'))!=point:
            print('Error: not the number of segments wanted')
def retick(ax, axname):
  if axname == 'x':
    rng = ax.get_xlim()
  elif axname == 'y':
    rng = ax.get_ylim()
  else:
    rng = ax.get_zlim()

  mn = np.int(np.floor(rng[0]))
  mx = np.int(np.ceil(rng[1]))
  ticks = []
  ticklabels = []
  for i in range(mn, mx):
    if np.float(i) >= rng[0]:
      ticks.append(np.float(i))
      ticklabels.append('$10^{' + ("%d" % i) + '}$')

  if axname == 'x':
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticklabels)
  elif axname == 'y':
    ax.set_yticks(ticks)
    ax.set_yticklabels(ticklabels)
  else:
    ax.set_zticks(ticks)
    ax.set_zticklabels(ticklabels)

  return
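A hedged usage sketch, assuming retick above is in scope and that the axis already holds log10-transformed values (the data are hypothetical):

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(0, 10, 200)
ax.plot(x, np.log10(1 + x**4))          # axis carries log10 values directly
retick(ax, 'y')                         # ticks become 10^0, 10^1, ... within the y-range
plt.show()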
def buildMatrixExplicit(ratings):  ## Build the matrix in a binary way
    u = ratings['userid'].drop_duplicates() ## we extract the userid's of this region
    u.index = range(0,len(u))   ## we change the index so it will go from 0 to the number of users
    b = ratings['band'].drop_duplicates()  ## An array with the names of the bands
    b.index = range(0,len(b))   ## We change the index of the array so each band has an unique number
    pairs = ratings.loc[:,["userid","band","rating"]] ## For this method we need the userid, and the band. Later on we will count the number of times a band appears for each user profile
    pairs.loc[pairs.rating=="Yes",'rating'] = np.float(5.0) ## We change the implicit values
    pairs.loc[pairs.rating=="Maybe",'rating'] = np.float(4.0)
    pairs['rating'] = pairs['rating'].astype(float) ## We change the column of the ratings to float
    g = pairs.groupby(['userid'])
    rows = []
    cols = []
    rat = []
    for name, group in g:   ## name is the userid by which we group before, group is all the band names and its ratings
        ### We are going to group each of the user groups by band to calculate the mean ratings for each band
        g2 = group.loc[:,['band','rating']].groupby('band')
        meanRatings = g2['rating'].mean()
        z  = list(group['band']) ## A list with the bands that the user have been to. The names can be repeated
        d = Counter(z)  ## A dictionary with the distinct names of bands and the number of occurences of each one
        for band, count in d.items(): ## Eg. band "Arctic monkeys" count = 3
            cols.append(b[b==band].index[0]) ## We append the position of the band in the matrix
            freq = len(list(c for c in d.values() if c <= count)) ## The number of bands whose count <= (current band count)
            r = (meanRatings[band] * freq)/len(d) ## We do this in a scale [0,5]
            rat.append(r)
        userNo = (u[u==name].index[0])### name is the user
        rows.extend([userNo]*len(d)) ## We extend with the row position of the user repeated n times where n is the number of columns
    result = csr_matrix((list(map(float, rat)), (rows, cols)), shape=(len(u), len(b)))
    return(result)
		def find_extrema():
			import numpy as np
			extrema_1 = np.float(self.x_initial + (- 2*self.c[0] + (4*self.c[0]**2 - 12*self.b[0]*self.d[0])**.5)/(6*self.d[0]))
			extrema_2 = np.float(self.x_initial + (- 2*self.c[0] - (4*self.c[0]**2 - 12*self.b[0]*self.d[0])**.5)/(6*self.d[0]))
			extrema_3 = np.float(self.x_break + (- 2*self.c[1] + (4*self.c[1]**2 - 12*self.b[1]*self.d[1])**.5)/(6*self.d[1]))
			extrema_4 = np.float(self.x_break + (- 2*self.c[1] - (4*self.c[1]**2 - 12*self.b[1]*self.d[1])**.5)/(6*self.d[1]))
			return(extrema_1,extrema_2,extrema_3,extrema_4)
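A hedged numeric check of the closed form above: for a cubic y = b*x + c*x**2 + d*x**3 (offset x_initial = 0, hypothetical coefficients), the extrema sit at (-2c +/- sqrt(4c**2 - 12*b*d)) / (6d), i.e. at the roots of dy/dx:

import numpy as np

b, c, d, x0 = 1.0, -3.0, 1.0, 0.0
analytic = [x0 + (-2*c + s * (4*c**2 - 12*b*d)**.5) / (6*d) for s in (+1., -1.)]
numeric = np.roots([3*d, 2*c, b])       # roots of 3d*x^2 + 2c*x + b
print(sorted(analytic), np.sort(numeric))   # the same two locations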
def mkKernel(ks, sig, th , om, ps, gm):
        """ Check the kernel size"""
        if not ks%2:
            exit(1)

        """ Definition of the varibles"""
        theta = th 
        psi = ps 
        sigma = np.float(sig)
        omega = np.float(om)
        gamma = gm
        
        """Creating the kernel size"""        
#        xs=np.linspace(-1*ks,1*ks/10.,ks)
#        ys=np.linspace(-1*ks/10.,1*ks/10.,ks)
        xs=np.linspace(-1*ks,1*ks,ks)
        ys=np.linspace(-1*ks,1*ks,ks)

        """Creating the kernel"""        
        x,y = np.meshgrid(xs,ys)        

 
        #return np.array( np.exp(-0.5*(x_theta**2+gamma * y_theta**2)/sigma**2)*np.cos(2.*np.pi*x_theta/lmbd + psi),dtype=np.float32)
        gabor_kernel =  np.array( np.exp(-(x**2 + y**2)/(2*sigma**2) )*np.cos(2.*np.pi*(x*np.cos(theta)+y*np.sin(theta) ) * omega), dtype=np.float32)     
        return gabor_kernel
        """  Return the kernel                  The sigma signal                                           The sinus wave                                   """
Example #30
def load_PairCount_output(root):
   #load sbins
   lines=open(root+'-DD.dat').readlines()
   sbins=np.array([np.float(x) for x in lines[0].split()])
   ns=sbins.size-1
   #load mu bins
   mubins=np.array([np.float(x) for x in lines[1].split()])
   nmu=mubins.size-1
   #load pair counts
   DD=np.loadtxt(root+'-DD.dat',skiprows=2)
   DR=np.loadtxt(root+'-DR.dat',skiprows=2)
   try:
      RR=np.loadtxt(root+'-RR.dat',skiprows=2)
   except:
      RR=np.loadtxt(root[:-4]+'0001-RR.dat',skiprows=2)
   #load weights
   lines=open(root+'-norm.dat').readlines()
   Wsum_data=np.float(lines[0].split()[-1])
   Wsum_rand=np.float(lines[1].split()[-1])

   Wrat=Wsum_data/Wsum_rand
   DR=DR*Wrat
   RR=RR*Wrat*Wrat
   #print ns,nmu,DD.shape, Wsum_data, Wsum_rand
   #print sbins, mubins

   #combined all output in a dictionary and return
   pcdict={'sbins': sbins, 'ns': ns, 'mubins':mubins, 'nmu':nmu, 
	 'DD': DD, 'DR': DR, 'RR': RR ,
	 'Wsum_data': Wsum_data, 'Wsum_rand': Wsum_rand, 'Wrat':Wrat}
   return pcdict
Example #31
        x = x.view(-1, 9 * 9 * 128)

        x = self.dropout7(F.relu(self.fc1(x)))
        x = self.dropout8(F.relu(self.fc2(x)))
        x = self.fc3(x)
        return x


model = Net2()

# Make sure that all nodes have the same model
for param in model.parameters():
    tensor0 = param.data
    dist.all_reduce(tensor0, op=dist.reduce_op.SUM)
    param.data = tensor0 / np.sqrt(np.float(num_nodes))

model.cuda()

Path_Save = '/projects/sciteam/bahp/RNN/TinyImageNetModel'
# torch.save(model.state_dict(), Path_Save)
# model.load_state_dict(torch.load(Path_Save))

LR = 0.001
batch_size = 100
Num_Epochs = 1000

criterion = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(model.parameters(), lr=LR)

I_permutation = np.random.permutation(L_Y_train)
Example #32
puncts = set(string.punctuation)

vocab = set(vocabFile.read().strip().split(' '))
nWords = len(vocab) #Size of input layer = length of vocabulary
dataTemp = []

fil = fil[1:]
liwcTemp = []

for j in fil:
    i = temp1 = j[1]
    temp1 = i.split('|||')
    temp2 = []
    for i in temp1:
        #Removing URLs and numbers
        x = re.sub(r'https?\S+', '', re.sub(r'\w*\d\w*', '', i).strip()).strip()
        #Tokenizing
        tok = list(tknzr.tokenize(x))
        for x in tok:
            tempWord = lancaster.stem(wordnetlem.lemmatize(x.lower()))
            tempWord = ''.join(ch for ch in tempWord if ch not in puncts)
            if len(tempWord) > 1 and tempWord in vocab:
                temp2.append(tempWord)
    dataToAdd = [j[0], ' '.join(temp2)] + j[2:7]
    dataTemp.append(dataToAdd)
    liwcTemp.append([np.float(k) for k in j[8:]])


np.save('16to40withliwc.npy', dataTemp)
np.save('16to40ofliwc.npy', liwcTemp)
Example #33
                                    overall_data, ensure_ascii='False')
                                request_send = requests.post(url, data_send)
                                request_message = json.loads(request_send.text)
                                class_coeff.append(
                                    request_message["data"]["splits"])
                                #print(request_message)

                            range_vals = (np.min(np.array(x_max)),
                                          np.max(np.array(x_min)))

                            try:  #Computation Block-2 involving- Checking Which X Values falls within the Range
                                x_rough = df.iloc[0:, 2].tolist()
                                x_rough = [x.split("%")[0] for x in x_rough]
                                x_unique = set(x_rough)
                                for x in x_unique:
                                    if ((range_vals[0] > np.float(x) >
                                         range_vals[1])
                                            or (range_vals[0] < np.float(x) <
                                                range_vals[1])):
                                        forecast_x = np.float(x)
                                        break
                                    else:
                                        continue

                                try:  #Computation Block-3 involving- Mapping Class with X's Peak Values & Quadratic Coeff's
                                    reqd_dict = {}
                                    for each_log in range(len(class_log)):
                                        reqd_dict[class_log[
                                            each_log]] = class_coeff[each_log]

                                    output_passed["status"] = "success"
    def __resample4(self, x_in, y_in, x_out, y_out, stop_time):
        # we want x-out to have three times the number of points as there are pixels
        # Plus one at the end
        # y_out = numpy.empty(len(x_out)-1, dtype=numpy.float64)
        # print 'len x_out: %d'%len(x_out)

        # A couple of special cases that I don't want to have to put extra checks in for:
        if x_out[-1] < x_in[0] or x_out[0] > stop_time:
            # We're all the way to the left of the data or all the way to the right. Fill with NaNs:
            y_out.fill('NaN')
        elif x_out[0] > x_in[-1]:
            # We're after the final clock tick, but before stop_time
            i = 0
            while i < len(x_out) - 1:
                if x_out[i] < stop_time:
                    y_out[i] = y_in[-1]
                else:
                    y_out[i] = numpy.float('NaN')
                i += 1
        else:
            i = 0
            j = 1
            # Until we get to the data, fill the output array with NaNs (which
            # get ignored when plotted)
            while x_out[i] < x_in[0]:
                y_out[i] = numpy.float('NaN')
                y_out[i + 1] = numpy.float('NaN')
                y_out[i + 2] = numpy.float('NaN')
                i += 3
            # If we're some way into the data, we need to skip ahead to where
            # we want to get the first datapoint from:
            while x_in[j] < x_out[i]:
                j += 1

            # Get the first datapoint:
            # y_out[i] = y_in[j-1]
            # i += 1

            # Get values until we get to the end of the data:
            while j < len(x_in) and i < len(x_out) - 2:  # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1
                # This is 'nearest neighbour on the left' interpolation. It's
                # what we want if none of the source values checked in the
                # upcoming loop are used:
                y_out[i] = y_in[j - 1]
                i += 2
                positive_jump_value = 0
                positive_jump_index = j - 1
                negative_jump_value = 0
                negative_jump_index = j - 1
                # now find the max and min values between this x_out time point and the next x_out timepoint
                # print i
                while j < len(x_in) and x_in[j] < x_out[i]:
                    jump = y_in[j] - y_out[i - 2]
                    # would using this source value cause a bigger positive jump?
                    if jump > 0 and jump > positive_jump_value:
                        positive_jump_value = jump
                        positive_jump_index = j
                    # would using this source value cause a bigger negative jump?
                    elif jump < 0 and jump < negative_jump_value:
                        negative_jump_value = jump
                        negative_jump_index = j

                    j += 1

                if positive_jump_index < negative_jump_index:
                    y_out[i - 1] = y_in[positive_jump_index]
                    y_out[i] = y_in[negative_jump_index]
                    # TODO: We could override the x_out values with x_in[jump_index]
                else:
                    y_out[i - 1] = y_in[negative_jump_index]
                    y_out[i] = y_in[positive_jump_index]

                i += 1

            # Get the last datapoint:
            if j < len(x_in):
                # If the sample rate of the raw data is low, then the current
                # j point could be outside the current plot view range
                # If so, decrease j so that we take a value that is within the
                # plot view range.
                if x_in[j] > x_out[-1] and j > 0:
                    j -= 1

                y_out[i] = y_in[j]
                i += 1
            # if i < len(x_out):
            #    y_out[i] = y_in[-1]
            #    i += 1
            # Fill the remainder of the array with the last datapoint,
            # if t < stop_time, and then NaNs after that:
            while i < len(x_out) - 1:
                if x_out[i] < stop_time:
                    y_out[i] = y_in[-1]
                else:
                    y_out[i] = numpy.float('NaN')
                i += 1
Example #35
def train_model(model, criterion, optimizer, scheduler, num_epochs=5):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()

            print(phase, running_loss, dataset_sizes[phase])
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            # Log the los / acc to AMLS
            run.log("{} Loss".format(phase), np.float(epoch_loss))
            run.log("{} Acc".format(phase), np.float(epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def _plot_confusion_matrix(pred_labels, true_labels):
    confusion_matrix = metrics.confusion_matrix(true_labels, pred_labels)
    row_sums = confusion_matrix.sum(axis=1, keepdims=True)
    normalized_confusion_matrix = confusion_matrix / row_sums
    np.fill_diagonal(normalized_confusion_matrix, 0)
    plt.matshow(normalized_confusion_matrix, cmap=plt.cm.get_cmap('gray'))
    plt.title('Confusion Matrix')
    fig = plt.gcf()
    fig.canvas.set_window_title('Confusion Matrix')
    plt.show()


if __name__ == '__main__':
    model_metrics = {
        'glove_rnn': {
            'precision': np.float(0.567653),
            'recall': 0.5,
            'roc': 0.4,
            'f1': 0.5,
        },
        'tfidf': {
            'precision': 0.32,
            'recall': 0.5,
            'roc': 0.3,
            'f1': 0.8
        },
        'sds': {
            'precision': 0.32,
            'recall': 0.5,
            'roc': 0.3,
            'f1': 0.8
Example #37
    def propagate(self,
                  pulse_in,
                  fiber,
                  n_steps,
                  output_power=None,
                  reload_fiber_each_step=False,
                  tshock_correction=0):
        """
        This is the main user-facing function that allows a pulse to be 
        propagated along a fiber (or other nonlinear medium). 
        
        Parameters
        ----------
        
        pulse_in : pulse object
            this is an instance of the :class:`pynlo.light.PulseBase.Pulse` class.
        
        fiber : fiber object
            this is an instance of the :class:`pynlo.media.fibers.fiber.FiberInstance` class.
        
        n_steps : int
            the number of steps requested in the integrator output. Note: the RK4IP integrator
            uses an adaptive step size. It should pick the correct step size automatically,
            so setting n_steps should not affect the accuracy, just the number of points that
            are returned by this function.
        
        output_power : 
            This parameter is a mystery
    
        reload_fiber_each_step : boolean
            This flag determines if the fiber parameters should be reloaded every step. It is 
            necessary if the fiber dispersion or gamma changes along the fiber length. 
            :func:`pynlo.media.fibers.fiber.FiberInstance.set_dispersion_function` and 
            :func:`pynlo.media.fibers.fiber.FiberInstance.set_dispersion_function` should be used
            to specify how the dispersion and gamma change with the fiber length
        
        
        Returns
        -------
        z_positions : array of float
            an array of z-positions along the fiber (in meters)
        
        AW : 2D array of complex128
            A 2D numpy array corresponding to the intensities in each *frequency* bin for each
            step in the z-direction of the fiber. 
        
        AT : 2D array of complex128
            A 2D numpy array corresponding to the intensities in each *time* bin for each
            step in the z-direction of the fiber. 
        
        pulse_out : PulseBase object
            the pulse after it has propagated through the fiber. This object is suitable for propagation 
            through the next fiber!
        """

        n_steps = int(n_steps)

        # Copy parameters from pulse and fiber into class-wide variables
        z_positions = np.linspace(0, fiber.length, n_steps + 1)
        if n_steps == 1:
            delta_z = fiber.length
        else:
            delta_z = z_positions[1] - z_positions[0]

        AW = np.complex64(np.zeros((pulse_in.NPTS, n_steps)))
        AT = np.complex64(np.copy(AW))

        print("Pulse energy before", fiber.fibertype,":", \
              1e9 * pulse_in.calc_epp(), 'nJ')

        pulse_out = Pulse()
        pulse_out.clone_pulse(pulse_in)
        self.setup_fftw(pulse_in, fiber, output_power)
        self.w0 = 1 / (1 / self.w0 + tshock_correction)

        self.load_fiber_parameters(pulse_in, fiber, output_power)

        for i in range(n_steps):
            print("Step:", i, "Distance remaining:",
                  fiber.length * (1 - np.float(i) / n_steps))

            if reload_fiber_each_step:
                self.load_fiber_parameters(pulse_in,
                                           fiber,
                                           output_power,
                                           z=i * delta_z)

            self.integrate_over_dz(delta_z)
            AW[:, i] = self.conditional_ifftshift(self.FFT_t_2(self.A))
            AT[:, i] = self.conditional_ifftshift(self.A)
            pulse_out.set_AT(self.conditional_ifftshift(self.A))
            print("Pulse energy after:", \
              1e9 * pulse_out.calc_epp(), 'nJ')
        pulse_out.set_AT(self.conditional_ifftshift(self.A))

        print("Pulse energy after", fiber.fibertype,":", \
              1e9 * pulse_out.calc_epp(), 'nJ')
        #        print "alpha out:",self.alpha
        self.cleanup()
        return z_positions, AW, AT, pulse_out
Example #38
def phasesym(im,
             nscale=5,
             norient=6,
             minWaveLength=3,
             mult=2.1,
             sigmaOnf=0.55,
             k=2.0,
             polarity=0,
             noiseMethod=-1):
    """  Arguments:
                   Default values      Description
     
         nscale           5    - Number of wavelet scales, try values 3-6
         norient          6    - Number of filter orientations.
         minWaveLength    3    - Wavelength of smallest scale filter.
         mult             2.1  - Scaling factor between successive filters.
         sigmaOnf         0.55 - Ratio of the standard deviation of the Gaussian 
                                 describing the log Gabor filter's transfer function 
                                 in the frequency domain to the filter center frequency.
         k                2.0  - No of standard deviations of the noise energy beyond
                                 the mean at which we set the noise threshold point.
                                 You may want to vary this up to a value of 10 or
                                 20 for noisy images 
         polarity         0    - Controls 'polarity' of symmetry features to find.
                                  1 - just return 'bright' points
                                 -1 - just return 'dark' points
                                  0 - return bright and dark points.
         noiseMethod      -1   - Parameter specifies method used to determine
                                 noise statistics. 
                                   -1 use median of smallest scale filter responses
                                   -2 use mode of smallest scale filter responses
                                    0+ use noiseMethod value as the fixed noise threshold.
     
      Return values:
         phaseSym              - Phase symmetry image (values between 0 and 1).
         orientation           - Orientation image. Orientation in which local
                                 symmetry energy is a maximum, in degrees
                                 (0-180), angles positive anti-clockwise. Note
                                 the orientation info is quantized by the number
                                 of orientations
         totalEnergy           - Un-normalised raw symmetry energy which may be
                                 more to your liking.
         T                     - Calculated noise threshold (can be useful for
                                 diagnosing noise characteristics of images).  Once you know
                                 this you can then specify fixed thresholds and save some
                                 computation time.
     
      Notes on specifying parameters:
      
      The parameters can be specified as a full list eg.
       >> phaseSym = phasesym(im, 5, 6, 3, 2.5, 0.55, 2.0, 0);
     
      or as a partial list with unspecified parameters taking on default values
       >> phaseSym = phasesym(im, 5, 6, 3);
     
      or as a partial list of parameters followed by some parameters specified via a
      keyword-value pair, remaining parameters are set to defaults, for example:
       >> phaseSym = phasesym(im, 5, 6, 3, 'polarity',-1, 'k', 2.5);
      
      The convolutions are done via the FFT.  Many of the parameters relate to the
      specification of the filters in the frequency plane.  The values do not seem
      to be very critical and the defaults are usually fine.  You may want to
      experiment with the values of 'nscales' and 'k', the noise compensation factor.
     
      Notes on filter settings to obtain even coverage of the spectrum
      sigmaOnf       .85   mult 1.3
      sigmaOnf       .75   mult 1.6     (filter bandwidth ~1 octave)
      sigmaOnf       .65   mult 2.1  
      sigmaOnf       .55   mult 3       (filter bandwidth ~2 octaves)
     
      For maximum speed the input image should have dimensions that correspond to
      powers of 2, but the code will operate on images of arbitrary size.
     
      See Also:  PHASECONG, PHASECONG2, GABORCONVOLVE, PLOTGABORFILTERS
    
      References:
          Peter Kovesi, "Symmetry and Asymmetry From Local Phase" AI'97, Tenth
          Australian Joint Conference on Artificial Intelligence. 2 - 4 December
          1997. http://www.cs.uwa.edu.au/pub/robvis/papers/pk/ai97.ps.gz.
     
          Peter Kovesi, "Image Features From Phase Congruency". Videre: A
          Journal of Computer Vision Research. MIT Press. Volume 1, Number 3,
          Summer 1999 http://mitpress.mit.edu/e-journals/Videre/001/v13.html
    """

    epsilon = 1e-4  # Used to prevent division by zero
    rows, cols = im.shape
    imagefft = np.fft.fft2(im)  # Fourier transform of image
    zero = np.zeros((rows, cols))

    totalEnergy = zero.copy()  # ndarray for accumulating weighted phase
    # congruency values (energy)
    totalSumAn = zero.copy()  # ndarray for accumulating filter response
    # amplitude values
    orientation = zero.copy()  # ndarray storing orientation with greatest
    # energy for each pixel
    T = 0

    # Pre-compute some stuff to speed up filter construction
    #
    # Set up X and Y ndarrays with ranges normalized to +/- 0.5
    # The following code adjusts things appropriately for odd and even values
    # of rows and columns.

    if cols % 2:
        x_range = np.arange(-(cols - 1) / 2, (cols - 1) / 2 + 1,
                            dtype=np.float)
        x_range = np.divide(x_range, np.float(cols - 1))
    else:
        x_range = np.arange(-cols / 2, (cols / 2 - 1) + 1, dtype=np.float)
        x_range = np.divide(x_range, cols)

    if rows % 2:
        y_range = np.arange(-(rows - 1) / 2, (rows - 1) / 2 + 1,
                            dtype=np.float)
        y_range = np.divide(y_range, np.float(rows - 1))
    else:
        y_range = np.arange(-rows / 2, (rows / 2 - 1) + 1, dtype=np.float)
        y_range = np.divide(y_range, rows)

    x, y = np.meshgrid(x_range, y_range)

    radius = np.sqrt(
        np.square(x) +
        np.square(y))  # ndarray values contain *normalized* radius from center
    theta = np.arctan2(-y, x)  # ndarray values contain polar angle
    # (note negative y is used to give positive
    # anti-clockwise angles)

    radius = np.fft.ifftshift(
        radius)  # Quadrant shift radius and theta so that filters
    theta = np.fft.ifftshift(
        theta)  # are constructed with 0 frequency at the corners.
    radius[0][0] = 1  # Get rid of the 0 radius value at the 0
    # frequency point (now at top-left corner)
    # so that taking the log of the radius will
    # not cause trouble

    sintheta = np.sin(theta)
    costheta = np.cos(theta)

    #print sintheta
    #print
    #print costheta
    #print

    #Filters are constructed in terms of two components.
    # 1) The radial component, which controls the frequency band that the filter
    #    responds to
    # 2) The angular component, which controls the orientation that the filter
    #    responds to.
    # The two components are multiplied together to construct the overall filter.

    # Construct the radial filter components...
    # First construct a low-pass filter that is as large as possible, yet falls
    # away to zero at the boundaries.  All log Gabor filters are multiplied by
    # this to ensure no extra frequencies at the 'corners' of the FFT are
    # incorporated as this seems to upset the normalisation process when
    # calculating phase congruency.

    lp = lowpassfilter((rows, cols), 0.4, 10)  #Radius 0.4, 'sharpness' 10
    logGabor = []

    for s in range(nscale):
        wavelength = minWaveLength * (mult**s)
        fo = 1.0 / np.float(wavelength)  # Centre frequency of filter
        # Log-Gabor radial component (see formula note above)
        thisFilterTop = -np.square(np.log10(np.divide(radius, fo)))
        thisFilterBot = 2 * np.square(np.log10(sigmaOnf))
        thisFilter = np.exp(np.divide(thisFilterTop, thisFilterBot))
        thisFilter = np.multiply(thisFilter, lp)  # Apply low-pass filter
        thisFilter[0][0] = 0  # Set the value at the 0 frequency point of the filter back to zero
        logGabor.append(thisFilter)


    #The main loop...

    for o in range(norient):
        #Construct the angular filter spread function
        angle = (o * np.pi) / norient  #Filter Angle
        #print "angle:" + str(angle)

        #For each point in the filter matrix, calculate the angular distance from
        #the specified filter orientation. To overcome the angular wrap-around
        #problem, sine difference, then cosine difference values are first computed
        #and then the atan2 function is used to determine angular distance.

        ds = sintheta * np.cos(angle) - costheta * np.sin(angle)  # Difference in sine
        #print "ds: " + str(ds)
        dc = costheta * np.cos(angle) + sintheta * np.sin(angle)  # Difference in cosine
        #print "dc: " + str(dc)
        dtheta = np.abs(np.arctan2(ds, dc))  #Absolute Angular distance
        #print "dtheta: " + str(dtheta)
        # Scale dtheta so that the cosine spread function has the right
        # wavelength, and clamp to pi
        dtheta = np.minimum(np.multiply(dtheta, np.float(norient) / 2), np.pi)
        #print "dtheta: " + str(dtheta)

        # The spread function is cos(dtheta) between -pi and pi. We add 1,
        # and then divide by 2 so that the value ranges from 0 to 1.

        spread = (np.cos(dtheta) + 1) / 2
        #print "spread: " + str(spread)

        sumAn_ThisOrient = zero
        #print "sunAn_ThisOrient: " + str(sumAn_ThisOrient)
        Energy_ThisOrient = zero
        #print "Energy_ThisOrient: " + str(Energy_ThisOrient)

        for s in range(nscale):  #For each scale ...
            thisFilter = np.multiply(logGabor[s],
                                     spread)  #Multiply radial and angular
            #components to get filter.
            #print "Filter: " + str(thisFilter)

            #Convolve image with even and odd filters returning the result in EO
            #print "imageFFT: " + str(imagefft)
            EO = np.fft.ifft2(np.multiply(imagefft, thisFilter))
            #print "EO: " + str(EO)
            An = np.abs(EO)  #Amplitude of Even and Odd filter response
            #print "An: " + str(An)
            sumAn_ThisOrient = sumAn_ThisOrient + An  #Sum of amplitude responses.
            #print "sumAn_ThisOrient: " + str(sumAn_ThisOrient)
            #At the smallest scale estimate noise characteristics from the
            #distribution of the filter amplitude responses stored in sumAn.
            #tau is the Rayleigh parameter that is used to describe the
            #distribution.
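            #(Added note: for a Rayleigh distribution the median equals
            # sigma*sqrt(ln 4), which is why the median amplitude is divided
            # by sqrt(np.log(4)) below to recover the scale parameter tau.)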

            if s == 0:
                #Use median to estimate noise statistics
                tau = np.divide(np.median(sumAn_ThisOrient[:]),
                                np.sqrt(np.log(4)))
                #print "tau: " + str(tau)

            #Now calculate phase symmetry measure
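            #(Added note: np.real(EO) is the even-symmetric (line) filter
            # response and np.imag(EO) the odd-symmetric (edge) response;
            # points of symmetry have a large even response and a small odd
            # response, which is what the expressions below reward.)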
            if polarity == 0:  #Look for 'white' and 'black' spots
                Energy_ThisOrient = Energy_ThisOrient + np.abs(
                    np.real(EO)) - np.abs(np.imag(EO))

            elif polarity == 1:  #Look for just 'white' spots
                Energy_ThisOrient = Energy_ThisOrient + np.real(EO) - np.abs(
                    np.imag(EO))

            elif polarity == -1:  #Just look for 'black' spots
                Energy_ThisOrient = Energy_ThisOrient - np.real(EO) - np.abs(
                    np.imag(EO))
            #print "Energy_ThisOrient: " + str(Energy_ThisOrient)

        # Automatically determine noise threshold

        #  Assuming the noise is Gaussian the response of the filters to noise will
        #  form Rayleigh distribution.  We use the filter responses at the smallest
        #  scale as a guide to the underlying noise level because the smallest scale
        #  filters spend most of their time responding to noise, and only
        #  occasionally responding to features. Either the median, or the mode, of
        #  the distribution of filter responses can be used as a robust statistic to
        #  estimate the distribution mean and standard deviation as these are related
        #  to the median or mode by fixed constants.  The response of the larger
        #  scale filters to noise can then be estimated from the smallest scale
        #  filter response according to their relative bandwidths.
        #
        #  This code assumes that the expected response to noise on the phase congruency
        #  calculation is simply the sum of the expected noise responses of each of
        #  the filters.  This is a simplistic overestimate, however these two
        #  quantities should be related by some constant that will depend on the
        #  filter bank being used.  Appropriate tuning of the parameter 'k' will
        #  allow you to produce the desired output.
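        #  In short (matching the code below, added as a summary):
        #    totalTau = tau * (1 - (1/mult)**nscale) / (1 - 1/mult)
        #    T = max(totalTau*sqrt(pi/2) + k*totalTau*sqrt((4 - pi)/2), epsilon)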
        if noiseMethod >= 0:  # We are using a fixed noise threshold
            T = noiseMethod  # use supplied noiseMethod value as the threshold
        else:
            # Estimate the effect of noise on the sum of the filter responses as
            # the sum of estimated individual responses (this is a simplistic
            # overestimate). As the estimated noise response at successive scales
            # is scaled inversely proportional to bandwidth we have a simple
            # geometric sum.
            tauTop = np.power((1 / mult), nscale)
            tauTop = 1 - tauTop
            tauBot = 1 - (1 / mult)
            totalTau = tau * (tauTop / tauBot)

            # Calculate mean and std dev from tau using fixed relationship
            # between these parameters and tau. See
            # http://mathworld.wolfram.com/RayleighDistribution.html
            EstNoiseEnergyMean = totalTau * np.sqrt(
                np.pi / 2)  # Expected mean and std
            EstNoiseEnergySigma = totalTau * np.sqrt(
                (4 - np.pi) / 2)  # values of noise energy

            # Noise threshold, make sure it is not less than epsilon.
            T = np.maximum(EstNoiseEnergyMean + k * EstNoiseEnergySigma,
                           epsilon)
        #print "T: " + str(T)
        # Apply noise threshold,  this is effectively wavelet denoising via
        # soft thresholding.  Note 'Energy_ThisOrient' will have -ve values.
        # These will be floored out at the final normalization stage.
        Energy_ThisOrient = Energy_ThisOrient - T
        #print "Energy_ThisOrient: " + str(Energy_ThisOrient)

        # Update accumulator matrix for sumAn and totalEnergy
        totalSumAn = totalSumAn + sumAn_ThisOrient
        #print "totalSumAn: " + str(totalSumAn)
        totalEnergy = totalEnergy + Energy_ThisOrient
        #print "totalEnergy: " + str(totalEnergy)
        #print "T: " + str(T)

        # Update orientation matrix by finding image points where the energy in
        # this orientation is greater than in any previous orientation (the
        # change matrix) and then replacing these elements in the orientation
        # matrix with the current orientation number.
        #import pdb; pdb.set_trace()
        #print "o: " + str(o)
        if o == 0:
            maxEnergy = Energy_ThisOrient
            #print "maxExergy: " + str(maxEnergy)
        else:
            Energy_ThisOrient = np.around(Energy_ThisOrient, decimals=8)
            maxEnergy = np.around(maxEnergy, decimals=8)
            change = Energy_ThisOrient > maxEnergy
            #print "change: " + str(change)
            invert = np.logical_not(change)
            orientationRight = np.multiply(orientation, invert)
            orientationLeft = np.multiply(o, change)
            orientation = orientationLeft + orientationRight
            #print "orientation: " + str(orientation)
            #print "Energy_ThisOrient: " + str(Energy_ThisOrient)
            maxEnergy = np.maximum(maxEnergy, Energy_ThisOrient)
            #print "maxExergy: " + str(maxEnergy)

    # Normalize totalEnergy by the totalSumAn to obtain phase symmetry
    # totalEnergy is floored at 0 to eliminate -ve values
    phaseSym = np.divide(np.maximum(totalEnergy, 0), (totalSumAn + epsilon))

    # Convert orientation matrix values to degrees
    orientation = np.fix(orientation * (180.0 / norient))  # float division to avoid truncation

    return phaseSym, orientation, totalEnergy, T
import numpy as np
import sys

tmp = np.linspace(0.0,
                  np.float(sys.argv[1]),
                  num=int(sys.argv[2]),
                  endpoint=False)
print('\n'.join('{0:.8f}'.format(k).rstrip('0').rstrip('.') for k in tmp))
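# Worked example (script filename hypothetical):
#   $ python print_linspace.py 1.0 4
#   0
#   0.25
#   0.5
#   0.75
# i.e. four evenly spaced values in [0, 1.0) with trailing zeros stripped.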
Exemple #40
0
def read_SLR_C20(SLR_file, HEADER=True, AOD=True):
    """
    Reads C20 spherical harmonic coefficients from SLR measurements

    Arguments
    ---------
    SLR_file: Satellite Laser Ranging file

    Keyword arguments
    -----------------
    AOD: remove background De-aliasing product from the SLR solution
    HEADER: file contains header text to be skipped

    Returns
    -------
    data: SLR degree 2 order 0 cosine stokes coefficients
    error: SLR degree 2 order 0 cosine stokes coefficient error
    month: GRACE/GRACE-FO month of measurement
    date: date of SLR measurement
    """

    #-- check that SLR file exists
    if not os.access(os.path.expanduser(SLR_file), os.F_OK):
        raise IOError('SLR file not found in file system')

    #-- determine if imported file is from PO.DAAC or CSR
    if bool(re.search(r'C20_RL\d+', SLR_file)):
        #-- SLR C20 file from CSR
        #-- Useful for checking new months when the TN series isn't up to date, as
        #-- the SLR estimates always use the full set of days in each calendar month.
        #-- format of the input file (note 64 bit floating point for C20)
        #-- Column 1: Approximate mid-point of monthly solution (years)
        #-- Column 2: C20 from SLR (normalized)
        #-- Column 3: Delta C20 relative to a mean value of -4.841694723127E-4 (1E-10)
        #-- Column 4: Solution sigma (1E-10)
        #-- Column 5: Mean value of Atmosphere-Ocean De-aliasing model (1E-10)
        #-- Columns 6-7: Start and end dates of data used in solution
        dtype = {}
        dtype['names'] = ('time', 'C20', 'delta', 'sigma', 'AOD', 'start',
                          'end')
        dtype['formats'] = ('f', 'f8', 'f', 'f', 'f', 'f', 'f')
        #-- header text is commented and won't be read
        file_input = np.loadtxt(os.path.expanduser(SLR_file), dtype=dtype)
        #-- date and GRACE/GRACE-FO month
        tdec = file_input['time']
        grace_month = 1 + np.floor((tdec - 2002.) * 12.)
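        #-- (e.g. a mid-April 2002 solution with tdec ~ 2002.29 gives
        #-- 1 + floor(0.29*12) = 4, i.e. GRACE month 4)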
        C20 = file_input['C20']
        eC20 = file_input['sigma'] * 1e-10
        #-- Background gravity model includes solid earth and ocean tides, solid
        #-- earth and ocean pole tides, and the Atmosphere-Ocean De-aliasing
        #-- product. The monthly mean of the AOD model has been restored.
        if AOD:  #-- Removing AOD product that was restored in the solution
            C20 -= file_input['AOD'] * 1e-10
    elif bool(re.search('TN-(11|14)', SLR_file)):
        #-- SLR C20 RL06 file from PO.DAAC
        with open(os.path.expanduser(SLR_file), 'r') as f:
            file_contents = f.read().splitlines()
        #-- number of lines contained in the file
        file_lines = len(file_contents)

        #-- counts the number of lines in the header
        count = 0
        #-- Reading over header text
        while HEADER:
            #-- file line at count
            line = file_contents[count]
            #-- find PRODUCT: within line to set HEADER flag to False when found
            HEADER = not bool(re.match(r'PRODUCT:+', line, re.IGNORECASE))
            #-- add 1 to counter
            count += 1

        #-- number of months within the file
        n_mon = file_lines - count
        date_conv = np.zeros((n_mon))
        C20_input = np.zeros((n_mon))
        eC20_input = np.zeros((n_mon))
        mon = np.zeros((n_mon), dtype=np.int)
        #-- time count
        t = 0
        #-- for every other line:
        for line in file_contents[count:]:
            #-- find numerical instances in line including exponents,
            #-- decimal points and negatives
            line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?', line)
            #-- check for empty lines as there are
            #-- slight differences in RL04 TN-05_C20_SLR.txt
            #-- with blanks between the PRODUCT: line and the data
            count = len(line_contents)
            #-- if count is greater than 0
            if (count > 0):
                #-- modified julian date for line
                MJD = np.float(line_contents[0])
                #-- converting from MJD into month, day and year
                YY, MM, DD, hh, mm, ss = convert_julian(MJD + 2400000.5,
                                                        FORMAT='tuple')
                #-- converting from month, day, year into decimal year
                date_conv[t] = convert_calendar_decimal(YY,
                                                        MM,
                                                        day=DD,
                                                        hour=hh)
                #-- Spherical Harmonic data for line
                C20_input[t] = np.float(line_contents[2])
                eC20_input[t] = np.float(line_contents[4]) * 1e-10
                #-- GRACE/GRACE-FO month of SLR solutions
                mon[t] = 1 + np.round((date_conv[t] - 2002.) * 12.)
                #-- The GRACE/GRACE-FO 'Special Months'
                #-- (November 2011, December 2012, April 2012, October 2019)
                #-- Accelerometer shutoffs make the relation between month number
                #-- and date more complicated as days from other months are used
                #-- Nov11 (month 119) is centered in Oct11 (118)
                #-- May15 (month 161) is centered in Apr15 (160)
                #-- Oct18 (month 202) is centered in Nov18 (203)
                if (mon[t] == mon[t - 1]) and (mon[t - 1] == 118):
                    mon[t] = mon[t - 1] + 1
                elif (mon[t] == mon[t - 1]) and (mon[t - 1] == 121):
                    mon[t - 1] = mon[t] - 1
                elif (mon[t] == mon[t - 1]) and (mon[t - 1] == 160):
                    mon[t] = mon[t - 1] + 1
                elif (mon[t] == mon[t - 1]) and (mon[t - 1] == 203):
                    mon[t - 1] = mon[t] - 1
                #-- add to t count
                t += 1
        #-- convert to output variables and truncate if necessary
        tdec = date_conv[:t]
        C20 = C20_input[:t]
        eC20 = eC20_input[:t]
        grace_month = mon[:t]
    else:
        #-- SLR C20 file from PO.DAAC
        with open(os.path.expanduser(SLR_file), 'r') as f:
            file_contents = f.read().splitlines()
        #-- number of lines contained in the file
        file_lines = len(file_contents)

        #-- counts the number of lines in the header
        count = 0
        #-- Reading over header text
        while HEADER:
            #-- file line at count
            line = file_contents[count]
            #-- find PRODUCT: within line to set HEADER flag to False when found
            HEADER = not bool(re.match(r'PRODUCT:+', line))
            #-- add 1 to counter
            count += 1

        #-- number of months within the file
        n_mon = file_lines - count
        date_conv = np.zeros((n_mon))
        C20_input = np.zeros((n_mon))
        eC20_input = np.zeros((n_mon))
        slr_flag = np.zeros((n_mon), dtype=np.bool)
        #-- time count
        t = 0
        #-- for every other line:
        for line in file_contents[count:]:
            #-- find numerical instances in line including exponents,
            #-- decimal points and negatives
            line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?', line)
            #-- check for empty lines as there are
            #-- slight differences in RL04 TN-05_C20_SLR.txt
            #-- with blanks between the PRODUCT: line and the data
            count = len(line_contents)
            #-- if count is greater than 0
            if (count > 0):
                #-- modified julian date for line
                MJD = np.float(line_contents[0])
                #-- converting from MJD into month, day and year
                YY, MM, DD, hh, mm, ss = convert_julian(MJD + 2400000.5,
                                                        FORMAT='tuple')
                #-- converting from month, day, year into decimal year
                date_conv[t] = convert_calendar_decimal(YY,
                                                        MM,
                                                        day=DD,
                                                        hour=hh)
                #-- Spherical Harmonic data for line
                C20_input[t] = np.float(line_contents[2])
                eC20_input[t] = np.float(line_contents[4]) * 1e-10
                #-- line has * flag
                if bool(re.search(r'\*', line)):
                    slr_flag[t] = True
                #-- add to t count
                t += 1

        #-- truncate for RL04 if necessary
        date_conv = date_conv[:t]
        C20_input = C20_input[:t]
        eC20_input = eC20_input[:t]
        slr_flag = slr_flag[:t]

        #-- GRACE/GRACE-FO month of SLR solutions
        mon = 1 + np.round((date_conv - 2002.) * 12.)
        #-- number of unique months
        grace_month = np.unique(mon)
        n_uniq = len(grace_month)
        #-- Removing overlapping months to use the data for
        #-- months with limited GRACE accelerometer use
        tdec = np.zeros((n_uniq))
        C20 = np.zeros((n_uniq))
        eC20 = np.zeros((n_uniq))
        #-- New SLR datasets have * flags for the modified GRACE periods
        #-- these GRACE months use half of a prior month in their solution
        #-- this will find these months (marked above with slr_flag)
        for t in range(n_uniq):
            count = np.count_nonzero(mon == grace_month[t])
            #-- there is only one solution for the month
            if (count == 1):
                i = np.nonzero(mon == grace_month[t])
                tdec[t] = date_conv[i]
                C20[t] = C20_input[i]
                eC20[t] = eC20_input[i]
            #-- there is a special solution for the month:
            #-- use the solution flagged with slr_flag
            elif (count == 2):
                i = np.nonzero((mon == grace_month[t]) & slr_flag)
                tdec[t] = date_conv[i]
                C20[t] = C20_input[i]
                eC20[t] = eC20_input[i]

    return {'data': C20, 'error': eC20, 'month': grace_month, 'time': tdec}
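# A minimal usage sketch (file name hypothetical; assumes numpy as np, os, re,
# convert_julian and convert_calendar_decimal are available as in the original module):
#   slr = read_SLR_C20('TN-14_C30_C20_GSFC_SLR.txt', HEADER=True, AOD=True)
#   print(slr['month'], slr['data'], slr['error'])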
                
        X_test_l2 = np.concatenate((X_test_FA, X_test_MD), axis=1)
        X_test_l2 = np.concatenate((X_test_l2, X_test_AD), axis=1)
        X_test_l2 = np.concatenate((X_test_l2, X_test_RD), axis=1)
        with tf.Graph().as_default():
            X_train_2, X_test_2 = make_BBRBM_kfold(X_train_l2, X_test_l2, 4000, 5000, 450, index, save_path, 2)
        with tf.Graph().as_default():    
            X_train_3, X_test_3 = make_BBRBM_kfold(X_train_2, X_test_2, 5000, 2000, 640, index, save_path, 3)
        with tf.Graph().as_default():    
            X_train_4, X_test_4 = make_BBRBM_kfold(X_train_3, X_test_3, 2000, 1000, 540, index, save_path, 4)
        with tf.Graph().as_default():    
            X_train_5, X_test_5 = make_BBRBM_kfold(X_train_4, X_test_4, 1000, 200, 1024, index, save_path, 5) 
            
#        clf2 = RandomForestClassifier(45, max_depth=5, max_features=None, min_samples_split=2)
        clf2 = svm.SVC(kernel='linear',probability=True)    
        clf2.fit(X_train_4, Y_train)
        SVM_Accuracy[index] = clf2.score(X_test_4, Y_test)
        Y_predict = clf2.predict(X_test_4)
        tn, fp, fn, tp = confusion_matrix(Y_test, Y_predict).ravel()
        SVM_Sensitivity[index] = np.float(tp)/np.float(tp+fn)
        SVM_Specificity[index] = np.float(tn)/np.float(tn+fp)            
        index=index+1

    Total_mean_acc[loop] = np.mean(SVM_Accuracy,axis=0)
    Total_mean_Sen[loop] = np.mean(SVM_Sensitivity,axis=0)
    Total_mean_Spe[loop] = np.mean(SVM_Specificity,axis=0)

Final_mean_acc=np.mean(Total_mean_acc,axis=0)
Final_mean_Sen=np.mean(Total_mean_Sen,axis=0)
Final_mean_Spe=np.mean(Total_mean_Spe,axis=0)         
        
Exemple #42
0
def blochOscOneFileV2Rf(fileroot,
                        filenum,
                        roi,
                        draw=True,
                        checkField=False,
                        filerootFlea=None,
                        roiFlea=None,
                        bgndRidge=False):
    # Default arguments cannot refer to other parameters in Python, so the
    # Flea-camera file root and ROI default to the main values here instead.
    if filerootFlea is None:
        filerootFlea = fileroot
    if roiFlea is None:
        roiFlea = roi
    filename = fileroot + "_" + str(filenum).zfill(4) + ".ibw"
    dict1 = processIBW(filename, angle=-43)
    print filename

    if checkField:
        roiB = np.array(
            [roiFlea[0], roiFlea[1], 2 * roiFlea[2] - roiFlea[3], roiFlea[2]])
        filenameFlea = filerootFlea + "_" + str(filenum).zfill(4) + ".ibw"
        dictFlea = processIBW(filenameFlea, angle=-41)
        od1 = dictFlea['od1'][roiFlea[0]:roiFlea[1], roiFlea[2]:roiFlea[3]]
        od1B = dictFlea['od1'][roiB[0]:roiB[1], roiB[2]:roiB[3]]
        od2 = dictFlea['od2'][roiFlea[0]:roiFlea[1], roiFlea[2]:roiFlea[3]]
        od2B = dictFlea['od2'][roiB[0]:roiB[1], roiB[2]:roiB[3]]
        num1 = np.sum(od1) - np.sum(od1B)
        num2 = np.sum(od2) - np.sum(od2B)
        imbal = (num1 - num2) / (num1 + num2)
        signalGood = ((num1 > 0) & (num2 > 0))
        print num1, num2, imbal, signalGood
    else:
        imbal = np.nan
        signalGood = False
        num1 = np.nan
        num2 = np.nan

    if bgndRidge:
        odRoi = fringeremoval(filelist, filenum, roi, fileroot=fileroot)

    else:
        odRoi = dict1['rotODcorr'][roi[0]:roi[1], roi[2]:roi[3]]
        atomsRoi = dict1['rotRaw1'][roi[0]:roi[1], roi[2]:roi[3]]

#    delta=30
#    xM=np.array([])
#    yM=np.array([])
#    fM=np.array([])
#    xvals = np.arange(roi[2]-delta,roi[3]+delta)
#    yvals = np.arange(roi[0]-delta,roi[1]+delta)
#    for x in xvals:
#        for y in yvals:
#            if (x>roi[2] and x<roi[3] and y>roi[0] and y<roi[1])==False:
#                xM=np.append(xM,x)
#                yM=np.append(yM,y)
#                fM=np.append(fM,dict1['rotODcorr'][y,x])
#
    (x, y) = np.meshgrid(np.arange(roi[2], roi[3]), np.arange(roi[0], roi[1]))

    (A, B, C), cov = optimize.curve_fit(plane, (x, y),
                                        odRoi.flatten(),
                                        p0=(0.0, 0.0, 0.0))
    print A, B, C
    fitted_plane = plane((x, y), A, B, C).reshape(
        np.arange(roi[0], roi[1]).size,
        np.arange(roi[2], roi[3]).size)
    odRoi1 = odRoi - fitted_plane

    odFiltered = snd.filters.gaussian_filter(odRoi1, 1.0)  #+buildOD

    if draw:
        fig1 = plt.figure()
        pan1 = fig1.add_subplot(1, 1, 1)
        pan1.imshow(odFiltered, vmin=-0.15, vmax=0.3)

    odFiltLin = np.sum(odFiltered, axis=0)

    #    peakL=np.max(odFiltLin[:120])
    #    xGuessL=np.float( np.where(odFiltLin==peakL)[0])
    #    print xGuessL
    #    peakR=np.max(odFiltLin[120:])
    #
    #    xGuessR=np.float( np.where(odFiltLin==peakR)[0])
    #    print xGuessR
    #    peakT=np.max(odFiltLin)
    #    xGuessT=np.float( np.where(odFiltLin==peakT)[0])
    #    xGuess2=175
    #    xGuess1=xGuess2-55.0
    #    xGuess3=xGuess2+52.0
    #
    if draw:
        figure2 = plt.figure()
        panel2 = figure2.add_subplot(1, 1, 1)
        panel2.plot(odFiltLin, 'bo')

#    p=(1.5,xGuess1,10.0,peak2,xGuess2,10.0,0.5,xGuess3,10.0,0,0,0)
#    (amp1,x01,sigma1,amp2,x02,sigma2,amp3,x03,sigma3,A,B,C), covariance = optimize.curve_fit(gaussLin3, np.arange(odFiltLin.size), odFiltLin, p0 =p )
#    data_fitted = gaussLin3(np.arange(odFiltLin.size), *(amp1,x01,sigma1,amp2,x02,sigma2,amp3,x03,sigma3,A,B,C))

#    if draw:
#        panel2.plot(data_fitted,'b-')

#    xCent=(xGuessR+xGuessL)/2.0

    peak = np.max(odFiltLin[140:180])
    xGuess = np.float(np.where(odFiltLin == peak)[0])
    xCent = xGuess

    #    xGuess1=xGuess2-55.0
    #    xGuess3=xGuess2+54.0

    #    p=(1.0,10.0,1.0,xGuess2,10.0,1.0,10.0,4.0,0,0)
    #    (amp1,sigma1,amp2,x02,sigma2,amp3,sigma3,A,B,C), covariance = optimize.curve_fit(gaussLin3const, np.arange(odFiltLin2.size), odFiltLin2,p0=p )
    #    data_fitted = gaussLin3const(np.arange(odFiltLin2.size), *(amp1,sigma1,amp2,x02,sigma2,amp3,sigma3,A,B,C))

    # yCent=0.865*xGuess2+54 #odtkick negative
    #  yCent=1.32*xGuess2-3.3# odtkick positive
    #  yCent=-0.055*xGuess2**2.0+13.78*xGuess2-690.372

    #for rf only:
    odFiltVert = np.sum(odFiltered, axis=1)
    plt.plot(odFiltVert)
    peakRf = np.max(odFiltVert)
    yCent = np.float(np.where(odFiltVert == peakRf)[0])
    print yCent
    print xCent
    #    xCent=165
    #    yCent=183
    #  print xCent,xGuess2, yCent
    #    print yCent
    #    yCent=135

    norm = mpl.colors.Normalize(vmin=-0.15, vmax=0.3)
    im = Image.fromarray(np.uint8(plt.cm.jet(norm(odFiltered)) * 255))
    y1 = 61
    x1 = 50
    yrf = 0
    #   offsets = np.array([[0,0],[0,y1],[0,-y1],[0,y1*2],[0,-y1*2],[-x1,-y2],[-x1,-y2+y1],[-x1,-y2-y1],[-x1,-y2+y1*2],[-x1,-y2-y1*2],[x1,y2],[x1,y2+y1],[x1,y2-y1],[x1,y2+y1*2],[x1,y2-y1*2]])#np.array([[0,0],[0,69],[0,-69],[-58,49],[-58,119],[-58,-21],[57,-47],[57,21],[57,-115]])
    #   offsetsShort=   np.array([[0,0],[0,y1],[0,-y1],[-x1,-y2],[-x1,-y2+y1],[-x1,-y2-y1],[x1,y2],[x1,y2+y1],[x1,y2-y1]])
    rfOffsets = np.array([[0, 0], [0, y1], [0, -y1], [-x1, yrf],
                          [-x1, y1 + yrf], [-x1, -y1 + yrf], [x1, -yrf],
                          [x1, y1 - yrf], [x1, -y1 - yrf]])
    rfOffsetsShortest = np.array([[0, 0], [-x1, yrf], [x1, -yrf]])
    offsets = rfOffsets
    #  offsets=offsetsShort
    counts = np.zeros(offsets.shape[0])
    for i in np.arange(offsets.shape[0]):
        offs = offsets[i]
        counts[i] = getRoi(odFiltered,
                           im,
                           xCent + offs[0],
                           yCent + offs[1],
                           r=17,
                           draw=False)[0]
#    print counts
    maxInd = np.where(counts == np.max(counts))[0][0]

    allcounts = getRoi(odFiltered,
                       im,
                       xCent + offsets[maxInd, 0],
                       yCent + offsets[maxInd, 1],
                       r=17)
    #    print allcounts
    i = 0
    while ((np.abs(allcounts[4] - allcounts[3]) > 2.0) & (i < 20)):
        if (allcounts[4] - allcounts[3]) > 0:
            yCent = yCent + 1
            #     print "new yCent = " +str(yCent)
            allcounts = getRoi(odFiltered,
                               im,
                               xCent + offsets[maxInd, 0],
                               yCent + offsets[maxInd, 1],
                               draw=False)
        else:
            yCent = yCent - 1
            #      print "new yCent = " +str(yCent)
            allcounts = getRoi(odFiltered,
                               im,
                               xCent + offsets[maxInd, 0],
                               yCent + offsets[maxInd, 1],
                               draw=False)
        i = i + 1
#    print i
    i = 0
    while ((np.abs(allcounts[2] - allcounts[1]) > 2.0) & (i < 20)):
        if (allcounts[2] - allcounts[1]) > 0:
            xCent = xCent + 1
            #       print "new xCent = " +str(xCent)blochOscFractionsV2(fileroot,filelist,roi,key,plot=True,xlabel='',checkField=True,filerootFlea=filerootFlea,roiFlea=roiFlea
            allcounts = getRoi(odFiltered,
                               im,
                               xCent + offsets[maxInd, 0],
                               yCent + offsets[maxInd, 1],
                               draw=False)
        else:
            xCent = xCent - 1
            #       print "new xCent = " +str(xCent)a[6]['Note']
            allcounts = getRoi(odFiltered,
                               im,
                               xCent + offsets[maxInd, 0],
                               yCent + offsets[maxInd, 1],
                               draw=False)
        i = i + 1


#    print i
#   print allcounts
    r = 16.0
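    # NOTE (added): fitProbes, checkCoherence, probeMatrix, probeMatrixT,
    # IsatCounts and imagingTime (and filelist, used above when bgndRidge=True)
    # are not defined in this function or its arguments; they are presumably
    # module-level globals in the original script.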
    if fitProbes:
        weightArray = np.ones(odFiltered.shape)
    for i in np.arange(offsets.shape[0]):
        offs = offsets[i]

        count = getRoi(odFiltered,
                       im,
                       xCent + offs[0],
                       yCent + offs[1],
                       r=r,
                       draw=draw)[0]
        if fitProbes:
            (count, cL, cR, cT, cB,
             weightArray) = getRoi(odFiltered,
                                   im,
                                   xCent + offs[0],
                                   yCent + offs[1],
                                   r=r,
                                   weightArray=weightArray,
                                   updateWeights=True,
                                   draw=draw)

        bgnd1 = getRoi(odFiltered,
                       im,
                       xCent + offs[0] + 2.0 * r,
                       yCent + offs[1],
                       r=r,
                       draw=draw,
                       color=(0, 0, 0))[0]
        bgnd2 = getRoi(odFiltered,
                       im,
                       xCent + offs[0] - 2.0 * r,
                       yCent + offs[1],
                       r=r,
                       draw=draw,
                       color=(0, 0, 0))[0]
        counts[i] = count - (bgnd1 + bgnd2) / 2.0

    if fitProbes:
        xTw = probeMatrixT * weightArray.flatten()
        rhs = np.dot(xTw, atomsRoi.flatten())

        lhs = np.dot(xTw, probeMatrix)

        beta = np.linalg.solve(lhs, rhs)
        newProbe = np.dot(probeMatrix, beta).reshape(atomsRoi.shape)
        newOd = -np.log(atomsRoi / newProbe)
        newOd = zeroNansInfsVector(newOd)
        odFiltered = newOd + (newProbe - atomsRoi) / (IsatCounts * imagingTime)
        odFiltered = snd.filters.gaussian_filter(odRoi1, 1.0)

        im2 = Image.fromarray(np.uint8(plt.cm.jet(norm(odFiltered)) * 255))
        for i in np.arange(offsets.shape[0]):
            offs = offsets[i]
            count = getRoi(odFiltered,
                           im2,
                           xCent + offs[0],
                           yCent + offs[1],
                           r=r,
                           draw=draw)[0]
            counts[i] = count

    if checkCoherence:
        maxInd = np.int(maxInd / 3) * 3
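        # Snap maxInd down to the first member of its group of three ROI
        # offsets, so counts[maxInd], counts[maxInd + 1] and counts[maxInd + 2]
        # below refer to one complete group.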

        extraAtoms1 = getRoi(
            odFiltered,
            im,
            xCent + (offsets[maxInd, 0] + offsets[maxInd + 1, 0]) / 2.0,
            yCent + (offsets[maxInd, 1] + offsets[maxInd + 1, 1]) / 2.0,
            r=r
        )[0]  #r=y1/2.0-r,eps=np.sqrt((y1/2.0-r)**2.0-r**2.0),horizontalStretch=False)[0]
        extraAtoms1bgnd = getRoi(
            odFiltered,
            im,
            xCent + (offsets[maxInd, 0] + offsets[maxInd + 1, 0]) / 2.0 +
            2.0 * r,
            yCent + (offsets[maxInd, 1] + offsets[maxInd + 1, 1]) / 2.0,
            r=r,
            color=(0, 0, 0)
        )[0]  #r=y1/2.0-r,eps=np.sqrt((y1/2.0-r)**2.0-r**2.0),color=(0,0,0),horizontalStretch=False)[0]
        extraAtoms2 = getRoi(
            odFiltered,
            im,
            xCent + (offsets[maxInd, 0] + offsets[maxInd + 2, 0]) / 2.0,
            yCent + (offsets[maxInd, 1] + offsets[maxInd + 2, 1]) / 2.0,
            r=r
        )[0]  #r=y1/2.0-r,eps=np.sqrt((y1/2.0-r)**2.0-r**2.0),horizontalStretch=False)[0]
        extraAtoms2bgnd = getRoi(
            odFiltered,
            im,
            xCent + (offsets[maxInd, 0] + offsets[maxInd + 2, 0]) / 2.0 +
            2.0 * r,
            yCent + (offsets[maxInd, 1] + offsets[maxInd + 2, 1]) / 2.0,
            r=r,
            color=(0, 0, 0)
        )[0]  #r=y1/2.0-r,eps=np.sqrt((y1/2.0-r)**2.0-r**2.0),color=(0,0,0),horizontalStretch=False)[0]
        extraAtoms1 = extraAtoms1 - extraAtoms1bgnd
        extraAtoms2 = extraAtoms2 - extraAtoms2bgnd
        print 'Extra atoms:'
        print extraAtoms1, extraAtoms2
        print counts[maxInd], counts[maxInd + 1], counts[maxInd + 2]
        goodAtoms = counts[maxInd] + counts[maxInd + 1] + counts[maxInd + 2]
        extraAtoms = np.max([extraAtoms1, extraAtoms2])
        if extraAtoms > goodAtoms / 3.0:
            coherent = False
        elif goodAtoms < 0.0:
            coherent = False
        else:
            coherent = True
    else:
        coherent = True

    if draw:
        fig4 = plt.figure()
        pan4 = fig4.add_subplot(1, 1, 1)
        pan4.imshow(im)
        pan4.set_title(filename)
        if fitProbes:
            fig4 = plt.figure()
            pan4 = fig4.add_subplot(1, 1, 1)
            pan4.imshow(im2)
            pan4.set_title(filename + '_reconProbe')

    print 'coherent = '
    print coherent

    return counts, odFiltered, xCent, yCent, num1, num2, imbal, signalGood, coherent, dict1
Exemple #43
0
def make_plots(all_data, skip_risk_mpc=False, skip_space_mpc=False):
    """Generate figures from data for main paper."""

    os.makedirs(os.path.join("Figures", "Diagnostics"), exist_ok=True)

    viz_options = {
        'population_vmin': 0,
        'population_vmax': 100,
        'show_dpcs': True,
        'alpha': 0.2,
        'dpc_data': all_data['sim_no_control'][0:20],
        'regions': ["A", "B", "C"],
        'inf_vmax': 0.05,
        'inf_cmap': mpl.colors.ListedColormap(["orange", "red"]),
        'node_alpha': 0.75,
        'min_node_radius': 0.025,
        'max_node_radius': 0.075,
        'coupling_link_min_size': 0.5,
        'coupling_link_max_size': 1.0,
    }

    plt.style.use("seaborn-whitegrid")

    nodes = all_data['setup']['nodes']
    dist_coupling = all_data['setup']['dist_coupling']

    ########################
    ## Main Paper Figures ##
    ########################

    # Get objective values
    all_objectives = []
    all_names = []
    iqr_segments = []
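    # iqr_segments collects, per violin group, the [[x, 25th pct], [x, 75th pct]]
    # line segments that later replace the violins' 'cbars' so they show the
    # interquartile range (see the set_segments call further down).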

    all_objectives.append([np.sum(x.objective) for x in all_data['sim_high']])
    all_names.append("High")
    all_objectives.append([np.sum(x.objective) for x in all_data['sim_split']])
    all_names.append("Split")
    iqr_segments.append([[[i, np.percentile(x, 25)], [i,
                                                      np.percentile(x, 75)]]
                         for i, x in enumerate(all_objectives[0:2])])

    all_objectives.append(
        [np.sum(x.objective) for x in all_data['sim_risk_opt']])
    all_names.append("Risk OL")
    if skip_risk_mpc:
        all_objectives.append(np.zeros(100))
    else:
        all_objectives.append(
            [np.sum(x.objective) for x in all_data['sim_risk_mpc']])
    all_names.append("Risk MPC")
    iqr_segments.append([[[i + 2, np.percentile(x, 25)],
                          [i + 2, np.percentile(x, 75)]]
                         for i, x in enumerate(all_objectives[2:4])])

    all_objectives.append(
        [np.sum(x.objective) for x in all_data['sim_space_opt']])
    all_names.append("Space OL")
    if skip_space_mpc:
        all_objectives.append(np.zeros(100))
    else:
        all_objectives.append(
            [np.sum(x.objective) for x in all_data['sim_space_mpc']])
    all_names.append("Space MPC")
    iqr_segments.append([[[i + 4, np.percentile(x, 25)],
                          [i + 4, np.percentile(x, 75)]]
                         for i, x in enumerate(all_objectives[4:6])])

    all_objectives.append(
        [np.sum(x.objective) for x in all_data['sim_no_control']])
    all_names.append("No Control")

    # Figure 2 - Network and OL/MPC schematic
    # Plot network structure, and simulated and predicted infection numbers for open-loop & MPC

    # Choose whether to plot spatial results or risk based results
    space = True
    if space:
        obj_idx = 4
        name = 'space'
    else:
        obj_idx = 2
        name = 'risk'

    # Get index of the OL simulation at the 60th percentile of objective values
    ol_idx = np.where(all_objectives[obj_idx] == np.percentile(
        all_objectives[obj_idx], 60, interpolation="nearest"))[0][0]
    # Generate OL DPC time course
    sim_ol_times = np.array([
        x[0]
        for x in all_data['sim_' + name + '_opt'][ol_idx].run_data['Global']
    ])
    sim_ol_inf_h = np.array([
        x[1 + State.INF_H]
        for x in all_data['sim_' + name + '_opt'][ol_idx].run_data['Global']
    ])
    sim_ol_inf_l = np.array([
        x[1 + State.INF_L]
        for x in all_data['sim_' + name + '_opt'][ol_idx].run_data['Global']
    ])

    pre_ol_times = np.linspace(0, all_data['setup']['end_time'], 501)
    pre_ol_inf_h = np.array([
        np.sum(all_data[name + '_model_opt'].state(t)[1:-1:6])
        for t in pre_ol_times
    ])
    pre_ol_inf_l = np.array([
        np.sum(all_data[name + '_model_opt'].state(t)[4:-1:6])
        for t in pre_ol_times
    ])

    # Get index of the MPC simulation at the 60th percentile of objective values
    mpc_idx = np.where(all_objectives[obj_idx + 1] == np.percentile(
        all_objectives[obj_idx + 1], 60, interpolation="nearest"))[0][0]

    # Generate MPC DPC time course
    sim_mpc_times = np.array([
        x[0]
        for x in all_data['sim_' + name + '_mpc'][mpc_idx].run_data['Global']
    ])
    sim_mpc_inf_h = np.array([
        x[1 + State.INF_H]
        for x in all_data['sim_' + name + '_mpc'][mpc_idx].run_data['Global']
    ])
    sim_mpc_inf_l = np.array([
        x[1 + State.INF_L]
        for x in all_data['sim_' + name + '_mpc'][mpc_idx].run_data['Global']
    ])

    pre_mpc_times = []
    pre_mpc_inf_h = []
    pre_mpc_inf_l = []
    update_times = np.arange(0, all_data['setup']['end_time'],
                             all_data['setup']['mpc_update_freq'])
    for i, update_time in enumerate(update_times):
        times = np.linspace(update_time,
                            update_time + all_data['setup']['mpc_update_freq'],
                            500 // len(update_times),  # integer sample count
                            endpoint=False)
        pre_mpc_times.append(times)
        pre_mpc_inf_h.append([
            np.sum(all_data['sim_' + name +
                            '_mpc'][mpc_idx].control[i][1].state(t)[1:-1:6])
            for t in times
        ])
        pre_mpc_inf_l.append([
            np.sum(all_data['sim_' + name +
                            '_mpc'][mpc_idx].control[i][1].state(t)[4:-1:6])
            for t in times
        ])

    plt.rc('axes', labelsize=10)
    plt.rc('xtick', labelsize=8)
    plt.rc('ytick', labelsize=8)

    fig = plt.figure(figsize=(5, 4))
    gs = gridspec.GridSpec(2, 2, height_ratios=[1.25, 1])
    gs.update(top=0.95,
              left=0.1,
              right=0.95,
              hspace=0.55,
              wspace=0.4,
              bottom=0.2)
    ax2 = fig.add_subplot(gs[0, 1])
    ax3 = fig.add_subplot(gs[1, 0])
    ax4 = fig.add_subplot(gs[1, 1], sharex=ax2)

    gs1 = gridspec.GridSpecFromSubplotSpec(1, 2, gs[0, 0], width_ratios=[6, 1])
    ax1 = fig.add_subplot(gs1[0])
    ax_leg1 = fig.add_subplot(gs1[1], frameon=False)
    ax_leg1.grid = False
    ax_leg1.set_xticks([])
    ax_leg1.set_yticks([])

    gs_bottom = gridspec.GridSpec(1, 1, top=0.1, bottom=0.02)
    ax_leg2 = fig.add_subplot(gs_bottom[:, :], frameon=False)
    ax_leg2.grid = False
    ax_leg2.set_xticks([])
    ax_leg2.set_yticks([])

    visualisation.plot_node_network(nodes,
                                    dist_coupling,
                                    options=viz_options,
                                    ax=ax1)

    ax1.text(2.5, 5.5, "A", fontsize=8, weight="semibold")
    ax1.text(5.5, 5.7, "B", fontsize=8, weight="semibold")
    ax1.text(5.5, 2.8, "C", fontsize=8, weight="semibold")
    ax1.set_aspect("equal", anchor="W")

    visualisation.plot_control(all_data['sim_' + name + '_mpc'][mpc_idx],
                               all_data['setup']['end_time'],
                               comparison=all_data[name +
                                                   '_model_opt'].control,
                               ax=ax2,
                               comparison_args={
                                   'label': 'OL',
                                   'linestyle': (0, (1, 1))
                               },
                               colors=["red", "skyblue"],
                               alpha=0.4)

    ax2.scatter(update_times, [-0.05] * len(update_times),
                s=10,
                facecolor="k",
                edgecolor="k",
                linewidth=0,
                zorder=10,
                clip_on=False)

    ax3.plot(sim_ol_times,
             sim_ol_inf_h + sim_ol_inf_l,
             color="red",
             linestyle="steps-post",
             label="Simulation",
             alpha=0.3)
    ax3.plot(pre_ol_times,
             pre_ol_inf_h + pre_ol_inf_l,
             '--',
             linewidth=1.0,
             color="red",
             label="Approximate Model")

    ax3.scatter([0], [0],
                s=10,
                facecolor="k",
                edgecolor="k",
                linewidth=0,
                label="Update Times",
                zorder=10,
                clip_on=False)
    ax3.set_xlabel("Time")
    ax3.set_ylabel("Number Infected")
    ax3.set_title("Open-loop (OL)")

    ax4.plot(sim_mpc_times,
             sim_mpc_inf_h + sim_mpc_inf_l,
             color="red",
             linestyle="steps-post",
             alpha=0.3)

    for times, inf_h, inf_l in zip(pre_mpc_times, pre_mpc_inf_h,
                                   pre_mpc_inf_l):
        ax4.plot(times,
                 np.array(inf_h) + np.array(inf_l),
                 '--',
                 linewidth=1.0,
                 color="red")
    ax4.scatter(update_times, [0] * len(update_times),
                s=10,
                facecolor="k",
                edgecolor="k",
                linewidth=0,
                zorder=10,
                clip_on=False)
    ax4.set_xlabel("Time")
    ax4.set_ylabel("Number Infected")
    ax4.set_title("MPC")

    xlim = [0, 5]
    ylim = [0, ax3.get_ylim()[1]]

    fig.text(0.01,
             0.96,
             "(a)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")
    fig.text(0.5,
             0.96,
             "(b)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")
    fig.text(0.01,
             0.53,
             "(c)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")
    fig.text(0.5,
             0.53,
             "(d)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")

    ax2.set_xlim(xlim)
    ax2.set_ylim([-0.05, 1.05])
    ax2.set_yticks([0, 0.5, 1.0])
    ax2.set_xticks(range(6))
    ax3.set_xlim(xlim)
    ax3.set_ylim(ylim)
    ax3.set_xticks(range(6))
    ax_leg1.legend(*ax2.get_legend_handles_labels(),
                   loc="center right",
                   frameon=True,
                   fontsize=6,
                   handlelength=1.0)
    ax_leg2.legend(*ax3.get_legend_handles_labels(),
                   loc="upper center",
                   ncol=3,
                   frameon=True,
                   fontsize=8,
                   handlelength=1.5)

    fig.savefig(os.path.join("Figures", "Figure2.pdf"), dpi=600)

    # Figure 3 - Illustrative model comparison of strategies
    # Violin plot showing distribution of epidemic costs for each strategy
    plt.rc('axes', labelsize=15)
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    viol1 = ax.violinplot(all_objectives[0:2],
                          positions=[0, 1],
                          showmeans=True,
                          showmedians=True)
    viol2 = ax.violinplot(all_objectives[2:4],
                          positions=[2, 3],
                          showmeans=True,
                          showmedians=True)
    viol3 = ax.violinplot(all_objectives[4:6],
                          positions=[4, 5],
                          showmeans=True,
                          showmedians=True)

    legend_elements = []
    colours = ["C0", "C1", "C2"]
    for i, viol in enumerate([viol1, viol2, viol3]):
        viol['cmeans'].set_color("r")
        viol['cmeans'].set_zorder(30)
        viol['cmedians'].set_color("b")
        viol['cmedians'].set_zorder(30)
        viol['cbars'].set_segments(iqr_segments[i])
        viol['cbars'].set_color("k")
        viol['cbars'].set_alpha(0.3)
        viol['cmaxes'].set_alpha(0)
        viol['cmins'].set_alpha(0)
        for part in viol['bodies']:
            part.set_color(colours[i])

    legend_elements.append(
        mpl.lines.Line2D([0], [0],
                         color=viol1['cmeans'].get_color()[0],
                         lw=viol1['cmeans'].get_linewidth()[0],
                         label='Mean'))
    legend_elements.append(
        mpl.lines.Line2D([0], [0],
                         color=viol1['cmedians'].get_color()[0],
                         lw=viol1['cmedians'].get_linewidth()[0],
                         label='Median'))
    legend_elements.append(
        mpl.lines.Line2D([0], [0],
                         color="k",
                         lw=viol1['cbars'].get_linewidth()[0],
                         alpha=0.3,
                         label='IQR'))

    ax.set_xticks(range(6))
    ax.set_xticklabels(all_names)
    ax.set_xlabel("Control Strategy")
    ax.set_ylabel("Epidemic Cost")
    ax.set_ylim([-60, 1400])
    ax.legend(handles=legend_elements)
    fig.tight_layout()

    ax.annotate('User-defined',
                xy=(0.5, 1100),
                xytext=(0.5, 1300),
                fontsize=12,
                ha='center',
                va='bottom',
                arrowprops=dict(arrowstyle='-[, widthB=3.5, lengthB=0.75',
                                lw=2.0,
                                color=colours[0]),
                color=colours[0],
                weight="bold")
    ax.annotate('Risk based',
                xy=(2.5, 750),
                xytext=(2.5, 950),
                fontsize=12,
                ha='center',
                va='bottom',
                arrowprops=dict(arrowstyle='-[, widthB=3.5, lengthB=0.75',
                                lw=2.0,
                                color=colours[1]),
                color=colours[1],
                weight="bold")
    ax.annotate('Space based',
                xy=(4.5, 650),
                xytext=(4.5, 850),
                fontsize=12,
                ha='center',
                va='bottom',
                arrowprops=dict(arrowstyle='-[, widthB=3.5, lengthB=0.75',
                                lw=2.0,
                                color=colours[2]),
                color=colours[2],
                weight="bold")

    fig.savefig(os.path.join("Figures", "Figure3.pdf"), dpi=600)

    plt.rc('axes', labelsize=12)

    ###########################
    ## Supplementary Figures ##
    ###########################

    viz_options['show_vac'] = False

    # Figure S2
    # Typical simulation model trajectories
    fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
        nodes, all_data['sim_no_control'][0:10], options=viz_options, nruns=10)
    fig.savefig(os.path.join("Figures", "SuppFig2.pdf"), dpi=600)

    # Figure S3
    # Risk model fit
    viz_options["show_regions"] = False
    times = np.linspace(0, all_data['setup']['end_time'], 101)
    fig, _, glob_axes = visualisation.plot_dpc_data(
        nodes, all_data['sim_no_control'][0:20], options=viz_options, nruns=20)
    glob_axes[0].plot(
        times, [all_data['risk_model_no_control'].state(t)[0] for t in times],
        'g--',
        lw=2)
    glob_axes[0].plot(
        times, [all_data['risk_model_no_control'].state(t)[1] for t in times],
        'r--',
        lw=2)
    glob_axes[1].plot(
        times, [all_data['risk_model_no_control'].state(t)[3] for t in times],
        'g--',
        lw=2)
    glob_axes[1].plot(
        times, [all_data['risk_model_no_control'].state(t)[4] for t in times],
        'r--',
        lw=2)
    fig.savefig(os.path.join("Figures", "SuppFig3.pdf"), dpi=600)

    # Figure S4
    # Space model fit
    viz_options["show_regions"] = True
    times = np.linspace(0, all_data['setup']['end_time'], 101)
    fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
        nodes, all_data['sim_no_control'][0:20], options=viz_options, nruns=20)

    for i in range(6):
        reg_axes[i].plot(times, [
            all_data['space_model_no_control'].state(t)[3 * i] for t in times
        ],
                         'g--',
                         lw=2)
        reg_axes[i].plot(times, [
            all_data['space_model_no_control'].state(t)[3 * i + 1]
            for t in times
        ],
                         'r--',
                         lw=2)
    for risk in range(2):
        glob_axes[risk].plot(
            times,
            np.sum([
                all_data['space_model_no_control'].state(t)[(3 * risk)::6]
                for t in times
            ],
                   axis=1),
            'g--',
            lw=2)
        glob_axes[risk].plot(
            times,
            np.sum([
                all_data['space_model_no_control'].state(t)[(3 * risk + 1)::6]
                for t in times
            ],
                   axis=1),
            'r--',
            lw=2)
    fig.savefig(os.path.join("Figures", "SuppFig4.pdf"), dpi=600)

    # Figure S8
    # Risk Split Scan
    risk_split_scan.make_fig(os.path.join("Data", "RiskOptimisation.npz"))

    # Figure S9
    # Comparison of optimal controls - risk vs space based
    fig = plt.figure()
    gs = gridspec.GridSpec(3,
                           2,
                           height_ratios=[1, 0.01, 1],
                           width_ratios=[1, 1.2])
    ax1 = fig.add_subplot(gs[0, 0])
    ax_legend = fig.add_subplot(gs[1, 0], frameon=False)
    ax_legend.grid = False
    ax_legend.set_xticks([])
    ax_legend.set_yticks([])
    ax2 = fig.add_subplot(gs[2, 0], sharex=ax1)
    ax3 = fig.add_subplot(gs[:, 1])

    leg_colours = [
        mpl.colors.to_rgba("C{}".format(i), alpha=alpha)
        for alpha in [0.7, 0.25] for i in range(3)
    ]

    visualisation.plot_control(all_data['sim_risk_opt'][0],
                               5,
                               risk_based=True,
                               ax=ax1,
                               colors=["red", "skyblue"],
                               alpha=0.4)
    visualisation.plot_control(all_data['sim_space_opt'][0],
                               5,
                               risk_based=True,
                               ax=ax2,
                               colors=["red", "skyblue"],
                               alpha=0.4)
    visualisation.plot_control(all_data['sim_space_opt'][0],
                               5,
                               risk_based=False,
                               ax=ax3,
                               regions=["A", "B", "C"],
                               colors=leg_colours)

    ax_legend.legend(*ax1.get_legend_handles_labels(),
                     loc='center',
                     ncol=2,
                     frameon=True,
                     fontsize=8,
                     handlelength=1.5)
    ax3.legend(loc='center left',
               bbox_to_anchor=(1.04, 0.5),
               frameon=True,
               fontsize=8,
               handlelength=1.5)

    ax1.set_title("Risk Based")
    ax2.set_title("Space Based")
    ax3.set_title("Space Based")

    ax1.set_xlim([0, 5])
    ax1.set_ylim([-0.05, 1.05])
    ax1.set_yticks(np.linspace(0, 1, 5))
    ax1.set_xticks(range(6))

    ax2.set_ylim([-0.05, 1.05])
    ax2.set_yticks(np.linspace(0, 1, 5))
    ax2.set_xticks(range(6))

    ax3.set_xlim([0, 5])
    ax3.set_ylim([-0.02, 1.02])
    ax3.set_yticks(np.linspace(0, 1, 6))
    ax3.set_xticks(range(6))

    fig.text(0.01,
             0.96,
             "(a)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")
    fig.text(0.01,
             0.45,
             "(b)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")
    fig.text(0.42,
             0.96,
             "(c)",
             transform=fig.transFigure,
             fontsize=11,
             fontweight="semibold")

    fig.tight_layout()
    fig.savefig(os.path.join("Figures", "SuppFig9.pdf"),
                dpi=600,
                bbox_inches='tight')

    # Figure S10
    # Empirical CDFs of illustrative model strategy results
    fig = plt.figure()
    ax = fig.add_subplot(111)
    x_vals = sorted(all_objectives[-1])
    n_vals = np.arange(1, len(x_vals) + 1) / np.float(len(x_vals))
    ax.step(x_vals, n_vals, label=all_names[-1], lw=1, color="k")
    ax.scatter([np.percentile(x_vals, 95)], [0],
               s=20,
               facecolor="None",
               edgecolor="k",
               linewidth=1,
               clip_on=False,
               zorder=15)
    for i in range(3):
        x_vals = sorted(all_objectives[2 * i])
        n_vals = np.arange(1, len(x_vals) + 1) / np.float(len(x_vals))
        ax.step(x_vals,
                n_vals,
                label=all_names[2 * i],
                lw=1,
                color="C" + str(i),
                alpha=0.5)
        ax.scatter([np.percentile(x_vals, 95)], [0],
                   s=20,
                   facecolor="None",
                   edgecolor="C" + str(i),
                   linewidth=1,
                   alpha=0.5,
                   clip_on=False,
                   zorder=15)

        x_vals = sorted(all_objectives[2 * i + 1])
        n_vals = np.arange(1, len(x_vals) + 1) / np.float(len(x_vals))
        ax.step(x_vals,
                n_vals,
                label=all_names[2 * i + 1],
                lw=1,
                color="C" + str(i))
        ax.scatter([np.percentile(x_vals, 95)], [0],
                   s=20,
                   facecolor="None",
                   edgecolor="C" + str(i),
                   linewidth=1,
                   clip_on=False,
                   zorder=15)

    ax.legend(loc="center right", ncol=1)
    ax.set_xlabel("Epidemic Cost")
    ax.set_ylabel("Cumulative Probability")
    ax.set_ylim([-0.05, 1.05])
    fig.tight_layout()
    fig.savefig(os.path.join("Figures", "SuppFig10.pdf"), dpi=600)

    ########################
    ## Diagnostic Figures ##
    ########################

    fig = visualisation.plot_node_network(nodes,
                                          dist_coupling,
                                          options=viz_options)
    fig.tight_layout()
    fig.savefig(os.path.join("Figures", "Diagnostics", "NetworkStructure.pdf"),
                dpi=600)

    fig, *_ = visualisation.plot_dpc_data(nodes,
                                          all_data['sim_high'][0:20],
                                          options=viz_options,
                                          nruns=20)
    fig.savefig(os.path.join("Figures", "Diagnostics", "HighControlDPC.png"),
                dpi=600)

    fig, *_ = visualisation.plot_dpc_data(nodes,
                                          all_data['sim_split'][0:20],
                                          options=viz_options,
                                          nruns=20)
    fig.savefig(os.path.join("Figures", "Diagnostics", "SplitControlDPC.png"),
                dpi=600)

    viz_options["show_regions"] = True
    fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
        nodes, all_data['sim_risk_opt'][0:20], options=viz_options, nruns=20)
    glob_axes[0].plot(times,
                      [all_data['risk_model_opt'].state(t)[0] for t in times],
                      'g--',
                      lw=2)
    glob_axes[0].plot(times,
                      [all_data['risk_model_opt'].state(t)[1] for t in times],
                      'r--',
                      lw=2)
    glob_axes[0].plot(times,
                      [all_data['risk_model_opt'].state(t)[2] for t in times],
                      '--',
                      color="purple",
                      lw=2)
    glob_axes[1].plot(times,
                      [all_data['risk_model_opt'].state(t)[3] for t in times],
                      'g--',
                      lw=2)
    glob_axes[1].plot(times,
                      [all_data['risk_model_opt'].state(t)[4] for t in times],
                      'r--',
                      lw=2)
    glob_axes[1].plot(times,
                      [all_data['risk_model_opt'].state(t)[5] for t in times],
                      '--',
                      color="purple",
                      lw=2)
    fig.savefig(os.path.join("Figures", "Diagnostics",
                             "RiskModelOptControl.png"),
                dpi=600)

    if not skip_risk_mpc:
        viz_options["show_regions"] = True
        fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
            nodes,
            all_data['sim_risk_mpc'][0:20],
            options=viz_options,
            nruns=20)
        fig.savefig(os.path.join("Figures", "Diagnostics", "MPC_risk_DPC.png"),
                    dpi=600)

    fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
        nodes, all_data['sim_space_opt'][0:20], options=viz_options, nruns=20)
    for i in range(6):
        reg_axes[i].plot(
            times,
            [all_data['space_model_opt'].state(t)[3 * i] for t in times],
            'g--',
            lw=2)
        reg_axes[i].plot(
            times,
            [all_data['space_model_opt'].state(t)[3 * i + 1] for t in times],
            'r--',
            lw=2)
        reg_axes[i].plot(
            times,
            [all_data['space_model_opt'].state(t)[3 * i + 2] for t in times],
            '--',
            color="purple",
            lw=2)
    for risk in range(2):
        glob_axes[risk].plot(times, [
            np.sum(all_data['space_model_opt'].state(t)[(3 * risk):-1:6])
            for t in times
        ],
                             'g--',
                             lw=2)
        glob_axes[risk].plot(times, [
            np.sum(all_data['space_model_opt'].state(t)[(3 * risk + 1):-1:6])
            for t in times
        ],
                             'r--',
                             lw=2)
        glob_axes[risk].plot(times, [
            np.sum(all_data['space_model_opt'].state(t)[(3 * risk + 2):-1:6])
            for t in times
        ],
                             '--',
                             color="purple",
                             lw=2)
    fig.savefig(os.path.join("Figures", "Diagnostics",
                             "SpaceModelOptControl.png"),
                dpi=600)

    if not skip_space_mpc:
        viz_options["show_regions"] = True
        fig, reg_axes, glob_axes = visualisation.plot_dpc_data(
            nodes,
            all_data['sim_space_mpc'][0:20],
            options=viz_options,
            nruns=20)
        fig.savefig(os.path.join("Figures", "Diagnostics",
                                 "MPC_space_DPC.png"),
                    dpi=600)
Exemple #44
0
    def step(self, action):
        """Take an action (buy/sell/hold) and computes the immediate reward.

        Args:
            action (numpy.array): Action to be taken, one-hot encoded.

        Returns:
            tuple:
                - observation (numpy.array): Agent's observation of the current environment.
                - reward (float) : Amount of reward returned after previous action.
                - done (bool): Whether the episode has ended, in which case further step() calls will return undefined results.
                - info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).

        """

        assert any([(action == x).all() for x in self._actions.values()])
        self._action = action
        self._iteration += 1
        done = False
        instant_pnl = 0
        info = {}
        reward = -self._time_fee
        #r = np.random.rand(1)
        q = np.float(self._depths_history[-1][0]) / (
            self._depths_history[-1][0] + self._depths_history[-1][1])
        fill_ask = True
        fill_bid = True

        #if q>0.8:
        #    fill_bid = False
        #elif q<0.2:
        #    fill_ask = False

        if all(action == self._actions['buy']) and (fill_bid):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                self._position = self._positions['long']
                self._entry_price = self._prices_history[-1][0]  # bid
                #reward -= 0 #self._entry_price
            elif all(self._position ==
                     self._positions['short']):  # closed out a short position
                self._position = self._positions['flat']
                self._exit_price = self._prices_history[-1][0]  # bid

                instant_pnl = self._entry_price - self._exit_price
                self._entry_price = 0

        elif all(action == self._actions['sell']) and (fill_ask):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                self._position = self._positions['short']
                self._entry_price = self._prices_history[-1][1]  # ask
            elif all(self._position == self._positions['long']):
                self._exit_price = self._prices_history[-1][1]  # ask
                instant_pnl = self._exit_price - self._entry_price
                self._position = self._positions['flat']
                self._entry_price = 0

        reward += instant_pnl
        self._total_pnl += instant_pnl
        self._total_reward += reward

        # Game over logic
        try:
            gen = np.array(self._data_generator.next())
            self._prices_history.append(gen[[2, 0]] / 1000.0)
            self._depths_history.append(gen[[3, 1]])
        except StopIteration:
            done = True
            info['status'] = 'No more data.'
        if self._iteration >= self._episode_length:
            done = True
            info['status'] = 'Time out.'
        if self._closed_plot:
            info['status'] = 'Closed plot'

        observation = self._get_observation()
        return observation, reward, done, info
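
A minimal interaction sketch for the Gym-style step() contract above: it assumes a hypothetical environment object exposing this step() method, a reset() method, and the one-hot _actions encoding used in the example; only the (observation, reward, done, info) tuple comes from the source.

import numpy as np

def run_episode(env, policy, max_steps=1000):
    # Roll out one episode, accumulating the reward returned by step().
    observation = env.reset()                 # reset() is assumed to exist on the env
    total_reward = 0.0
    info = {}
    for _ in range(max_steps):
        action = policy(observation)          # one-hot numpy array, e.g. np.array([1, 0, 0])
        observation, reward, done, info = env.step(action)
        total_reward += reward
        if done:                              # e.g. 'No more data.' or 'Time out.'
            break
    return total_reward, info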
Exemple #45
0
    def backward(self, grad_input_t):
        # backward pass (back-propagation)
        # grad_input = np.random.randn(num_rois, num_channels, pooled_height, max_pooled_weight)       # gradient
        grad_input = grad_input_t.cpu().detach().numpy()
        rois = self.rois.cpu().detach().numpy()
        argmax = self.argmax
        height = self.height
        weight = self.weight
        pooled_height = self.pooled_height
        num_rois, num_channels = grad_input.shape[0:2]

        mask = argmax.copy()
        mask[0 <= mask] = 1
        mask[mask < 0] = 0
        grad_input = np.multiply(grad_input, mask)  # gradients from padded/invalid locations are excluded
        grad_output = np.zeros(
            (num_rois, num_channels, height, weight))  # output of the backward pass
        for n in range(num_rois):
            # compute the width and height of each bin
            roi_start_w = np.round(rois[n, 1])
            roi_start_h = np.round(rois[n, 2])

            rois_weight = np.max([rois[n, 3] - rois[n, 1], 1])  # width of each region
            rois_height = np.max([rois[n, 4] - rois[n, 2], 1])  # height of each region

            pooled_weight = np.ceil(
                np.float(rois_weight) / rois_height *
                pooled_height)  # pooled width proportional to the RoI aspect ratio
            pooled_weight = int(pooled_weight)

            bin_size_w = np.float(rois_weight) / pooled_weight  # width of each bin
            bin_size_h = np.float(rois_height) / pooled_height  # height of each bin
            for c in range(num_channels):
                for ph in range(pooled_height):
                    for pw in range(pooled_weight):

                        hstart = np.floor(ph * bin_size_h)
                        wstart = np.floor(pw * bin_size_w)
                        hend = np.ceil((ph + 1) * bin_size_h)
                        wend = np.ceil((pw + 1) * bin_size_w)

                        hstart = min(max(hstart + roi_start_h, 0),
                                     height)  # clamp each bin to the image extent
                        hend = min(max(hend + roi_start_h, 0), height)
                        wstart = min(max(wstart + roi_start_w, 0), weight)
                        wend = min(max(wend + roi_start_w, 0), weight)

                        hstart, hend, wstart, wend = map(
                            int, [hstart, hend, wstart, wend])

                        temp = np.zeros(
                            (hend - hstart, wend - wstart))  # temporary buffer for this bin
                        temp = temp.flatten()
                        temp[int(argmax[n, c, ph, pw])] = grad_input[n, c, ph,
                                                                     pw]
                        temp = np.reshape(temp, (hend - hstart, -1))
                        grad_output[n, c, hstart:hend,
                                    wstart:wend] = temp  # scatter the gradient into this bin

        grad_output = np.sum(grad_output, axis=0)
        grad_output = np.expand_dims(grad_output, 0)
        return torch.tensor(grad_output,
                            dtype=grad_input_t.dtype).cuda(), torch.tensor(
                                rois, dtype=self.rois.dtype).cuda()
Exemple #46
0
        row_sum = sum(confus_matrix[i, :])
        col_sum = sum(confus_matrix[:, i])
        x_row_plus.append(row_sum)
        x_col_plus.append(col_sum)

    print("x_row_plus:{}".format(x_row_plus))
    print("x_col_plus:{}".format(x_col_plus))
    x_row_plus = np.array(x_row_plus)
    x_col_plus = np.array(x_col_plus)
    x_diagonal = np.array(x_diagonal)
    x_total = sum(x_row_plus)
    OA_acc = oa / (sum(x_row_plus))
    print("\nOA:{:.3f}".format(OA_acc))
    tmp = x_col_plus * x_row_plus
    kappa = (x_total * sum(x_diagonal) - sum(x_col_plus * x_row_plus)
             ) / np.float(x_total * x_total - sum(x_col_plus * x_row_plus))

    print("Kappa:{:.3f}".format(kappa))

    for i in range(1, n_class):
        prec = x_diagonal[i] / x_row_plus[i]
        print("\n{}_accuracy= {:.3f}".format(dict_class[i], prec))
        recall = x_diagonal[i] / x_col_plus[i]
        print("{}_recall= {:.3f}".format(dict_class[i], recall))
        iou = x_diagonal[i] / (x_row_plus[i] + x_col_plus[i] - x_diagonal[i])
        print("{}_iou {:.3f}".format(dict_class[i], iou))

    # acc_roads = x_diagonal[1]/x_row_plus[1]
    # print("\nroads_accuracy= {:.3f}".format(acc_roads))
    # recall_roads = x_diagonal[1] / x_col_plus[1]
    # Open csv file with data
    FilePath = 'C:\\Users\\spauliuk\\FILES\\ARBEIT\\PROJECTS\\Database\\IndEcolFreiburg_Database_TestCase\\CSVExport\\' + FlowFileList[
        ds] + '.csv'
    lines = open(FilePath, 'r').read().split('\n')
    for line in lines:
        if line != '':
            cols = line.split(',')
            ElementID = 'Fe'
            ProductID = FlowMatlList[ds]
            OrProceID = int(cols[2])
            OrRegID = int(cols[4])
            DestProID = int(cols[3])
            DestRegID = int(cols[5])
            YearID = int(cols[6])

            FlowValue = np.float(cols[9])

            # match labels to classification_items entry
            Elem_Pos = C1IDs[C1Labels.index(ElementID)]
            Prod_Pos = C2IDs[C2Labels.index(ProductID)]
            OrPr_Pos = C3IDs[C3Labels.index(OrProceID)]
            OrRe_Pos = C4IDs[C4Labels.index(OrRegID)]
            DesP_Pos = C5IDs[C5Labels.index(DestProID)]
            DesR_Pos = C6IDs[C6Labels.index(DestRegID)]
            Year_Pos = C7IDs[C7Labels.index(YearID)]

            # Set unit
            UnitnID = 47
            UnitdnID = 44
            # Set uncertainty string:
Exemple #48
0
    def forward(self, input, rois_input):  # pooled_height: target height of the pooled output
        # cast to numpy
        rois_input[:, [1, 3]] /= 4  # scale RoI coordinates by the CNN down-sampling ratio
        rois_input[:, [2, 4]] /= 16
        input_data = input.cpu().detach().numpy()
        rois = rois_input.cpu().detach().numpy()
        batch, num_channels, height, weight = input_data.shape
        pooled_height = self.pooled_height

        num_rois = rois.shape[0]

        max_ratio = max((rois[:, 3] - rois[:, 1]) /
                        (rois[:, 4] - rois[:, 2]))  # largest width/height ratio over all RoIs
        max_pooled_weight = int(np.ceil(max_ratio) * pooled_height)
        output = np.zeros((num_rois, num_channels, pooled_height,
                           max_pooled_weight))  # e.g. 300 x 64 x 2 x 10; 10 is the sequence length
        argmax = np.ones((num_rois, num_channels, pooled_height,
                          max_pooled_weight)) * -1  # index of the maximum value

        for n in range(num_rois):
            roi_start_w = np.round(rois[n, 1])  # start coordinate of each region along w
            roi_start_h = np.round(rois[n, 2])  # start coordinate along h

            rois_weight = np.max([rois[n, 3] - rois[n, 1], 1])  # width of each region
            rois_height = np.max([rois[n, 4] - rois[n, 2], 1])  # height of each region

            pooled_weight = np.ceil(
                np.float(rois_weight) / rois_height *
                pooled_height)  # pooled width proportional to the RoI aspect ratio
            pooled_weight = int(pooled_weight)

            bin_size_w = np.float(rois_weight) / pooled_weight  # width of each bin
            bin_size_h = np.float(rois_height) / pooled_height  # height of each bin

            ## skip this RoI if the target region is too small
            th = np.floor(1 * bin_size_h)
            tw = np.floor(1 * bin_size_w)
            eh = np.ceil((1 + 1) * bin_size_h)
            ew = np.ceil((1 + 1) * bin_size_w)
            if eh - th == 0 or ew - tw == 0:
                continue
            try:
                for c in range(num_channels):
                    for ph in range(pooled_height):  # numpy flattens arrays in row-major order
                        for pw in range(pooled_weight):

                            hstart = np.floor(ph * bin_size_h)
                            wstart = np.floor(pw * bin_size_w)
                            hend = np.ceil((ph + 1) * bin_size_h)
                            wend = np.ceil((pw + 1) * bin_size_w)

                            hstart = min(max(hstart + roi_start_h, 0),
                                         height)  # clamp each bin to the image extent
                            hend = min(max(hend + roi_start_h, 0), height)
                            wstart = min(max(wstart + roi_start_w, 0), weight)
                            wend = min(max(wend + roi_start_w, 0), weight)

                            hstart, hend, wstart, wend = map(
                                int, [hstart, hend, wstart, wend])

                            output[n, c, ph,
                                   pw] = np.max(input_data[:, c, hstart:hend,
                                                           wstart:wend])  # maximum value in the bin
                            argmax[n, c, ph, pw] = np.argmax(
                                input_data[:, c, hstart:hend,
                                           wstart:wend])  # index of the maximum value
            except:
                print('hstart:{},hend:{},wstart:{},wend:{}'.format(
                    hstart, hend, wstart, wend))
        # ctx.save_for_backward()
        self.argmax = argmax
        self.height = height
        self.weight = weight
        self.rois = rois_input
        result = Variable(torch.tensor(output, dtype=input.dtype))
        # return torch.tensor(output, dtype = input.dtype).cuda()
        return result.cuda()
Exemple #49
0
def test(img_dir, split_test, split_name, model, batch_size, img_size, crop_size, gpu_id):

	# -------------------- SETTINGS: CXR DATA TRANSFORMS -------------------
	normalizer = [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]
	data_transforms = {split_name: transforms.Compose([
		transforms.Resize(img_size),
		# transforms.RandomResizedCrop(crop_size),
		transforms.CenterCrop(crop_size),
		transforms.ToTensor(),
		transforms.Normalize(normalizer[0], normalizer[1])])}

	# -------------------- SETTINGS: DATASET BUILDERS -------------------
	datasetTest = DataGenerator(img_dir=img_dir, split_file=split_test,
								transform=data_transforms[split_name])
	dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=batch_size,
								shuffle=False, num_workers=32, pin_memory=True)

	dataloaders = {}
	dataloaders[split_name] = dataLoaderTest

	print('Number of testing CXR images: {}'.format(len(datasetTest)))
	dataset_sizes = {split_name: len(datasetTest)}
 
	# -------------------- TESTING -------------------
	model.eval()
	running_corrects = 0
	output_list = []
	label_list = []
	preds_list = []

	with torch.no_grad():
		# Iterate over data.
		for data in dataloaders[split_name]:
			inputs, labels, img_names = data

			labels_auc = labels
			labels_print = labels
			labels_auc = labels_auc.type(torch.FloatTensor)
			labels = labels.type(torch.LongTensor) #add for BCE loss
			
			# wrap them in Variable
			inputs = inputs.cuda(gpu_id, non_blocking=True)
			labels = labels.cuda(gpu_id, non_blocking=True)
			labels_auc = labels_auc.cuda(gpu_id, non_blocking=True)

			labels = labels.view(labels.size()[0],-1) #add for BCE loss
			labels_auc = labels_auc.view(labels_auc.size()[0],-1) #add for BCE loss
			# forward
			outputs = model(inputs)
			# _, preds = torch.max(outputs.data, 1)
			score = torch.sigmoid(outputs)
			score_np = score.data.cpu().numpy()
			preds = score>0.5
			preds_np = preds.data.cpu().numpy()
			preds = preds.type(torch.cuda.LongTensor)

			labels_auc = labels_auc.data.cpu().numpy()
			outputs = outputs.data.cpu().numpy()

			for j in range(len(img_names)):
				print(str(img_names[j]) + ': ' + str(score_np[j]) + ' GT: ' + str(labels_print[j]))

			for i in range(outputs.shape[0]):
				output_list.append(outputs[i].tolist())
				label_list.append(labels_auc[i].tolist())
				preds_list.append(preds_np[i].tolist())

			# running_corrects += torch.sum(preds == labels.data)
			# labels = labels.type(torch.cuda.FloatTensor)
			running_corrects += torch.sum(preds.data == labels.data) #add for BCE loss

	acc = np.float(running_corrects) / dataset_sizes[split_name]
	auc = metrics.roc_auc_score(np.array(label_list), np.array(output_list), average=None)
	# print(auc)
	fpr, tpr, _ = metrics.roc_curve(np.array(label_list), np.array(output_list))
	roc_auc = metrics.auc(fpr, tpr)

	ap = metrics.average_precision_score(np.array(label_list), np.array(output_list))
	
	tn, fp, fn, tp = metrics.confusion_matrix(label_list, preds_list).ravel()

	recall = tp/(tp+fn)
	precision = tp/(tp+fp)
	f1 = 2*precision*recall/(precision+recall)
	sensitivity = recall
	specificity = tn/(tn+fp)
	PPV = tp/(tp+fp)
	NPV = tn/(tn+fn)
	print('Test Accuracy: {0:.4f}  Test AUC: {1:.4f}  Test_AP: {2:.4f}'.format(acc, auc, ap))
	print('TP: {0:}  FP: {1:}  TN: {2:}  FN: {3:}'.format(tp, fp, tn, fn))
	print('Sensitivity: {0:.4f}  Specificity: {1:.4f}'.format(sensitivity, specificity))
	print('Precision: {0:.2f}%  Recall: {1:.2f}%  F1: {2:.4f}'.format(precision*100, recall*100, f1))
	print('PPV: {0:.4f}  NPV: {1:.4f}'.format(PPV, NPV))
	# Plot all ROC curves
	plt.figure()
	plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.4f)' % roc_auc)
	plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
	plt.xlim([0.0, 1.0])
	plt.ylim([0.0, 1.0])
	plt.xlabel('False Positive Rate')
	plt.ylabel('True Positive Rate')
	plt.title('ROC curve of abnormal/normal classification: '+args.arch)
	plt.legend(loc="lower right")
	plt.savefig('ROC_abnormal_normal_cls_'+args.arch+'_'+args.test_labels+'.pdf', bbox_inches='tight')
	plt.show()
Exemple #50
0
    def group_subplots_all_parameters(self,
                                      best,
                                      counts=None,
                                      error=False,
                                      no_rows=2,
                                      adapt_bottom=True,
                                      plot_range=None,
                                      base=5,
                                      eps=.5,
                                      plot_fit=True,
                                      colormap='pastel1',
                                      max_n_cols=10,
                                      legend_position='lower right',
                                      legend_pad='not implemented',
                                      print_value='auto'):
        """ Create a single barplot for each group of the first attribute in best.
        """
        no_subplots = len(best.index.levels[0])
        f, ax_arr = plt.subplots(
            no_rows, np.int(np.ceil(no_subplots / np.float(no_rows))))

        ax_flat = ax_arr.flatten()

        att_names = best.index.names
        self.set_all_orders()

        lev0 = self.bring_in_order(best.index.levels[0], att_names[0])
        lev1 = self.bring_in_order(best.index.levels[1], att_names[1])
        lev2 = self.bring_in_order(best.index.levels[2], att_names[2])

        best = best.reset_index()
        if counts is not None:
            counts = counts.reset_index()
        bar_x = np.arange(len(lev1))
        block_x = np.arange(len(lev2))
        width = 1.0 / len(lev2)
        bar_width = .4 * width
        offset = 1

        cmap = plt.cm.get_cmap(colormap)
        dummy_artists = []
        ticks = []
        tick_labels = []

        for plt_i, (lev0_ind, lev0_att) in enumerate(lev0):
            for bar_i, (lev1_ind, lev1_att) in enumerate(lev1):
                c = cmap(np.float(lev1_ind) / len(lev1))
                dummy_artists.append(Rectangle((0, 0), 1, 1, fc=c))
                for block_i, (lev2_ind, lev2_att) in enumerate(lev2):
                    # compute plot limits
                    if plot_range:
                        bottom = plot_range[0]
                        ceil = plot_range[1]
                    elif adapt_bottom:
                        relevant = best[(best[att_names[0]] == lev0_att)
                                        & ~(best['test mean'] == 0)]
                        if error:
                            ceil = misc.based_ceil(
                                np.max(relevant['test mean']) +
                                np.max(relevant['test std']) + eps, base)
                            bottom = misc.based_floor(
                                np.min(relevant['test mean']) -
                                np.max(relevant['test std']) - eps, base)
                        else:
                            ceil = misc.based_ceil(
                                np.max(relevant['test mean']) + eps, base)
                            bottom = misc.based_floor(
                                np.min(relevant['test mean']) - eps, base)

                    test_mean = misc.float(
                        best[(best[att_names[0]] == lev0_att)
                             & (best[att_names[1]] == lev1_att)
                             & (best[att_names[2]] == lev2_att)]['test mean'])
                    test_std = misc.float(
                        best[(best[att_names[0]] == lev0_att)
                             & (best[att_names[1]] == lev1_att)
                             & (best[att_names[2]] == lev2_att)]['test std'])
                    train_mean = misc.float(
                        best[(best[att_names[0]] == lev0_att)
                             & (best[att_names[1]] == lev1_att)
                             & (best[att_names[2]] == lev2_att)]['train mean'])
                    train_std = misc.float(
                        best[(best[att_names[0]] == lev0_att)
                             & (best[att_names[1]] == lev1_att)
                             & (best[att_names[2]] == lev2_att)]['train std'])

                    # create bar plots
                    if (test_mean != 0) and (not np.isnan(test_mean)) and (
                            not np.isnan(train_mean)):
                        if plot_fit:
                            if error:
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width,
                                                   train_mean - bottom,
                                                   bar_width,
                                                   color=c,
                                                   bottom=bottom,
                                                   yerr=train_std,
                                                   ecolor='gray',
                                                   alpha=.5,
                                                   linewidth=0.)
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width +
                                                   bar_width,
                                                   test_mean - bottom,
                                                   bar_width,
                                                   color=c,
                                                   bottom=bottom,
                                                   yerr=test_std,
                                                   ecolor='gray',
                                                   linewidth=0.)
                            else:
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width,
                                                   train_mean - bottom,
                                                   bar_width,
                                                   color=c,
                                                   bottom=bottom,
                                                   alpha=.5,
                                                   linewidth=0.)
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width +
                                                   bar_width,
                                                   test_mean - bottom,
                                                   bar_width,
                                                   color=c,
                                                   bottom=bottom,
                                                   linewidth=0.)

                            if print_value is True or (print_value is not False
                                                       and counts is None):
                                ax_flat[plt_i].text(
                                    bar_x[bar_i] + block_x[block_i] * width +
                                    .25, (test_mean + bottom) / 2,
                                    '%.2f' % train_mean,
                                    ha='center',
                                    va='top',
                                    rotation='vertical')

                        else:
                            if error:
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width,
                                                   test_mean - bottom,
                                                   color=c,
                                                   bottom=bottom,
                                                   yerr=test_std,
                                                   ecolor='gray',
                                                   linewidth=0.)
                            else:
                                ax_flat[plt_i].bar(bar_x[bar_i] +
                                                   block_x[block_i] * width,
                                                   test_mean - bottom,
                                                   color=c,
                                                   bottom=bottom,
                                                   linewidth=0.)

                            if print_value is True or (print_value is not False
                                                       and counts is None):
                                ax_flat[plt_i].text(
                                    bar_x[bar_i] + block_x[block_i] * width +
                                    .5, (test_mean + bottom) / 2,
                                    '%.2f' % test_mean,
                                    ha='center',
                                    va='center',
                                    rotation='vertical',
                                    hatch=self.patterns[block_i])

                        if plt_i == 0:
                            ticks.append(bar_x[bar_i] +
                                         block_x[block_i] * width +
                                         width * 0.5)
                        tick_labels += [lev2_att]
                        # print count
                        if counts is not None:
                            count = misc.int(
                                counts[(counts[att_names[0]] == lev0_att)
                                       & (counts[att_names[1]] == lev1_att)
                                       & (counts[att_names[2]]
                                          == lev2_att)]['test mean'])

                            if count > 0:
                                ax_flat[plt_i].text(
                                    bar_x[bar_i] +
                                    block_x[block_i] * bar_width + .4,
                                    (test_mean + bottom) / 2,
                                    '%d' % count,
                                    ha='center',
                                    va='center',
                                    rotation='vertical')

                    ax_flat[plt_i].set_title(lev0_att)
                    ax_flat[plt_i].set_xticks([])
                    ax_flat[plt_i].set_ylim(bottom, ceil)

                    ax_flat[plt_i].spines['top'].set_visible(False)
                    ax_flat[plt_i].spines['right'].set_visible(False)
                    ax_flat[plt_i].spines['left'].set_color('gray')
                    ax_flat[plt_i].spines['bottom'].set_color('gray')

        for plt_i in range(len(ax_flat)):
            # ax_flat[plt_i].axis('off')
            ax_flat[plt_i].set_xticks(ticks)
            ax_flat[plt_i].set_xticklabels(tick_labels, rotation=90)
        legend = [(int(att) if isinstance(att, float) else att)
                  for i, att in lev1]

        n_col = len(legend)
        if n_col > max_n_cols:
            n_col = int((n_col + 1) / 2)

        plt.figlegend(dummy_artists,
                      legend,
                      loc=legend_position,
                      ncol=n_col,
                      title=att_names[1])
def convert_to_float(string_to_convert):
    if ',' in string_to_convert:
        float_value = np.float(string_to_convert.replace(',', '.'))
    else:
        float_value = np.float(string_to_convert)
    return float_value
    def test(self, data):
        """
        test the system, return the accuracy of the model
        """
        test_length = len(data)
        init_hidden_state = np.zeros((test_length, self.latent_dim))
        test_data, self.test_data_lab, self.test_logit, self.test_demo, self.test_com = self.get_batch_train_period(
            test_length, 0, data)
        self.logit_out = self.sess.run(self.output_layer, feed_dict={self.input_x_vital: test_data,
                                                                     self.input_demo_: self.test_demo,
                                                                     self.input_x_lab: self.test_data_lab,
                                                                     self.input_x_com: self.test_com,
                                                                     self.init_hiddenstate: init_hidden_state})

        self.test_att_score = self.sess.run([self.score_attention, self.input_importance],
                                            feed_dict={self.input_x_vital: test_data,
                                            self.input_demo_:self.test_demo,
                                            self.input_x_lab:self.test_data_lab,
                                            self.input_x_com:self.test_com,
                                            self.init_hiddenstate:init_hidden_state})

        self.correct = 0
        self.tp_test = 0
        self.fp_test = 0
        self.fn_test = 0
        self.tp_correct = 0
        self.tp_neg = 0
        """
        for i in range(test_length):
            if self.test_logit[i,1] == 1:
                self.tp_correct += 1
            if self.test_logit[i,1] == 1 and self.logit_out[i,1] > self.threshold:
                print("im here")
                self.correct += 1
                self.tp_test += 1
                print(self.tp_test)
            if self.test_logit[i,1] == 0:
                self.tp_neg += 1
            if self.test_logit[i,1] == 1 and self.logit_out[i,1] < self.threshold:
                self.fn_test += 1
            if self.test_logit[i,1] == 0 and self.logit_out[i,1] > self.threshold:
                self.fp_test += 1
            if self.test_logit[i,1] == 0 and self.logit_out[i,1] < self.threshold:
                self.correct += 1
        """
        self.correct_predict_death = []
        for i in range(test_length):
            if self.test_logit[i, 0] == 1:
                self.tp_correct += 1
            if self.test_logit[i, 0] == 1 and self.logit_out[i, 0] > self.threshold:
                self.correct_predict_death.append(i)
                self.correct += 1
                self.tp_test += 1
            if self.test_logit[i, 0] == 0:
                self.tp_neg += 1
            if self.test_logit[i, 0] == 1 and self.logit_out[i, 0] < self.threshold:
                self.fn_test += 1
            if self.test_logit[i, 0] == 0 and self.logit_out[i, 0] > self.threshold:
                self.fp_test += 1
            if self.test_logit[i, 0] == 0 and self.logit_out[i, 0] < self.threshold:
                self.correct += 1

        self.correct_predict_death = np.array(self.correct_predict_death)

        feature_len = self.item_size + self.lab_size

        self.test_data_scores = self.test_att_score[1][self.correct_predict_death, :, :]
        self.ave_data_scores = np.zeros((self.time_sequence, feature_len))

        count = 0
        value = 0

        for j in range(self.time_sequence):
            for p in range(feature_len):
                for i in range(self.correct_predict_death.shape[0]):
                    if self.test_data_scores[i, j, p] != 0:
                        count += 1
                        value += self.test_data_scores[i, j, p]
                if count == 0:
                    continue
                self.ave_data_scores[j, p] = float(value / count)
                count = 0
                value = 0

        """
        self.tp_test = 0
        self.fp_test = 0
        self.fn_test = 0
        for i in range(test_length):
            if self.test_logit[i,1] == 1 and self.logit_out[i,1] > self.threshold:
                self.tp_test += 1
            if self.test_logit[i,1] == 1 and self.logit_out[i,1] < self.threshold:
                self.fn_test += 1
            if self.test_logit[i,1] == 0 and self.logit_out[i,1] > self.threshold:
                self.fp_test += 1
        """
        self.precision_test = np.float(self.tp_test) / (self.tp_test + self.fp_test)
        self.recall_test = np.float(self.tp_test) / (self.tp_test + self.fn_test)

        self.f1_test = 2 * (self.precision_test * self.recall_test) / (self.precision_test + self.recall_test)

        self.acc = np.float(self.correct) / test_length

        threshold = 0.0
        self.resolution = 0.01
        tp_test = 0
        fp_test = 0
        self.tp_total = []
        self.fp_total = []
        self.precision_total = []
        self.recall_total = []
        while (threshold < 1.01):
            tp_test = 0
            fp_test = 0
            fn_test = 0
            precision_test = 0
            for i in range(test_length):
                if self.test_logit[i, 0] == 1 and self.logit_out[i, 0] > threshold:
                    tp_test += 1
                if self.test_logit[i, 0] == 0 and self.logit_out[i, 0] > threshold:
                    fp_test += 1
                if self.test_logit[i, 0] == 1 and self.logit_out[i, 0] < threshold:
                    fn_test += 1
            self.check_fp_test = fp_test
            print(self.check_fp_test)
            self.check_tp_test = tp_test
            print(self.check_tp_test)
            if (tp_test + fp_test) == 0:
                precision_test = 1
            else:
                precision_test = np.float(tp_test) / (tp_test + fp_test)
            recall_test = np.float(tp_test) / (tp_test + fn_test)
            tp_rate = tp_test / self.tp_correct
            fp_rate = fp_test / self.tp_neg
            self.tp_total.append(tp_rate)
            self.fp_total.append(fp_rate)
            self.precision_total.append(precision_test)
            self.recall_total.append(recall_test)
            threshold += self.resolution
Exemple #53
0
def rebin_data(x, y, dx_new, yerr=None, method='sum', dx=None):
    """Rebin some data to an arbitrary new data resolution. Either sum
    the data points in the new bins or average them.

    Parameters
    ----------
    x: iterable
        The independent variable with some resolution ``dx_old = x[1]-x[0]``

    y: iterable
        The dependent variable to be binned

    dx_new: float
        The new resolution of the independent variable ``x``

    Other parameters
    ----------------
    yerr: iterable, optional
        The uncertainties of ``y``, to be propagated during binning.

    method: {``sum`` | ``average`` | ``mean``}, optional, default ``sum``
        The method to be used in binning. Either sum the samples ``y`` in
        each new bin of ``x``, or take the arithmetic mean.

    dx: float
        The old resolution (otherwise, calculated from median diff)

    Returns
    -------
    xbin: numpy.ndarray
        The midpoints of the new bins in ``x``

    ybin: numpy.ndarray
        The binned quantity ``y``

    ybin_err: numpy.ndarray
        The uncertainties of the binned values of ``y``.

    step_size: float
        The size of the binning step
    """

    y = np.asarray(y)
    yerr = np.asarray(apply_function_if_none(yerr, y, np.zeros_like))

    dx_old = apply_function_if_none(dx, np.diff(x), np.median)

    if dx_new < dx_old:
        raise ValueError("New frequency resolution must be larger than "
                         "old frequency resolution.")

    step_size = dx_new / dx_old

    output = []
    outputerr = []
    for i in np.arange(0, y.shape[0], step_size):
        total = 0
        totalerr = 0

        int_i = int(i)
        prev_frac = int_i + 1 - i
        prev_bin = int_i
        total += prev_frac * y[prev_bin]
        totalerr += prev_frac * (yerr[prev_bin] ** 2)

        if i + step_size < len(x):
            # Fractional part of next bin:
            next_frac = i + step_size - int(i + step_size)
            next_bin = int(i + step_size)
            total += next_frac * y[next_bin]
            totalerr += next_frac * (yerr[next_bin] ** 2)

        total += sum(y[int(i + 1):int(i + step_size)])
        totalerr += sum(yerr[int(i + 1):int(i + step_size)] ** 2)
        output.append(total)
        outputerr.append(np.sqrt(totalerr))

    output = np.asarray(output)
    outputerr = np.asarray(outputerr)

    if method in ['mean', 'avg', 'average', 'arithmetic mean']:
        ybin = output / np.float(step_size)
        ybinerr = outputerr / np.sqrt(np.float(step_size))

    elif method == "sum":
        ybin = output
        ybinerr = outputerr

    else:
        raise ValueError("Method for summing or averaging not recognized. "
                         "Please enter either 'sum' or 'mean'.")

    tseg = x[-1] - x[0] + dx_old

    if (tseg / dx_new % 1) > 0:
        ybin = ybin[:-1]
        ybinerr = ybinerr[:-1]

    new_x0 = (x[0] - (0.5 * dx_old)) + (0.5 * dx_new)
    xbin = np.arange(ybin.shape[0]) * dx_new + new_x0

    return xbin, ybin, ybinerr, step_size
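
A short usage sketch for rebin_data() as documented above, assuming the function and its apply_function_if_none() helper are importable from (or defined in) the same module:

import numpy as np

x = np.arange(0.0, 10.0, 0.1)                  # old resolution dx_old = 0.1
y = np.sin(x)
xbin, ybin, ybin_err, step_size = rebin_data(x, y, dx_new=0.5, method='mean')
print(len(x), '->', len(xbin))                 # roughly a factor of 5 fewer bins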
Exemple #54
0
    nth = np.int(sys.argv[2])
    name = sys.argv[3]
    data = np.zeros(
        [ndsets * nth, 2048 // pow(2, binning), 2448 // pow(2, binning)],
        dtype='float32')
    theta = np.zeros(ndsets * nth, dtype='float32')
    for k in range(ndsets):
        data[k * nth:(k + 1) *
             nth] = np.load(name + '_bin' + str(binning) + str(k) +
                            '.npy').astype('float32')
        theta[k * nth:(k + 1) * nth] = np.load(name + '_theta' + str(k) +
                                               '.npy').astype('float32')

    data = data[:, data.shape[1] // 2:data.shape[1] // 2 + 1]
    data[np.isnan(data)] = 0
    center = np.float(sys.argv[4])
    print('shape', data.shape, 'center', center // pow(2, binning))

    [ntheta, nz, n] = data.shape  # object size n x,y
    data -= np.mean(data)

    # exit()
    niter = 32  # tomography iterations
    pnz = 1  # number of slice partitions for simultaneous processing in tomography
    ngpus = 1
    # initial guess
    u = np.zeros([nz, n, n], dtype='float32')
    psi = data.copy()

    with tc.SolverTomo(theta, ntheta, nz, n, pnz, center / pow(2, binning),
                       ngpus) as tslv:
Exemple #55
0
with PdfPages(fn + '.pdf') as pdf:

    #plt.figure(figsize=(11.69, 8.27))
    plt.figure()

    txt = 'sound speed used : ' + str(soundspeed) + ' m/s\n'
    txt += 'ship transducer depth : ' + str(ship_transducer_depth) + ' m\n'
    txt += 'release to anchor : ' + str(
        release_height_above_seafloor) + ' m\n\n'

    for l in list_pre:
        pre_utm = utm.from_latlon(l['lat'],
                                  l['lon'],
                                  force_zone_number=anchor_utm[2])
        pre_dist = np.sqrt(
            (np.float(pre_utm[0]) - np.float(anchor_utm[0]) + x1)**2 +
            (np.float(pre_utm[1]) - np.float(anchor_utm[1]) + y1)**2)
        txt += l['ts'] + ' pre : ' + '{:6.2f} m'.format(
            pre_dist) + ' : ' + l['txt'] + '\n'
    txt += '\n'

    txt += 'file : ' + fn + '\n' + '\n'
    txt += 'std error first pass {:4.2f}'.format(stderror) + '\n'
    txt += 'fit x = {:6.2f}, y = {:6.2f}, z1 = {:6.2f}'.format(x1, y1,
                                                               z1) + '\n'
    txt += 'std error second pass {:4.2f}'.format(stderror1) + '\n'
    txt += 'fall back {:4.1f} (m)'.format(fallback) + '\n'
    txt += 'anchor solution lat {:9.5f} lon {:9.5f}'.format(ll[0],
                                                            ll[1]) + '\n'
    txt += 'release depth {:6.1f} anchor depth {:6.1f}'.format(
        z1, z1 + release_height_above_seafloor + ship_transducer_depth)
def create_output_images(Rover):

    # Create a scaled map for plotting and clean up obs/nav pixels a bit
    if np.max(Rover.worldmap[:, :, 2]) > 0:
        nav_pix = Rover.worldmap[:, :, 2] > 0
        navigable = Rover.worldmap[:, :, 2] * (
            255 / np.mean(Rover.worldmap[nav_pix, 2]))
    else:
        navigable = Rover.worldmap[:, :, 2]
    if np.max(Rover.worldmap[:, :, 0]) > 0:
        obs_pix = Rover.worldmap[:, :, 0] > 0
        obstacle = Rover.worldmap[:, :, 0] * (
            255 / np.mean(Rover.worldmap[obs_pix, 0]))
    else:
        obstacle = Rover.worldmap[:, :, 0]

    likely_nav = navigable >= obstacle
    obstacle[likely_nav] = 0
    plotmap = np.zeros_like(Rover.worldmap)
    plotmap[:, :, 0] = obstacle
    plotmap[:, :, 2] = navigable
    plotmap = plotmap.clip(0, 255)
    # Overlay obstacle and navigable terrain map with ground truth map
    map_add = cv2.addWeighted(plotmap, 1, Rover.ground_truth, 0.5, 0)

    # Check whether any rock detections are present in worldmap
    rock_world_pos = Rover.worldmap[:, :, 1].nonzero()
    # If there are, we'll step through the known sample positions
    # to confirm whether detections are real
    samples_located = 0
    if rock_world_pos[0].any():

        rock_size = 2
        for idx in range(len(Rover.samples_pos[0])):
            test_rock_x = Rover.samples_pos[0][idx]
            test_rock_y = Rover.samples_pos[1][idx]
            rock_sample_dists = np.sqrt((test_rock_x - rock_world_pos[1])**2 + \
                                  (test_rock_y - rock_world_pos[0])**2)
            # If rocks were detected within 3 meters of known sample positions
            # consider it a success and plot the location of the known
            # sample on the map
            if np.min(rock_sample_dists) < 3:
                samples_located += 1
                map_add[test_rock_y - rock_size:test_rock_y + rock_size,
                        test_rock_x - rock_size:test_rock_x +
                        rock_size, :] = 255

    # Calculate some statistics on the map results
    # First get the total number of pixels in the navigable terrain map
    tot_nav_pix = np.float(len((plotmap[:, :, 2].nonzero()[0])))
    # Next figure out how many of those correspond to ground truth pixels
    good_nav_pix = np.float(
        len(((plotmap[:, :, 2] > 0) &
             (Rover.ground_truth[:, :, 1] > 0)).nonzero()[0]))
    # Next find how many do not correspond to ground truth pixels
    bad_nav_pix = np.float(
        len(((plotmap[:, :, 2] > 0) &
             (Rover.ground_truth[:, :, 1] == 0)).nonzero()[0]))
    # Grab the total number of map pixels
    tot_map_pix = np.float(len((Rover.ground_truth[:, :, 1].nonzero()[0])))
    # Calculate the percentage of ground truth map that has been successfully found
    perc_mapped = round(100 * good_nav_pix / tot_map_pix, 1)
    # Calculate the number of good map pixel detections divided by total pixels
    # found to be navigable terrain
    if tot_nav_pix > 0:
        fidelity = round(100 * good_nav_pix / (tot_nav_pix), 1)
    else:
        fidelity = 0
    # Flip the map for plotting so that the y-axis points upward in the display
    map_add = np.flipud(map_add).astype(np.float32)
    # Add some text about map and rock sample detection results
    cv2.putText(map_add, "Time: " + str(np.round(Rover.total_time, 1)) + ' s',
                (0, 10), cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add, "Mapped: " + str(perc_mapped) + '%', (0, 25),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add, "Fidelity: " + str(fidelity) + '%', (0, 40),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add, "Rocks", (0, 55), cv2.FONT_HERSHEY_COMPLEX, 0.4,
                (255, 255, 255), 1)
    cv2.putText(map_add, "  Located: " + str(samples_located), (0, 70),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add, "  Collected: " + str(Rover.samples_collected),
                (0, 85), cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    # Convert map and vision image to base64 strings for sending to server
    pil_img = Image.fromarray(map_add.astype(np.uint8))
    buff = BytesIO()
    pil_img.save(buff, format="JPEG")
    encoded_string1 = base64.b64encode(buff.getvalue()).decode("utf-8")

    pil_img = Image.fromarray(Rover.vision_image.astype(np.uint8))
    buff = BytesIO()
    pil_img.save(buff, format="JPEG")
    encoded_string2 = base64.b64encode(buff.getvalue()).decode("utf-8")

    return encoded_string1, encoded_string2
Exemple #57
0
def affine_augm(images,v_flip,h_flip,rot,width_shift,height_shift,zoom,shear):
    """Affine augmentation (replacement for affine augm. for which I previously (AID <=0.0.4) used Keras ImageDataGenerator)
    -Function augments images
    images: array. array of shape (nr.images,image_height,image_width,channels)
    v_flip: bool. If True, 50% of the images are vertically flipped
    h_flip: bool. If True, 50% of the images are horizontally flipped
    rot: integer or float. Range of rotation in degrees
    width_shift: float. If >=1 or <=-1: number of pixels to shift the image left or right; if between 0 and 1: fraction of the total image width
    height_shift: float. If >=1 or <=-1: number of pixels to shift the image up or down; if between 0 and 1: fraction of the total image height
    zoom: float. zoom=0.1 means image is randomly scaled up/down by up to 10%. zoom=10 means, images are randomly scaled up to 10x initial size or down to 10% of initial size
    shear: float. Shear Intensity (Shear angle in degrees)
    
    This function performs very similar augmentation operations to those
    available in Keras ImageDataGenerator or Imgaug, but it is
    7x faster than ImageDataGenerator and
    4.5x faster than Imgaug
    """
    images = np.copy(images)
    rot,width_shift,height_shift,shear = abs(rot),abs(width_shift),abs(height_shift),abs(shear)
    rows,cols = images.shape[1],images.shape[2]
    if height_shift<1 and height_shift>-1:
        height_shift = height_shift*rows
    if width_shift<1 and width_shift>-1:
        width_shift = width_shift*cols
    
    if zoom!=0: #get the random numbers for zooming
        zoom = abs(zoom)
        if zoom>0 and zoom<1:
            fx = rand_state.uniform(low=1-zoom,high=1+zoom,size=images.shape[0])
            fy = rand_state.uniform(low=1-zoom,high=1+zoom,size=images.shape[0])
        else:
            fx = rand_state.uniform(low=1.0/np.float(zoom),high=zoom,size=images.shape[0])
            fy = rand_state.uniform(low=1.0/np.float(zoom),high=zoom,size=images.shape[0])
    if rot!=0:
        deg_rnd = rand_state.uniform(-rot,rot,size=images.shape[0])
    else:
        deg_rnd = np.repeat(0,repeats=images.shape[0])
    if height_shift!=0:
        height_shift_rnd = rand_state.uniform(-height_shift,height_shift,size=images.shape[0])
    else:
        height_shift_rnd = np.repeat(0,repeats=images.shape[0])
    if width_shift!=0: 
        width_shift_rnd = rand_state.uniform(-width_shift,width_shift,size=images.shape[0])
    else:
        width_shift_rnd = np.repeat(0,repeats=images.shape[0])
    if shear!=0: 
        shear = np.deg2rad(shear)
        shear_rnd = rand_state.uniform(-shear,shear,size=images.shape[0])
    else:
        shear_rnd = np.repeat(0,repeats=images.shape[0])
        
    for i in range(images.shape[0]):
        img = images[i]
        #1. Flipping:
        if v_flip==True and h_flip==False and rand_state.randint(low=0,high=2)>0:
            img = cv2.flip( img, 0 )
        elif v_flip==False and h_flip==True and rand_state.randint(low=0,high=2)>0:
            img = cv2.flip( img, 1 )
        elif v_flip==True and h_flip==True:
            rnd = rand_state.randint(low=-1,high=2) #get a random flipping axis: 1=vertical,0=horizontal,-1=both
            img = cv2.flip( img, rnd )

        #2.zooming
        if zoom!=0:
            img = np.atleast_3d(cv2.resize(img,None,fx=fx[i],fy=fy[i]))
            #By either padding or cropping, get back to the initial image size
            diff_height,diff_width = img.shape[0]-rows, img.shape[1]-cols
            c_height,c_width = int(img.shape[0]/2), int(img.shape[1]/2)#center in height and width
            #adjust height:
            if diff_height>0:#zoomed image is too high->crop
                y1 = c_height-rows//2
                y2 = y1+rows
                img = img[int(y1):int(y2)]
            if diff_width>0:#zoomed image is too wide->crop
                x1 = c_width-cols//2
                x2 = x1+cols
                img = img[:,int(x1):int(x2)]
            
            if diff_height<0 or diff_width<0:#zoomed image is too small in some direction->pad
                if diff_height<0:
                    diff_height = abs(diff_height)
                    top, bottom = diff_height//2, diff_height-(diff_height//2)
                else:
                    top, bottom = 0,0
                if diff_width<0:
                    diff_width = abs(diff_width)    
                    left, right = diff_width//2, diff_width-(diff_width//2)
                else:
                    left, right = 0,0
                color = [0, 0, 0]
                img = np.atleast_3d(cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT,value=color))
        
        #3.Translation, Rotation, Shear
        M_transf = cv2.getRotationMatrix2D((cols/2,rows/2),deg_rnd[i],1) #rotation matrix
        translat_center_x = -(shear_rnd[i]*cols)/2
        translat_center_y = -(shear_rnd[i]*rows)/2
        M_transf = M_transf + np.float64([[0,shear_rnd[i],width_shift_rnd[i] + translat_center_x], [shear_rnd[i],0,height_shift_rnd[i] + translat_center_y]])
        images[i] = np.atleast_3d(cv2.warpAffine(img,M_transf,(cols,rows))) #Rotation, translation and shear in a single call of cv2.warpAffine!
    return images
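
A minimal call sketch for affine_augm() above. The function reads a module-level rand_state (a numpy RandomState), so the sketch assumes affine_augm is defined in the same script where rand_state is created:

import numpy as np
import cv2  # required by affine_augm

rand_state = np.random.RandomState(0)          # module-level RNG that affine_augm expects
batch = np.random.rand(8, 64, 64, 1).astype(np.float32)  # (n_images, height, width, channels)
augmented = affine_augm(batch, v_flip=True, h_flip=True, rot=10,
                        width_shift=0.1, height_shift=0.1, zoom=0.1, shear=5)
print(augmented.shape)                         # same shape as the input batch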
Exemple #58
0
    def _solve_1_slack_qp(self, constraints, n_samples):
        C = np.float(self.C) * n_samples  # this is how libsvm/svmstruct do it
        joint_features = [c[0] for c in constraints]
        losses = [c[1] for c in constraints]

        joint_feature_matrix = np.vstack(joint_features)
        n_constraints = len(joint_features)
        P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
        # q contains loss from margin-rescaling
        q = cvxopt.matrix(-np.array(losses, dtype=np.float))
        # constraints: all alpha must be >zero
        idy = np.identity(n_constraints)
        tmp1 = np.zeros(n_constraints)
        # positivity constraints:
        if self.negativity_constraint is None:
            #empty constraints
            zero_constr = np.zeros(0)
            joint_features_constr = np.zeros((0, n_constraints))
        else:
            joint_features_constr = joint_feature_matrix.T[
                self.negativity_constraint]
            zero_constr = np.zeros(len(self.negativity_constraint))

        # put together
        G = cvxopt.sparse(
            cvxopt.matrix(np.vstack((-idy, joint_features_constr))))
        h = cvxopt.matrix(np.hstack((tmp1, zero_constr)))

        # equality constraint: sum of all alpha must be = C
        A = cvxopt.matrix(np.ones((1, n_constraints)))
        b = cvxopt.matrix([C])

        # solve QP model
        cvxopt.solvers.options['feastol'] = 1e-5
        try:
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        except ValueError:
            solution = {'status': 'error'}
        if solution['status'] != "optimal":
            print("regularizing QP!")
            P = cvxopt.matrix(
                np.dot(joint_feature_matrix, joint_feature_matrix.T) +
                1e-8 * np.eye(joint_feature_matrix.shape[0]))
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
            if solution['status'] != "optimal":
                raise ValueError("QP solver failed. Try regularizing your QP.")

        # Lagrange multipliers
        a = np.ravel(solution['x'])
        self.old_solution = solution
        self.prune_constraints(constraints, a)

        # Support vectors have non zero lagrange multipliers
        sv = a > self.inactive_threshold * C
        if self.verbose > 1:
            print("%d support vectors out of %d points" %
                  (np.sum(sv), n_constraints))
        self.w = np.dot(a, joint_feature_matrix)
        # we needed to flip the sign to make the dual into a minimization
        # model
        return -solution['primal objective']
Exemple #59
0
    def numSolve(self):
        print('Solving for y(x,t)')
        nx = self.x.shape[0]
        nt = self.t.shape[0]

        self.y = np.zeros([nx,nt])
        self.y[:,0] = self.y0

        D4 = np.zeros([nx,nx])
        Dt = np.zeros([nx,nx])
        a = np.zeros([nx,nx])
        c = np.zeros([nx,nt])

        for i in range(2,nx-2):
            D4[i,i-2:i+3] = self.A[i]*np.array([1,-4,6,-4,1])/self.dx**3

        Dt[2:nx-2,2:nx-2] = np.diag(self.zetaN[2:nx-2]*self.dx/self.dt)

        # LH BC
        if self.BC[0]==1:
            a[0,0] = 1
            a[1,1] = 1

            if self.shift[0]==1:
                c[0,:] = self.shiftAmp*np.sin(self.omega*self.t)
            else:
                c[0,:] = np.zeros(nt)

            if self.twist[0] == 1:
                c[1,:] = c[1,:] + self.dx*self.twistAmp*np.sin(self.omega*self.t)
            else:
                c[1,:] = c[0,:]

        else:
            a[0,0:4] = np.array([2,-5,4,-1])/self.dx**2
            a[1,0:4] = np.array([-1,3,-3,1])/self.dx**3
            c[0:2,:] = np.zeros([2,nt])

        # RH BC
        if self.BC[1]==1:
            a[nx-2,nx-2] = 1
            a[nx-1,nx-1] = 1

            if self.shift[1] == 1:
                c[nx-1,:] = self.shiftAmp*np.sin(self.omega*self.t)
            else:
                c[nx-1,:] = np.zeros(self.t.shape[0])

            if self.twist[1] == 1:
                c[nx-2,:] = c[nx-1,:] - self.dx*self.twistAmp*np.sin(self.omega*self.t)
            else:
                c[nx-2,:] = c[nx-1,:]

        else:
            # free end: zero moment and zero shear via one-sided stencils
            a[nx-2,nx-4:nx] = np.array([-1,3,-3,1])/self.dx**3
            a[nx-1,nx-4:nx] = np.array([-1,4,-5,2])/self.dx**2
            c[nx-2:nx,:] = np.zeros([2,nt])

        c[2:nx-2,:] = c[2:nx-2,:] + self.w[2:nx-2,:]
        
        # Build differential operator
        a = a + D4 + Dt
        
        # Solution step: march forward in time, printing progress every 50 steps
        for i in range(1,nt):
            if np.mod(i,50)==0:
                print(i/np.float(self.t.shape[0]))
                
            # carry the previous time step into the right-hand side (implicit update)
            c[2:nx-2,i] = c[2:nx-2,i] + np.multiply(self.zetaN[2:nx-2],self.y[2:nx-2,i-1])*self.dx/self.dt

            self.y[:,i] = np.linalg.solve(a,c[:,i])
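A note on the D4 stencil used above: [1, -4, 6, -4, 1] is the standard five-point central difference for the fourth derivative, normally divided by dx**4; the division by dx**3 here presumably reflects a scaling of the whole system by dx (the time term Dt likewise carries a factor dx/dt). A quick standalone check that the stencil does recover d4y/dx4 (it is exact for y = x**4, whose fourth derivative is 24 everywhere):

import numpy as np

dx = 0.1
x0 = 0.7                               # arbitrary evaluation point
pts = x0 + dx*np.arange(-2, 3)         # five-point window around x0
stencil = np.array([1, -4, 6, -4, 1])
print(stencil.dot(pts**4) / dx**4)     # ~24.0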
Exemple #60
0
 def _generate_standard_design(self,
                               infolist,
                               functional_runs=None,
                               realignment_parameters=None,
                               outliers=None):
     """ Generates a standard design matrix paradigm given information about
         each run
     """
     sessinfo = []
     output_units = 'secs'
     if 'output_units' in self.inputs.traits():
         output_units = self.inputs.output_units
     for i, info in enumerate(infolist):
         sessinfo.insert(i, dict(cond=[]))
         if isdefined(self.inputs.high_pass_filter_cutoff):
             sessinfo[i]['hpf'] = \
                 np.float(self.inputs.high_pass_filter_cutoff)
         if hasattr(info, 'conditions') and info.conditions is not None:
             for cid, cond in enumerate(info.conditions):
                 sessinfo[i]['cond'].insert(cid, dict())
                 sessinfo[i]['cond'][cid]['name'] = info.conditions[cid]
                 scaled_onset = scale_timings(info.onsets[cid],
                                              self.inputs.input_units,
                                              output_units,
                                              self.inputs.time_repetition)
                 sessinfo[i]['cond'][cid]['onset'] = scaled_onset
                 scaled_duration = scale_timings(
                     info.durations[cid], self.inputs.input_units,
                     output_units, self.inputs.time_repetition)
                 sessinfo[i]['cond'][cid]['duration'] = scaled_duration
                 if hasattr(info, 'amplitudes') and info.amplitudes:
                     sessinfo[i]['cond'][cid]['amplitudes'] = \
                         info.amplitudes[cid]
                 if hasattr(info, 'tmod') and info.tmod and \
                         len(info.tmod) > cid:
                     sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid]
                 if hasattr(info, 'pmod') and info.pmod and \
                         len(info.pmod) > cid:
                     if info.pmod[cid]:
                         sessinfo[i]['cond'][cid]['pmod'] = []
                         for j, name in enumerate(info.pmod[cid].name):
                             sessinfo[i]['cond'][cid]['pmod'].insert(j, {})
                             sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \
                                 name
                             sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \
                                 info.pmod[cid].poly[j]
                             sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \
                                 info.pmod[cid].param[j]
         sessinfo[i]['regress'] = []
         if hasattr(info, 'regressors') and info.regressors is not None:
             for j, r in enumerate(info.regressors):
                 sessinfo[i]['regress'].insert(j, dict(name='', val=[]))
                 if hasattr(info, 'regressor_names') and \
                         info.regressor_names is not None:
                     sessinfo[i]['regress'][j]['name'] = \
                         info.regressor_names[j]
                 else:
                     sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j + 1)
                 sessinfo[i]['regress'][j]['val'] = info.regressors[j]
         sessinfo[i]['scans'] = functional_runs[i]
     if realignment_parameters is not None:
         for i, rp in enumerate(realignment_parameters):
             mc = realignment_parameters[i]
             for col in range(mc.shape[1]):
                 colidx = len(sessinfo[i]['regress'])
                 sessinfo[i]['regress'].insert(colidx, dict(name='',
                                                            val=[]))
                 sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % (
                     col + 1)
                 sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist()
     if outliers is not None:
         for i, out in enumerate(outliers):
             numscans = 0
             for f in filename_to_list(sessinfo[i]['scans']):
                 shape = load(f).shape
                 if len(shape) == 3 or shape[3] == 1:
                     iflogger.warning(("You are using 3D instead of 4D "
                                       "files. Are you sure this was "
                                       "intended?"))
                     numscans += 1
                 else:
                     numscans += shape[3]
             for j, scanno in enumerate(out):
                 colidx = len(sessinfo[i]['regress'])
                 sessinfo[i]['regress'].insert(colidx, dict(name='',
                                                            val=[]))
                 sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d' % (j +
                                                                         1)
                 sessinfo[i]['regress'][colidx]['val'] = \
                     np.zeros((1, numscans))[0].tolist()
                 sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1
     return sessinfo
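For reference, the sessinfo structure returned above is a list with one dict per run; reconstructed from the code, a single entry looks roughly like the sketch below (all values are invented for illustration, and 'hpf', 'amplitudes', 'tmod' and 'pmod' only appear when the corresponding inputs are set):

sessinfo = [
    {
        'hpf': 128.0,                                    # high-pass filter cutoff, if defined
        'cond': [
            {'name': 'task',
             'onset': [0.0, 30.0],                       # scaled to output_units
             'duration': [15.0, 15.0]},
        ],
        'regress': [
            {'name': 'Realign1', 'val': [0.01, -0.02]},  # one regressor per motion column
            {'name': 'Outlier1', 'val': [0.0, 1.0]},     # one indicator column per outlier scan
        ],
        'scans': 'run1.nii',                             # functional_runs[i]
    },
]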