def loyer_retenu():

            # actual monthly rent, multiplied by 2/3 for furnished dwellings
            L1 = round_(loyer * where(statut_occupation == 5, 2 / 3, 1))

            zone_apl = simulation.calculate('zone_apl_famille', period)

            # Parameters holding the rent ceilings for this zone
            plafonds_by_zone = [[0] + [al.loyers_plafond['zone' + str(zone)]['L' + str(i)] for zone in range(1, 4)] for i in range(1, 5)]
            L2_personne_seule = take(plafonds_by_zone[0], zone_apl)
            L2_couple = take(plafonds_by_zone[1], zone_apl)
            L2_famille = take(plafonds_by_zone[2], zone_apl) + (al_pac > 1) * (al_pac - 1) * take(plafonds_by_zone[3], zone_apl)

            L2 = select(
                [personne_seule * (al_pac == 0) + chambre, al_pac > 0],
                [L2_personne_seule, L2_famille],
                default = L2_couple
                )

            # rate to apply to the rent ceiling
            coeff_chambre_colloc = select(
                [chambre, coloc],
                [al.loyers_plafond.chambre, al.loyers_plafond.colocation],
                default = 1)

            L2 = round_(L2 * coeff_chambre_colloc, 2)

            # rent retained
            L = min_(L1, L2)

            return L
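
A minimal standalone sketch of the same ceiling-selection pattern on plain numpy arrays (all figures here are illustrative, not the legal parameters):

import numpy as np
from numpy import where, minimum as min_, round_

loyer = np.array([400.0, 900.0])        # actual rents
statut_occupation = np.array([5, 1])    # 5 = furnished
L1 = round_(loyer * where(statut_occupation == 5, 2 / 3, 1))
L2 = np.array([300.0, 350.0])           # hypothetical zone ceilings
print(min_(L1, L2))                     # [267. 350.]
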
def visualize2DOF(pred_act1, pred_act2, act, num_bins=10):
    # bin sizes along each predicted-activation dimension
    bin_size1 = (numpy.max(pred_act1, axis=0) - numpy.min(pred_act1, axis=0)) / num_bins
    bin_size2 = (numpy.max(pred_act2, axis=0) - numpy.min(pred_act2, axis=0)) / num_bins

    of = numpy.zeros((numpy.shape(act)[1], num_bins, num_bins))
    ofn = numpy.zeros((numpy.shape(act)[1], num_bins, num_bins))

    for i in range(numpy.shape(act)[0]):
        idx1 = numpy.round_((pred_act1[i, :] - numpy.min(pred_act1, axis=0)) / bin_size1)
        idx2 = numpy.round_((pred_act2[i, :] - numpy.min(pred_act2, axis=0)) / bin_size2)

        # fold the top edge back into the last bin
        idx1 = idx1 - (idx1 >= num_bins)
        idx2 = idx2 - (idx2 >= num_bins)

        # bin indices must be integers
        idx1 = numpy.array(idx1).astype(int).flatten().tolist()
        idx2 = numpy.array(idx2).astype(int).flatten().tolist()

        for j, (x, y) in enumerate(zip(idx1, idx2)):
            of[j, x, y] = of[j, x, y] + act[i, j]
            ofn[j, x, y] = ofn[j, x, y] + 1

    # mark empty bins with -1 and avoid dividing by zero
    of = of - (ofn <= 0)
    ofn = ofn + (ofn <= 0)
    of = of / ofn
    print(of[0])
    print(of[1])
    print(ofn[0])
    print(ofn[1])

    showRFS(of, joinnormalize=False)
    def function(self, simulation, period):
        period = period.this_month
        rfr = simulation.calculate('rfr', period.n_2)
        age_holder = simulation.compute('age', period)
        scolarite_holder = simulation.compute('scolarite', period)
        P = simulation.legislation_at(period.start).bourses_education.bourse_college

        ages = self.split_by_roles(age_holder, roles = ENFS)
        nb_enfants = sum(
            age >= 0 for age in ages.values()
        )

        scolarites = self.split_by_roles(scolarite_holder, roles = ENFS)

        nb_enfants_college = sum(
            scolarite == SCOLARITE_COLLEGE for scolarite in scolarites.values()
        )

        montant_par_enfant = apply_thresholds(
            rfr,
            thresholds = [
                # plafond_taux_3 is the lowest
                round_(P.plafond_taux_3 + P.plafond_taux_3 * nb_enfants * P.coeff_enfant_supplementaire),
                round_(P.plafond_taux_2 + P.plafond_taux_2 * nb_enfants * P.coeff_enfant_supplementaire),
                round_(P.plafond_taux_1 + P.plafond_taux_1 * nb_enfants * P.coeff_enfant_supplementaire),
                ],
            choices = [P.montant_taux_3, P.montant_taux_2, P.montant_taux_1]
            )

        montant = nb_enfants_college * montant_par_enfant

        return period, montant / 12
    def function(self, simulation, period):
        period = period.start.offset('first-of', 'month').period('month')
        rfr = simulation.calculate('rfr', period.start.offset('first-of', 'year').period('year').offset(-2))
        age_holder = simulation.compute('age', period)
        scolarite_holder = simulation.compute('scolarite', period)
        P = simulation.legislation_at(period.start).bourses_education.bourse_college

        ages = self.split_by_roles(age_holder, roles = ENFS)
        nb_enfants = zeros(len(rfr))
        for age in ages.values():
            nb_enfants += age >= 0

        plafond_taux_1 = round_(P.plafond_taux_1 + P.plafond_taux_1 * nb_enfants * P.coeff_enfant_supplementaire)
        plafond_taux_2 = round_(P.plafond_taux_2 + P.plafond_taux_2 * nb_enfants * P.coeff_enfant_supplementaire)
        plafond_taux_3 = round_(P.plafond_taux_3 + P.plafond_taux_3 * nb_enfants * P.coeff_enfant_supplementaire)

        eligible_taux_3 = rfr <= plafond_taux_3
        eligible_taux_2 = not_(eligible_taux_3) * (rfr <= plafond_taux_2)
        eligible_taux_1 = not_(or_(eligible_taux_2, eligible_taux_3)) * (rfr <= plafond_taux_1)

        scolarites = self.split_by_roles(scolarite_holder, roles = ENFS)
        nb_enfants_college = zeros(len(rfr))
        for scolarite in scolarites.values():
            nb_enfants_college += scolarite == SCOLARITE_COLLEGE

        montant = nb_enfants_college * (
            eligible_taux_3 * P.montant_taux_3 +
            eligible_taux_2 * P.montant_taux_2 +
            eligible_taux_1 * P.montant_taux_1
            )

        return period, montant / 12
Example #5
    def indices(self, x, y, clip=False):
        """
        Return the grid pixel indices (i_x, i_y) corresponding to the
        given arrays of grid coordinates. Arrays x and y must have the
        same size. Also return a boolean array of the same length that
        is True where the pixels are within the grid bounds and False
        elsewhere.

        If clip is False, a ValueError is raised if any of the pixel
        centers are outside the grid bounds; otherwise the returned
        within array is all True. If clip is True, the i_x and i_y
        values where within is False are nonsense; the safe thing is
        to use only i_x[within] and i_y[within].
        """
        if x.size != y.size:
            raise ValueError("Arrays x and y must have the same length.")
        # This is a workaround for the behavior of int_: when given an
        # array of size 1 it returns an int instead of an array.
        if x.size == 1:
            i_x = np.array([int(np.round((x[0] - self.x[0]) / self.dx()))])
            i_y = np.array([int(np.round((y[0] - self.y[0]) / self.dy()))])
        else:
            i_x = np.int_(np.round_((x - self.x[0]) / self.dx()))
            i_y = np.int_(np.round_((y - self.y[0]) / self.dy()))
        within = ((0 <= i_x) & (i_x < self.x.size) & (0 <= i_y) & (i_y < self.y.size))
        if not clip and not all(within):
            raise ValueError("Not all points are inside the grid bounds, and clipping is not allowed.")
        return i_x, i_y, within
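
The same round-then-index idea in isolation, for a uniform grid starting at x0 with spacing dx (names are illustrative only):

import numpy as np

x0, dx = 0.0, 0.5
x = np.array([0.1, 1.2, 2.6])
i_x = np.int_(np.round_((x - x0) / dx))  # nearest pixel index
print(i_x)  # [0 2 5]
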
    def _run_(self):
        '''Decode the Huffman streams and rebuild the image via inverse DCT.'''
        hf = h.HuffCoDec(self.hufftables)
        r, c, chnl = self.R, self.C, self.NCHNL
        Z = self.Z

        if self.mode == '444':
            for ch in range(chnl):
                nblk, seqrec = hf.invhuff(self.huffcodes[ch], ch)
                for i in range(self.nBlkRows):
                    for j in range(self.nBlkCols):
                        blk = h.zagzig(seqrec[i*self.nBlkCols + j])
                        self.imRaw[r*i:r*i+r, c*j:c*j+c, ch] = np.round_( cv2.idct( blk*Z[:,:,ch] ))

        elif self.mode == '420':
            if chnl == 1:
                rYmg = self.imRaw
            else:
                Y = np.zeros( (self.M, self.N) )
                dims, CrCb = h.adjImg( downsample(np.zeros( (self.M, self.N, 2) ), self.mode)[1] )
                rYmg = [ Y, CrCb[:,:,0], CrCb[:,:,1] ]

            for ch in range(chnl):
                # the luma channel keeps the full block grid; chroma is downsampled
                if ch == 0:
                    rBLK = self.nBlkRows
                    cBLK = self.nBlkCols
                else:
                    rBLK, cBLK = int(np.floor(dims[0]/self.R)), int(np.floor(dims[1]/self.C))
                nblk, self.seqrec = hf.invhuff(self.huffcodes[ch], ch)
                for i in range(rBLK):
                    for j in range(cBLK):
                        blk = h.zagzig(self.seqrec[i*cBLK + j])
                        rYmg[ch][r*i:r*i+r, c*j:c*j+c] = np.round_( cv2.idct( blk*Z[:,:,ch] ))

            # upsample the chroma channels back to full resolution
            if chnl == 1:
                self.imRaw = rYmg
            else:
                self.imRaw[:,:,0] = rYmg[0]
                self.imRaw[:,:,1] = upsample(rYmg[1], self.mode)[:self.M, :self.N]
                self.imRaw[:,:,2] = upsample(rYmg[2], self.mode)[:self.M, :self.N]

        # shift pixel values back from the signed DCT range
        imrec = self.imRaw + 128.0

        return imrec
Example #7
def is_equidistant(x):
    '''
    >>> is_equidistant((0,1,2))
    True
    >>> is_equidistant((0,1,2.5))
    False
    '''
    d = np.diff(x)
    return (np.round_(d,8)==np.round_(d[0],8)).all()
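
The 8-decimal rounding acts as a tolerance for floating-point noise, so near-equal spacings still count as equidistant:

import numpy as np
print(is_equidistant(np.array([0.0, 0.1, 0.2, 0.30000000004])))  # True
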
Example #8
def rotz(ang):
    """Generate a homogenous trransform for ang radians around the z axis"""
    s = N.round_(sin(ang), decimals=14); c = N.round_(cos(ang), decimals=14)
    return N.array([
        [c,-s, 0, 0],
        [s, c, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ])
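
A quick sanity check (illustrative): a 90-degree rotation about z should map the x unit vector onto y, and the 14-decimal rounding makes cos(pi/2) come out exactly zero:

import numpy as N
from numpy import sin, cos
R = rotz(N.pi / 2)
print(R[:3, :3].dot([1, 0, 0]))  # [0. 1. 0.]
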
def process_histogram(PabsFlip, N1, uCut, lCut, angleInc, radStep):
    """
    Create orientation Histogram
    Sum pixel intensity along different angles
    :param PabsFlip:
    :param N1:
    :param uCut: upper-cut parameter from the settings.SettingsWindow
    :param lCut: lower-cut parameter from the settings.SettingsWindow
    :param angleInc: angle-increment from the settings.SettingsWindow
    :param radStep: radial-step
    :return: normPower, theta1RadFinal
    """
    n1 = np.round(N1 / 2) - 1
    freq = np.arange(-n1, n1 + 1, 1)
    x, y = freq, freq

    # Variables for settings
    CO_lower = lCut
    CO_upper = uCut

    #  Set up polar coordinates prior to summing the spectrum
    theta1Rad = np.linspace(0.0, 2 * math.pi, num=int(360 / angleInc))
    f1 = np.round_(N1 / (2 * CO_lower))
    f2 = np.round_(N1 / (2 * CO_upper))

    rho1 = np.linspace(f1, f2, num=int((f2 - f1) / radStep))  # frequency band
    PowerX = np.zeros((theta1Rad.size, theta1Rad.size))
    PowerY = np.zeros((theta1Rad.size))

    # Interpolate using a spline
    PowerSpline = scipy.interpolate.RectBivariateSpline(y=y, x=x, z=PabsFlip)
    n_dx = 0.001

    for p in range(0, theta1Rad.size):
        # converting theta1Rad and rho1 to cartesian coordinates
        xfinal = rho1 * math.cos(theta1Rad[p])
        yfinal = rho1 * math.sin(theta1Rad[p])

        # Evaluate spline on path
        px = PowerSpline.ev(yfinal, xfinal)
        PowerY[p] = np.sum(px)

    # Only use the data in the first two quadrants (Spectrum is symmetric)
    num = len(theta1Rad)
    PowerYFinal = PowerY[0:num // 2]
    theta1RadFinal = theta1Rad[0:num // 2]

    power_area = np.trapz(PowerYFinal, theta1RadFinal)
    normPower = PowerYFinal / power_area

    # TODO: Ask Rici what those are
    return normPower, theta1RadFinal
    def _binary_preds(self, model_preds, mean_preds, stack_preds):
        """Round model, mean, and stack predictions to binary 0/1 labels."""
        model_preds_bin = np.round_(model_preds, decimals=0)
        mean_preds_bin = np.round_(mean_preds, decimals=0)
        stack_preds_bin = np.round_(stack_preds, decimals=0) \
            if self.stack else 0

        return model_preds_bin, mean_preds_bin, stack_preds_bin

def restarize_events(events, durations, dt, t_max):
    """ build a binary sequence of events. Each event start is approximated
    to the nearest time point on the time grid defined by dt and t_max.
    """
    smpl_events = np.array(np.round_(np.divide(events, dt)), dtype=int)
    smpl_durations = np.array(np.round_(np.divide(durations, dt)), dtype=int)
    smpl_events = extend_sampled_events(smpl_events, smpl_durations)
    if np.allclose(t_max % dt, 0):
        bin_seq = np.zeros(int(t_max / dt) + 1)
    else:
        bin_seq = np.zeros(int(np.round((t_max + dt) / dt)))
    bin_seq[smpl_events] = 1

    return bin_seq
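
The snapping step above in isolation, assuming a grid step dt (values illustrative):

import numpy as np
events, dt = np.array([0.12, 0.58, 1.01]), 0.1
print(np.array(np.round_(np.divide(events, dt)), dtype=int))  # [ 1  6 10]
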
Example #12
    def test_round_(self):
        self.assertQuantityEqual(
            np.round_([.5, 1.5, 2.5, 3.5, 4.5] * pq.J),
            [0., 2., 2., 4., 4.] * pq.J
            )

        self.assertQuantityEqual(
            np.round_([1,2,3,11] * pq.J, decimals=1),
            [1, 2, 3, 11] * pq.J
            )

        self.assertQuantityEqual(
            np.round_([1,2,3,11] * pq.J, decimals=-1),
            [0, 0, 0, 10] * pq.J
            )
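
The first assertion works because np.round_ uses round-half-to-even ("banker's rounding") rather than round-half-up:

import numpy as np
print(np.round_([0.5, 1.5, 2.5, 3.5, 4.5]))  # [0. 2. 2. 4. 4.]
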
def create_curve_states_2D():
    R90CW,FH,FV,R90CW_FH,R90CW_FV,R180CW = get_standard_matrixes()
    state0m = np.matrix([[0, 0, 0, 1],
        [0, 1, 0, 1],
        [1, 1, 0, 1],
        [1, 0, 0, 1]])


    state0 = np.round_(state0m.getT()[:2:].getT().astype(int)).astype(int)
    state1 = np.round_(R90CW_FH.dot(state0m.getT())[:2:].getT()).astype(int)
    state2 = np.round_(R90CW_FV.dot(state0m.getT())[:2:].getT()).astype(int)
    state3 = np.round_(R180CW.dot(state0m.getT())[:2:].getT()).astype(int)

    return np.array([state0, state1, state2, state3])
Example #14
def general_axis_rotation(axis,  angle):
    """Generates a rotation matrix around <axis> by <angle>, using the right-hand
    rule.
    Arguments: 
        axis - a 3-component 1D array representing a unit vector
        angle - rotation counterclockwise in radians around the axis when the axis 
            points to the viewer.
    Returns: A 3x3 array representing the matrix of rotation.
    Reference: [1] p.47
    """
    s = N.round_(sin(angle), decimals=14)
    c = N.round_(cos(angle), decimals=14)
    v = 1 - c
    add = N.array([[0, -axis[2], axis[1]],
                   [axis[2], 0, -axis[0]],
                   [-axis[1], axis[0], 0]])
    return N.multiply.outer(axis,  axis)*v + N.eye(3)*c + add*s
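
A quick check (illustrative): with axis = z and angle = pi/2 the formula reproduces the 90-degree z-rotation matrix:

import numpy as N
from numpy import sin, cos
print(general_axis_rotation(N.array([0., 0., 1.]), N.pi / 2))
# [[ 0. -1.  0.]
#  [ 1.  0.  0.]
#  [ 0.  0.  1.]]
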
Example #15
def log_der_13(z, nstop):
    '''
    Calculate logarithmic derivatives of Riccati-Bessel functions psi
    and xi for complex arguments.  Riccati-Bessel conventions follow
    Bohren & Huffman.

    See Mackowski et al., Applied Optics 29, 1555 (1990).

    Parameters
    ----------
    z: complex number
    nstop: maximum order of computation
    '''
    z = np.complex128(z) # convert to double precision

    # Calculate Dn_1 (based on \psi(z)) using downward recursion.
    # See Mackowski eqn. 62
    nmx = np.maximum(nstop, int(np.round_(np.absolute(z)))) + 15
    dn1 = log_der_1(z, nmx, nstop)

    # Calculate Dn_3 (based on \xi) by up recurrence
    # initialize
    dn3 = zeros(nstop+1, dtype = 'complex128')
    psixi = zeros(nstop+1, dtype = 'complex128')
    dn3[0] = 1.j
    psixi[0] = -1j*exp(1.j*z)*sin(z)
    for dindex in arange(1, nstop+1):
        # Mackowski eqn 63
        psixi[dindex] = psixi[dindex-1] * ( (dindex/z) - dn1[dindex-1]) * (
            (dindex/z) - dn3[dindex-1])
        # Mackowski eqn 64
        dn3[dindex] = dn1[dindex] + 1j/psixi[dindex]

    return dn1, dn3
def compute_allegement_fillon(simulation, period):
    """
        Fillon exemption
        http://www.securite-sociale.fr/comprendre/dossiers/exocotisations/exoenvigueur/fillon.htm
    """
    assiette = simulation.calculate_add('assiette_allegement', period)
    smic_proratise = simulation.calculate_add('smic_proratise', period)
    taille_entreprise = simulation.calculate('taille_entreprise', period)
    majoration = (taille_entreprise <= 2)  # possible increase for small companies
    # Rate calculation
    # The maximum amount of the relief depends on the company's headcount.
    # The amount is computed each calendar year, for each employee;
    # it equals the total annual remuneration, as defined in article
    # L. 242-1 of the Social Security code, multiplied by a coefficient.
    # This amount is increased by 10% for temporary-work agencies in
    # respect of temporary employees to whom they owe the compensatory
    # paid-leave indemnity.
    Pf = simulation.legislation_at(period.start).cotsoc.exo_bas_sal.fillon
    seuil = Pf.seuil
    tx_max = (Pf.tx_max * not_(majoration) + Pf.tx_max2 * majoration)
    if seuil <= 1:
        return 0
    ratio_smic_salaire = smic_proratise / (assiette + 1e-16)
    # rounding rule: 4 decimals, to the nearest ten-thousandth
    taux_fillon = round_(tx_max * min_(1, max_(seuil * ratio_smic_salaire - 1, 0) / (seuil - 1)), 4)

    # Amount of the relief
    return taux_fillon * assiette
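
A worked example of the rate formula with hypothetical parameter values (seuil and tx_max here are not the legal ones), for a salary of 1.2 x SMIC:

from numpy import round_, minimum as min_, maximum as max_
seuil, tx_max = 1.6, 0.281
ratio_smic_salaire = 1 / 1.2
taux = round_(tx_max * min_(1, max_(seuil * ratio_smic_salaire - 1, 0) / (seuil - 1)), 4)
print(taux)  # 0.1561
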
def compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min = 1):
    ratio_smic_salaire = smic_proratise / (assiette_allegement + 1e-16)
    # rounding rule: 4 decimals, to the nearest ten-thousandth (# TODO: carried over from the Fillon relief, unchecked)
    return round_(
        taux_max * min_(1, max_(seuil_max * seuil_min * ratio_smic_salaire - seuil_min, 0) / (seuil_max - seuil_min)),
        4,
        )
Example #18
    def findNonLinear2D(self, t=0, npxls=af.MIN_PXLS_YX, phaseContrast=True):
        """
        calculate local cross correlation of projection images

        set self.mapyx and self.regions
        """
        #if self.refyz is None or self.refyx is None:
        self.setRefImg(removeEdge=False)

        # preparing the initial mapyx
        # mapyx is not inherited to avoid too much distortion
        self.mapyx = N.zeros((self.img.nt, self.img.nw, 2, self.img.ny, self.img.nx), N.float32)

        if N.all((N.array(self.mapyx.shape[-2:]) - self.img.shape[-2:]) >= 0):
            slcs = imgGeo.centerSlice(self.mapyx.shape[-2:], win=self.img.shape[-2:], center=None)
            self.mapyx = self.mapyx[slcs] # Ellipsis already added
        else:
            self.mapyx = imgFilters.paddingValue(self.mapyx, shape=self.img.shape[-2:], value=0)

        self.last_win_sizes = N.zeros((self.nt, self.nw), N.uint16)
        # calculation
        for w in range(self.img.nw):
            if w == self.refwave:
                self.mapyx[t,w] = 0
                continue

            self.echo('Projection local alignment -- W: %i' % w)
            if self.img.nz > 1:
                img = self.img.get3DArr(w=w, t=t)
                #img = af.fixSaturation(img, self.getSaturation(w=w, t=t))

                zs = N.round_(self.refzs-self.alignParms[t,w,0]).astype(int)
                if zs.max() >= self.img.nz:
                    zsbool = (zs < self.img.nz)
                    zsinds = N.nonzero(zsbool)[0]
                    zs = zs[zsinds]

                imgyx = af.prep2D(img, zs=zs, removeEdge=False)
                del img
            else:
                imgyx = N.squeeze(self.img.get3DArr(w=w, t=t))
                #imgyx = af.fixSaturation(imgyx, self.getSaturation(w=w, t=t))

            affine = self.alignParms[t,w]

            imgyx = imgyx.astype(N.float32)
            self.refyx = self.refyx.astype(N.float32)

            yxs, regions, arr2, win = af.iterWindowNonLinear(imgyx, self.refyx, npxls, affine=affine, initGuess=self.mapyx[t,w], phaseContrast=self.phaseContrast, maxErr=self.maxErrYX, cthre=self.cthre, echofunc=self.echofunc)

            self.mapyx[t,w] = yxs
            if self.regions is None or self.regions.shape[-2:] != regions.shape:
                self.regions = N.zeros((self.img.nt, self.img.nw)+regions.shape, N.uint16)
            self.regions[t,w] = regions

            self.last_win_sizes[t,w] = win
            
            self.progress()

        self.echo('Projection local alignment done')
def compute_subset_score(indices, pred_set, y):
    subset = [vect for i, vect in enumerate(pred_set) if i in indices]
    mean_preds = sp.mean(subset, axis=0)
    mean_preds = np.round_(mean_preds, decimals=0)
    mean_score = compute_score(y, mean_preds)

    return mean_score, indices
def main():
    # scores for 10 people
    x = np.array([71,72,85,85,78,92,74,82,84,79])
    # compute each person's standard score (hensachi: mean 50, SD 10)
    ans = np.round_(50+10*(x-np.average(x))/np.std(x))
    # print the 10 standard scores
    print(ans)
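
The line above implements the Japanese "hensachi" standard score, T = 50 + 10 * (x - mean) / std. A standalone check:

import numpy as np
x = np.array([40., 50., 60.])
print(np.round_(50 + 10 * (x - np.average(x)) / np.std(x)))  # [38. 50. 62.]
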
def plotAcc(tree, numFileTest):
    arr = np.array(range(numFileTest)) + 1
    result = np.zeros(len(arr), dtype = float)
    for i in arr:
        test = np.genfromtxt('TestSet'+str(i)+'.csv', dtype = int, delimiter = ',')
        result[i-1] = np.round_(Accuracy(tree, test)*100,2)
    return result
        def indice_ressources_Rp():
            # Resource index used by the CAF, in €
            # Difference between household resources and a reference income R0

            # resources taken into account
            R = aide_logement_base_ressources

            # R0 ceiling
            R1 = al.rmi * (
                al.R1.taux1 * personne_seule * (al_pac == 0) +
                al.R1.taux2 * couple * (al_pac == 0) +
                al.R1.taux3 * (al_pac == 1) +
                al.R1.taux4 * (al_pac >= 2) +
                al.R1.taux5 * (al_pac > 2) * (al_pac - 2)
                )

            R2 = pfam_n_2.af.bmaf * (
                al.R2.taux4 * (al_pac >= 2) +
                al.R2.taux5 * (al_pac > 2) * (al_pac - 2)
                )

            Ro = round_(12 * (R1 - R2) * (1 - al.autres.abat_sal))

            Rp = max_(0, R - Ro)

            return Rp
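
The final step in isolation: Rp is the part of the household resources exceeding the reference income R0, floored at zero (figures illustrative):

from numpy import maximum as max_
R, Ro = 15000.0, 12000.0
print(max_(0, R - Ro))  # 3000.0
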
def query_to_barchart_log(xml, resp):
    """
    A function to plot a query from its xml.
    NOTE: first argument:
            the second column of the query is the x-axis,
            the third column of the query is the y-axis,
            the first column of the query is the gene
          second argument:
            only if 'true', convert the third column to its log values

    ================================================
    example:

        >>> from intermine import query_manager as qm
        >>> qm.query_to_barchart_log(<xml>, 'true')
        <plots the second column vs log(third column)>

    """
    rows = get_query(xml)
    root = etree.fromstring(xml)
    store = root.attrib['view']
    store = store.split(' ')
    x_val = []
    y_val = []
    for i in range(0, len(rows) - 1):
        x_val.append(rows[i][1])
        y_val.append(float(rows[i][2]))

    if resp == 'true':
        y_val = np.log(y_val)
        y_val = np.round_(y_val, 2)

    y = pd.Series(y_val)
    x = pd.Series(x_val)

    ax = y.plot(kind='bar')
    ax.set_title(rows[0][0])
    ax.set_xlabel(store[1])
    if resp == 'true':
        ax.set_ylabel('log(' + store[2] + ')')
    else:
        ax.set_ylabel(store[2])
    ax.set_xticklabels(x, rotation='vertical')

    rects = ax.patches

    def autolabel(rects, ax):
        i = 0
        for rect in rects:
            x = rect.get_x() + rect.get_width()/2.
            y = rect.get_height()
            ax.annotate(y_val[i], (x, y), xytext=(0, 5),
                        textcoords="offset points",
                        ha='center', va='bottom')
            i = i+1

    autolabel(ax.patches, ax)

    ax.margins(y=0.1)
    plt.show()
Example #24
    def importEmsysAsciiData(self, filename, verbose=False):
        """Import data from emsys text export:
            yields: positions, data, frequencies, error and geometry
        """
        cols = (1, 4, 6, 8, 9, 12, 15, 16)
        xx, sep, f, pf, ip, op, hmod, q = np.loadtxt(filename, skiprows=1,
                                                     usecols=cols, unpack=True)
        err = q / pf * 100.  # percentage of primary field

        if len(np.unique(sep)) > 1:
            print("Warning! Several coil spacings present in file!")

        self.coilSpacing = np.median(sep)
        f = np.round_(f)
        self.frequencies, mf, nf = np.unique(f, True, True)
        x, mx, nx = np.unique(xx, True, True)
        self.IP = np.ones((len(x), len(f))) * np.nan
        self.OP = np.ones((len(x), len(f))) * np.nan
        self.ERR = np.ones((len(x), len(f))) * np.nan

        for i in range(len(f)):
            # print(i, nx[i], nf[i])
            self.IP[nx[i], nf[i]] = ip[i]
            self.OP[nx[i], nf[i]] = op[i]
            self.ERR[nx[i], nf[i]] = err[i]
Example #25
def outputMEMEformat(disc_pwms, disc_bkg, disc_logevs, disc_nsites, outpre, use_bkg=False):
    _pv_format = "%3.1fe%+04.0f"
    f = open(outpre+"MEMEoutput.meme","w")
    f.write("MEME version 4.9.0\n\n")
    f.write("ALPHABET= ACGT\n\n")
    f.write("strands: + -\n\n")
    f.write("Background letter frequencies (from uniform background):\n")
    if use_bkg:
        bkg_freq_str = "A %5.5f C %5.5f G %5.5f T %5.5f\n\n" % tuple(disc_bkg[0][0])
    else:
        bkg_freq_str = "A 0.25000 C 0.25000 G 0.25000 T 0.25000\n\n"
    f.write(bkg_freq_str)
    n = 1
    for pwm, logev, nsites in zip(disc_pwms, disc_logevs, disc_nsites):
        g_string = sprint_logx(logev, 1, _pv_format)
        x = round_(pwm,3)
        w = x.shape[0]
        y = str(x).replace('[','').replace(']','').replace('  ',' ').replace('1.  ','1.000').replace('0.  ','0.000').replace('0.\n', '0.000\n').replace('1.\n', '1.000\n').replace('\n  ','\n')[1:]
        if y[-1] == '.':
            y += '000'  # add zeros at end if there's a hanging non-decimal number
        f.write('MOTIF M' + str(n) + ' O'+str(n)+'\n\n')
        f.write('letter-probability matrix: alength= 4 w= ' + str(w) + ' nsites= ' + str(nsites) + ' E= ' + g_string +  '\n')
        f.write(' ' + y)
        f.write('\n\n')
        n += 1
    f.close()        
Example #26
 def calculate_stats(self):
   stats_csv = self.get_stats_csv()
   imp_metric_stats_csv = self.get_important_sub_metrics_csv()
   csv_header = 'sub_metric,mean,std. deviation,median,min,max,90%,95%,99%\n'
   with open(stats_csv, 'w') as FH:
     FH.write(csv_header)
     for sub_metric in self.calculated_stats:
       percentile_data = self.calculated_percentiles[sub_metric]
       stats_data = self.calculated_stats[sub_metric]
       csv_data = ','.join([sub_metric, str(round(stats_data['mean'], 2)), str(round(stats_data['std'], 2)), str(round(stats_data['median'], 2)),
                            str(round(stats_data['min'], 2)), str(round(stats_data['max'], 2)), str(round(percentile_data[90], 2)),
                            str(round(percentile_data[95], 2)), str(round(percentile_data[99], 2))])
       FH.write(csv_data + '\n')
     self.stats_files.append(stats_csv)
   for sub_metric in self.calculated_percentiles:
     percentiles_csv = self.get_csv(sub_metric, 'percentiles')
     percentile_data = self.calculated_percentiles[sub_metric]
     with open(percentiles_csv, 'w') as FH:
       for percentile in sorted(percentile_data):
         FH.write(str(percentile) + ',' + str(numpy.round_(percentile_data[percentile], 2)) + '\n')
       self.percentiles_files.append(percentiles_csv)
   with open(imp_metric_stats_csv, 'w') as FH_IMP:
     FH_IMP.write(csv_header)
     for sub_metric in self.important_sub_metrics:
       if sub_metric in self.calculated_stats.keys():
         percentile_data = self.calculated_percentiles[sub_metric]
         stats_data = self.calculated_stats[sub_metric]
         csv_data = ','.join([sub_metric, str(round(stats_data['mean'], 2)), str(round(stats_data['std'], 2)), str(round(stats_data['median'], 2)),
                              str(round(stats_data['min'], 2)), str(round(stats_data['max'], 2)), str(round(percentile_data[90], 2)),
                              str(round(percentile_data[95], 2)), str(round(percentile_data[99], 2))])
         FH_IMP.write(csv_data + '\n')
     self.important_stats_files.append(imp_metric_stats_csv)
    def __get_figure_bar(self, dataframe_main, column_name, xlabel, ylabel):
        if not is_all_zeros(dataframe_main[column_name]):
            mean = np.round_(dataframe_main[column_name].mean(), 4)

            # Save to file
            if ENABLE_SEABORN:
                output_file_name = self.__report_file.split('.')[0]
                output_file_name += '_' + xlabel + '_' + column_name + '.eps'

                fig, ax = plt.subplots(1, 1)
                ax.get_xaxis().set_visible(False)

                title = 'Avg: ' + column_name + '=' + str(mean)
                bar_plot = dataframe_main[column_name].plot(ax=ax,
                                                            kind='bar',
                                                            table=True,
                                                            title=title)
                bar_plot.figure.savefig(output_file_name)
                bar_plot.figure.clf()

            # Bokeh draw
            bar_ratio = Bar(dataframe_main,
                            values=column_name,
                            label='ModuleId',
                            xlabel=xlabel,
                            ylabel=ylabel,
                            title='Avg: ' + column_name + '=' + str(mean))
            return bar_ratio
Example #28
def score_reconstructions(X, X_hat):
    D = []
    for i in range(X.shape[0]):
        score = dice(X[i].astype(int), np.round_(X_hat[i], 0).astype(int))
        D.append(score)
    print('Mean DICE Dissimilarity Score (0.0 is no dissimilarity, 1.0 is total dissimilarity): {}'.format(np.mean(D)))
    return D
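
A hedged usage sketch, assuming dice is scipy.spatial.distance.dice (the source does not show the import):

import numpy as np
from scipy.spatial.distance import dice
a = np.array([1, 0, 1, 1])
b = np.array([1, 1, 1, 0])
print(dice(a, b))  # ~0.333 (0 = identical, 1 = disjoint)
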
    def function(famille, period, legislation):
        period = period.this_month
        al = legislation(period).prestations.aides_logement
        pfam_n_2 = legislation(period.start.offset(-2, 'year')).prestations.prestations_familiales
        minim_n_2 = legislation(period.start.offset(-2, 'year')).prestations.minima_sociaux
        couple = famille('al_couple', period)
        al_nb_pac = famille('al_nb_personnes_a_charge', period)
        residence_dom = famille.demandeur.menage('residence_dom')

        n_2 = period.start.offset(-2, 'year')  # two years after the RSA came into force on 2009-06-01
        if n_2.date >= date(2009, 6, 1):
            montant_de_base = minim_n_2.rsa.montant_de_base_du_rsa
        else:
            montant_de_base = minim_n_2.rmi.montant_de_base_du_rmi

        R1 = montant_de_base * (
            al.r1.personne_isolee * not_(couple) * (al_nb_pac == 0) +
            al.r1.couple_sans_enf * couple * (al_nb_pac == 0) +
            al.r1.personne_isolee_ou_couple_avec_1_enf * (al_nb_pac == 1) +
            al.r1.personne_isolee_ou_couple_avec_2_enf * (al_nb_pac >= 2) +
            al.r1.majoration_enfant_a_charge_supp * (al_nb_pac > 2) * (al_nb_pac - 2)
            )

        R2 = pfam_n_2.af.bmaf * (
            al.r2.taux3_dom * residence_dom * (al_nb_pac == 1) +
            al.r2.personnes_isolees_ou_couples_avec_2_enf * (al_nb_pac >= 2) +
            al.r2.majoration_par_enf_supp_a_charge * (al_nb_pac > 2) * (al_nb_pac - 2)
            )

        R0 = round_(12 * (R1 - R2) * (1 - al.autres.abat_sal))

        return period, R0
    def function(self, simulation, period):
        period = period.this_month
        aide_logement_montant_brut = simulation.calculate('aide_logement_montant_brut', period)
        crds_logement = simulation.calculate('crds_logement', period)
        montant = round_(aide_logement_montant_brut + crds_logement, 2)

        return period, montant
Example #31
np.reciprocal
np.record
np.remainder
np.repeat
np.require
np.reshape(a=∂, newshape=0|(0,..), order='C|F|A')
np.resize
np.result_type
np.right_shift
np.rint
np.roll
np.rollaxis
np.roots
np.rot90
np.round
np.round_(a=[], ?decimals=0, ?out=None)
np.row_stack
np.s_
np.safe_eval
np.save
np.savetxt
np.savez
np.savez_compressed
np.ScalarType
np.sctype2char
np.sctypeDict
np.sctypeNA
np.sctypes
np.searchsorted
np.select
np.set_numeric_ops
Example #32
def nstop(x):
    #takes size parameter, outputs order to compute to according to
    # Wiscombe, Applied Optics 19, 1505 (1980).
    # 7/7/08: generalize to apply same criterion when x is complex
    return int(np.round_(np.absolute(x + 4.05 * x**(1. / 3.) + 2)))
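
Example: for a size parameter x = 10 the Wiscombe criterion gives 10 + 4.05 * 10**(1/3) + 2 ≈ 20.7, so nstop returns 21:

import numpy as np
print(nstop(10.0))  # 21
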
Example #33
def deepLearning(inputFileName, outputFileName, testFileName,
                 testOutputFileName, testOutputReal, test_report, validRate,
                 valid_report, modelConfig, deviceName, epoch, printed,
                 modelName):

    # You can do only 'training' or 'testing' by setting some arguments as None.
    # inputFileName == None and outputFileName == None -> testing only
    # testFileName == None                             -> training only
    # for validation, you can set testFileName == None <- validation uses training data only

    ##############################
    ##                          ##
    ##       0. READ DATA       ##
    ##                          ##
    ##############################

    # read files
    print('[00] reading train input / train output / test input files...')

    trainI = None
    trainO = None
    testI = None

    # input train data
    if inputFileName is not None: trainI = helper.getDataFromFile(inputFileName)

    # output train data (Sigmoid applied)
    if outputFileName is not None: trainO = helper.getDataFromFile(outputFileName)

    # test input data (set nullValue to 0)
    # set testI (array) as testFileName, if testFileName is an array
    if isinstance(testFileName, list):
        testI = testFileName

    # set testI (array) as test data from the file named as testFileName
    else:
        if testFileName is not None: testI = helper.getDataFromFile(testFileName)

    # read configuration file (to get normalization info)
    print('[01] reading configuration files...')
    f = open('config.txt', 'r')
    fl = f.readlines()
    f.close()
    for i in range(len(fl)):
        fl[i] = fl[i].split('\n')[0]

    normalizeName = None
    validInterval = 1
    testSizeOnce = 0  # max test data size at once (for both testing and validation)

    # extract configuration
    # trainInput     : train input data file name
    # trainOutput    : train output data file name
    # testInput      : test input data file name
    for i in range(len(fl)):
        configSplit = fl[i].split('\n')[0].split(' ')  # split

        # normalize info file name
        if configSplit[0] == 'normalizeName':
            normalizeName = configSplit[1]
            if normalizeName == 'None': normalizeName = None

        # validation interval
        elif configSplit[0] == 'validInterval':
            validInterval = int(configSplit[1])

        # test input size at once
        elif configSplit[0] == 'testSize':
            testSizeOnce = int(configSplit[1])

    # read normalization info file
    if normalizeName is not None and trainO is not None:
        print('[02] calculating and writing average and stddev...')

        trainOutputAvg = np.mean(trainO,
                                 axis=0)  # average of train output value
        trainOutputStddev = np.std(trainO,
                                   axis=0)  # stddev of train output value

        # normalize training output data and write avg and stddev
        writeNormalizeInfo(trainO, normalizeName)
    else:
        print('[03] Reading average and stddev failed.')
        trainOutputAvg = None
        trainOutputStddev = None

    # apply sigmoid to train output data
    if trainO is not None:
        print('[04] applying sigmoid to train output data...')
        for i in range(len(trainO)):
            for j in range(len(trainO[0])):
                trainO[i][j] = helper.sigmoid(trainO[i][j])

    # print input, output, and test data
    if printed != 0:
        if trainI is not None:
            print('\n ---- original input data (' + str(len(trainI)) +
                  ') ----\n')
            for i in range(len(trainI)):
                print(helper.roundedArray(trainI[i], 6))

        if trainO is not None:
            print('\n ---- original output data (' + str(len(trainO)) +
                  ') ----\n')
            for i in range(len(trainO)):
                print(helper.roundedArray(trainO[i], 6))

        if testI is not None:
            print('\n ---- original test data (' + str(len(testI)) +
                  ') ----\n')
            for i in range(len(testI)):
                print(helper.roundedArray(testI[i], 6))

    ##############################
    ##                          ##
    ##   1. READ MODEL CONFIG   ##
    ##                          ##
    ##############################

    # model design using model configuration file
    # activation function of final layer is always 'sigmoid'
    print('[10] reading model configuration...')
    f = open(modelConfig, 'r')
    modelInfo = f.readlines()
    f.close()

    ##############################
    ##                          ##
    ##   2A. TRAINING / TEST    ##
    ##                          ##
    ##############################

    # if the model already exists, input the test input to the NN and get the result
    # if the model does not exist, newly train NN using training input and output data and then do testing procedure
    if validRate == 0:

        # NN and optimizer
        print('[11] obtaining neural network and optimizer info...')

        if trainI is not None and trainO is not None:
            NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
            op = helper.getOptimizer(modelInfo)  # optimizer
            loss = helper.getLoss(modelInfo)  # loss

        try:  # try reading test.h5 and test.json
            print('[20] reading model [ ' + modelName + ' ]...')
            newModel = deepLearning_GPU.deepLearningModel(
                modelName, op, loss, True)
            testO = getTestResult(newModel, testI, testSizeOnce)

        except:  # do learning if test.h5 and test.json do not exist
            print('[21] learning...')

            # False and True are dataPrint (print the training data?) and modelPrint (print the model summary?), respectively
            print(trainO[0])
            deepLearning_GPU.deepLearning(NN, op, 'mean_squared_error', trainI,
                                          trainO, modelName, epoch, False,
                                          True, deviceName)

            print('[22] reading learned model [ ' + modelName + ' ]...')
            newModel = deepLearning_GPU.deepLearningModel(
                modelName, op, loss, True)

            # get test output if testI is not None
            if testI is None:
                print('test input file name (testInput) is None.')
                return
            else:
                testO = getTestResult(newModel, testI, testSizeOnce)

        # test
        print('[23] testing...')

        # estimate

        # inverse sigmoid
        for i in range(len(testO)):  # for each output data
            for j in range(len(testO[0])):  # for each value of output data
                testO[i][j] = helper.invSigmoid(testO[i][j])

        # check if test output exists, before writing test output file
        try:
            test = open(testOutputFileName, 'r')
            test.close()
            print(' **** Delete test output file (' + testOutputFileName +
                  ') first. ****')
            return
        except:
            pass

        # write to file
        print('[24] writing test result to file [ ' + testOutputFileName +
              ' ]...')

        # open file
        f = open(testOutputFileName, 'a')

        result = ''
        for i in range(len(testO)):  # for each output data
            if i % 1000 == 0: print(str(i) + ' / ' + str(len(testO)))

            for j in range(len(testO[0])):  # for each value of output data
                result += str(testO[i][j]) + '\t'
            result += '\n'

            # flush every 10,000 steps
            if i % 10000 == 0:
                f.write(result)
                result = ''

        # final append
        f.write(result)
        f.close()

        ##############################
        ##                          ##
        ##  2A+. WRITE TEST REPORT  ##
        ##                          ##
        ##############################

        # compare prediction output data with real output data and write report
        if testOutputReal is not None:
            try:
                writeTestResult(test_report, testOutputFileName,
                                testOutputReal, normalizeName, trainOutputAvg,
                                trainOutputStddev)
            except:
                pass

    ##############################
    ##                          ##
    ##      2B. VALIDATION      ##
    ##                          ##
    ##############################

    # validation (if validation rate > 0)
    else:

        ##############################
        ##                          ##
        ##   2B-0. DATA TO VALID    ##
        ##                          ##
        ##############################

        # make index-list of validation data
        print('[28] deciding data to validate...')
        inputSize = len(trainI)
        validSize = int(inputSize * validRate)
        trainSize = inputSize - validSize

        validArray = []
        for i in range(inputSize):
            validArray.append(0)
        while sum(validArray) < validSize:

            # start index for validation
            validStartIndex = int(
                random.randint(0, inputSize - 1) /
                validInterval) * validInterval

            # set data[validStartIndex : validStartIndex + validInterval] as validation data
            for i in range(validStartIndex, validStartIndex + validInterval):
                validArray[i] = 1

        # make train and validation data
        # _TrainO, _ValidO : sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        _TrainI = []  # training input
        _TrainO = []  # training output
        _ValidI = []  # valid input
        _ValidO = []  # valid output

        for i in range(inputSize):
            if validArray[i] == 0:  # training data
                _TrainI.append(trainI[i])
                _TrainO.append(trainO[i])
            else:  # validation data
                _ValidI.append(trainI[i])
                _ValidO.append(trainO[i])

        ##############################
        ##                          ##
        ## 2B-1. TRAIN (MAKE MODEL) ##
        ##                          ##
        ##############################

        # model name for validation
        newModelName = modelName + 'Valid'
        print('[29] training [ ' + newModelName + ' ]...')

        # NN and optimizer
        NN = helper.getNN(modelInfo, _TrainI, _TrainO)  # Neural Network
        op = helper.getOptimizer(modelInfo)  # optimizer
        loss = helper.getLoss(modelInfo)  # loss

        # output for validation
        try:  # try reading the validation model
            validModel = deepLearning_GPU.deepLearningModel(
                newModelName, op, loss, True)
            _predValidO = getTestResult(validModel, _ValidI, testSizeOnce)
        except:  # do learning if the validation model does not exist
            deepLearning_GPU.deepLearning(NN, op, loss, _TrainI, _TrainO,
                                          newModelName, epoch, False, True,
                                          deviceName)
            validModel = deepLearning_GPU.deepLearningModel(
                newModelName, op, loss, True)
            _predValidO = getTestResult(validModel, _ValidI, testSizeOnce)

        ##############################
        ##                          ##
        ##     2B-2. VALIDATION     ##
        ##                          ##
        ##############################
        print('[30] validating and writing result [ ' + valid_report + ' ]...')

        MAE = 0  # mean absolute error
        MSE = 0  # mean square error
        accuracy = 0  # accuracy

        # inverse sigmoid for PREDICTED validation output
        for i in range(len(_predValidO)):  # for each output data
            for j in range(len(
                    _predValidO[0])):  # for each value of output data
                _predValidO[i][j] = helper.invSigmoid(_predValidO[i][j])

        # inverse sigmoid for REAL validation output
        for i in range(len(_ValidO)):  # for each output data
            for j in range(len(_ValidO[0])):  # for each value of output data
                _ValidO[i][j] = helper.invSigmoid(_ValidO[i][j])

        # denormalize if normalized info is available (denormalize whole trainO)
        denormalize(normalizeName, len(_predValidO), len(_predValidO[0]),
                    _predValidO, trainOutputAvg, trainOutputStddev)
        denormalize(normalizeName, len(_ValidO), len(_ValidO[0]), _ValidO,
                    trainOutputAvg, trainOutputStddev)

        # compute error
        validCount = 0
        resultToWrite = ''
        outputCols = len(_ValidO[0])

        # for each data

        # set edgeitems and linewidth as infinite
        np.set_printoptions(edgeitems=10000, linewidth=1000000)

        for i in range(inputSize):
            if i % 1000 == 0: print(str(i) + ' / ' + str(inputSize))

            # validation for data whose value of valid array is 1
            if validArray[i] == 1:

                # compute MAE and MSE
                for j in range(outputCols):
                    MAE += abs(_ValidO[validCount][j] -
                               _predValidO[validCount][j])
                    MSE += pow(
                        _ValidO[validCount][j] - _predValidO[validCount][j], 2)

                # compute accuracy
                if helper.argmax(_ValidO[validCount]) == helper.argmax(
                        _predValidO[validCount]):
                    accuracy += 1

                # print and write result
                newResultToWrite = (
                    '[' + str(i) + '] pred = ' +
                    str(np.round_(_predValidO[validCount], 6)) + ', real = ' +
                    str(np.round_(_ValidO[validCount], 6)))
                resultToWrite += newResultToWrite + '\n'

                validCount += 1

        # restore the default edgeitems and linewidth
        np.set_printoptions(edgeitems=3, linewidth=75)

        # get the average of MAE, MSE and accuracy
        MAE /= (validSize * outputCols)
        MSE /= (validSize * outputCols)
        accuracy /= validSize

        # print evaluation result
        resultSummary = '----------------\n'
        resultSummary += 'input size : ' + str(inputSize) + '\n'
        resultSummary += 'train size : ' + str(trainSize) + '\n'
        resultSummary += 'valid size : ' + str(validSize) + '\n'
        resultSummary += 'MAE        : ' + str(round(MAE, 6)) + '\n'
        resultSummary += 'MSE        : ' + str(round(MSE, 6)) + '\n'
        resultSummary += 'accuracy   : ' + str(round(accuracy, 6)) + '\n'
        resultSummary += 'pred avg   : ' + str(np.average(_predValidO,
                                                          axis=0)) + '\n'
        resultSummary += 'real avg   : ' + str(np.average(_ValidO,
                                                          axis=0)) + '\n'
        print(resultSummary)
        resultToWrite += resultSummary

        # write result file
        fvalid = open(valid_report, 'w')
        fvalid.write(resultToWrite)
        fvalid.close()

        # return final result
        return (MAE, MSE, accuracy, np.average(_predValidO, axis=0),
                np.average(_ValidO, axis=0))
Example #34
    h = hash_npa(tile)
    # print(h)
    if h not in unique_tiles:
        unique_tiles[h] = tile
print("There are {} unique tiles".format(len(unique_tiles)))

print("Computing centroids")

# centroids = np.append( np.ones( (1,4*4), dtype=float), np.zeros( (CODEBOOK_SIZE-1,4*4)), axis=0 )
# centroids, labels = vq.kmeans2( tw, centroids, minit='matrix')

cb_size = min(len(tiles), CODEBOOK_SIZE)
centroids, labels = vq.kmeans2(
    vq.whiten(tiles), cb_size, iter=15,
    minit='points')  # ++ takes at least 6 minutes, never went to the end
centroids = np.round_((centroids * (0.5))).astype(np.bool_)

# Remove white and black that may have been found by kmean
centroids = [
    x for x in filter(lambda t: 0 < t.sum() < TILE_SIZE**2, centroids)
]
# centroids = np.insert( centroids, 0, tiles[0], 0)
# centroids = np.insert( centroids, 1, tiles[1], 0)
z = [tiles[0], tiles[1]]
z.extend(centroids)
centroids = z
print("{} base tiles + transparent".format(len(centroids)))

# for c in centroids:
#     print(c)
Example #35
            ## Fit stacking model
            if index_test == 0:
                clf.fit(trainY, trainX)

            ###  Metrics
            print >> log, "computing cv score"
            mean_auc = 0.0
            mean_accuracy = 0.0
            iter_ = 1

            cv_preds, models_score, models_f1 = clf.predict(trainY,
                                                            trainX,
                                                            testX,
                                                            testY,
                                                            show_steps=True)
            cv_preds_bin = np.round_(cv_preds, decimals=0)
            accuracy = metrics.accuracy_score(testY, cv_preds_bin)
            f1 = metrics.f1_score(testY, cv_preds_bin)
            print >> log, "Accuracy: %.2f" % accuracy

            ##  header
            row_cells = table.add_row().cells

            row_cells[0].text = key_d + '.' + str(iteration) + '.' + str(
                index_test)
            col = 1
            print(models_score)
            ##Table test X_X row
            for model in range(len(models_score)):
                cell = ("%.2f%%\n" % (models_score[model] * 100))
                row_cells[col].text = cell
  ax.set_ylabel(r"$\frac{d\sigma}{d\,\cos\theta} [$fb$]$")
  if "TrueAngle" in input_dir:
    ax.set_xlabel(r"$\cos(\theta_{ISR-corrected})$")

  # Mark which process it is
  chirality = IOFH.find_chirality(base_name)
  Z_direction = IOFH.find_Z_direction(base_name)
  mass_label = IOFH.find_2f_mass_label(base_name)
  process_str = "${}$".format(PN.difermion_process_str(
                               "mu", chirality, mass_label, Z_direction))
  ax.set_title(process_str)

  ax.set_xlim(-1,1)
  ax.set_ylim(0,ax.get_ylim()[1])

  ax.legend(loc=0, ncol=2,
            title=r"$\chi^2_{pure}/ndf = $" + str(np.round_(chisq_ndf_ha, decimals=1)) +
                  "\n" + r"$\chi^2_{cor}/ndf = $" + str(np.round_(chisq_ndf_hac, decimals=1)))

  fig.savefig("{}/{}_shape_check.pdf".format(output_dir, base_name))
  
  plt.close(fig)
  
  # --- Plot only helicity amplitude approach ----------------------------------
  
  # Plot the distributions and the fit results
  fig, ax = plt.subplots(tight_layout=True, figsize=(8.5,7))
  ax.errorbar(bin_middles,bin_vals,yerr=yerr, fmt='.',ms=12,mew=2, label="MC")
  ax.plot(bin_middles,fit_vals_ha, ls="--", lw=2, label="Pure Hel. Ampl.")
  
  ax.set_xlabel(r"$\cos(\theta)$")
  ax.set_ylabel(r"$\frac{d\sigma}{d\,\cos\theta} [$fb$]$")
  
def im2double(im):
    min_val = np.min(im.ravel())
    max_val = np.max(im.ravel())
    out = (im.astype('float') - min_val) / (max_val - min_val)
    return np.round_(out, decimals=4)
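
Usage sketch: normalizing an 8-bit image into [0, 1] with 4-decimal precision:

import numpy as np
im = np.array([[0, 128, 255]], dtype=np.uint8)
print(im2double(im))  # [[0.    0.502 1.   ]]
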
Example #38
def naverwerking(Res, Afstanden, Lengtes):

    RDisp_real = np.array(Res["RDisp_real"])
    RDisp_imag = np.array(Res["RDisp_imag"])
    ZDisp_real = np.array(Res["ZDisp_real"])
    ZDisp_imag = np.array(Res["ZDisp_imag"])
    Rcoord = np.array(Res["Rcoord"])
    Frequency = np.array(Res["Frequency"])

    if isinstance(Afstanden, list):
        aantalCases = len(Afstanden)
        listinput = True
    else:  # a scalar was given; wrap it in a trivial array
        Afstanden = [Afstanden]
        Lengtes = [Lengtes]
        aantalCases = 1
        listinput = False

    if "MaxFreqLimited" in Res:
        MaxFreqLimited = Res["maxFreqLimited"]
        # met lengte aantaltreintypes
        if MaxFreqLimited <= 0:
            MaxFreqLimited = Frequency[-1]
    else:
        MaxFreqLimited = Frequency[-1]

    # If the FEM run has a maximum frequency lower than the requested
    # frequency range, the values above that maximum cannot be trusted and
    # we use the value of the highest reliable frequency.
    # This could be done more elegantly:
    # first check whether the values above Fmax are really that far off;
    # if so, determine the values with a Barkan-like extrapolation;
    # the phase info should also be handled more neatly, since the
    # estimation of c suffers from this

    Resultaten = []
    # these will be filled below
    for case in range(aantalCases):
        GevraagdeAfstand = Afstanden[case]
        Lengte = Lengtes[case]

        # ============ find the nodes (afstandIndex) for this case ============
        # to do so, first extract the relevant distances from the bulk data,
        # i.e. the distances between GevraagdeAfstand and GevraagdeAfstand + 10
        # (10 metres further)

        if Rcoord[-1] < GevraagdeAfstand + Lengte:
            # print('FEM soil model not long enough for this requested distance')
            exit(201)
        # determine the indices of the points in the desired distance range
        # afstandIndex = find(Rcoord >= GevraagdeAfstand & Rcoord <= GevraagdeAfstand+Lengte);
        afstandIndex1 = np.where(Rcoord >= GevraagdeAfstand)
        afstandIndex2 = np.where(Rcoord <= GevraagdeAfstand + Lengte)
        afstandIndex = np.intersect1d(afstandIndex1, afstandIndex2)

        # ============ wave speed c ============

        # now we work with the phase information (instead of the amplitudes),
        # via the equivalent of the Matlab function "angle": input is a
        # complex number, output an angle in radians (between -2pi and +2pi)
        Rfase = np.angle(RDisp_real + 1j * RDisp_imag)
        Zfase = np.angle(ZDisp_real + 1j * ZDisp_imag)

        # then we unwrap, using prior knowledge of how the phase evolves
        # with distance; we start by determining the derivative of the
        # phase with respect to distance
        dRfase = np.diff(Rfase, axis=0)
        # diff works, like all Matlab functions, along the columns
        dZfase = np.diff(Zfase, axis=0)
        # so what remains is a row
        # not a true derivative, by the way, since it is not divided by dx
        # then the trick: an unwrap by detecting the phase jumps
        dRfase = np.where(dRfase > 0, 0, dRfase)
        dZfase = np.where(dZfase > 0, 0, dZfase)

        # now see what the total phase lag is at the desired distance;
        # we take the last afstandIndex (created earlier) and assume that
        # the FEM always delivers the distances in ascending order
        # aI = afstandIndex(end-1);              # diff is 1 shorter, so not all the way
        aI = afstandIndex[-2]
        RfaseUnwrapped = sum(dRfase[1:aI, :])
        # sum of all elements up to and including element aI, per frequency
        ZfaseUnwrapped = sum(dZfase[1:aI, :])
        # this could also be folded into the nested loop above
        # sometimes a frequency yields 0, namely right next to the source, and a 0 here would cause a division by zero further on
        # we never use a cR close to the source anyway, so just fill in a small value to keep going
        RfaseUnwrapped = np.where(RfaseUnwrapped >= 0, 0.1, RfaseUnwrapped)
        ZfaseUnwrapped = np.where(ZfaseUnwrapped >= 0, 0.1, ZfaseUnwrapped)

        # then derive the propagation speed from the total phase rotation and the distance
        afstand = Rcoord[aI]
        cRsmal = -Frequency * afstand / (RfaseUnwrapped / 6.28)
        cZsmal = -Frequency * afstand / (ZfaseUnwrapped / 6.28)

        # check for odd c values when MaxFreqLimited is below the desired
        # frequency range; in that case adjust the displacements and c
        if Frequency[-2] > MaxFreqLimited:  # ok, reason for concern
            Betrouwbaar = np.where(Frequency <= MaxFreqLimited)
            Repareren = np.where(Frequency > MaxFreqLimited)
            Repareren = Repareren[0]
            Betrouwbaar = Betrouwbaar[0]
            cZdetrend = signal.detrend(cZsmal)
            afwijking = np.std(cZdetrend[Betrouwbaar])
            # measure of acceptable deviation
            Iexplosie = np.where(abs(cZdetrend[Repareren]) > afwijking * 2)
            Iexplosie = Iexplosie[0]
            if np.size(Iexplosie) > 0:
                begin = Betrouwbaar[-1]
                einde = Repareren[Iexplosie[0]]
                for i1 in range(Repareren[Iexplosie[0]], Repareren[-1] + 1):
                    RDisp_real[afstandIndex,
                               i1] = np.mean(RDisp_real[afstandIndex,
                                                        begin:einde],
                                             axis=1)
                    RDisp_imag[afstandIndex,
                               i1] = np.mean(RDisp_imag[afstandIndex,
                                                        begin:einde],
                                             axis=1)
                    ZDisp_real[afstandIndex,
                               i1] = np.mean(ZDisp_real[afstandIndex,
                                                        begin:einde],
                                             axis=1)
                    ZDisp_imag[afstandIndex,
                               i1] = np.mean(ZDisp_imag[afstandIndex,
                                                        begin:einde],
                                             axis=1)
                    cRsmal[i1] = np.mean(cRsmal[begin:einde])
                    cZsmal[i1] = np.mean(cZsmal[begin:einde])

        # then average the results into octave bands
        cZ = np.zeros(6)
        cX = np.zeros(6)
        c_ratio = np.zeros(6)
        for octaafnr in range(len(octaafbanden)):
            frequentieIndex1 = np.where(Frequency >= ondergrenzen[octaafnr])
            frequentieIndex2 = np.where(Frequency <= bovengrenzen[octaafnr])
            frequentieIndex = np.intersect1d(frequentieIndex1,
                                             frequentieIndex2)
            if len(frequentieIndex) > 0:
                cX[octaafnr] = np.mean(cRsmal[frequentieIndex])
                cZ[octaafnr] = np.mean(cZsmal[frequentieIndex])
                c_ratio[octaafnr] = cX[octaafnr] / cZ[octaafnr]

        c = np.round(cZ, decimals=0)
        c_ratio = np.round(c_ratio, decimals=2)

        # ============ determine admittance Y and Y_ratio ============

        # select RDisp and ZDisp at these distances from the complete set,
        # converting straight to amplitudes with Pythagoras (the complex modulus)
        RDisp = abs(RDisp_real[afstandIndex, :] +
                    1j * RDisp_imag[afstandIndex, :])
        ZDisp = abs(ZDisp_real[afstandIndex, :] +
                    1j * ZDisp_imag[afstandIndex, :])

        # average over the distances
        RDispMean = np.mean(RDisp, axis=0)
        # (Matlab does this automatically along the first dimension)
        ZDispMean = np.mean(ZDisp, axis=0)

        # then sum over the frequencies, per octave band
        YZ = np.zeros(6)
        YX = np.zeros(6)

        for octaafnr in range(len(octaafbanden)):
            # guard against a frequency range that is too low
            #frequentieIndex = find(Frequency>=ondergrenzen[octaafnr] & Frequency<=bovengrenzen[octaafnr]);
            frequentieIndex1 = np.where(Frequency >= ondergrenzen[octaafnr])
            frequentieIndex2 = np.where(Frequency <= bovengrenzen[octaafnr])
            frequentieIndex = np.intersect1d(frequentieIndex1,
                                             frequentieIndex2)
            if len(frequentieIndex) == 0:
                # this can happen if, e.g., the model was only run up to 32 Hz;
                # YZ then keeps the value 0 for that band, which is fine
                print('no frequencies found for an octave band')
            else:
                YZ[octaafnr] = np.mean(6.28 * Frequency[frequentieIndex] *
                                       ZDispMean[frequentieIndex])
                YX[octaafnr] = np.mean(6.28 * Frequency[frequentieIndex] *
                                       RDispMean[frequentieIndex])
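        # Y = 2*pi*f * |u| converts displacement amplitude to velocity amplitude,
        # i.e. a mobility/admittance up to the force scaling (which cancels in
        # Y_ratio); 6.28 ~ 2*pi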

        legebanden = np.where(YZ == 0)  # bands for which no frequencies were found

        Y = np.round(YZ, decimals=14)
        Y_ratio = np.round(YX / YZ, decimals=2)
        Y_ratio[legebanden] = 0

        # ============ phase of Y at the requested distance ============
        fase = np.zeros(6)
        ZDispMeanAfstand_real = np.mean(ZDisp_real[afstandIndex, :], axis=0)
        ZDispMeanAfstand_imag = np.mean(ZDisp_imag[afstandIndex, :], axis=0)
        for octaafnr in range(len(octaafbanden)):
            frequentieIndex1 = np.where(Frequency >= ondergrenzen[octaafnr])
            frequentieIndex2 = np.where(Frequency <= bovengrenzen[octaafnr])
            frequentieIndex = np.intersect1d(frequentieIndex1,
                                             frequentieIndex2)
            if len(frequentieIndex) > 0:
                ZDispMean_real = np.mean(
                    ZDispMeanAfstand_real[frequentieIndex], axis=0)
                ZDispMean_imag = np.mean(
                    ZDispMeanAfstand_imag[frequentieIndex], axis=0)
                fase[octaafnr] = np.angle(ZDispMean_real + 1j * ZDispMean_imag)
        fase = np.round_(fase, decimals=2)

        # ============ and collect everything in a dictionary ============
        Resultaten.append({
            'GevraagdeAfstand': GevraagdeAfstand,
            'Lengte': Lengte,
            'c': c.tolist(),
            'c_ratio': c_ratio.tolist(),
            'Y': Y.tolist(),
            'Y_ratio': Y_ratio.tolist(),
            'fase': fase.tolist()
        })
        if not listinput:
            # a single (non-list) input means a single pass: return the bare dict
            Resultaten = Resultaten[0]

    return Resultaten
def get_features(image, x, y, feature_width, scales=None):
    assert image.ndim == 2, 'Image must be grayscale'
    # calculate the image gradients
    gx = cv2.Sobel(image, cv2.CV_64F, 1, 0, 1)
    gy = cv2.Sobel(image, cv2.CV_64F, 0, 1, 1)
    g = (gx**2 + gy**2)**0.5  # gradient magnitude
    theta = np.arctan2(gy, gx) * 180 / np.pi  # gradient orientation in degrees

    feat_dim = 128
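    # 128 = 4 x 4 spatial cells x 8 orientation bins (SIFT-style descriptor layout)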
    x1 = np.round_(x)
    y1 = np.round_(y)
    k = int(feature_width)  # Size of feature
    neighbours = 1  # pixels from specified neighbourhood can contribute to the histogram of orientations
    weights = cv2.getGaussianKernel(ksize=k + 2 * neighbours, sigma=(k) *
                                    0.4)  # gaussian window weighting
    weights = np.dot(weights, weights.transpose())
    fv = np.zeros((int(x1.shape[0]), (int(feat_dim))))

    for index in range(x1.shape[0]):
        i = y1[index]
        j = x1[index]
        window_g = g[int(i - k / 2 - neighbours):int(i + k / 2 + neighbours),
                     int(j - k / 2 - neighbours):int(j + k / 2 +
                                                     neighbours)] * weights
        window_theta = theta[
            int(i - k / 2 - neighbours):int(i + k / 2 + neighbours),
            int(j - k / 2 - neighbours):int(j + k / 2 + neighbours)] * weights
        f = np.array([])

        for m in range(0, k, int(k / 4)):
            for n in range(0, k, int(k / 4)):
                g_flat = (window_g[m:m + int(k / 4 + 2 * neighbours), n:n +
                                   int(k / 4 + 2 * neighbours)]).flatten()
                theta_flat = (
                    window_theta[m:m + int(k / 4 + 2 * neighbours),
                                 n:n + int(k / 4 + 2 * neighbours)]).flatten()
                hist = np.zeros(8)
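                # soft-bin each gradient: its magnitude votes into its own
                # 45-degree orientation bin with weight cos(offset) and into the
                # next bin with weight sin(offset + 45 deg), where offset is the
                # angle measured from the start of the current bin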
                for z in range(theta_flat.shape[0]):
                    if theta_flat[z] < 0:
                        theta_flat[z] = theta_flat[z] + 360

                    if theta_flat[z] < 45:
                        hist[0] = hist[0] + np.cos(
                            (theta_flat[z]) * np.pi / 180) * g_flat[z]
                        hist[1] = hist[1] + np.sin(
                            (theta_flat[z] + 45) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 45) and (theta_flat[z] < 90):
                        hist[1] = hist[1] + np.cos(
                            (theta_flat[z] - 45) * np.pi / 180) * g_flat[z]
                        hist[2] = hist[2] + np.sin(
                            theta_flat[z] * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 90) and (theta_flat[z] < 135):
                        hist[2] = hist[2] + np.cos(
                            (theta_flat[z] - 90) * np.pi / 180) * g_flat[z]
                        hist[3] = hist[3] + np.sin(
                            (theta_flat[z] - 45) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 135) and (theta_flat[z] < 180):
                        hist[3] = hist[3] + np.cos(
                            (theta_flat[z] - 135) * np.pi / 180) * g_flat[z]
                        hist[4] = hist[4] + np.sin(
                            (theta_flat[z] - 90) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 180) and (theta_flat[z] < 225):
                        hist[4] = hist[4] + np.cos(
                            (theta_flat[z] - 180) * np.pi / 180) * g_flat[z]
                        hist[5] = hist[5] + np.sin(
                            (theta_flat[z] - 135) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 225) and (theta_flat[z] < 270):
                        hist[5] = hist[5] + np.cos(
                            (theta_flat[z] - 225) * np.pi / 180) * g_flat[z]
                        hist[6] = hist[6] + np.sin(
                            (theta_flat[z] - 180) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 270) and (theta_flat[z] < 315):
                        hist[6] = hist[6] + np.cos(
                            (theta_flat[z] - 270) * np.pi / 180) * g_flat[z]
                        hist[7] = hist[7] + np.sin(
                            (theta_flat[z] - 225) * np.pi / 180) * g_flat[z]

                    elif (theta_flat[z] >= 315) and (theta_flat[z] < 360):
                        hist[7] = hist[7] + np.cos(
                            (theta_flat[z] - 315) * np.pi / 180) * g_flat[z]
                        hist[0] = hist[0] + np.sin(
                            (theta_flat[z] - 270) * np.pi / 180) * g_flat[z]

                f = np.append(f, hist, 0)
        if np.any(f):  # avoid dividing by zero for empty windows
            f = f / np.linalg.norm(f, ord=2)  # L2-normalize the descriptor
        fv[index, :] = f
    fv = fv**1  # an exponent < 1 would accentuate small feature values; 1 leaves them unchanged
    return fv
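
# A minimal usage sketch for get_features (hypothetical image file and keypoints,
# e.g. as produced by a Harris-corner detector):
#
#   img = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
#   xs = np.array([40.0, 96.0])   # keypoint column coordinates
#   ys = np.array([52.0, 80.0])   # keypoint row coordinates
#   descriptors = get_features(img, xs, ys, feature_width=16)
#   print(descriptors.shape)      # (2, 128): one SIFT-like vector per keypoint
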
def showDCT():
    global outputImage, dctImage, translate

    # Find dimensions and factors

    separator = 3

    n = blockSize * blockSize + (blockSize + 1) * separator
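    # each row/column of the figure holds blockSize*blockSize basis-function
    # pixels plus (blockSize + 1) separator strips of width `separator`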
    # rows & columns needed

    minDim = 0  # min window dimension
    if windowWidth < windowHeight:
        minDim = windowWidth
    else:
        minDim = windowHeight - 20  # 20 for message at bottom

    factor = int(minDim / n)  # factor by which to scale dctBases[][]

    # Find min & max values (named to avoid shadowing the builtins min/max)

    minVal = 0.0
    maxVal = 0.0

    for u in range(blockSize):
        for v in range(blockSize):
            for x in range(blockSize):
                for y in range(blockSize):
                    c = dctBases[u, v, x, y]
                    if c < minVal:
                        minVal = c
                    if c > maxVal:
                        maxVal = c

    # We'll assume that minVal<0 and maxVal>0
    #
    # Set minVal and maxVal to be equidistant from 0.

    if -minVal > maxVal:
        maxVal = -minVal
    else:
        minVal = -maxVal

    # Draw the image

    start = int(np.round_(factor * (0.5 * separator)))
    end = int(
        np.round_(factor * (blockSize * blockSize +
                            (blockSize + 0.5) * separator)))

    dctImage = np.empty((start + end + 2, start + end + 2, 3), np.uint8)

    # white background

    dctImage[:, :, :] = YCbCr_white

    # Draw each basis function

    for u in range(blockSize):
        for v in range(blockSize):

            for x in range(blockSize):
                xStart = factor * (u * blockSize + (u + 1) * separator + x)

                for y in range(blockSize):
                    yStart = factor * (v * blockSize + (v + 1) * separator + y)

                    # normalized basis value in [0, 1]
                    t = (dctBases[u, v, x, y] - minVal) / (maxVal - minVal)
                    if showWalshHadamard:
                        c = (np.round_(t) * 255, 128, 128)  # binary grey level
                    else:
                        c = (np.round_(t * 255), 128, 128)  # grey level

                    for i in range(factor):
                        for j in range(factor):
                            dctImage[xStart + i, yStart + j] = c

    # Separate with lines

    start = int(np.round_(factor * (0.5 * separator)))
    end = int(
        np.round_(factor * (blockSize * blockSize +
                            (blockSize + 0.5) * separator)))

    for u in range(blockSize + 1):
        x = int(np.round_(factor * (u * blockSize + (u + 0.5) * separator)))
        for y in range(start, end + 1):
            dctImage[x, y, :] = (203, 86, 75)
            dctImage[y, x, :] = (203, 86, 75)

    outputImage = dctImage.copy()
import json
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(-3, 3.1, 0.1)
x = np.round_(x, 1)
IRT = []
with open(
        'C://Users//xpdlw//Desktop//A010000025_A010025001.json') as json_file:
    json_data = json.load(json_file)

    dif_level = json_data['difficultyLevel']
    guess_level = json_data['guessLevel']
    dis_level = json_data['discriminationLevel']

print(type(dif_level))
print(guess_level)
print(dis_level)
print(json_data)
print(x)
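
# Three-parameter logistic (3PL) IRT model:
#   P(theta) = c + (1 - c) / (1 + exp(-1.702 * a * (theta - b)))
# with a = discrimination, b = difficulty, c = guessing; the constant 1.702
# scales the logistic curve to approximate the normal ogive.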
for i in x:
    IRT.append(guess_level + (1 - guess_level) /
               (1 + np.exp(-1.702 * dis_level * (i - dif_level))))

plt.plot(x, IRT)
plt.show()
##    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 20)
    ret, frame = cap.read()
    test_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    std_img = std.fit_transform(test_img)
    std_img = cv2.resize(std_img, (120,120),interpolation=cv2.INTER_AREA)
    blur_img = cv2.GaussianBlur(std_img, (3,3),0)

    if model_used == "CNN":
        image = np.reshape(std_img,(1,120,120,1))
    elif model_used == 'RF':
        image = std_img.ravel()
        image = image.reshape(1,-1)
        
    prediction = model.predict(image)
    print('Probability of Garbage: ',prediction[0])
    result = categories[int(np.round_(prediction[0]))]

    black = [0,0,0]     #---Color of the border---
    constant=cv2.copyMakeBorder(frame,10,10,10,10,cv2.BORDER_CONSTANT,value=black )
    #--- Here I created a violet background to include the text ---
    violet= np.zeros((100, constant.shape[1], 3), np.uint8)
    violet[:] = (255, 0, 180)
    vcat = cv2.vconcat((violet, constant))
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(vcat,result,(30,50), font, 2,(0,0,0), 3, 0)

    print(frame)

    if frame is not None:
        #while a>20:
        cv2.imshow('Garbage Detection', vcat)
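# A PyAudio-style stream callback: in_data arrives as raw bytes, gets scaled to
# [-1, 1], band-pass filtered (filter() here is a project-specific helper, not
# the builtin), rescaled, and returned with paContinue to keep the stream running.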
def callback(in_data, frame_count, time_info, status):
    audio_data = np.frombuffer(in_data, dtype=D_TYPE)
    normalized = [x / NORM_CONST for x in audio_data]
    out = filter(11025, 50, 1000, 1, 7, 'ellip', normalized)
    norm_out = np.array(np.round_(out * NORM_CONST))
    return (norm_out.astype(D_TYPE).tobytes(), paContinue)
#filename = 'saved_model.sav'
#pickle.dump(model_red,open(filename, 'wb'))

#loaded_model = pickle.load(open(filename, 'rb'))
#result = loaded_model.score(X_validation,Y_validation)
predictions_train_red = model_red.predict(X_train_red)
predictions_train_white = model_white.predict(X_train_white)
predictions_test_red = model_red.predict(X_validation_red)
predictions_test_white = model_white.predict(X_validation_white)

# calculating rmse
train_red_rmse = mean_squared_error(predictions_train_red, Y_train_red)**0.5
print(train_red_rmse)
test_red_rmse = mean_squared_error(predictions_test_red, Y_validation_red)**0.5
print(test_red_rmse)

# rounding off the predicted values for the test set
predicted_data_red = np.round_(predictions_test_red)
print(predicted_data_red)
print('Mean Absolute Error:',
      mean_absolute_error(Y_validation_red, predictions_test_red))
print('Mean Squared Error:',
      mean_squared_error(Y_validation_red, predictions_test_red))
print('Root Mean Squared Error:',
      np.sqrt(mean_squared_error(Y_validation_red, predictions_test_red)))
# displaying coefficients of each feature
coefficients_red = pd.DataFrame(model_red.coef_)
#coefficients_red.columns = ['Coefficient']
print(coefficients_red)

#print(X_validation)
#print(Y_validation)
#evaluate predictions
# Arguments for the Tiny Face Detector
class args_eval():
    def __init__(self):
        self.nms_thresh = 0.3
        self.prob_thresh = 0.03
        self.checkpoint = "../models/tinyfaces/checkpoint_50.pth"
        self.template_file = "../helper/tinyfaces/data/templates.json"
        self.threshold_score = 0


args_tinyface = args_eval()

# Get templates for Tiny Face Detector
templates = json.load(open(args_tinyface.template_file))
json.dump(templates, open(args_tinyface.template_file, "w"))
templates = np.round_(np.array(templates), decimals=8)
num_templates = templates.shape[0]

# Getting transforms for the Tiny Face Detector
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_transforms = transforms.Compose([transforms.ToTensor(), normalize])

# Specify whether to run on GPU or CPU based on user arguments 
if args.gpu > 0:
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Load the Tiny Face Detector
model_tinyfaces = get_model(args_tinyface.checkpoint, num_templates=num_templates, gpu= (args.gpu > 0))
def result():
    if request.method == 'POST':
        print(request.form)
        stocks = request.form['stocks'].split()
        strt_date = datetime.strptime(request.form['start_date'], '%Y-%m-%d')
        window = int(request.form['window'])
        print(stocks)

        # Get data
        start_date = strt_date - timedelta(days=window + 5)
        end_date = datetime.today()
        initial_capital = 1000000

        # stocks = ['MSFT','AAPL','GOOG','AMZN','XOM','HSBC','BRK-B','JPM','BAC','WFC']

        print("Loading data for following stocks: ")
        print(stocks)
        spy = pdr.get_data_yahoo(symbols='SPY', start=start_date, end=end_date)
        stock_data = pdr.get_data_yahoo(symbols=stocks, start=start_date, end=end_date)
        print("Stock data loaded ...")

        # Calculate Benchmark Returns
        spy['Returns'] = spy['Adj Close'].pct_change(1)


        # Calculate returns
        for ticker in stock_data.columns.levels[1]:
            stock_data['Return', ticker] = stock_data['Adj Close', ticker].pct_change(1)
        print("Calculated Returns ...")

        # Calculate Inverse Variance
        for ticker in stock_data.columns.levels[1]:
            stock_data['InvVar', ticker] = stock_data['Return', ticker].rolling(window).apply(lambda x: 1/x.var(), raw=False)
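        # inverse-variance weighting: w_i = (1/var_i) / sum_j(1/var_j), so less
        # volatile stocks get larger weights; the normalization happens just below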

        # Calculate Portfolio weights
        df = stock_data.InvVar
        df = df.div(df.sum(axis=1), axis=0)
        for ticker in stock_data.columns.levels[1]:
            stock_data['Weights', ticker] = df[ticker].round(2)
        del df
        print("Calculated Portfolio Weights ...")

        # Remove NaN values
        stock_data.dropna(inplace=True)


        # # Portfolio Returns
        # 1. Get Change in weights
        # 2. Get number of shares
        # 3. Get Trading costs
        # 4. Calculate Porfolio Value and Cumulative returns.


        Portfolio = stock_data[['Adj Close', 'Weights']]

        stock_val = np.array(Portfolio['Adj Close'])
        stock_weight = np.array(Portfolio['Weights'])
        port_val = np.empty(shape=stock_val.shape[0])
        shares_holding = np.empty(shape=stock_val.shape)
        traded_volume = np.zeros(shape=stock_val.shape)
        traded_value = np.zeros(shape=stock_val.shape)
        trading_cost = np.zeros(shape=stock_val.shape)

        shares_holding[0] = np.round_(stock_weight[0]/stock_val[0]*initial_capital)
        port_val[0] = np.dot(shares_holding[0], stock_val[0])

        for ind in range(1, len(stock_val)):
            shares_holding[ind] = shares_holding[ind-1] + np.round_((stock_weight[ind]-stock_weight[ind-1])/stock_val[ind]*port_val[ind-1])
            traded_volume[ind] = shares_holding[ind] - shares_holding[ind-1]
            traded_value[ind] = traded_volume[ind]*stock_val[ind]
            trading_cost[ind] = 0.01*abs(traded_value[ind])
            port_val[ind] = np.dot(shares_holding[ind], stock_val[ind])
            port_val[ind] = port_val[ind] - traded_value[ind].sum() - trading_cost[ind].sum()

        print("Calculated Portfolio returns ...")

        Portfolio['Value'] = pd.DataFrame(data=port_val, index=Portfolio['Adj Close'].index)
        # Portfolio['Return'] = Portfolio['Value'].pct_change(1)
        Traded_Volume = pd.DataFrame(data=traded_volume, index=Portfolio['Adj Close'].index, columns=Portfolio['Adj Close'].columns)
        # Traded_Value = pd.DataFrame(data=traded_value, index=Portfolio['Adj Close'].index, columns=Portfolio['Adj Close'].columns)
        # Trading_Cost = pd.DataFrame(data=trading_cost)
        # Shares_Holding = pd.DataFrame(data=shares_holding, index=Portfolio['Adj Close'].index, columns=Portfolio['Adj Close'].columns)

        del stock_val
        del stock_weight
        del port_val
        del shares_holding
        del traded_volume
        del traded_value
        del trading_cost

        to_trade = Traded_Volume.iloc[-1].to_dict()

        #Plot traded volume and shares holding

        # ax1 = plt.gca()
        # (Portfolio['Value']/Portfolio['Value'].iloc[0])[start_date:].plot(figsize=(10,6), ax=ax1, title="Portfolio Performance");
        # (spy['Adj Close']/spy['Adj Close'][Portfolio['Value'].index.values[0]])[start_date:].plot(ax=ax1);
        # ax1.legend(['Portfolio','S&P 500'])
        # portfolio_value_url = 'static/img/portfolio_value.png'
        # plt.savefig(portfolio_value_url)

        fig = plt.figure(figsize=(10,6))
        ax = plt.subplot(111)
        ax.plot((Portfolio['Value']/Portfolio['Value'].iloc[0])[start_date:])
        ax.plot((spy['Adj Close']/spy['Adj Close'][Portfolio['Value'].index.values[0]])[start_date:])
        plt.xlabel("Date")
        plt.ylabel("Returns")
        plt.title('Portfolio Value')
        ax.legend(['Portfolio','SP 500'])
        #plt.show()
        portfolio_value_url = 'static/img/portfolio_value.png'
        fig.savefig(portfolio_value_url)

        fig2 = plt.figure(figsize=(10,6))
        ax2 = plt.subplot(111)
        ax2.pie(Portfolio['Weights'].iloc[-1], autopct='%.2f%%', labels=Portfolio['Weights'].columns)
        plt.title('Portfolio Composition')

        portfolio_weights_url = 'static/img/portfolio_weights.png'
        fig2.savefig(portfolio_weights_url)

        # ax2 = plt.gca()
        # Portfolio['Weights'].iloc[-1].plot(kind='pie', autopct='%.2f%%', ax=ax2, title="Portfolio Weights", legend=False);
        # portfolio_weights_url = 'static/img/portfolio_weights.png'
        # plt.savefig(portfolio_weights_url)

        return render_template('results.html', stocks=stocks, start_date=strt_date, to_trade=to_trade,
                                window=window,portfolio_weights_url=portfolio_weights_url,
                                portfolio_value_url=portfolio_value_url)
dignity = dignity.loc[(dignity.year == 2006) & (dignity.micro == True) &
                      (dignity.race == -1) & (dignity.latin == -1) &
                      (dignity.gender == -1) & (dignity.age >= 40), :]
dignity.loc[:, 'S'] = dignity.S / dignity.S.iloc[0]

# Retrieve nominal consumption per capita in 2006
bea = dpb.data(BEAkey)
consumption2006 = float(
    1e6 * bea.NIPA('T20405', frequency='A', year=2006).iloc[0].values /
    (1e3 * bea.NIPA('T20100', frequency='A', year=2006).iloc[39].values))

# Compute the value of statistical life in 2006
VSL2006 = 7.4e6

# Calculate u-bar
ubar = VSL2006 / (np.round_(consumption2006, decimals=0) *
                  dignity.S.sum()) - np.dot(
                      dignity.S,
                      np.log(dignity.AC) + vℓ(dignity.AL)) / dignity.S.sum()

# Initialize a data frame for the re-calibrated u-bar values
factor = np.linspace(0, 1, 101)
intercept = expand({
    'factor': factor,
    'adjustment': ['health', 'incarceration', 'unemployment']
}).append(pd.DataFrame({'ubar': ubar}, index=[0])).reset_index(drop=True)

################################################################################
#                                                                              #
# This section of the script calibrates the flow utility intercept adjusted    #
# for health.                                                                  #
def model_performance(model, subtitle):
    # Kfold
    cv = KFold(n_splits=5, shuffle=False)  # random_state has no effect unless shuffle=True
    y_real = []
    y_proba = []
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    i = 1

    for train, test in cv.split(X, y):
        model.fit(X.iloc[train], y.iloc[train])
        pred_proba = model.predict_proba(X.iloc[test])
        precision, recall, _ = precision_recall_curve(y.iloc[test], pred_proba[:, 1])
        y_real.append(y.iloc[test])
        y_proba.append(pred_proba[:, 1])
        fpr, tpr, t = roc_curve(y.iloc[test], pred_proba[:, 1])
        tprs.append(np.interp(mean_fpr, fpr, tpr))
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)

    # Confusion matrix
    y_pred = cross_val_predict(model, X, y, cv=5)
    conf_matrix = confusion_matrix(y, y_pred)
    trace1 = go.Heatmap(z=conf_matrix, x=["0 (pred)", "1 (pred)"],
                        y=["0 (true)", "1 (true)"], xgap=2, ygap=2,
                        colorscale='Viridis', showscale=False)


    #Show metrics
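    # sklearn's binary confusion_matrix is laid out [[tn, fp], [fn, tp]]:
    # conf_matrix[i, j] counts samples of true class i predicted as class j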
    tp = conf_matrix[1,1]
    fn = conf_matrix[1,0]
    fp = conf_matrix[0,1]
    tn = conf_matrix[0,0]
    Accuracy  =  ((tp+tn)/(tp+tn+fp+fn))
    Precision =  (tp/(tp+fp))
    Recall    =  (tp/(tp+fn))
    F1_score  =  (2*(((tp/(tp+fp))*(tp/(tp+fn)))/((tp/(tp+fp))+(tp/(tp+fn)))))

    show_metrics = pd.DataFrame(data=[[Accuracy , Precision, Recall, F1_score]])
    show_metrics = show_metrics.T

    colors = ['gold', 'lightgreen', 'lightcoral', 'lightskyblue']
    trace2 = go.Bar(x = (show_metrics[0].values),
                    y = ['Accuracy', 'Precision', 'Recall', 'F1_score'], text = np.round_(show_metrics[0].values,4),
                    textposition = 'auto', textfont=dict(color='black'),
                    orientation = 'h', opacity = 1, marker=dict(
            color=colors,
            line=dict(color='#000000',width=1.5)))

    #Roc curve
    mean_tpr = np.mean(tprs, axis=0)
    mean_auc = auc(mean_fpr, mean_tpr)

    trace3 = go.Scatter(x=mean_fpr, y=mean_tpr,
                        name="Roc : ",
                        line=dict(color=('rgb(22, 96, 167)'), width=2), fill='tozeroy')
    trace4 = go.Scatter(x=[0, 1], y=[0, 1],
                        line=dict(color=('black'), width=1.5,
                                  dash='dot'))

    # Precision - recall curve
    y_real = y
    y_proba = np.concatenate(y_proba)
    precision, recall, _ = precision_recall_curve(y_real, y_proba)

    trace5 = go.Scatter(x=recall, y=precision,
                        name="Precision" + str(precision),
                        line=dict(color=('lightcoral'), width=2), fill='tozeroy')

    mean_auc = round(mean_auc, 3)
    # Subplots
    fig = tls.make_subplots(rows=2, cols=2, print_grid=False,
                            specs=[[{}, {}],
                                   [{}, {}]],
                            subplot_titles=('Confusion Matrix',
                                            'Metrics',
                                            'ROC curve' + " " + '(' + str(mean_auc) + ')',
                                            'Precision - Recall curve',
                                            ))
    # Trace and layout
    fig.append_trace(trace1, 1, 1)
    fig.append_trace(trace2, 1, 2)
    fig.append_trace(trace3, 2, 1)
    fig.append_trace(trace4, 2, 1)
    fig.append_trace(trace5, 2, 2)

    fig['layout'].update(showlegend=False, title='<b>Model performance report (5 folds)</b><br>' + subtitle,
                         autosize=False, height=830, width=830,
                         plot_bgcolor='black',
                         paper_bgcolor='black',
                         margin=dict(b=195), font=dict(color='white'))
    fig["layout"]["xaxis1"].update(color='white')
    fig["layout"]["yaxis1"].update(color='white')
    fig["layout"]["xaxis2"].update((dict(range=[0, 1], color='white')))
    fig["layout"]["yaxis2"].update(color='white')
    fig["layout"]["xaxis3"].update(dict(title="false positive rate"), color='white')
    fig["layout"]["yaxis3"].update(dict(title="true positive rate"), color='white')
    fig["layout"]["xaxis4"].update(dict(title="recall"), range=[0, 1.05], color='white')
    fig["layout"]["yaxis4"].update(dict(title="precision"), range=[0, 1.05], color='white')
    for i in fig['layout']['annotations']:
        i['font'] = dict(color='white', size=14)
    py.plot(fig)
def main():
    # send it all to stderr
    mp.log_to_stderr()
    # get access to a logger and set its logging level to INFO
    logger = mp.get_logger()
    logger.setLevel(logging.INFO)

    dataset = read_data(DATASET_PATH)
    # global train_x, test_x, train_y, test_y

    train_x, test_x, train_y, test_y = split_dataset(dataset, 0.25)

    # print("--- Testing Sequence DE ---")
    # start_time_seq = time.time()
    # result_seq = list(de_sequence(fobj, bounds=[(-100, 100)] * 6))
    # print(result_seq[-1])
    # print("")
    # print("--- %s seconds ---" % (time.time() - start_time_seq))
    #
    # sleep(5)

    print("--- Tuning Random Forest with Parallel DE ---")
    start_time_rf_tuning_para = time_RF.time()

    # result_para = list(de_parallel(fobj, bounds=[(-100, 100)] * 6))
    # print(result_para[-1])

    # initialization
    bounds = [(10, 150), (1, 20), (2, 20), (2, 50), (0.01, 1), (1, 10)]
    mut = 0.8
    crossp = 0.7
    popsize = 60
    its = 100

    dimensions = len(bounds)
    pop = np.random.rand(popsize, dimensions)

    # pdb.set_trace()
    min_b, max_b = np.asarray(bounds).T
    diff = np.fabs(min_b - max_b)
    pop_denorm = min_b + pop * diff
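    # denormalize the population: map each unit-interval gene u onto its
    # parameter range via low + u * (high - low)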

    # convert from float to integer
    pop_denorm_convert = pop_denorm.tolist()

    result_list = []
    temp_list = []

    for index in pop_denorm_convert:
        temp_list.append(np.int_(np.round_(index[0])))
        temp_list.append(np.int_(np.round_(index[1])))
        temp_list.append(np.int_(np.round_(index[2])))
        temp_list.append(np.int_(np.round_(index[3])))
        temp_list.append(float('%.2f' % index[4]))
        temp_list.append(np.int_(np.round_(index[5])))
        result_list.append(temp_list)
        temp_list = []

    fitness = np.asarray([
        rf_tuning(index[0], index[1], index[2], index[3], index[4], index[5],
                  train_x, test_x, train_y, test_y) for index in result_list
    ])

    best_idx = np.argmax(fitness)
    best = pop_denorm[best_idx]

    print("Dimension:", dimensions)
    print("pop:", pop)
    print("min_b:", min_b)
    print("max_b:", max_b)
    print("diff:", diff)
    print("pop_denorm:", pop_denorm)
    print("fitness:", fitness)
    print("best_idx:", best_idx)
    print("best:", best)

    lock = mp.Lock()
    # execute loops in each process
    processes = []
    for x in range(mp.cpu_count()):
        processes.append(
            mp.Process(target=de_innerloop,
                       args=(output, its, popsize, pop, mut, dimensions,
                             crossp, min_b, diff, lock, fitness, best_idx,
                             best, train_x, test_x, train_y, test_y)))

    # Run processes
    for p in processes:
        p.start()

    # Exit the completed processes
    # Without join() function call, process will remain idle and won’t terminate
    for p in processes:
        p.join()

    # Get process results from the output queue
    results = [output.get() for p in processes]
    print(results)

    print("")
    print("--- %s seconds ---" % (time_RF.time() - start_time_rf_tuning_para))
    print("")
def FormsGraphPair_printer(config,instance, model, gpu, metrics, outDir=None, startIndex=None, lossFunc=None):
    def __eval_metrics(data,target):
        acc_metrics = np.zeros((output.shape[0],len(metrics)))
        for ind in range(output.shape[0]):
            for i, metric in enumerate(metrics):
                acc_metrics[ind,i] += metric(output[ind:ind+1], target[ind:ind+1])
        return acc_metrics

    def __to_tensor(instance,gpu):
        image = instance['img']
        bbs = instance['bb_gt']
        adjaceny = instance['adj']
        num_neighbors = instance['num_neighbors']

        if gpu is not None:
            image = image.to(gpu)
            if bbs is not None:
                bbs = bbs.to(gpu)
            if num_neighbors is not None:
                num_neighbors = num_neighbors.to(gpu)
            #adjacenyMatrix = adjacenyMatrix.to(self.gpu)
        return image, bbs, adjaceny, num_neighbors

    rel_thresholds = [config['THRESH']] if 'THRESH' in config else [0.5]
    if ('sweep_threshold' in config and config['sweep_threshold']) or ('sweep_thresholds' in config and config['sweep_thresholds']):
        rel_thresholds = np.arange(0.1,1.0,0.05)
    if ('sweep_threshold_big' in config and config['sweep_threshold_big']) or ('sweep_thresholds_big' in config and config['sweep_thresholds_big']):
        rel_thresholds = np.arange(0,20.0,1)
    if ('sweep_threshold_small' in config and config['sweep_threshold_small']) or ('sweep_thresholds_small' in config and config['sweep_thresholds_small']):
        rel_thresholds = np.arange(0,0.1,0.01)
    draw_rel_thresh = config['draw_thresh'] if 'draw_thresh' in config else rel_thresholds[0]
    #print(type(instance['pixel_gt']))
    #if type(instance['pixel_gt']) == list:
    #    print(instance)
    #    print(startIndex)
    #data, targetBB, targetBBSizes = instance
    lossWeights = config['loss_weights'] if 'loss_weights' in config else {"box": 1, "rel":1}
    if lossFunc is None:
        yolo_loss = YoloLoss(model.numBBTypes,model.rotation,model.scale,model.anchors,**config['loss_params']['box'])
    else:
        yolo_loss = lossFunc
    data = instance['img']
    batchSize = data.shape[0]
    assert(batchSize==1)
    targetBoxes = instance['bb_gt']
    adjacency = instance['adj']
    adjacency = list(adjacency)
    imageName = instance['imgName']
    scale = instance['scale']
    target_num_neighbors = instance['num_neighbors']
    if not model.detector.predNumNeighbors:
        instance['num_neighbors']=None
    dataT, targetBoxesT, adjT, target_num_neighborsT = __to_tensor(instance,gpu)


    pretty = config['pretty'] if 'pretty' in config else False
    useDetections = config['useDetections'] if 'useDetections' in config else False
    if 'useDetect' in config:
        useDetections = config['useDetect']
    confThresh = config['conf_thresh'] if 'conf_thresh' in config else None


    numClasses=2  # TODO: don't hard-code the number of classes

    resultsDirName='results'
    #if outDir is not None and resultsDirName is not None:
        #rPath = os.path.join(outDir,resultsDirName)
        #if not os.path.exists(rPath):
        #    os.mkdir(rPath)
        #for name in targetBoxes:
        #    nPath = os.path.join(rPath,name)
        #    if not os.path.exists(nPath):
        #        os.mkdir(nPath)

    #dataT = __to_tensor(data,gpu)
    #print('{}: {} x {}'.format(imageName,data.shape[2],data.shape[3]))
    if useDetections=='gt':
        outputBoxes, outputOffsets, relPred, relIndexes, bbPred = model(dataT,targetBoxesT,target_num_neighborsT,True,
                otherThresh=confThresh,
                otherThreshIntur=1 if confThresh is not None else None,
                hard_detect_limit=600)
        outputBoxes=torch.cat((torch.ones(targetBoxes.size(1),1),targetBoxes[0,:,0:5],targetBoxes[0,:,-numClasses:]),dim=1) #add score
    elif type(useDetections) is str:
        dataset=config['DATASET']
        jsonPath = os.path.join(useDetections,imageName+'.json')
        with open(os.path.join(jsonPath)) as f:
            annotations = json.loads(f.read())
        fixAnnotations(dataset,annotations)
        savedBoxes = torch.FloatTensor(len(annotations['byId']),6+model.detector.predNumNeighbors+numClasses)
        for i,(id,bb) in enumerate(annotations['byId'].items()):
            qX, qY, qH, qW, qR, qIsText, qIsField, qIsBlank, qNN = getBBInfo(bb,dataset.rotate,useBlankClass=not dataset.no_blanks)
            savedBoxes[i,0]=1 #conf
            savedBoxes[i,1]=qX*scale #x-center, already scaled
            savedBoxes[i,2]=qY*scale #y-center
            savedBoxes[i,3]=qR #rotation
            savedBoxes[i,4]=qH*scale/2
            savedBoxes[i,5]=qW*scale/2
            if model.detector.predNumNeighbors:
                extra=1
                savedBoxes[i,6]=qNN
            else:
                extra=0
            savedBoxes[i,6+extra]=qIsText
            savedBoxes[i,7+extra]=qIsField
            
        if gpu is not None:
            savedBoxes=savedBoxes.to(gpu)
        outputBoxes, outputOffsets, relPred, relIndexes, bbPred = model(dataT,savedBoxes,None,"saved",
                otherThresh=confThresh,
                otherThreshIntur=1 if confThresh is not None else None,
                hard_detect_limit=600)
        outputBoxes=savedBoxes.cpu()
    elif useDetections:
        print('Unknown detection flag: '+useDetections)
        exit()
    else:
        outputBoxes, outputOffsets, relPred, relIndexes, bbPred = model(dataT,
                otherThresh=confThresh,
                otherThreshIntur=1 if confThresh is not None else None,
                hard_detect_limit=600)

    if model.predNN and bbPred is not None:
        predNN = bbPred[:,0]
    else:
        predNN=None
    if  model.detector.predNumNeighbors and not useDetections:
        #useOutputBBs=torch.cat((outputBoxes[:,0:6],outputBoxes[:,7:]),dim=1) #throw away NN pred
        extraPreds=1
        if not model.predNN:
            predNN = outputBoxes[:,6]
    else:
        extraPreds=0
        if not model.predNN:
            predNN = None
        #useOutputBBs=outputBoxes

    if targetBoxesT is not None:
        targetSize=targetBoxesT.size(1)
    else:
        targetSize=0
    lossThis, position_loss, conf_loss, class_loss, nn_loss, recall, precision = yolo_loss(outputOffsets,targetBoxesT,[targetSize], target_num_neighborsT)

    if 'rule' in config:
        if config['rule']=='closest':
            dists = torch.FloatTensor(relPred.size())
            differentClass = torch.FloatTensor(relPred.size())
            predClasses = torch.argmax(outputBoxes[:,extraPreds+6:extraPreds+6+numClasses],dim=1)
            for i,(bb1,bb2) in enumerate(relIndexes):
                dists[i] = math.sqrt((outputBoxes[bb1,1]-outputBoxes[bb2,1])**2 + (outputBoxes[bb1,2]-outputBoxes[bb2,2])**2)
                differentClass[i] = predClasses[bb1]!=predClasses[bb2]
            maxDist = torch.max(dists)
            minDist = torch.min(dists)
            relPred = 1-(dists-minDist)/(maxDist-minDist)
            relPred *= differentClass
        elif config['rule']=='icdar':
            height = torch.FloatTensor(relPred.size())
            dists = torch.FloatTensor(relPred.size())
            right = torch.FloatTensor(relPred.size())
            sameClass = torch.FloatTensor(relPred.size())
            predClasses = torch.argmax(outputBoxes[:,extraPreds+6:extraPreds+6+numClasses],dim=1)
            for i,(bb1,bb2) in enumerate(relIndexes):
                sameClass[i] = predClasses[bb1]==predClasses[bb2]
                
                #g4 of the paper
                height[i] = max(outputBoxes[bb1,4],outputBoxes[bb2,4])/min(outputBoxes[bb1,4],outputBoxes[bb2,4])

                #g5 of the paper
                if predClasses[bb1]==0:
                    widthLabel = outputBoxes[bb1,5]*2 #we predict half width
                    widthValue = outputBoxes[bb2,5]*2
                    dists[i] = math.sqrt(((outputBoxes[bb1,1]+widthLabel)-(outputBoxes[bb2,1]-widthValue))**2 + (outputBoxes[bb1,2]-outputBoxes[bb2,2])**2)
                else:
                    widthLabel = outputBoxes[bb2,5]*2 #we predict half width
                    widthValue = outputBoxes[bb1,5]*2
                    dists[i] = math.sqrt(((outputBoxes[bb1,1]-widthValue)-(outputBoxes[bb2,1]+widthLabel))**2 + (outputBoxes[bb1,2]-outputBoxes[bb2,2])**2)
                if dists[i]>2*widthLabel:
                    dists[i]/=widthLabel
                else: #undefined
                    dists[i] = min(1,dists[i]/widthLabel)
            
                #g6 of the paper
                if predClasses[bb1]==0:
                    widthValue = outputBoxes[bb2,5]*2
                    hDist = outputBoxes[bb1,1]-outputBoxes[bb2,1]
                else:
                    widthValue = outputBoxes[bb1,5]*2
                    hDist = outputBoxes[bb2,1]-outputBoxes[bb1,1]
                right[i] = hDist/widthValue

            relPred = 1-(height+dists+right + 10000*sameClass)
        else:
            print('ERROR, unknown rule {}'.format(config['rule']))
            exit()
    elif relPred is not None:
        relPred = torch.sigmoid(relPred)[:,0]




    relCand = relIndexes
    if relCand is None:
        relCand=[]

    if model.rotation:
        bbAlignment, bbFullHit = getTargIndexForPreds_dist(targetBoxes[0],outputBoxes,0.9,numClasses,extraPreds,hard_thresh=False)
    else:
        bbAlignment, bbFullHit = getTargIndexForPreds_iou(targetBoxes[0],outputBoxes,0.5,numClasses,extraPreds,hard_thresh=False)
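    # bbAlignment[i] is the index of the GT box matched to prediction i (-1 when
    # unmatched); bbFullHit flags predictions whose overlap met the full threshold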
    if targetBoxes is not None:
        target_for_b = targetBoxes[0,:,:]
    else:
        target_for_b = torch.empty(0)

    if outputBoxes.size(0)>0:
        maxConf = outputBoxes[:,0].max().item()
        minConf = outputBoxes[:,0].min().item()
        if useDetections:
            minConf=0
    #threshConf = max(maxConf*THRESH,0.5)
    #if model.rotation:
    #    outputBoxes = non_max_sup_dist(outputBoxes.cpu(),threshConf,3)
    #else:
    #    outputBoxes = non_max_sup_iou(outputBoxes.cpu(),threshConf,0.4)
    if model.rotation:
        ap_5, prec_5, recall_5 =AP_dist(target_for_b,outputBoxes,0.9,model.numBBTypes,beforeCls=extraPreds)
    else:
        ap_5, prec_5, recall_5 =AP_iou(target_for_b,outputBoxes,0.5,model.numBBTypes,beforeCls=extraPreds)

    #precisionHistory={}
    #precision=-1
    #minStepSize=0.025
    #targetPrecisions=[None]
    #for targetPrecision in targetPrecisions:
    #    if len(precisionHistory)>0:
    #        closestPrec=9999
    #        for prec in precisionHistory:
    #            if abs(targetPrecision-prec)<abs(closestPrec-targetPrecision):
    #                closestPrec=prec
    #        precision=prec
    #        stepSize=precisionHistory[prec][0]
    #    else:
    #        stepSize=0.1
    #
    #    while True: #abs(precision-targetPrecision)>0.001:
    toRet={}
    for rel_threshold in rel_thresholds:

            if 'optimize' in config and config['optimize']:
                if 'penalty' in config:
                    penalty = config['penalty']
                else:
                    penalty = 0.25
                print('optimizing with penalty {}'.format(penalty))
                thresh=0.15
                while thresh<0.45:
                    keep = relPred>thresh
                    newRelPred = relPred[keep]
                    if newRelPred.size(0)<700:
                        break
                    thresh += 0.05  # raise the threshold until fewer than 700 candidates remain
                if newRelPred.size(0)>0:
                    #newRelCand = [ cand for i,cand in enumerate(relCand) if keep[i] ]
                    usePredNN= predNN is not None and config['optimize']!='gt'
                    idMap={}
                    newId=0
                    newRelCand=[]
                    numNeighbors=[]
                    for index,(id1,id2) in enumerate(relCand):
                        if keep[index]:
                            if id1 not in idMap:
                                idMap[id1]=newId
                                if not usePredNN:
                                    numNeighbors.append(target_num_neighbors[0,bbAlignment[id1]])
                                else:
                                    numNeighbors.append(predNN[id1])
                                newId+=1
                            if id2 not in idMap:
                                idMap[id2]=newId
                                if not usePredNN:
                                    numNeighbors.append(target_num_neighbors[0,bbAlignment[id2]])
                                else:
                                    numNeighbors.append(predNN[id2])
                                newId+=1
                            newRelCand.append( [idMap[id1],idMap[id2]] )            


                    #if not usePredNN:
                        #    decision = optimizeRelationships(newRelPred,newRelCand,numNeighbors,penalty)
                    #else:
                    decision= optimizeRelationshipsSoft(newRelPred,newRelCand,numNeighbors,penalty, rel_threshold)
                    decision= torch.from_numpy( np.round_(decision).astype(int) )
                    decision=decision.to(relPred.device)
                    relPred[keep] = torch.where(0==decision,relPred[keep]-1,relPred[keep])
                    relPred[1-keep] -=1
                    rel_threshold_use=0#-0.5
                else:
                    rel_threshold_use=rel_threshold
            else:
                rel_threshold_use=rel_threshold

            #threshed in model
            #if len(precisionHistory)==0:
            if len(toRet)==0:
                #align bb predictions (final) with GT
                if bbPred is not None and bbPred.size(0)>0:
                    #create aligned GT
                    #this was wrong...
                        #first, remove unmatched predictions that didn't overlap (weren't close) to any targets
                        #toKeep = 1-((bbNoIntersections==1) * (bbAlignment==-1))
                    #remove predictions that overlapped with GT, but not enough
                    if model.predNN:
                        start=1
                        toKeep = 1-((bbFullHit==0) * (bbAlignment!=-1)) #toKeep = not (incomplete_overlap and did_overlap)
                        if toKeep.any():
                            bbPredNN_use = bbPred[toKeep][:,0]
                            bbAlignment_use = bbAlignment[toKeep]
                            #because we used -1 to indicate no match (in bbAlignment), we add 0 as the last position in the GT, as unmatched 
                            if target_num_neighborsT is not None:
                                target_num_neighbors_use = torch.cat((target_num_neighborsT[0].float(),torch.zeros(1).to(target_num_neighborsT.device)),dim=0)
                            else:
                                target_num_neighbors_use = torch.zeros(1).to(bbPred.device)
                            alignedNN_use = target_num_neighbors_use[bbAlignment_use]
                        else:
                            bbPredNN_use=None
                            alignedNN_use=None
                    else:
                        start=0
                    if model.predClass:
                        #We really don't care about the class of non-overlapping instances
                        if targetBoxes is not None:
                            toKeep = bbFullHit==1
                            if toKeep.any():
                                bbPredClass_use = bbPred[toKeep][:,start:start+model.numBBTypes]
                                bbAlignment_use = bbAlignment[toKeep]
                                alignedClass_use =  targetBoxesT[0][bbAlignment_use][:,13:13+model.numBBTypes] #There should be no -1 indexes in here
                            else:
                                bbPredClass_use=None
                                alignedClass_use=None
                        else:
                            alignedClass_use = None
                else:
                    bbPredNN_use = None
                    bbPredClass_use = None
                if model.predNN and bbPredNN_use is not None and bbPredNN_use.size(0)>0:
                    nn_loss_final = F.mse_loss(bbPredNN_use,alignedNN_use)
                    #nn_loss_final *= self.lossWeights['nn']

                    #loss += nn_loss_final
                    nn_loss_final = nn_loss_final.item()
                else:
                    nn_loss_final=0
                if model.predNN and predNN is not None:
                    predNN_p=bbPred[:,0]
                    diffs=torch.abs(predNN_p-target_num_neighborsT[0][bbAlignment].float())
                    nn_acc = (diffs<0.5).sum().item()
                    nn_acc /= predNN.size(0)
                elif model.predNN:
                    nn_acc = 0 
                if model.detector.predNumNeighbors and not useDetections:
                    predNN_d = outputBoxes[:,6]
                    diffs=torch.abs(predNN_d-target_num_neighbors[0][bbAlignment].float())
                    nn_acc_d = (diffs<0.5).sum().item()
                    nn_acc_d /= predNN_d.size(0)

                if model.predClass and bbPredClass_use is not None and bbPredClass_use.size(0)>0:
                    class_loss_final = F.binary_cross_entropy_with_logits(bbPredClass_use,alignedClass_use)
                    #class_loss_final *= self.lossWeights['class']
                    #loss += class_loss_final
                    class_loss_final = class_loss_final.item()
                else:
                    class_loss_final = 0
            #class_acc=0
            useOutputBBs=None

            truePred=falsePred=badPred=0
            scores=[]
            matches=0
            i=0
            numMissedByHeur=0
            targGotHit=set()
            for i,(n0,n1) in enumerate(relCand):
                t0 = bbAlignment[n0].item()
                t1 = bbAlignment[n1].item()
                if t0>=0 and bbFullHit[n0]:
                    targGotHit.add(t0)
                if t1>=0 and bbFullHit[n1]:
                    targGotHit.add(t1)
                if t0>=0 and t1>=0 and bbFullHit[n0] and bbFullHit[n1]:
                    if (min(t0,t1),max(t0,t1)) in adjacency:
                        matches+=1
                        scores.append( (relPred[i],True) )
                        if relPred[i]>rel_threshold_use:
                            truePred+=1
                    else:
                        scores.append( (relPred[i],False) )
                        if relPred[i]>rel_threshold_use:
                            falsePred+=1
                else:
                    scores.append( (relPred[i],False) )
                    if relPred[i]>rel_threshold_use:
                        badPred+=1
            for i in range(len(adjacency)-matches):
                numMissedByHeur+=1
                scores.append( (float('nan'),True) )
            rel_ap=computeAP(scores)
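            # computeAP consumes (score, is_true_pair) tuples; the NaN-scored
            # entries added above stand in for GT pairs the candidates never
            # covered, so the AP also reflects the candidate generator's recall ceiling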

            numMissedByDetect=0
            for t0,t1 in adjacency:
                if t0 not in targGotHit or t1 not in targGotHit:
                    numMissedByHeur-=1
                    numMissedByDetect+=1
            heurRecall = (len(adjacency)-numMissedByHeur)/len(adjacency)
            detectRecall = (len(adjacency)-numMissedByDetect)/len(adjacency)
            if len(adjacency)>0:
                relRecall = truePred/len(adjacency)
            else:
                relRecall = 1
            #if falsePred>0:
            #    relPrec = truePred/(truePred+falsePred)
            #else:
            #    relPrec = 1
            if falsePred+badPred>0:
                precision = truePred/(truePred+falsePred+badPred)
            else:
                precision = 1
    

            toRet['prec@{}'.format(rel_threshold)]=precision
            toRet['recall@{}'.format(rel_threshold)]=relRecall
            if relRecall+precision>0:
                toRet['F-M@{}'.format(rel_threshold)]=2*relRecall*precision/(relRecall+precision)
            else:
                toRet['F-M@{}'.format(rel_threshold)]=0
            toRet['rel_AP@{}'.format(rel_threshold)]=rel_ap
            #precisionHistory[precision]=(draw_rel_thresh,stepSize)
            #if targetPrecision is not None:
            #    if abs(precision-targetPrecision)<0.001:
            #        break
            #    elif stepSize<minStepSize:
            #        if precision<targetPrecision:
            #            draw_rel_thresh += stepSize*2
            #            continue
            #        else:
            #            break
            #    elif precision<targetPrecision:
            #        draw_rel_thresh += stepSize
            #        if not wasTooSmall:
            #            reverse=True
            #            wasTooSmall=True
            #        else:
            #            reverse=False
            #    else:
            #        draw_rel_thresh -= stepSize
            #        if wasTooSmall:
            #            reverse=True
            #            wasTooSmall=False
            #        else:
            #            reverse=False
            #    if reverse:
            #        stepSize *= 0.5
            #else:
            #    break


            #import pdb;pdb.set_trace()

            #for b in range(len(outputBoxes)):
            
            
            dists=defaultdict(list)
            dists_x=defaultdict(list)
            dists_y=defaultdict(list)
            scaleDiffs=defaultdict(list)
            rotDiffs=defaultdict(list)
            b=0
            #print('image {} has {} {}'.format(startIndex+b,targetBoxesSizes[name][b],name))
            #bbImage = np.ones_like(image)

    if outDir is not None:
        outputBoxes = outputBoxes.data.numpy()
        data = data.numpy()

        image = (1-((1+np.transpose(data[b][:,:,:],(1,2,0)))/2.0)).copy()
        if image.shape[2]==1:
            image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
        #if name=='text_start_gt':

        #Draw GT bbs
        if not pretty:
            for j in range(targetSize):
                plotRect(image,(1,0.5,0),targetBoxes[0,j,0:5])
            #x=int(targetBoxes[b,j,0])
            #y=int(targetBoxes[b,j,1]+targetBoxes[b,j,3])
            #cv2.putText(image,'{:.2f}'.format(target_num_neighbors[b,j]),(x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0.6,0.3,0),2,cv2.LINE_AA)
            #if alignmentBBs[b] is not None:
            #    aj=alignmentBBs[b][j]
            #    xc_gt = targetBoxes[b,j,0]
            #    yc_gt = targetBoxes[b,j,1]
            #    xc=outputBoxes[b,aj,1]
            #    yc=outputBoxes[b,aj,2]
            #    cv2.line(image,(xc,yc),(xc_gt,yc_gt),(0,1,0),1)
            #    shade = 0.0+(outputBoxes[b,aj,0]-threshConf)/(maxConf-threshConf)
            #    shade = max(0,shade)
            #    if outputBoxes[b,aj,6] > outputBoxes[b,aj,7]:
            #        color=(0,shade,shade) #text
            #    else:
            #        color=(shade,shade,0) #field
            #    plotRect(image,color,outputBoxes[b,aj,1:6])

        #bbs=[]
        #pred_points=[]
        #maxConf = outputBoxes[b,:,0].max()
        #threshConf = 0.5 
        #threshConf = max(maxConf*0.9,0.5)
        #print("threshConf:{}".format(threshConf))
        #for j in range(outputBoxes.shape[1]):
        #    conf = outputBoxes[b,j,0]
        #    if conf>threshConf:
        #        bbs.append((conf,j))
        #    #pred_points.append(
        #bbs.sort(key=lambda a: a[0]) #so most confident bbs are draw last (on top)
        #import pdb; pdb.set_trace()

        #Draw pred bbs
        bbs = outputBoxes
        for j in range(bbs.shape[0]):
            #circle aligned predictions
            conf = bbs[j,0]
            if outDir is not None:
                shade = 0.0+(conf-minConf)/(maxConf-minConf)
                #print(shade)
                #if name=='text_start_gt' or name=='field_end_gt':
                #    cv2.bb(bbImage[:,:,1],p1,p2,shade,2)
                #if name=='text_end_gt':
                #    cv2.bb(bbImage[:,:,2],p1,p2,shade,2)
                #elif name=='field_end_gt' or name=='field_start_gt':
                #    cv2.bb(bbImage[:,:,0],p1,p2,shade,2)
                if bbs[j,6+extraPreds] > bbs[j,7+extraPreds]:
                    color=(0,0,shade) #text
                else:
                    color=(0,shade,shade) #field
                if pretty=='light':
                    lineWidth=2
                else:
                    lineWidth=1
                plotRect(image,color,bbs[j,1:6],lineWidth)

                if predNN is not None and not pretty: #model.detector.predNumNeighbors:
                    x=int(bbs[j,1])
                    y=int(bbs[j,2])#-bbs[j,4])
                    targ_j = bbAlignment[j].item()
                    if targ_j>=0:
                        gtNN = target_num_neighbors[0,targ_j].item()
                    else:
                        gtNN = 0
                    pred_nn = predNN[j].item()
                    color = min(abs(pred_nn-gtNN),1)#*0.5
                    cv2.putText(image,'{:.2}/{}'.format(pred_nn,gtNN),(x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(color,0,0),2,cv2.LINE_AA)

        draw_rel_thresh = relPred.max() * draw_rel_thresh


        #Draw pred pairings
        numrelpred=0
        hits = [False]*len(adjacency)
        for i in range(len(relCand)):
            #print('{},{} : {}'.format(relCand[i][0],relCand[i][1],relPred[i]))
            if pretty:
                if relPred[i]>0 or pretty=='light':
                    score = relPred[i]
                    pruned=False
                    lineWidth=2
                else:
                    score = relPred[i]+1
                    pruned=True
                    lineWidth=1
            else:
                lineWidth=1
            if relPred[i]>draw_rel_thresh or (pretty and score>draw_rel_thresh):
                ind1 = relCand[i][0]
                ind2 = relCand[i][1]
                x1 = round(bbs[ind1,1])
                y1 = round(bbs[ind1,2])
                x2 = round(bbs[ind2,1])
                y2 = round(bbs[ind2,2])

                if pretty:
                    targ1 = bbAlignment[ind1].item()
                    targ2 = bbAlignment[ind2].item()
                    aId=None
                    if bbFullHit[ind1] and bbFullHit[ind2]:
                        if (targ1,targ2) in adjacency:
                            aId = adjacency.index((targ1,targ2))
                        elif (targ2,targ1) in adjacency:
                            aId = adjacency.index((targ2,targ1))
                    if aId is None:
                        if pretty=='clean' and pruned:
                            color=np.array([1,1,0])
                        else:
                            color=np.array([1,0,0])
                    else:
                        if pretty=='clean' and pruned:
                            color=np.array([1,0,1])
                        else:
                            color=np.array([0,1,0])
                        hits[aId]=True
                    cv2.line(image,(x1,y1),(x2,y2),color.tolist(),lineWidth)
                else:
                    shade = (relPred[i].item()-draw_rel_thresh)/(1-draw_rel_thresh)

                    #print('draw {} {} {} {} '.format(x1,y1,x2,y2))
                    cv2.line(image,(x1,y1),(x2,y2),(0,shade,0),lineWidth)
                numrelpred+=1
        if pretty and pretty!="light" and pretty!="clean":
            for i in range(len(relCand)):
                #print('{},{} : {}'.format(relCand[i][0],relCand[i][1],relPred[i]))
                if relPred[i]>-1:
                    score = (relPred[i]+1)/2
                    pruned=False
                else:
                    score = (relPred[i]+2+1)/2
                    pruned=True
                if relPred[i]>draw_rel_thresh or (pretty and score>draw_rel_thresh):
                    ind1 = relCand[i][0]
                    ind2 = relCand[i][1]
                    x1 = round(bbs[ind1,1])
                    y1 = round(bbs[ind1,2])
                    x2 = round(bbs[ind2,1])
                    y2 = round(bbs[ind2,2])

                    targ1 = bbAlignment[ind1].item()
                    targ2 = bbAlignment[ind2].item()
                    aId=None
                    if bbFullHit[ind1] and bbFullHit[ind2]:
                        if (targ1,targ2) in adjacency:
                            aId = adjacency.index((targ1,targ2))
                        elif (targ2,targ1) in adjacency:
                            aId = adjacency.index((targ2,targ1))
                    if aId is None:
                        color=np.array([1,0,0])
                    else:
                        color=np.array([0,1,0])
                    color=color/2
                    x = int((x1+x2)/2)
                    y = int((y1+y2)/2)
                    if pruned:
                        cv2.putText(image,'[{:.2}]'.format(score),(x,y), cv2.FONT_HERSHEY_PLAIN, 0.6,color.tolist(),1)
                    else:
                        cv2.putText(image,'{:.2}'.format(score),(x,y), cv2.FONT_HERSHEY_PLAIN,1.1,color.tolist(),1)
        #print('number of pred rels: {}'.format(numrelpred))
        #Draw GT pairings
        if not pretty:
            gtcolor=(0.25,0,0.25)
            wth=3
        else:
            #gtcolor=(1,0,0.6)
            gtcolor=(1,0.6,0)
            wth=2
        for aId,(i,j) in enumerate(adjacency):
            if not pretty or not hits[aId]:
                x1 = round(targetBoxes[0,i,0].item())
                y1 = round(targetBoxes[0,i,1].item())
                x2 = round(targetBoxes[0,j,0].item())
                y2 = round(targetBoxes[0,j,1].item())
                cv2.line(image,(x1,y1),(x2,y2),gtcolor,wth)

        #Draw alignment between gt and pred bbs
        if not pretty:
            for predI in range(bbs.shape[0]):
                targI=bbAlignment[predI].item()
                x1 = int(round(bbs[predI,1]))
                y1 = int(round(bbs[predI,2]))
                if targI>=0:  # 0 is a valid target index; negative means unmatched
                    x2 = round(targetBoxes[0,targI,0].item())
                    y2 = round(targetBoxes[0,targI,1].item())
                    cv2.line(image,(x1,y1),(x2,y2),(1,0,1),1)
                else:
                    #draw an 'x', indicating no match
                    cv2.line(image,(x1-5,y1-5),(x1+5,y1+5),(.1,0,.1),1)
                    cv2.line(image,(x1+5,y1-5),(x1-5,y1+5),(.1,0,.1),1)



        saveName = '{}_boxes_prec:{:.2f},{:.2f}_recall:{:.2f},{:.2f}_rels_AP:{:.3f}'.format(imageName,prec_5[0],prec_5[1],recall_5[0],recall_5[1],rel_ap)
        #for j in range(metricsOut.shape[1]):
        #    saveName+='_m:{0:.3f}'.format(metricsOut[i,j])
        saveName+='.png'
        io.imsave(os.path.join(outDir,saveName),image)
        #print('saved: '+os.path.join(outDir,saveName))

    print('\n{} ap:{}\tnumMissedByDetect:{}\tmissedByHeur:{}'.format(imageName,rel_ap,numMissedByDetect,numMissedByHeur))
    retData= { 'bb_ap':[ap_5],
               'bb_recall':[recall_5],
               'bb_prec':[prec_5],
               'bb_Fm': -1,#(recall_5[0]+recall_5[1]+prec_5[0]+prec_5[1])/4,
               'nn_loss': nn_loss,
               'rel_recall':relRecall,
               'rel_precision':precision,
               'rel_Fm':2*relRecall*precision/(relRecall+precision) if relRecall+precision>0 else 0,
               'relMissedByHeur':numMissedByHeur,
               'relMissedByDetect':numMissedByDetect,
               'heurRecall': heurRecall,
               'detectRecall': detectRecall,
               **toRet

             }
    if rel_ap is not None: # rel_ap is None when there were no relationships
        retData['rel_AP']=rel_ap
        retData['no_targs']=0
    else:
        retData['no_targs']=1
    if model.predNN:
        retData['nn_loss_final']=nn_loss_final
        retData['nn_loss_diff']=nn_loss_final-nn_loss
        retData['nn_acc_final'] = nn_acc
    if model.detector.predNumNeighbors and not useDetections:
        retData['nn_acc_detector'] = nn_acc_d
    if model.predClass:
        retData['class_loss_final']=class_loss_final
        retData['class_loss_diff']=class_loss_final-class_loss
    return (
             retData,
             (lossThis, position_loss, conf_loss, class_loss, recall, precision)
            )
Example #51
fourth.loc[:, 'ASECWT'] = fourth.ASECWT / 4
# Adjust the weight before recoding RACE; once RACE is set to 1,
# the mask CPS.RACE == 1234 no longer matches anything
CPS.loc[CPS.RACE == 1234, 'ASECWT'] = CPS.ASECWT / 4
CPS.loc[CPS.RACE == 1234, 'RACE'] = 1
# DataFrame.append is deprecated in newer pandas; pd.concat is the modern equivalent
CPS = CPS.append(second.append(third.append(fourth, ignore_index=True),
                               ignore_index=True),
                 ignore_index=True)

# Drop the unknown race observations
CPS = CPS.loc[CPS.RACE.notna(), :]

# Recode the latin origin variable
CPS.loc[:, 'HISPAN'] = CPS.HISPAN.map(latinmap)

# Recode the education variable
CPS.loc[CPS.EDUC == 999, 'EDUC'] = np.nan
CPS.loc[:, 'EDUC'] = np.round_(CPS.EDUC / 10).map(educationmap)

# Create a family identifier
CPS.loc[:, 'SERIAL'] = CPS.SERIAL.astype('str') + CPS.FAMUNIT.astype('str')
CPS = CPS.drop('FAMUNIT', axis=1)

# Recode the income variables
CPS.loc[CPS.INCWAGE == 99999999, 'INCWAGE'] = 0
CPS.loc[CPS.INCWAGE == 99999998, 'INCWAGE'] = np.nan
CPS.loc[CPS.INCBUS == 99999999, 'INCBUS'] = 0
CPS.loc[CPS.INCBUS == 99999998, 'INCBUS'] = np.nan
CPS.loc[CPS.INCFARM == 99999999, 'INCFARM'] = 0
CPS.loc[CPS.INCFARM == 99999998, 'INCFARM'] = np.nan

# Compute total personal income
CPS.loc[:, 'income'] = CPS.INCWAGE.fillna(value=0) + CPS.INCBUS.fillna(
    value=0) + CPS.INCFARM.fillna(value=0)
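The three income recodes above repeat one sentinel pattern (99999999 means "none", 99999998 means "missing"); a minimal equivalent sketch using Series.replace, assuming the same CPS frame and columns:

# Hypothetical refactor of the sentinel recodes above (same CPS columns assumed)
import numpy as np

sentinels = {99999999: 0, 99999998: np.nan}
for col in ['INCWAGE', 'INCBUS', 'INCFARM']:
    CPS.loc[:, col] = CPS[col].replace(sentinels)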
Example #52
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

if __name__ == "__main__":
    np.random.seed(1337)
    mean = 0
    std = 1

    dist = np.random.normal(mean, std, size=500)
    # Vectorized rounding
    rounded = np.round_(dist, 1)

    # Plot distribution point-by-point, saving one frame per point
    fig, ax = plt.subplots()
    plt.xlim(-2.5, 2.5)
    plt.ylim(-1, 25)
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    counts = dict()
    for x in rounded:
        y = counts.get(x, 0) + 1
        plt.scatter(x, y, s=100, alpha=0.5, color="b")
        counts[x] = y
        filename = "anim/" + datetime.now().strftime("%d%H%M%S%f") + ".png"
        plt.savefig(filename)

    # Generate gif using imagemagick from command line/IPython
    # !convert -delay 5 anim/*.png figs/dist.gif
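If ImageMagick is not available, the frames can be stitched in Python instead; a minimal sketch using imageio (an assumption, not part of the original script), reading back the frames saved above:

# Hypothetical alternative to the ImageMagick one-liner, using imageio
import glob
import imageio.v2 as imageio

frames = [imageio.imread(f) for f in sorted(glob.glob("anim/*.png"))]
imageio.mimsave("figs/dist.gif", frames, duration=0.05)
Example #53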
def Bandpass_Performance_eccentricity_plotter(
        distance_array, ecc_pairs, performance_array_theta,
        performance_array_alpha, performance_array_beta,
        performance_array_gamma, performance_array_highgamma, dist_value,
        figure_title, file_name):
    """
    Plot discrimination performance of LFP versus eccentricity at different 
    frequency bands
    """
    ecc_values = ecc_pairs[distance_array == dist_value]
    performance_array_values_theta = performance_array_theta[distance_array ==
                                                             dist_value]
    performance_array_values_alpha = performance_array_alpha[distance_array ==
                                                             dist_value]
    performance_array_values_beta = performance_array_beta[distance_array ==
                                                           dist_value]
    performance_array_values_gamma = performance_array_gamma[distance_array ==
                                                             dist_value]
    performance_array_values_highgamma = performance_array_highgamma[
        distance_array == dist_value]

    y_x_theta, y_x_std_theta = mean_relationship(
        ecc_values, performance_array_values_theta, np.arange(3, 27, 3))
    y_x_alpha, y_x_std_alpha = mean_relationship(
        ecc_values, performance_array_values_alpha, np.arange(3, 27, 3))
    y_x_beta, y_x_std_beta = mean_relationship(ecc_values,
                                               performance_array_values_beta,
                                               np.arange(3, 27, 3))
    y_x_gamma, y_x_std_gamma = mean_relationship(
        ecc_values, performance_array_values_gamma, np.arange(3, 27, 3))
    y_x_highgamma, y_x_std_highgamma = mean_relationship(
        ecc_values, performance_array_values_highgamma, np.arange(3, 27, 3))

    _, p_value_theta = pearsonr(ecc_values, performance_array_values_theta)
    _, p_value_alpha = pearsonr(ecc_values, performance_array_values_alpha)
    _, p_value_beta = pearsonr(ecc_values, performance_array_values_beta)
    _, p_value_gamma = pearsonr(ecc_values, performance_array_values_gamma)
    _, p_value_highgamma = pearsonr(ecc_values,
                                    performance_array_values_highgamma)

    fig = plt.figure(figsize=(12, 10))

    plt.plot(np.arange(3, 24, 3) + 1.5,
             y_x_theta,
             linewidth=3.0,
             marker='o',
             color=(0.035, 0.062, 0.682))
    plt.plot(np.arange(3, 24, 3) + 1.5,
             y_x_alpha,
             linewidth=3.0,
             marker='o',
             color=(0.298, 0.662, 0.941))
    plt.plot(np.arange(3, 24, 3) + 1.5,
             y_x_beta,
             linewidth=3.0,
             marker='o',
             color=(0.031, 0.568, 0.098))
    plt.plot(np.arange(3, 24, 3) + 1.5,
             y_x_gamma,
             linewidth=3.0,
             marker='o',
             color=(0.960, 0.050, 0.019))
    plt.plot(np.arange(3, 24, 3) + 1.5,
             y_x_highgamma,
             linewidth=3.0,
             marker='o',
             color=(0.960, 0.454, 0.019))

    plt.axis('tight')
    plt.tight_layout()
    plt.ylim(0.3, 1)
    plt.ylabel('Performance', fontweight='bold')
    plt.xlabel('Eccentricity (deg)', fontweight='bold')
    plt.legend(
        ('Theta p-value={0}'.format(np.round_(
            p_value_theta, 4)), 'Alpha p-value={0}'.format(
                np.round_(p_value_alpha, 4)), 'Beta p-value={0}'.format(
                    np.round_(p_value_beta, 4)), 'Gamma p-value={0}'.format(
                        np.round_(p_value_gamma, 4)),
         'High-gamma p-value={0}'.format(np.round_(p_value_highgamma, 4))),
        loc='upper right',
        fontsize=26)
    plt.title(figure_title, fontweight='bold', loc='center')
    fig.savefig(file_name, dpi=200)
    plt.show()
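A hedged usage sketch with synthetic inputs; the array shapes are inferred from the signature, and the snippet's own dependencies (numpy, matplotlib, scipy.stats.pearsonr, and the mean_relationship helper) are assumed to be in scope:

# Hypothetical call with synthetic data; real arrays come from the LFP analysis
import numpy as np

n = 500
distance_array = np.random.choice([1, 2, 3], size=n)
ecc_pairs = np.random.uniform(3, 24, size=n)

def perf():
    return np.random.uniform(0.3, 1.0, size=n)

Bandpass_Performance_eccentricity_plotter(
    distance_array, ecc_pairs, perf(), perf(), perf(), perf(), perf(),
    dist_value=2, figure_title='LFP performance vs. eccentricity',
    file_name='bandpass_ecc.png')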
Example #54
      with open(save, 'w') as outf:
        outf.write(contents)
    else:
      return result

  # Skip empty images
  data = np.nan_to_num(data)
  if np.sum(data) == 0:
    return image_to_json()

  # Round values to save space. Note that in practice the resulting JSON file will
  # typically be larger than the original nifti unless the image is relatively
  # dense (even when compressed). More reason to switch from JSON to nifti reading
  # in the viewer!
  data = np.round_(data, decimals)

  # Temporary kludge to fix orientation issue
  if swap:
    data = np.swapaxes(data, 0, 2)

  # Identify threshold--minimum nonzero value
  thresh = np.min(np.abs(data[np.nonzero(data)]))

  # compress into 2 lists, one with values, the other with list of indices for each value
  uniq = list(np.unique(data))
  if 0 in uniq:
    uniq.remove(0)
  if len(uniq) == 0:
    return image_to_json()
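The two-list compression the comment above describes can be illustrated on its own; a minimal sketch (variable names hypothetical) mapping each unique nonzero value to the flat indices where it occurs:

# Minimal sketch of the value/index compression described above
import numpy as np

data = np.round_(np.random.randn(4, 4), 1)
values = [v for v in np.unique(data) if v != 0]
indices = [np.flatnonzero(data.ravel() == v).tolist() for v in values]
# 'values' and 'indices' together reconstruct the nonzero entries of 'data'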
Example #55
def trend_lbl(name, length, slope, intercept, r_value):
    return "{} ({})\n$y={}x+{}$\n$r^2$={}".format(name, length, np.round_(slope, 3), np.round_(intercept, 3),
                                                  np.round_(r_value, 3))
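trend_lbl pairs naturally with scipy.stats.linregress; a hedged usage sketch on synthetic data:

# Hypothetical usage of trend_lbl with a fitted trend line
import numpy as np
from scipy.stats import linregress

x = np.arange(50)
y = 0.8 * x + np.random.normal(0, 2, size=50)
slope, intercept, r_value, p_value, std_err = linregress(x, y)
print(trend_lbl('synthetic', len(x), slope, intercept, r_value))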
Example #56
    def solve_atmospheric_entry(self,
                                radius,
                                velocity,
                                density,
                                strength,
                                angle,
                                init_altitude=100e3,
                                dt=0.05,
                                radians=False,
                                fragmentation=True,
                                num_scheme='RK',
                                ensemble=False):
        """
        Solve the system of differential equations for a given impact scenario

        Parameters
        ----------

        radius : float
            The radius of the asteroid in meters

        velocity : float
            The entry speed of the asteroid in meters/second

        density : float
            The density of the asteroid in kg/m^3

        strength : float
            The strength of the asteroid (i.e., the ram pressure above which
            fragmentation and spreading occurs) in N/m^2 (Pa)

        angle : float
            The initial trajectory angle of the asteroid to the horizontal
            By default, input is in degrees. If 'radians' is set to True, the
            input should be in radians

        init_altitude : float, optional
            Initial altitude in m

        dt : float, optional
            The output timestep, in s

        radians : logical, optional
            Whether angles should be given in degrees or radians. Default=False
            Angles returned in the DataFrame will have the same units as the
            input

        Returns
        -------
        Result : DataFrame
            A pandas DataFrame containing the solution to the system.
            Includes the following columns:
            ``velocity``, ``mass``, ``angle``, ``altitude``,
            ``distance``, ``radius``, ``time``
        """
        assert velocity >= 0, 'no negative velocity allowed'
        num_scheme_dict = {
            'EE': self.explicit_euler,
            'IE': self.implicit_euler,
            'MIE': self.midpoint_implicit_euler,
            'RK': self.runge_kutta
        }
        if ensemble is True:
            num_scheme = 'EE'
        if radians is False:  # converts degrees to radians
            angle = angle * (np.pi) / 180

        assert 0 <= angle <= np.pi / 2, 'Invalid entry for angle. Valid entries are values between 0 and 90 degrees'

        T = 12000  # max duration of simulation in seconds
        T_arr = []  # list to store all the timesteps
        t = 0  # initial time assumed to be zero
        T_arr.append(0)  # storing first time

        mass = density * 4 / 3 * radius**3 * np.pi  # mass of the asteroid, assuming a spherical shape
        init_distance = 0  # initial distance assumed to be zero
        y = np.array(
            [velocity, mass, angle, init_altitude, init_distance,
             radius])  # defining initial condition array

        Y = []  # empty list to store solution array for every timestep
        Y.append(y)  # store initial condition
        while t <= T:  # initiate timeloop

            if strength <= (self.rhoa(y[3]) *
                            y[0]**2) and fragmentation is True:
                fragmented = True  # define status of fragmentation
            else:
                fragmented = False

            y_next = num_scheme_dict[num_scheme](
                y, self.f, dt, fragmented,
                density)  # compute values for next timestep

            if ensemble is True and y[2] > (
                    89 * np.pi /
                    180):  # for purpose of ensemble: break after airburst
                break

            if y_next[1] <= 0 or y_next[
                    3] <= 0:  # stop simulation if mass or altitude become zero
                break

            assert (
                y_next[1] - y[1]
            ) < 0, 'Simulation failed, entry values are not suitable for the mathematical model'

            t += dt
            T_arr.append(t)  # store new timestep

            Y.append(y_next)  # store computed values
            y = y_next
        Y = np.array(Y)

        if radians is False:
            Y[:, 2] = np.round_(np.rad2deg(Y[:, 2]), decimals=10)

        # return all the stored values in a pd.DataFrame
        return pd.DataFrame({
            'velocity': Y[:, 0],
            'mass': Y[:, 1],
            'angle': Y[:, 2],
            'altitude': Y[:, 3],
            'distance': Y[:, 4],
            'radius': Y[:, 5],
            'time': T_arr
        })
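A hedged usage sketch; the enclosing class ('Planet' here) and its constructor are assumptions, and only the method signature above comes from the source:

# Hypothetical call; 'Planet' stands in for the class that defines the method
planet = Planet()
result = planet.solve_atmospheric_entry(
    radius=10.0,      # m
    velocity=20e3,    # m/s
    density=3000.0,   # kg/m^3
    strength=1e5,     # Pa
    angle=45.0,       # degrees, since radians=False by default
    dt=0.05)
print(result[['time', 'altitude', 'velocity']].head())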
Example #57
    optimizer='adam',
    learning_rate=0.01,
    loss='categorical_crossentropy',
    name='target'
)  #regression layer with adam optimizer and crossentropy loss function

model = tflearn.DNN(network, tensorboard_verbose=0)

#----------------------------------------------------------------------
# Training the Convolutional Neural Network
#----------------------------------------------------------------------
model.fit({'input': x}, {'target': y},
          n_epoch=5,
          validation_set=({
              'input': test_x
          }, {
              'target': test_y
          }),
          show_metric=True,
          run_id='convnet_mnist')

#----------------------------------------------------------------------
# Testing the model with your own images (optional)
#----------------------------------------------------------------------
image = misc.imread("test.png", flatten=True)  # scipy.misc.imread is deprecated in newer SciPy releases
image = image.reshape([-1, 28, 28, 1])

predict = model.predict({'input': image})

print(np.round_(predict, decimals=3))
print("prediction: " + str(np.argmax(predict)))
Example #58
    def predict(self, X_test, y_test):
        X_test, y_test = self.reshape_features_labels(X_test, y_test)
        y_pred = self.bst.predict(X_test, num_iteration=self.bst.best_iteration)
        y_pred_rounded = np.round_(y_pred, 0)
        return y_pred_rounded
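Here np.round_(y_pred, 0) turns the booster's predicted probabilities into hard 0/1 labels at a 0.5 cutoff; a standalone illustration (note numpy rounds half to even, so exactly 0.5 maps to 0):

# np.round_ as a 0.5-threshold classifier on predicted probabilities
import numpy as np

probs = np.array([0.12, 0.49, 0.5, 0.51, 0.93])
print(np.round_(probs, 0))  # [0. 0. 0. 1. 1.]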
Example #59
y_test[y_test == -1] = 0
y_pred_test[y_pred_test == -1] = 0

# +
#Comparison between models: (PCA)

classifiers = [best_svm_lin, logreg, best_svm_nonlin]
roc_score = []
plt.figure()
ax = plt.gca()
for clf in classifiers:
    plot_roc_curve(clf, X_test, y_test, ax=ax)
    roc_score.append(
        np.round_(roc_auc_score(y_test,
                                clf.predict_proba(X_test)[:, 1]),
                  decimals=3))
ax.plot(np.linspace(0, 1, X_test.shape[0]), np.linspace(0, 1, X_test.shape[0]))
plt.legend(('lin_svm, AUROC = ' + str(roc_score[0]),
            'log_reg, AUROC = ' + str(roc_score[1]),
            'nonlin_svm, AUROC = ' + str(roc_score[2]), 'flipping a coin'))
plt.show()

# +
#Dimension reduction on 2 features:
#  We chose the two important features from section 6, located in columns 4 and 6 in the onehotvector:
feature2 = onehot[:, [4, 6]]

#logistic regression-two features:

X_train, X_test, y_train, y_test = train_test_split(feature2,
Example #60
    def polarplot(self, R=None, SRF=None, V=None, incp=[15., 35., 55., 75.],
                  incBRDF=[15., 35., 55., 75.], pmultip=2., BRDFmultip=1.,
                  plabel='Volume-Scattering Phase Function',
                  BRDFlabel='Surface-BRDF', paprox=True, BRDFaprox=True,
                  plegend=True, plegpos=(0.75, 0.5), BRDFlegend=True,
                  BRDFlegpos=(0.285, 0.5), groundcolor="none"):
        """
        Generate polar plots of the volume- and surface-scattering phase
        functions, together with the Legendre-polynomial approximations
        used for each.


        Parameters
        -----------
        R : RT1-class object
            If R is provided, SRF and V are taken from it
            as V = R.V and SRF = R.SRF
        SRF : RT1.surface class object
              Alternative direct specification of the surface BRDF,
              e.g. SRF = CosineLobe(i=3, ncoefs=5)
        V : RT1.volume class object
            Alternative direct specification of the volume-scattering
            phase-function  e.g. V = Rayleigh()

        Other Parameters
        -----------------
        incp : list of floats (default = [15.,35.,55.,75.])
               Incidence-angles in degree at which the volume-scattering
               phase-function will be plotted
        incBRDF : list of floats (default = [15.,35.,55.,75.])
                  Incidence-angles in degree at which the BRDF will be plotted
        pmultip : float (default = 2.)
                  Multiplier to scale the plotrange for the plot of the
                  volume-scattering phase-function
                  (the max-plotrange is given by the max. value of V in
                  forward-direction (for the chosen incp) )
        BRDFmultip : float (default = 1.)
                     Multiplier to scale the plotrange for the plot of
                     the BRDF (the max-plotrange is given by the max. value
                     of SRF in specular-direction (for the chosen incBRDF) )
        plabel : string
                 Manual label for the volume-scattering phase-function plot
        BRDFlabel : string
                    Manual label for the BRDF plot
        paprox : boolean (default = True)
                 Indicator if the approximation of the phase-function in terms
                 of Legendre-polynomials will be plotted.
        BRDFaprox : boolean (default = True)
                 Indicator if the approximation of the BRDF in terms of
                 Legendre-polynomials will be plotted.
        plegend : boolean (default = True)
                 Indicator if a legend should be shown that indicates the
                 meaning of the different colors for the phase-function
        plegpos : (float,float) (default = (0.75,0.5))
                 Positioning of the legend for the V-plot (controlled via
                 the matplotlib.legend keyword  bbox_to_anchor = plegpos )
        BRDFlegend : boolean (default = True)
                 Indicator if a legend should be shown that indicates the
                 meaning of the different colors for the BRDF
        BRDFlegpos : (float,float) (default = (0.285,0.5))
                 Positioning of the legend for the SRF-plot (controlled via
                 the matplotlib.legend keyword  bbox_to_anchor = BRDFlegpos)
        groundcolor : string (default = "none")
                 Matplotlib color-indicator to change the color of the lower
                 hemisphere in the BRDF-plot. Possible values are:
                 ('r', 'g' , 'b' , 'c' , 'm' , 'y' , 'k' , 'w' , 'none')

        Returns
        ---------
        polarfig : figure
                   a matplotlib figure showing a polar-plot of the functions
                   specified by V or SRF
        """

        assert isinstance(incp, list), 'Error: incidence-angles for ' + \
            'polarplot of p must be a list'
        assert isinstance(incBRDF, list), 'Error: incidence-angles for ' + \
            'polarplot of the BRDF must be a list'
        for i in incBRDF:
            assert i <= 90, 'ERROR: the incidence-angle of the BRDF in ' + \
                'polarplot must be < 90'

        assert isinstance(pmultip, float), 'Error: plotrange-multiplier ' + \
            'for polarplot of p must be a floating-point number'
        assert isinstance(BRDFmultip, float), 'Error: plotrange-' + \
            'multiplier for plot of the BRDF must be a floating-point number'

        assert isinstance(plabel, str), 'Error: plabel of V-plot must ' + \
            'be a string'
        assert isinstance(BRDFlabel, str), 'Error: plabel of SRF-plot ' + \
            'must be a string'

        if R is None and SRF is None and V is None:
            assert False, 'Error: You must either provide R or SRF and/or V'

        # if R is provided, use it to define SRF and V,
        # else use the provided functions
        if R is not None:
            SRF = R.SRF
            V = R.V

        # define functions for plotting that evaluate the used
        # approximations in terms of legendre-polynomials
        if V is not None:
            # if V is a scalar, make it a list
            if np.ndim(V) == 0:
                V = [V]

            # make new figure
            if SRF is None:
                # if SRF is None, plot only a single plot of p
                polarfig = plt.figure(figsize=(7, 7))
                polarax = polarfig.add_subplot(111, projection='polar')
            else:
                # plot p and the BRDF together
                polarfig = plt.figure(figsize=(14, 7))
                polarax = polarfig.add_subplot(121, projection='polar')

            # plot of volume-scattering phase-function's
            pmax = 0
            for V_i in V:  # iterate over the (possibly single-element) list
                # define a plotfunction of the legendre-approximation of p
                if paprox is True:
                    phasefunktapprox = sp.lambdify((
                        'theta_0', 'theta_s',
                        'phi_0', 'phi_s'),
                        V_i.legexpansion('theta_0', 'theta_s',
                                         'phi_0', 'phi_s', 'vvvv').doit(),
                        modules=["numpy", "sympy"])

                # set incidence-angles for which p is calculated
                plottis = np.deg2rad(incp)
                colors = ['k', 'r',
                          'g', 'b',
                          'c', 'm',
                          'y'] * int(round((len(plottis) / 7. + 1)))

                for i in plottis:
                    ts = np.arange(0., 2. * np.pi, .01)
                    pmax_i = pmultip * np.max(V_i.p(np.full_like(ts, i),
                                                    ts,
                                                    0.,
                                                    0.))
                    if pmax_i > pmax:
                        pmax = pmax_i


                if plegend is True:
                    legend_lines = []

                # set color-counter to 0
                i = 0
                for ti in plottis:
                    color = colors[i]
                    i = i + 1
                    thetass = np.arange(0., 2. * np.pi, .01)
                    rad = V_i.p(ti, thetass, 0., 0.)
                    if paprox is True:
                        # the use of np.pi-ti stems from the definition
                        # of legexpansion() in volume.py
                        radapprox = phasefunktapprox(np.pi - ti,
                                                     thetass, 0., 0.)
                    # set theta direction to clockwise
                    polarax.set_theta_direction(-1)
                    # set theta to start at z-axis
                    polarax.set_theta_offset(np.pi / 2.)

                    polarax.plot(thetass, rad, color)
                    if paprox is True:
                        polarax.plot(thetass, radapprox, color + '--')
                    polarax.arrow(-ti, pmax * 1.2, 0., -pmax * 0.8,
                                  head_width=.0, head_length=.0,
                                  fc=color, ec=color, lw=1, alpha=0.3)

                    polarax.fill_between(thetass, rad, alpha=0.2, color=color)
                    polarax.set_xticklabels([r'$0^\circ$', r'$45^\circ$',
                                             r'$90^\circ$', r'$135^\circ$',
                                             r'$180^\circ$'])
                    polarax.set_yticklabels([])
                    polarax.set_rmax(pmax * 1.2)
                    polarax.set_title(plabel + '\n')

            # add legend for covering layer phase-functions
            if plegend is True:
                i = 0
                for ti in plottis:
                    color = colors[i]
                    legend_lines += [mlines.Line2D(
                        [], [], color=color,
                        label=r'$\theta_0$ = ' + str(
                            np.round_(np.rad2deg(ti),
                                      decimals=1)) + r'${}^\circ$')]
                    i = i + 1

                if paprox is True:
                    legend_lines += [mlines.Line2D(
                        [], [], color='k',
                        linestyle='--', label='approx.')]

                legend = plt.legend(bbox_to_anchor=plegpos,
                                    loc=2, handles=legend_lines)
                legend.get_frame().set_facecolor('w')
                legend.get_frame().set_alpha(.5)

        if SRF is not None:
            # if SRF is a scalar, make it a list
            if np.ndim(SRF) == 0:
                SRF = [SRF]

            # append to figure or make new figure
            if V is None:
                # if V is None, plot only a single plot of the BRDF
                polarfig = plt.figure(figsize=(7, 7))
                polarax = polarfig.add_subplot(111, projection='polar')
            else:
                # plot p and the BRDF together
                polarax = polarfig.add_subplot(122, projection='polar')

            if BRDFlegend is True:
                legend_lines = []

            # plot of BRDF
            brdfmax = 0
            for SRF_i in SRF:  # iterate over the (possibly single-element) list
                # define a plotfunction of the analytic form of the BRDF
                if BRDFaprox is True:
                    brdffunktapprox = sp.lambdify(
                        ('theta_ex', 'theta_s', 'phi_ex', 'phi_s'),
                        SRF_i.legexpansion(
                            'theta_ex', 'theta_s', 'phi_ex', 'phi_s', 'vvvv'
                            ).doit(), modules=["numpy", "sympy"])

                # set incidence-angles for which the BRDF is calculated
                plottis = np.deg2rad(incBRDF)
                colors = ['k', 'r',
                          'g', 'b',
                          'c', 'm',
                          'y'] * int(round((len(plottis) / 7. + 1)))

                for i in plottis:
                    ts = np.arange(0., 2. * np.pi, .01)
                    brdfmax_i = BRDFmultip * np.max(SRF_i.brdf(
                            np.full_like(ts, i), ts, 0., 0.))
                    if brdfmax_i > brdfmax:
                        brdfmax = brdfmax_i


                # set color-counter to 0
                i = 0
                for ti in plottis:
                    color = colors[i]
                    i = i + 1
                    thetass = np.arange(-np.pi / 2., np.pi / 2., .01)
                    rad = SRF_i.brdf(ti, thetass, 0., 0.)
                    if BRDFaprox is True:
                        radapprox = brdffunktapprox(ti, thetass, 0., 0.)
                    # set theta direction to clockwise
                    polarax.set_theta_direction(-1)
                    # set theta to start at z-axis
                    polarax.set_theta_offset(np.pi / 2.)

                    polarax.plot(thetass, rad, color=color)
                    if BRDFaprox is True:
                        polarax.plot(thetass, radapprox, color + '--')

                    polarax.fill(
                        np.arange(np.pi / 2., 3. * np.pi / 2., .01),
                        np.ones_like(np.arange(np.pi / 2.,
                                               3. * np.pi / 2.,
                                               .01)
                                     ) * brdfmax * 1.2, color=groundcolor)

                    polarax.arrow(-ti, brdfmax * 1.2, 0.,
                                  -brdfmax * 0.8, head_width=.0,
                                  head_length=.0, fc=color,
                                  ec=color, lw=1, alpha=0.3)

                    polarax.fill_between(thetass, rad, alpha=0.2, color=color)
                    polarax.set_xticklabels([r'$0^\circ$',
                                             r'$45^\circ$',
                                             r'$90^\circ$'])
                    polarax.set_yticklabels([])
                    polarax.set_rmax(brdfmax * 1.2)
                    polarax.set_title(BRDFlabel + '\n')

            # add legend for BRDF's
            if BRDFlegend is True:
                i = 0
                for ti in plottis:
                    color = colors[i]
                    legend_lines += [
                        mlines.Line2D([], [], color=color,
                                      label=r'$\theta_0$ = ' + str(
                            np.round_(np.rad2deg(ti), decimals=1)) +
                            r'${}^\circ$')]
                    i = i + 1
                if BRDFaprox is True:
                    legend_lines += [mlines.Line2D([], [], color='k',
                                                   linestyle='--',
                                                   label='approx.')]

                legend = plt.legend(bbox_to_anchor=BRDFlegpos,
                                    loc=2, handles=legend_lines)
                legend.get_frame().set_facecolor('w')
                legend.get_frame().set_alpha(.5)

        plt.show()
        return polarfig
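A hedged usage sketch based only on the docstring's own examples; the module paths are assumptions about the RT1 package layout, and 'plots' stands in for whatever instance owns polarplot:

# Hypothetical usage following the docstring's examples
from rt1.volume import Rayleigh        # assumed module path
from rt1.surface import CosineLobe     # assumed module path

V = Rayleigh()
SRF = CosineLobe(i=3, ncoefs=5)
polarfig = plots.polarplot(V=V, SRF=SRF, incp=[15., 35., 55., 75.])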