def showInference(model, evs=None, size=None):
    """Render an inference diagram for *model* when notebook display is available.

    Silently does nothing when pyAgrum's notebook helpers cannot be imported
    (e.g. when this file runs as a plain script outside a notebook).

    :param model: the graphical model to display
    :param evs: optional evidence dict passed through to the renderer
    :param size: optional figure size passed through to the renderer
    """
    try:
        import pyAgrum.lib.notebook as nb  # only importable inside a notebook env
        nb.showInference(model, evs=evs, size=size)
    except ImportError:
        pass  # headless run: rendering is deliberately a no-op
# Hand-tuned CPTs: the first run of labels of each variable favours target
# state 0 and the remainder favours state 1; normalizeAsCPT() rescales each
# conditional distribution so it sums to 1.
bn.cpt('relationship')[0:] = [1, 1, 1, 10, 10, 10]
bn.cpt('relationship')[1:] = [10, 10, 10, 1, 1, 1]
bn.cpt('relationship').normalizeAsCPT()
bn.cpt('relationship')

bn.cpt('marital_status')[0:] = [1, 1, 1, 1, 10, 10, 10]
bn.cpt('marital_status')[1:] = [10, 10, 10, 10, 1, 1, 1]
bn.cpt('marital_status').normalizeAsCPT()
bn.cpt('marital_status')

bn.cpt('occupation')[0:] = [1, 1, 1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10]
bn.cpt('occupation')[1:] = [10, 10, 10, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1, 1]
bn.cpt('occupation').normalizeAsCPT()
bn.cpt('occupation')

# Visual sanity checks of the model under several evidence settings.
gnb.showInference(bn, size="10")
gnb.showInference(bn, size="10", evs={'target': '<=50K'})
gnb.showInference(bn, size="10", evs={'target': '>50K'})
gnb.showInference(bn, size="10", evs={'target': '<=50K', 'sex': ' Male'})

# Impute the dataset's missing-value marker " ?" (note the leading space)
# with a default category.  The original row-by-row iterrows loop invoked
# DataFrame.replace(..., inplace=True) once per matching row — accidental
# O(n^2); one vectorized replace per column is behavior-equivalent and
# linear.  (" ?" contains no letters, so the original str.lower() test
# reduced to plain equality.)
missing_marker = ' ?'
train_df['occupation'] = train_df['occupation'].replace(missing_marker, ' Sales')
train_df['workclass'] = train_df['workclass'].replace(missing_marker, ' Private')

ie = gum.LazyPropagation(bn)
# Sprinkler ('s') given cloudy ('c'): much likelier to be on when not cloudy.
bn.cpt(s)[{'c': 1}] = [0.9, 0.1]
bn.cpt(s)

# Wet grass ('w') given rain ('r') and sprinkler ('s'): one conditional
# distribution per parent configuration.
wet_given = {
    (0, 0): [1, 0],
    (0, 1): [0.1, 0.9],
    (1, 0): [0.1, 0.9],
    (1, 1): [0.01, 0.99],
}
for (rain, sprinkler), dist in wet_given.items():
    bn.cpt(w)[{'r': rain, 's': sprinkler}] = dist
bn.cpt(w)

# Rain ('r') given cloudy ('c').
for cloudy, dist in ((0, [0.8, 0.2]), (1, [0.2, 0.8])):
    bn.cpt(r)[{'c': cloudy}] = dist
bn.cpt(r)

"""Qual é a probabilidade de a grama estar molhada em um caso geral?"""
# (What is the probability that the grass is wet in the general case?)
gnb.showInference(bn, evs={})

"""Vemos que a probabilidade da grama estar molhada no geral é de 64,7% Supondo que queremos inferir a partir das seguintes condições a priori: Não está nublado e o aspersor está ligado. Qual a probabilidade de a grama estar molhada? """
# (Overall P(wet) is 64.7%.  Now condition on: not cloudy, sprinkler on.)
gnb.showInference(bn, evs={'s': 1, 'c': 0})

"""Vemos que a probabilidade é de 91,8%. ## Agora é sua vez! Imagine que está nublado e está chovendo. Qual a probabilidade da grama estar molhada? """
# (The probability is 91.8%.  Your turn: cloudy and raining — P(wet)?)
# Fill the CPT of "L" from the externally-built Potential `p` (defined
# earlier in the file): fillWith follows the Potential's own variable
# order, so `p` is first reorganized to the reversed var_names of the
# target CPT.
s = bn.cpt("L").var_names
s.reverse()
p.reorganize(s)
bn.cpt("L").fillWith(p)

# Build P(V | T, J) from the VibrationKnowingTorqueAndJoint table, then
# align the variable order the same way before filling "V"'s CPT.
p = gum.Potential().add(bn.variable("J")).add(bn.variable("T")).add(
    bn.variable("V"))
p.fillWith(list(VibrationKnowingTorqueAndJoint))
s = bn.cpt("V").var_names
s.reverse()
p.reorganize(s)
bn.cpt("V").fillWith(p).normalizeAsCPT()

showInformation(bn)
# %%
showInference(bn, size="20")
# %%
showInference(bn, evs={"L": True}, size="20")
# %%
# NOTE(review): the evidence "0.2" for "A" is a string — presumably a
# label of the variable "A"; confirm it matches one of A's labels.
showInference(bn, evs={"L": False, "A": "0.2"}, size="20")
# %%
# Exact inference: joint posterior of (T, J) given L = True, exported to
# an otagrum distribution object.
ie = gum.LazyPropagation(bn)
ie.addJointTarget(set(["T", "J"]))
ie.setEvidence({"L": True})
ie.makeInference()
# %%
distrib = otagrum.Utils.FromPotential(ie.jointPosterior({"T", "J"}))
def _score_prediction(posterior, true_nuc):
    """Score one test sample against a posterior over 'nuc'.

    Returns a pair ``(counted, correct)``:
      * ``counted`` is False when the posterior's argmax is not unique,
        in which case the sample is skipped entirely;
      * label index 2 is the extra '-1' label added to 'nuc', so a
        predicted index of 2 is correct when the ground truth is -1.
    """
    argmax = posterior.argmax()
    if len(argmax) != 1:  # ambiguous posterior: do not count this sample
        return False, False
    predicted = argmax[0]['nuc']
    if predicted == 2 and true_nuc == -1:  # index 2 encodes the '-1' label
        return True, True
    return True, predicted == true_nuc


def main():
    """Learn two BN structures from protein_train.csv, fit their parameters,
    then compare their prediction accuracy for 'nuc' on protein_test.csv.

    Prints the two accuracies (K2-based model first, tabu-list model second).
    """
    bn = gum.BayesNet('nuc_inf')

    # Target variable: two default labels plus an explicit '-1' label
    # (index 2) for the negative class found in the data.
    va = gum.LabelizedVariable('nuc', 'a labelized variable', 2)
    va.addLabel('-1')
    nuc = bn.add(va)

    # Predictor variables; domain sizes are fixed by the dataset.
    bn.add('A', 6)
    for name in ('R', 'N'):
        bn.add(name, 7)
    for name in ('D', 'Q'):
        bn.add(name, 2)

    partition("protein")
    learner = gum.BNLearner("protein_train.csv", bn)
    # Optional domain knowledge may be injected here, e.g.
    # learner.addMandatoryArc('A', 'nuc')

    # Structure learning with three different algorithms.
    learner.useLocalSearchWithTabuList()
    bn0 = learner.learnBN()
    gnb.showBN(bn0)

    learner.useGreedyHillClimbing()
    bn1 = learner.learnBN()
    gnb.showBN(bn1)

    learner.useK2([5, 4, 3, 2, 1, 0])
    bn2 = learner.learnBN()
    gnb.showBN(bn2)

    def _fit_parameters(dag):
        # Parameter learning with Laplace smoothing on a fixed structure.
        fitter = gum.BNLearner("protein_train.csv", bn)
        fitter.setInitialDAG(dag)
        fitter.useAprioriSmoothing(1)
        fitted = fitter.learnParameters()
        gnb.showBN(fitted)
        return fitted

    bn01 = _fit_parameters(bn0.dag())  # first: tabu-list structure
    bn11 = _fit_parameters(bn2.dag())  # second: K2 structure

    ie1 = gum.LazyPropagation(bn01)
    ie1.makeInference()
    gnb.showInference(bn01, evs={})
    ie2 = gum.LazyPropagation(bn11)
    ie2.makeInference()
    gnb.showInference(bn11, evs={})

    # Evaluate both models on the held-out test set.
    # BUGFIX: the counters previously started at 1 (an implicit guard
    # against division by zero) which deflated both reported accuracies;
    # they now start at 0 and the division is guarded explicitly.
    # Targets are registered once — eraseAllEvidence() clears evidence
    # only, so they persist across iterations.
    ie1.addTarget(nuc)
    ie2.addTarget(nuc)
    count1 = count2 = 0
    acc1 = acc2 = 0
    with open('protein_test.csv', 'r', encoding="utf-8") as csvfile:
        reader = csv.reader(csvfile)
        for line in list(reader)[1:]:  # skip the header row
            vnuc, vA, vR, vN, vD, vQ = (int(v) for v in line[:6])
            evidence = {'A': vA, 'R': vR, 'N': vN, 'D': vD, 'Q': vQ}
            for engine in (ie1, ie2):
                engine.eraseAllEvidence()
                engine.setEvidence(evidence)
                engine.makeInference()
            counted, correct = _score_prediction(ie1.posterior(nuc), vnuc)
            if counted:
                count1 += 1
                acc1 += int(correct)
            counted, correct = _score_prediction(ie2.posterior(nuc), vnuc)
            if counted:
                count2 += 1
                acc2 += int(correct)
    acc1 = acc1 / count1 if count1 else 0.0
    acc2 = acc2 / count2 if count2 else 0.0
    print(acc2, acc1)