def deleteExtraNodes(self, netName, netOut, keepNodes):
    ### removes all nodes that aren't given in the input list
    # open the net stored in netName
    cnet = self.pyt.OpenNeticaNet(netName)
    # get the nodes and their number
    allnodes = self.pyt.GetNetNodes(cnet)
    numnodes = self.pyt.LengthNodeList(allnodes)
    # loop over all nodes and delete those that aren't needed.
    # iterate in reverse so deletions cannot shift the indices of
    # nodes that have not been visited yet
    for ii in np.arange(numnodes - 1, -1, -1):
        cnode = self.pyt.NthNode(allnodes, ct.c_int(ii))
        cname = cth.c_char_p2str(self.pyt.GetNodeName(cnode))
        # cross-check the name
        if cname not in keepNodes:
            self.pyt.DeleteNode(cnode)
    # store the net
    outfile_streamer = self.pyt.NewFileStreamer(
        ct.c_char_p(netOut.encode()))
    self.pyt.CompileNet(cnet)
    vprint(3, self.pyt.verboselvl,
           "Writing new net with correct nodes to: {0:s}".format(netOut))
    self.pyt.WriteNet(cnet, outfile_streamer)
    self.pyt.DeleteStream(outfile_streamer)
    self.pyt.DeleteNet(cnet)
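# Minimal usage sketch for deleteExtraNodes (comment only): `cdata` stands in
# for an instance of this class with an open Netica environment, and the
# file and node names are hypothetical placeholders.
#
#   cdata.deleteExtraNodes('full_model.neta', 'trimmed_model.neta',
#                          keepNodes=['recharge', 'head', 'flux_out'])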
def LimitMemoryUsage(self, memlimit):
    ### double LimitMemoryUsage_ns ( double max_mem, environ_ns* env )
    self.n.LimitMemoryUsage_ns.argtypes = [ct.c_double, ct.c_void_p]
    self.n.LimitMemoryUsage_ns.restype = ct.c_double
    self.n.LimitMemoryUsage_ns(memlimit, self.env)
    vprint(1, self.verboselvl,
           'set memory limit to ---> %f bytes' % memlimit)
    self.chkerr()
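# Usage sketch: the limit is specified in bytes (per the message above), so
# capping Netica at roughly 2 GB would look like this hypothetical call,
# where `pyt` is an instance of this class with an environment already open:
#
#   pyt.LimitMemoryUsage(2.0e9)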
def sanitize(self):
    vprint(2, self.verboselvl,
           'Sanitizing pynetica object to remove pointers')
    # strip out all ctypes information from self to allow for pickling
    self.n = None
    self.mesg = None
    self.env = None
def CloseNetica(self):
    self.n.CloseNetica_bn.argtypes = [ct.c_void_p, ct.c_char_p]
    self.n.CloseNetica_bn.restype = ct.c_int
    res = self.n.CloseNetica_bn(self.env, self.mesg)
    if res >= 0:
        vprint(1, self.verboselvl, "Closing Netica:")
        vprint(1, self.verboselvl, self.mesg.value.decode('utf-8'))
    else:
        # restype is c_int, so res is already a plain Python int
        raise NeticaCloseFail(res)
    self.n = None
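# Sketch of a teardown sequence (an assumption inferred from the two methods
# above, not a documented requirement): close the C-side environment first,
# then sanitize if the object needs to be pickled afterwards.
#
#   pyt.CloseNetica()   # release the Netica environment
#   pyt.sanitize()      # drop remaining ctypes pointers so pickling works
#   import pickle
#   with open('pyt_state.pkl', 'wb') as fp:
#       pickle.dump(pyt, fp)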
def NodeParentIndexing(self, netName, casfile):
    '''
    Find all the configurations of states in the parent nodes for each
    response node.
    This is used only for post-processing of experience data.
    '''
    # open the net stored in netName
    cnet = self.pyt.OpenNeticaNet(netName)
    # get the nodes and their number
    allnodes = self.pyt.GetNetNodes(cnet)
    numnodes = self.pyt.LengthNodeList(allnodes)
    # parent indices dictionary for the results
    parent_indices = dict()
    # now focus in on the response nodes only
    respnodes = self.probpars.scenario.response
    for cr in respnodes:
        parent_indices[cr] = parent_inds()
        crespnode = self.pyt.GetNodeNamed(ct.c_char_p(cr.encode()), cnet)
        # get the parent nodes and their names
        cparents = self.pyt.GetNodeParents(crespnode)
        numparents = self.pyt.LengthNodeList(cparents)
        for cp in np.arange(numparents):
            tmpnode = self.pyt.NthNode(cparents, ct.c_int(cp))
            parent_indices[cr].parent_names.append(
                cth.c_char_p2str(self.pyt.GetNodeName(tmpnode)))
    # open a streamer to the CAS file we will read over
    cas_streamer = self.pyt.NewFileStreamer(ct.c_char_p(casfile.encode()))
    # loop over the cases
    for ccas in np.arange(self.N):
        if ccas == 0:
            case_posn = pnC.netica_const.FIRST_CASE
        else:
            case_posn = pnC.netica_const.NEXT_CASE
        # first set the findings according to what's in the case file
        case_posn_out = self.pyt.ReadNetFindings2(case_posn,
                                                  cas_streamer, allnodes)
        # now, for each parent, in order, read the states
        for cr in respnodes:
            tmpinds = list()
            for cp in parent_indices[cr].parent_names:
                cnode = self.pyt.GetNodeNamed(ct.c_char_p(cp.encode()), cnet)
                tmpinds.append(self.pyt.GetNodeFinding(cnode))
            parent_indices[cr].parent_indices.append(tmpinds)
    for cr in respnodes:
        vprint(3, self.pyt.verboselvl, 'making into an array --> %s' % (cr))
        parent_indices[cr].parent_indices = np.array(
            parent_indices[cr].parent_indices, dtype=int)
    # clean up the temporary streamer and net
    self.pyt.DeleteNet(cnet)
    self.pyt.DeleteStream(cas_streamer)
    self.parent_inds = parent_indices
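# Hypothetical call (placeholder filenames), assuming self.N matches the
# number of cases in the CAS file:
#
#   cdata.NodeParentIndexing('calibrated.neta', 'observations.cas')
#   # cdata.parent_inds[<response>].parent_indices is then an
#   # (N x numparents) integer array of parent-state configurations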
def ExperiencePostProc(self):
    vprint(3, self.pyt.verboselvl,
           'Post-Processing Experience data, matching with predicted '
           'nodes and cases')
    for cn in self.probpars.scenario.response:
        for ccas in np.arange(self.N):
            testinds = self.parent_inds[cn].parent_indices[ccas, :]
            tmp = testinds - self.BaseNeticaTests.experience[cn].parent_states
            tmp = np.sum(np.abs(tmp), axis=1)
            cind = np.where(tmp == 0)
            self.BaseNeticaTests.experience[cn].case_experience.append(
                self.BaseNeticaTests.experience[cn].node_experience[cind[0]])
def start_environment(self, licfile):
    # read in the license file information
    self.licensefile = licfile
    if os.path.exists(self.licensefile):
        with open(self.licensefile, 'r') as lfp:
            self.license = lfp.readlines()[0].strip().split()[0]
    else:
        vprint(2, self.verboselvl,
               "Warning: License File [{0:s}] not found.\n".format(
                   self.licensefile) +
               "Opening Netica without a license, which will limit the "
               "size of nets that can be used.\n" +
               "Window may become unresponsive.")
        self.license = None
    self.NewNeticaEnviron()
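# Typical startup sequence (a sketch; the class name `pynetica` and the
# license filename are assumptions for illustration):
#
#   pyt = pynetica()
#   pyt.start_environment('netica_license.txt')
#   # the first whitespace-delimited token on the file's first line is
#   # used as the license string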
def PredictBayesPostProc(self, cpred, outname, casname, cNeticaTestStats):
    ofp = open(outname, 'w')
    ofp.write(
        'Validation statistics for net --> {0:s} and casefile --> {1:s}\n'.
        format(outname, casname))
    ofp.write('%14s ' * 12 %
              ('Response', 'skillMean', 'rmseMean', 'meanErrMean',
               'meanAbsErrMean', 'skillML', 'rmseML', 'meanErrML',
               'meanAbsErrML', 'LogLoss', 'ErrorRate', 'QuadraticLoss') +
              '\n')
    for i in self.probpars.scenario.response:
        vprint(3, self.pyt.verboselvl,
               'writing output for --> {0:s}'.format(i))
        ofp.write('%14s %14.4f %14.6e %14.6e %14.6e %14.4f %14.6e %14.6e '
                  '%14.6e %14.6e %14.6e %14.6e\n' %
                  (i, cpred[i].stats.skMean, cpred[i].stats.rmseM,
                   cpred[i].stats.meaneM, cpred[i].stats.meanabserrM,
                   cpred[i].stats.skML, cpred[i].stats.rmseML,
                   cpred[i].stats.meaneML, cpred[i].stats.meanabserrML,
                   cNeticaTestStats.logloss[i], cNeticaTestStats.errrate[i],
                   cNeticaTestStats.quadloss[i]))
    ofp.close()
    outfileConfusion = re.sub('base_stats', 'Confusion', outname)
    ofpC = open(outfileConfusion, 'w')
    ofpC.write('Confusion matrices for net --> %s and casefile --> %s\n' %
               (outname, casname))
    for j in self.probpars.scenario.response:
        ofpC.write('*' * 16 + '\nConfusion matrix for %s\n' % (j) +
                   '*' * 16 + '\n')
        numstates = len(self.NETNODES[j].levels) - 1
        ofpC.write('%24s' % (''))
        for i in np.arange(numstates):
            ofpC.write('%24s' % ('%8.4e--%8.4e' %
                                 (self.NETNODES[j].levels[i],
                                  self.NETNODES[j].levels[i + 1])))
        ofpC.write('\n')
        for i in np.arange(numstates):
            ofpC.write('%24s' % ('%8.4e--%8.4e' %
                                 (self.NETNODES[j].levels[i],
                                  self.NETNODES[j].levels[i + 1])))
            for k in cNeticaTestStats.confusion_matrix[j][i, :]:
                ofpC.write('%24d' % (k))
            ofpC.write('\n')
        ofpC.write('\n' * 2)
    ofpC.close()
def linkNodes(self, netName, netOut, parentNode, childNode):
    ### links a parent and child node
    # open the net stored in netName
    cnet = self.pyt.OpenNeticaNet(netName)
    pnode = self.pyt.GetNodeNamed(ct.c_char_p(parentNode.encode()), cnet)
    chnode = self.pyt.GetNodeNamed(ct.c_char_p(childNode.encode()), cnet)
    self.pyt.AddLink(pnode, chnode)
    # store the net
    outfile_streamer = self.pyt.NewFileStreamer(
        ct.c_char_p(netOut.encode()))
    self.pyt.CompileNet(cnet)
    vprint(3, self.pyt.verboselvl,
           "Writing new net with new node links to: {0:s}".format(netOut))
    self.pyt.WriteNet(cnet, outfile_streamer)
    self.pyt.DeleteStream(outfile_streamer)
    self.pyt.DeleteNet(cnet)
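# Hypothetical example: add an edge from 'rainfall' to 'runoff' in an
# existing net and save the result (all names are placeholders):
#
#   cdata.linkNodes('base.neta', 'base_linked.neta',
#                   parentNode='rainfall', childNode='runoff')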
def PredictBayesPostProcCV(self, cpred, numfolds, ofp, calval,
                           cNeticaTestStats):
    for cfold in np.arange(numfolds):
        for j in self.probpars.scenario.response:
            vprint(3, self.pyt.verboselvl,
                   'writing %s cross-validation output for --> %s' %
                   (calval, j))
            ofp.write(
                '%14d %14s %14.4f %14.6e %14.6e %14.6e %14.4f %14.6e '
                '%14.6e %14.6e %14.6e %14.6e %14.6e\n' %
                (cfold, j, cpred[cfold][j].stats.skMean,
                 cpred[cfold][j].stats.rmseM,
                 cpred[cfold][j].stats.meaneM,
                 cpred[cfold][j].stats.meanabserrM,
                 cpred[cfold][j].stats.skML,
                 cpred[cfold][j].stats.rmseML,
                 cpred[cfold][j].stats.meaneML,
                 cpred[cfold][j].stats.meanabserrML,
                 cNeticaTestStats[cfold].logloss[j],
                 cNeticaTestStats[cfold].errrate[j],
                 cNeticaTestStats[cfold].quadloss[j]))
def NewNeticaEnviron(self):
    '''
    Create a new Netica environment based on the operating system
    '''
    # first access the .dll or .so in the same directory as CVNetica
    try:
        if 'window' in platform.system().lower():
            self.n = ct.windll.LoadLibrary(
                os.path.join(os.path.dirname(__file__), 'Netica.dll'))
        else:
            self.n = ct.cdll.LoadLibrary(
                os.path.join(os.path.dirname(__file__), 'libnetica.so'))
    except OSError:
        raise dllFail(platform.system())
    # next try to establish an environment for Netica;
    # need to be sure to specify argument and return types to send None
    self.n.NewNeticaEnviron_ns.argtypes = [
        ct.c_char_p, ct.c_void_p, ct.c_char_p
    ]
    self.n.NewNeticaEnviron_ns.restype = ct.c_void_p
    clicense = self.license
    if isinstance(clicense, str):
        # c_char_p expects bytes (or None) in Python 3
        clicense = clicense.encode()
    self.env = self.n.NewNeticaEnviron_ns(clicense, None, None)
    # try to initialize Netica
    self.n.InitNetica2_bn.argtypes = [ct.c_void_p, ct.c_char_p]
    self.n.InitNetica2_bn.restype = ct.c_int
    res = self.n.InitNetica2_bn(self.env, self.mesg)
    # now check the initialization; restype is c_int,
    # so res is already a plain Python int
    if res >= 0:
        vprint(1, self.verboselvl, '\n' * 2 + '#' * 40 + '\nOpening Netica:')
        vprint(1, self.verboselvl, self.mesg.value.decode('utf-8'))
    else:
        raise NeticaInitFail(res)
    vprint(1, self.verboselvl, 'Netica is open\n' + '#' * 40 + '\n' * 2)
def ReadNodeInfo(self, netName):
    '''
    Read in all information on beliefs, states, and likelihoods
    for all nodes in the net called netName
    '''
    # open the net stored in netName
    cnet = self.OpenNeticaNet(netName)
    # get the nodes and their number
    allnodes = self.GetNetNodes(cnet)
    numnodes = self.LengthNodeList(allnodes)
    vprint(3, self.verboselvl,
           'Reading Node information from net --> {0:s}'.format(netName))
    cNETNODES = dict()
    # loop over the nodes
    for cn in np.arange(numnodes):
        cnode = self.NthNode(allnodes, ct.c_int(cn))
        cnodename = cth.c_char_p2str(self.GetNodeName(cnode))
        cNETNODES[cnodename] = nodestruct()
        cNETNODES[cnodename].name = cth.c_char_p2str(self.GetNodeName(cnode))
        cNETNODES[cnodename].title = cth.c_char_p2str(
            self.GetNodeTitle(cnode))
        vprint(3, self.verboselvl,
               '  Parsing node --> %s' % (cNETNODES[cnodename].title))
        cNETNODES[cnodename].Nbeliefs = self.GetNodeNumberStates(cnode)
        cNETNODES[cnodename].beliefs = cth.c_float_p2float(
            self.GetNodeBeliefs(cnode), cNETNODES[cnodename].Nbeliefs)
        cNETNODES[cnodename].likelihood = cth.c_float_p2float(
            self.GetNodeLikelihood(cnode), cNETNODES[cnodename].Nbeliefs)
        cNETNODES[cnodename].levels = cth.c_double_p2float(
            self.GetNodeLevels(cnode), cNETNODES[cnodename].Nbeliefs + 1)
        # loop over the states in each node
        for cs in range(cNETNODES[cnodename].Nbeliefs):
            cNETNODES[cnodename].state.append(statestruct())
            cNETNODES[cnodename].state[-1].name = cth.c_char_p2str(
                self.GetNodeStateName(cnode, ct.c_int(cs)))
    self.DeleteNet(cnet)
    return cNETNODES
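# Sketch of inspecting the returned dictionary (placeholder filename; the
# fields are those populated above):
#
#   nodes = pyt.ReadNodeInfo('base.neta')
#   for name, nd in nodes.items():
#       print(name, nd.Nbeliefs, nd.beliefs)   # prior belief vector per node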
def SensitivityAnalysis(self):
    '''
    Performs sensitivity analysis on each response node assuming all
    input nodes are active (as defined in self.probpars.scenario).
    Reports results to a text file.
    '''
    vprint(3, self.pyt.verboselvl, '\n' * 3 + '*' * 10 + '\n' +
           'Performing Sensitivity Analysis\n' + '*' * 10)
    # make a streamer to the Net file
    net_streamer = self.pyt.NewFileStreamer(
        ct.c_char_p(self.probpars.baseNET.encode()))
    # read in the net using the streamer
    cnet = self.pyt.ReadNet(net_streamer)
    # remove the input net streamer
    self.pyt.DeleteStream(net_streamer)
    self.pyt.CompileNet(cnet)
    self.sensitivityvar = dict()
    self.sensitivityEntropy = dict()
    self.sensitivityEntropyNorm = dict()
    self.percentvarreduction = dict()
    allnodes = list()
    allnodes.extend(self.probpars.scenario.nodesIn)
    allnodes.extend(self.probpars.scenario.response)
    for cres in self.probpars.scenario.response:
        vprint(3, self.pyt.verboselvl,
               "Calculating sensitivity to node --> %s" % (cres))
        # calculate the sensitivity for each response variable
        # using all nodes as Vnodes
        Qnode = self.pyt.GetNodeNamed(ct.c_char_p(cres.encode()), cnet)
        Vnodes = self.pyt.GetNetNodes(cnet)
        self.sensitivityvar[cres] = dict()
        self.sensitivityEntropy[cres] = dict()
        self.sensitivityEntropyNorm[cres] = dict()
        self.percentvarreduction[cres] = dict()
        sensvar = self.pyt.NewSensvToFinding(
            Qnode, Vnodes,
            ct.c_int(pnC.netica_const.VARIANCE_OF_REAL_SENSV))
        sensmutual = self.pyt.NewSensvToFinding(
            Qnode, Vnodes, ct.c_int(pnC.netica_const.ENTROPY_SENSV))
        for cn in allnodes:
            Vnode = self.pyt.GetNodeNamed(ct.c_char_p(cn.encode()), cnet)
            self.sensitivityvar[cres][cn] = self.pyt.GetVarianceOfReal(
                sensvar, Vnode)
            self.sensitivityEntropy[cres][cn] = self.pyt.GetMutualInfo(
                sensmutual, Vnode)
        # percent variance reduction is the variance reduction of a node
        # divided by the variance reduction of self
        for cn in allnodes:
            self.percentvarreduction[cres][cn] = (
                self.sensitivityvar[cres][cn] /
                self.sensitivityvar[cres][cres])
            self.sensitivityEntropyNorm[cres][cn] = (
                self.sensitivityEntropy[cres][cn] /
                self.sensitivityEntropy[cres][cres])
        vprint(3, self.pyt.verboselvl,
               "Deleting sensitivity to --> %s" % (cres))
        self.pyt.DeleteSensvToFinding(sensvar)
        self.pyt.DeleteSensvToFinding(sensmutual)
    self.pyt.DeleteNet(cnet)
    # #### WRITE OUTPUT #### #
    ofp = open(self.probpars.scenario.name + 'Sensitivity.dat', 'w')
    ofp.write('Sensitivity analysis for scenario --> %s\n' %
              (self.probpars.scenario.name))
    ofp.write('Base Case Net: %s\nBase Case Casfile: %s\n' %
              (self.probpars.baseNET, self.probpars.baseCAS))
    # write out the raw variance reduction values
    ofp.write('#' * 10 + ' Raw Variance Reduction Values ' + '#' * 10 + '\n')
    ofp.write('%-14s' % ('Response_node '))
    for cn in allnodes:
        ofp.write('%-14s' % (cn))
    ofp.write('\n')
    for cres in self.sensitivityvar:
        ofp.write('%-14s' % (cres))
        for cn in allnodes:
            ofp.write('%-14.5f' % (self.sensitivityvar[cres][cn]))
        ofp.write('\n')
    # write out the percent variance reduction values
    ofp.write('#' * 10 + ' Percent Variance Reduction Values ' + '#' * 10 +
              '\n')
    ofp.write('%-14s' % ('Response_node '))
    for cn in allnodes:
        ofp.write('%-14s' % (cn))
    ofp.write('\n')
    for cres in self.percentvarreduction:
        ofp.write('%-14s' % (cres))
        for cn in allnodes:
            ofp.write('%-14.5f' %
                      (self.percentvarreduction[cres][cn] * 100.0))
        ofp.write('\n')
    # write out the mutual information (Entropy) values
    ofp.write('#' * 10 + ' Mutual Information (Entropy) ' + '#' * 10 + '\n')
    ofp.write('%-14s' % ('Response_node '))
    for cn in allnodes:
        ofp.write('%-14s' % (cn))
    ofp.write('\n')
    for cres in self.sensitivityEntropy:
        ofp.write('%-14s' % (cres))
        for cn in allnodes:
            ofp.write('%-14.5f' % (self.sensitivityEntropy[cres][cn]))
        ofp.write('\n')
    # write out the normalized mutual information (Entropy) values
    ofp.write('#' * 10 + ' Mutual Information (Entropy) Normalized ' +
              '#' * 10 + '\n')
    ofp.write('%-14s' % ('Response_node '))
    for cn in allnodes:
        ofp.write('%-14s' % (cn))
    ofp.write('\n')
    for cres in self.sensitivityEntropyNorm:
        ofp.write('%-14s' % (cres))
        for cn in allnodes:
            ofp.write('%-14.5f' % (self.sensitivityEntropyNorm[cres][cn]))
        ofp.write('\n')
    ofp.close()
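# Usage sketch: SensitivityAnalysis takes no arguments because the base net
# and scenario come from self.probpars; results land in nested dictionaries
# keyed first by response node, then by input node.
#
#   cdata.SensitivityAnalysis()
#   # e.g., percent variance reduction of response 'head' explained by
#   # input 'recharge' (both names are placeholders):
#   pvr = cdata.percentvarreduction['head']['recharge'] * 100.0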
def predictBayes(self, netName, N, casdata):
    '''
    netName --> name of the built net to make predictions on
    '''
    # first read in the information about a Net's nodes
    cNETNODES = self.pyt.ReadNodeInfo(netName)
    #
    # Initialize output
    #
    # initialize dictionary of predictions objects
    cpred = dict()
    vprint(3, self.pyt.verboselvl,
           'Making predictions for net named --> {0:s}'.format(netName))
    cnet = self.pyt.OpenNeticaNet(netName)
    # retract all the findings
    self.pyt.RetractNetFindings(cnet)
    for CN in cNETNODES:
        CNODES = cNETNODES[CN]
        Cname = CNODES.name
        if Cname in self.probpars.scenario.response:
            cpred[Cname] = predictions()
            cpred[Cname].stats = pred_stats()
            Nbins = CNODES.Nbeliefs
            cpred[Cname].pdf = np.zeros((N, Nbins))
            cpred[Cname].ranges = np.array(CNODES.levels)
            # get plottable ranges
            if Nbins < len(CNODES.levels):
                # continuous, so plot bin centers
                CNODES.continuous = True
                cpred[Cname].continuous = True
                cpred[Cname].rangesplt = (cpred[Cname].ranges[1:] -
                                          0.5 * np.diff(cpred[Cname].ranges))
            else:
                # discrete, so just use the bin values
                cpred[Cname].rangesplt = cpred[Cname].ranges.copy()
            cpred[Cname].priorPDF = CNODES.beliefs
    allnodes = self.pyt.GetNetNodes(cnet)
    numnodes = self.pyt.LengthNodeList(allnodes)
    #
    # Now loop over each input and get the Netica predictions
    #
    for i in np.arange(N):
        if self.pyt.verboselvl >= 3:
            sys.stdout.write('predicting value {0} of {1}\r'.format(i, N))
            sys.stdout.flush()
        # first have to enter the values for each node;
        # retract all the findings again
        self.pyt.RetractNetFindings(cnet)
        for cn in np.arange(numnodes):
            cnode = self.pyt.NthNode(allnodes, ct.c_int(cn))
            cnodename = cth.c_char_p2str(self.pyt.GetNodeName(cnode))
            # set the current node values
            if cnodename in self.probpars.scenario.nodesIn:
                self.pyt.EnterNodeValue(cnode,
                                        ct.c_double(casdata[cnodename][i]))
        for cn in np.arange(numnodes):
            # obtain the updated beliefs from ALL nodes,
            # including input and output
            cnode = self.pyt.NthNode(allnodes, ct.c_int(cn))
            cnodename = cth.c_char_p2str(self.pyt.GetNodeName(cnode))
            if cnodename in self.probpars.scenario.response:
                # get the current belief values
                cpred[cnodename].pdf[i, :] = cth.c_float_p2float(
                    self.pyt.GetNodeBeliefs(cnode),
                    self.pyt.GetNodeNumberStates(cnode))
    #
    # Do some postprocessing for just the output nodes
    #
    currstds = np.ones((N, 1)) * 1.0e-16
    for i in self.probpars.scenario.response:
        vprint(3, self.pyt.verboselvl,
               'postprocessing output node --> {0:s}'.format(i))
        # record whether the node is continuous or discrete
        if cpred[i].continuous:
            curr_continuous = 'continuous'
        else:
            curr_continuous = 'discrete'
        # these ranges may throw overflow warnings as e.g., <25 is
        # implemented as -(really big number)*10^308 to 25
        pdfRanges = cpred[i].ranges
        cpred[i].z = np.atleast_2d(casdata[i]).T
        pdfParam = np.hstack((cpred[i].z, currstds))
        pdfData = statfuns.makeInputPdf(pdfRanges, pdfParam, 'norm',
                                        curr_continuous)
        cpred[i].probModelUpdate = np.nansum(pdfData * cpred[i].pdf, 1)
        cpred[i].probModelPrior = np.nansum(
            pdfData * np.tile(cpred[i].priorPDF, (N, 1)), 1)
        # note --> np.spacing(1) is like eps in MATLAB
        cpred[i].logLikelihoodRatio = (
            np.log10(cpred[i].probModelUpdate + np.spacing(1)) -
            np.log10(cpred[i].probModelPrior + np.spacing(1)))
        cpred[i].dataPDF = pdfData.copy()
        # get the PDF stats here
        vprint(3, self.pyt.verboselvl, 'getting stats')
        cpred = self.PDF2Stats(i, cpred, alpha=0.1)
    self.pyt.DeleteNet(cnet)
    return cpred, cNETNODES
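# Hypothetical call: casdata is expected to be dict-like, keyed by node name,
# with N values per key (e.g., columns read from a CAS file):
#
#   cpred, cnetnodes = cdata.predictBayes('calibrated.neta', N=ncases,
#                                         casdata=cases)
#   # cpred[<response>].pdf then holds the (N x Nbins) posterior beliefs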
def PredictBayesNeticaCV(self, cfold, cnetname, calval):
    '''
    Function using Netica built-in testing functionality to evaluate a Net
    '''
    ctestresults = netica_test()
    # open up the current net
    cnet = self.pyt.OpenNeticaNet(cnetname)
    # retract all the findings
    self.pyt.RetractNetFindings(cnet)
    # first create a caseset with the current leftout indices casefile
    if cfold > -10:
        if calval.upper() == 'CAL':
            ccasefile = '{0:s}_fold_{1:d}.cas'.format(
                self.probpars.scenario.name, cfold)
        elif calval.upper() == 'VAL':
            ccasefile = '{0:s}_fold_{1:d}_leftout.cas'.format(
                self.probpars.scenario.name, cfold)
        else:
            pass
    # unless this is the base case -->
    else:
        ccasefile = self.probpars.baseCAS
    currcases = self.pyt.NewCaseset(
        ct.c_char_p('cval{0:d}'.format(np.abs(cfold)).encode()))
    ccaseStreamer = self.pyt.NewFileStreamer(ct.c_char_p(ccasefile.encode()))
    self.pyt.AddFileToCaseset(currcases, ccaseStreamer, 100.0)
    # create a set of prediction nodes
    numprednodes = len(self.probpars.scenario.response)
    cnodelist = self.pyt.NewNodeList2(ct.c_int(numprednodes), cnet)
    for i, cn in enumerate(self.probpars.scenario.response):
        cnode = self.pyt.GetNodeNamed(ct.c_char_p(cn.encode()), cnet)
        self.pyt.SetNthNode(cnodelist, ct.c_int(i), cnode)
    # create a tester object
    ctester = self.pyt.NewNetTester(cnodelist, cnodelist)
    self.pyt.DeleteNodeList(cnodelist)
    # test the network using the left-out cases;
    # the findings were already retracted and the net compiled
    self.pyt.TestWithCaseset(ctester, currcases)
    self.pyt.DeleteCaseset(currcases)
    #
    # now get the results
    #
    ctestresults.logloss = dict()
    ctestresults.errrate = dict()
    ctestresults.quadloss = dict()
    ctestresults.confusion_matrix = dict()
    ctestresults.experience = dict()
    for cn in self.probpars.scenario.response:
        cnode = self.pyt.GetNodeNamed(ct.c_char_p(cn.encode()), cnet)
        # get log loss
        ctestresults.logloss[cn] = self.pyt.GetTestLogLoss(ctester, cnode)
        vprint(3, self.pyt.verboselvl,
               "LogLoss for {0:s} --> {1:f}".format(
                   cn, ctestresults.logloss[cn]))
        # get error rate
        ctestresults.errrate[cn] = self.pyt.GetTestErrorRate(ctester, cnode)
        vprint(3, self.pyt.verboselvl,
               "ErrorRate for {0:s} --> {1:f}".format(
                   cn, ctestresults.errrate[cn]))
        # get quadratic loss
        ctestresults.quadloss[cn] = self.pyt.GetTestQuadraticLoss(
            ctester, cnode)
        vprint(3, self.pyt.verboselvl,
               "QuadLoss for {0:s} --> {1:f}".format(
                   cn, ctestresults.quadloss[cn]))
        # write confusion matrix --- only for the base case
        if cfold < 0:
            vprint(3, self.pyt.verboselvl,
                   "Calculating confusion matrix for {0:s}".format(cn))
            ctestresults.confusion_matrix[cn] = self.pyt.ConfusionMatrix(
                ctester, cnode)
            # also calculate the experience for the node
            vprint(3, self.pyt.verboselvl,
                   "Calculating Experience for the base Net, "
                   "node --> {0:s}".format(cn))
            ctestresults.experience[cn] = self.pyt.ExperienceAnalysis(
                cn, cnet)
    self.pyt.DeleteNetTester(ctester)
    self.pyt.DeleteNet(cnet)
    # write to the proper dictionary
    if cfold > -10:
        if calval.upper() == 'CAL':
            self.NeticaTests['CAL'].append(ctestresults)
        elif calval.upper() == 'VAL':
            self.NeticaTests['VAL'].append(ctestresults)
        else:
            pass
    else:
        self.BaseNeticaTests = ctestresults
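# Sketch of the calling convention implied above (my reading, stated as an
# assumption): cfold >= 0 tests one cross-validation fold against its 'CAL'
# or 'VAL' casefile, while a sentinel of -10 or below (e.g., -999) tests the
# base casefile and stores the result in self.BaseNeticaTests.
#
#   for cfold in range(numfolds):
#       cdata.PredictBayesNeticaCV(cfold, 'fold_{0:d}.neta'.format(cfold), 'CAL')
#       cdata.PredictBayesNeticaCV(cfold, 'fold_{0:d}.neta'.format(cfold), 'VAL')
#   cdata.PredictBayesNeticaCV(-999, cdata.probpars.baseNET, None)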
def rebuild_net(self, NetName, newCaseFile, voodooPar, outfilename,
                EMflag=False):
    '''
    rebuild_net(NetName, newCaseFile, voodooPar, outfilename)
    a m!ke@usgs joint <*****@*****.**>
    function to build the CPT tables for a new CAS file on an existing
    NET (by existing, meaning that the nodes, edges, and bins are dialed)
    INPUT:
        NetName --> a filename, including '.neta' extension
        newCaseFile --> new case file including '.cas' extension
        voodooPar --> the voodoo tuning parameter for building CPTs
        outfilename --> netica file for the newly built net
                        (including '.neta')
        EMflag --> if True, use EM to learn from the casefile, else
                   (default) incorporate the CPT table directly
    '''
    vprint(3, self.verboselvl,
           'Rebuilding net: {0:s} using Casefile: {1:s}'.format(
               NetName, newCaseFile))
    # make a streamer to the Net file
    net_streamer = self.NewFileStreamer(ct.c_char_p(NetName.encode()))
    # read in the net using the streamer
    cnet = self.ReadNet(net_streamer)
    # remove the input net streamer
    self.DeleteStream(net_streamer)
    self.CompileNet(cnet)
    # get the nodes and their number
    allnodes = self.GetNetNodes(cnet)
    numnodes = self.LengthNodeList(allnodes)
    # loop over the nodes deleting each CPT
    for cn in np.arange(numnodes):
        cnode = self.NthNode(allnodes, ct.c_int(cn))
        self.DeleteNodeTables(cnode)
    # make a streamer to the new cas file
    new_cas_streamer = self.NewFileStreamer(
        ct.c_char_p(newCaseFile.encode()))
    if EMflag:
        vprint(3, self.verboselvl, 'Learning new CPTs using EM algorithm')
        # to use EM learning, must first make a learner
        # and set a couple options
        newlearner = self.NewLearner(pnC.learn_method_bn_const.EM_LEARNING)
        self.SetLearnerMaxTol(newlearner, ct.c_double(1.0e-6))
        self.SetLearnerMaxIters(newlearner, ct.c_int(1000))
        # now must associate the casefile with a caseset
        # (weighted by unity)
        newcaseset = self.NewCaseset(ct.c_char_p(b'currcases'))
        self.AddFileToCaseset(newcaseset, new_cas_streamer, 1.0)
        self.LearnCPTs(newlearner, allnodes, newcaseset,
                       ct.c_double(voodooPar))
        self.DeleteCaseset(newcaseset)
        self.DeleteLearner(newlearner)
    else:
        vprint(3, self.verboselvl,
               'Learning new CPTs using ReviseCPTsByCaseFile')
        self.ReviseCPTsByCaseFile(new_cas_streamer, allnodes,
                                  ct.c_double(voodooPar))
    self.CompileNet(cnet)
    outfile_streamer = self.NewFileStreamer(
        ct.c_char_p(outfilename.encode()))
    vprint(3, self.verboselvl, 'Writing new net to: %s' % (outfilename))
    self.WriteNet(cnet, outfile_streamer)
    self.DeleteStream(outfile_streamer)
    self.DeleteNet(cnet)
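# Example invocation (a sketch with placeholder filenames):
#
#   pyt.rebuild_net('structure_only.neta', 'training_data.cas',
#                   voodooPar=1.0, outfilename='retrained.neta',
#                   EMflag=True)   # EM learning instead of direct CPT revision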