Example #1
def _tdivdemo(file='data/nasa93dem.csv'):
    #==============================================================================
    # We start by recursively clustering the model.
    #==============================================================================
    makeaModel = makeAModel()
    m = makeaModel.csv2py(file)
    rseed(1)
    #alias =  dict (zip(makeaModel.translate.values(),makeaModel.translate.keys()))
    #print alias
    #def num2str(lst):
    # return [alias[z] for z in lst]

    prepare(m)  # Initialize all parameters for where2 to run
    tree = where2(m, m._rows)  # Decision tree using where2
    tbl = table(file)
    headerLabel = '=klass'
    Rows = []
    # Tag each row's cells with an id derived from the leaf cluster it fell into,
    # then collect the tagged rows.
    for k, _ in leaves(tree):
        for j in k.val:
            tmp = j.cells
            tmp.append('_' + str(id(k) % 1000))
            j.__dict__.update({'cells': tmp})
            Rows.append(j.cells)
    tbl2 = makeMeATable(tbl, headerLabel, Rows)
    #print
    # Hold out 500 rows at random as test cases.
    # NOTE: if randi's upper bound is inclusive (Example #3 passes len(Rows) - 1),
    # this can occasionally index one past the end of the shrinking list.
    testCase = [tbl2._rows.pop(randi(0, len(tbl2._rows))) for k in xrange(500)]
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    showTdiv(myTree)
    loc = leaveOneOut(testCase[randi(0, len(testCase))], myTree)
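A note on the labelling step used throughout these demos: `'_' + str(id(k) % 1000)` turns the Python object id of each leaf into a short tag, which is compact but not guaranteed unique, since two leaves can share the same last three digits. If unique labels matter, numbering the leaves as they are enumerated avoids collisions. A minimal sketch, assuming only the `leaves(tree)` generator and the `k.val` / `j.cells` attributes seen in the demo above; `label_leaf_rows` is a hypothetical helper, not part of the original code:

def label_leaf_rows(tree):
    # Tag each row with a label that is unique per leaf by construction.
    rows = []
    for idx, (leaf, _) in enumerate(leaves(tree)):
        label = '_leaf%d' % idx
        for row in leaf.val:
            row.cells.append(label)   # mutate the row's cells in place, as the demo does
            rows.append(row.cells)
    return rows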
Example #2
def _tdivdemo(file='data/nasa93dem.csv'):
    #==============================================================================
    # We start by recursively clustering the model.
    #==============================================================================
    makeaModel = makeAModel()
    m = makeaModel.csv2py(file)

    #alias =  dict (zip(makeaModel.translate.values(),makeaModel.translate.keys()))
    #print alias
    #def num2str(lst):
    # return [alias[z] for z in lst]

    prepare(m)  # Initialize all parameters for where2 to run
    tree = where2(m, m._rows)  # Decision tree using where2
    tbl = table(file)
    headerLabel = '=klass'
    Rows = []
    for k, _ in leaves(tree):
        for j in k.val:
            tmp = j.cells
            tmp.append('_' + str(id(k) % 1000))
            j.__dict__.update({'cells': tmp})
            Rows.append(j.cells)
    tbl2 = makeMeATable(tbl, headerLabel, Rows)
    print
    testCase = [tbl2._rows.pop(randi(0, len(tbl2._rows))) for k in xrange(500)]
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    showTdiv(myTree)
    loc = leaveOneOut(testCase[randi(0, len(testCase))], myTree)
    contrastSet = getContrastSet(loc, myTree)
    print 'Contrast Set:', contrastSet
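If you want the same hold-out step without the project's `randi` helper (whose bound semantics are not obvious from these snippets), the standard library's `random.randint`, which is inclusive on both ends, gives a bounds-safe equivalent. A sketch under that assumption; `hold_out` is a hypothetical helper, not part of the original code:

import random

def hold_out(rows, n=500, seed=1):
    # Remove and return up to n rows chosen at random, never indexing past the end.
    random.seed(seed)
    held = []
    for _ in range(min(n, len(rows))):
        held.append(rows.pop(random.randint(0, len(rows) - 1)))
    return held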
Example #3
def _tdivdemo(file='data/nasa93dem.csv'):
    #==============================================================================
    # We start by recursively clustering the model.
    #==============================================================================
    makeaModel = makeAModel()
    m = makeaModel.csv2py(file)
    prepare(m)  # Initialize all parameters for where2 to run
    tree = where2(m, m._rows)  # Decision tree using where2
    tbl = table(file)
    headerLabel = '=klass'
    Rows = []
    for k, _ in leaves(tree):
        for j in k.val:
            tmp = j.cells
            tmp.append(id(k) % 1000)
            j.__dict__.update({'cells': tmp})
            Rows.append(j.cells)
    tbl2 = makeMeATable(tbl, headerLabel, Rows)

    testCase = tbl2._rows.pop(randi(0, len(Rows) - 1))
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    loc = apex(testCase, myTree)
    print loc.__dict__
    print 'Id: ', loc.mode, ' Level: ', loc.lvl, ' Variable: ', loc.f.name
    showTdiv(myTree)
    #==============================================================================
    for node, lvl in dtnodes(myTree):
        rows = map(lambda x: x.cells, node.rows)
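One detail shared by every variant above: `tmp = j.cells` does not copy the list, it aliases it, so the `append` already mutates `j.cells` in place and the following `j.__dict__.update({'cells': tmp})` merely rebinds the attribute to the object it already points at. A tiny standalone check in plain Python, independent of the demo code:

class Row(object):
    def __init__(self, cells):
        self.cells = cells

r = Row([1, 2, 3])
tmp = r.cells                         # alias of the same list, not a copy
tmp.append('_42')
print(r.cells)                        # [1, 2, 3, '_42'] -- already updated
r.__dict__.update({'cells': tmp})     # rebinds to the same object; effectively a no-op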
Example #4
def _tdivPrec(dir='camel/'):
    #==============================================================================
    # Recursively clustering the model.
    #==============================================================================
    # NOTE: 'camel-1.2.csv' is listed twice below; 'camel-1.4.csv' may have been intended.
    train = ['camel-1.0.csv', 'camel-1.2.csv', 'camel-1.2.csv']
    test = ['camel-1.6.csv']
    rseed(1)
    makeaModel = makeAModel()
    _rows = []

    # Concatenate training cases
    for t in train:
        file = dir + t
        m = makeaModel.csv2py(file)
        prepare(m)  # Initialize all parameters for where2 to run
        tree = where2(m, m._rows)  # Decision tree using where2
        tbl = table(file)
        headerLabel = '=klass'
        Rows = []
        for k, _ in leaves(tree):
            for j in k.val:
                tmp = j.cells
                tmp.append('_' + str(id(k) % 1000))
                j.__dict__.update({'cells': tmp})
                Rows.append(j.cells)
        _rows += Rows
        tbl2 = makeMeATable(tbl, headerLabel, _rows)

    # Test case!
    _rows = []
    for tt in test:
        file = dir + tt
        m = makeaModel.csv2py(file)
        prepare(m)  # Initialize all parameters for where2 to run
        tree = where2(m, m._rows)  # Decision tree using where2
        tbl = table(file)
        headerLabel = '=klass'
        Rows = []
        for k, _ in leaves(tree):
            for j in k.val:
                tmp = j.cells
                tmp.append('_' + str(id(k) % 1000))
                j.__dict__.update({'cells': tmp})
                Rows.append(j.cells)
        _rows += Rows
        tbl3 = makeMeATable(tbl, headerLabel, _rows)

    testCase = tbl3._rows
    print testCase
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    showTdiv(myTree)
    loc = leaveOneOut(testCase[randi(0, len(testCase))], myTree)
    contrastSet = getContrastSet(loc, myTree)
    print 'Contrast Set:', contrastSet
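In `_tdivPrec`, `makeMeATable` is called on every pass through the file loop even though only the final call, over the fully accumulated `_rows`, is ever used. Building the table once after the loop makes that intent explicit. A minimal sketch of the training half, assuming the helpers used above (`makeAModel`, `csv2py`, `prepare`, `where2`, `table`, `leaves`, `makeMeATable`) behave as in these examples; `build_table` is a hypothetical name:

def build_table(dir, files, headerLabel='=klass'):
    # Cluster each file with where2, tag rows with their leaf id, then build one table.
    makeaModel = makeAModel()
    all_rows, tbl = [], None
    for name in files:
        path = dir + name
        m = makeaModel.csv2py(path)
        prepare(m)                        # initialize parameters for where2
        tree = where2(m, m._rows)         # recursive clustering
        tbl = table(path)                 # header info; the last file's table is reused
        for leaf, _ in leaves(tree):
            for row in leaf.val:
                row.cells.append('_' + str(id(leaf) % 1000))
                all_rows.append(row.cells)
    return makeMeATable(tbl, headerLabel, all_rows)

As in the original loop, the header metadata comes from the last file read and the rows from every file are pooled into one table, e.g. build_table('camel/', ['camel-1.0.csv', 'camel-1.2.csv']).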
Example #5
def _tdivPrec(dir='camel/'):
    #==============================================================================
    # Recursively clustering the model.
    #==============================================================================
    train = ['camel-1.0.csv', 'camel-1.2.csv', 'camel-1.2.csv']
    test = ['camel-1.6.csv']
    rseed(1)
    makeaModel = makeAModel()
    _rows = []

    # Concatenate training cases
    for t in train:
        file = dir + t
        m = makeaModel.csv2py(file)
        prepare(m)  # Initialize all parameters for where2 to run
        tree = where2(m, m._rows)  # Decision tree using where2
        tbl = table(file)
        headerLabel = '=klass'
        Rows = []
        for k, _ in leaves(tree):
            for j in k.val:
                tmp = j.cells
                tmp.append('_' + str(id(k) % 1000))
                j.__dict__.update({'cells': tmp})
                Rows.append(j.cells)
        _rows += Rows
        tbl2 = makeMeATable(tbl, headerLabel, _rows)

    # Test case!
    _rows = []
    for tt in test:
        file = dir + tt
        m = makeaModel.csv2py(file)
        prepare(m)  # Initialize all parameters for where2 to run
        tree = where2(m, m._rows)  # Decision tree using where2
        tbl = table(file)
        headerLabel = '=klass'
        Rows = []
        for k, _ in leaves(tree):
            for j in k.val:
                tmp = j.cells
                tmp.append('_' + str(id(k) % 1000))
                j.__dict__.update({'cells': tmp})
                Rows.append(j.cells)
        _rows += Rows
        tbl3 = makeMeATable(tbl, headerLabel, _rows)

    testCase = tbl3._rows
    print testCase
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    showTdiv(myTree)
    loc = leaveOneOut(testCase[randi(0, len(testCase))], myTree)
    contrastSet = getContrastSet(loc, myTree)
    print 'Contrast Set:', contrastSet
Example #6
def _tdivdemo(file='data/nasa93dem.csv'):
    #==============================================================================
    # We start by recursively clustering the model.
    #==============================================================================
    makeaModel = makeAModel()
    m = makeaModel.csv2py(file)
    # Map the numeric codes produced by csv2py back to their symbolic names.
    alias = dict(zip(makeaModel.translate.values(), makeaModel.translate.keys()))

    def num2str(lst):
        return [alias[z] for z in lst]

    prepare(m)  # Initialize all parameters for where2 to run
    tree = where2(m, m._rows)  # Decision tree using where2
    tbl = table(file)
    headerLabel = '=klass'
    Rows = []

    for k, _ in leaves(tree):
        for j in k.val:
            tmp = num2str(j.cells)
            tmp.append('_' + str(id(k) % 1000))
            j.__dict__.update({'cells': tmp})
            Rows.append(j.cells)
    tbl2 = makeMeATable(tbl, headerLabel, Rows)

    testCase = tbl2._rows.pop(1)
    t = discreteNums(tbl2, map(lambda x: x.cells, tbl2._rows))
    myTree = tdiv(t)
    showTdiv(myTree)
    loc = leaveOneOut(testCase, myTree)
    print loc.__dict__
    getContrastSet(loc, myTree)
    #==============================================================================
    #for node, lvl in dtnodes(myTree):
    #  rows=map(lambda x:x.cells,node.rows)
    #  pdb.set_trace()
    #  print lvl, len(rows), [ k._id for k in node.rows]
    #==============================================================================
    # Map each column header name to its index in tbl2.
    headerLabels = {k.name: indx for indx, k in enumerate(tbl2.headers)}
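An aside on the `alias` line in this example: `dict(zip(d.values(), d.keys()))` does invert a dictionary, but it silently keeps only one key whenever two keys map to the same value, and pairing `values()` with `keys()` is easy to misread. The idiomatic form, with the same last-one-wins behaviour, is a dict comprehension; `translate` here is a hypothetical stand-in for `makeaModel.translate`:

translate = {'vl': 1, 'l': 2, 'n': 3}                     # hypothetical stand-in
alias = {code: name for name, code in translate.items()}  # invert: code -> name
assert alias == {1: 'vl', 2: 'l', 3: 'n'}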
Example #7
def _tdivdemo(file='data/nasa93dem.csv'):
    #==============================================================================
    # We start by recursively clustering the model.
    #==============================================================================
    makeaModel = makeAModel()
    m = makeaModel.csv2py(file)
    prepare(m)  # Initialize all parameters for where2 to run
    tree = where2(m, m._rows)  # Decision tree using where2
    tbl = table(file)
    headerLabel = '=klass'
    Rows = []
    for k, _ in leaves(tree):
        for j in k.val:
            tmp = j.cells
            tmp.append(id(k) % 1000)
            j.__dict__.update({'cells': tmp})
            Rows.append(j.cells)

    tbl2 = makeMeATable(tbl, headerLabel, Rows)
    t = discreteNums(tbl2, Rows)
    myTree = tdiv(t)
    showTdiv(myTree)