Code example #1
File: Model.py Project: irreversibly/LP-SDA
 def eval(self, Y_pred, Y, idx):
     y_pred, y_gold = [], []
     for r, c in zip(idx[0], idx[1]):
         y_pred.append(Y_pred[r, c])
         y_gold.append(Y[r, c])
     ev = Eval(y_pred, y_gold)
     return ev.Metrics(self.metrics)
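Note: none of the snippets in this collection includes the Eval class itself, and different projects use very different classes under that name. Purely for reference, a minimal stand-in compatible with the Eval(y_pred, y_gold) / Accuracy() / Precision() / Recall() call pattern seen in several examples below might look like this (an assumption, not code from any of the listed projects):

# Minimal stand-in for a metrics-style Eval; not the class used by any project listed here.
class Eval:
    def __init__(self, y_pred, y_gold):
        self.y_pred = list(y_pred)
        self.y_gold = list(y_gold)

    def Accuracy(self):
        correct = sum(1 for p, g in zip(self.y_pred, self.y_gold) if p == g)
        return correct / float(len(self.y_gold)) if self.y_gold else 0.0

    def Precision(self, positive=1):
        predicted = [(p, g) for p, g in zip(self.y_pred, self.y_gold) if p == positive]
        if not predicted:
            return 0.0
        return sum(1 for p, g in predicted if g == positive) / float(len(predicted))

    def Recall(self, positive=1):
        actual = [(p, g) for p, g in zip(self.y_pred, self.y_gold) if g == positive]
        if not actual:
            return 0.0
        return sum(1 for p, g in actual if p == positive) / float(len(actual))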
Code example #2
	def compile(self, srcfile, base_dir, output_dir):
		#fp = codecs.open(sys.argv[1], 'r', 'utf-8')
		fp = open(srcfile, 'r')
		char_stream = antlr3.ANTLRInputStream(fp)
		lexer = ExprLexer(char_stream)
		tokens = antlr3.CommonTokenStream(lexer)

		parser = ExprParser(tokens)
		r = parser.prog()

		# this is the root of the AST
		root = r.tree
		#print (root.toStringTree())
		#print '-------'

		nodes = antlr3.tree.CommonTreeNodeStream(root)
		nodes.setTokenStream(tokens)
		from Eval import Eval
		eval = Eval(nodes)

		#######################################
		head, tail = os.path.split(srcfile)

		if not os.path.exists(output_dir):
			os.mkdir(output_dir)
		if not os.path.exists(output_dir + '/__init__.py'):
			fp = open(output_dir + '/__init__.py', 'w')
			fp.close()

		dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
		#print 'compile: %-30s=> %s' % (srcfile, dstfile)

		cpy = CpyBuilder(dstfile, base_dir, output_dir)
		eval.prog(cpy)
		return dstfile
Code example #3
File: Model.py Project: irreversibly/LP-SDA
 def eval_DME(self, Y_pred, Y, idx, DME):
     y_pred, y_gold = defaultdict(list), defaultdict(list)
     for r, c in zip(idx[0], idx[1]):
         adrid = id2adr.get(c)
         if adrid in DME:
             y_pred[adrid].append(Y_pred[r, c])
             y_gold[adrid].append(Y[r, c])
     EV = {}
     for k in y_pred.keys():
         y_p, y_g = y_pred.get(k), y_gold.get(k)
         ev = Eval(y_p, y_g)
         EV[k] = ev.Metrics(self.metrics)
     return EV
Code example #4
 def learnepsgrid(self, tlvoc):
     # Choose a grid of param values to try
     # For each param value
     #   Set eps to that point
     #   Compute cands for tr data instances
     #   Compute MRR for cand set
     #   If MRR best so far, save eps
     self.tlvoc = tlvoc
     self.ev = Eval(self.trlex, len(tlvoc))
     self.wtrange = [0., .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.]
     self.bestmrr = 0
     self.besteps = None
     self.gridsearch([], 0, self.E)
     self.eps = self.besteps
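The comment block above outlines the search, but the gridsearch helper it calls is not part of the snippet. One plausible shape for it, assuming a hypothetical self.mrr_for(eps) scorer that covers the "compute cands + MRR" steps (for instance via self.ev), is:

# Hypothetical sketch of the recursive search the comments describe;
# mrr_for() is an assumed helper, not part of the original snippet.
def gridsearch(self, eps_so_far, dim, ndims):
    if dim == ndims:
        mrr = self.mrr_for(eps_so_far)  # score the candidate set for this eps vector
        if mrr > self.bestmrr:
            self.bestmrr = mrr
            self.besteps = list(eps_so_far)
        return
    for w in self.wtrange:
        self.gridsearch(eps_so_far + [w], dim + 1, ndims)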
Code example #5
File: Translator.py Project: katcipis/playground
 def __init__(self, nodes):
     Eval.__init__(self, nodes)
     self.dados = []
     self.codigo = [".function"]
     self.label = 0
     self.parametros = [] 
     self.while_cond = "off"
     self.and_cond = "on"
     self.or_cond = "off"
     self.swhile = -1
     # key = the line it is at // value = number of lines to skip
     self.tabela_jmp_and = {}  # the AND table checks if it is false and jumps to the end
     self.tabela_jmp_or = {}  # the OR table checks if it is true and jumps to the block
     self.tabela_jmp = {}
     self.tabela_jmp_while = {}
Code example #6
File: SemRel.py Project: vinodhinir/WikiTSu
def evalscores(goldfile, methfile):
    goldsc = {}
    for line in open(goldfile):
        line = line.decode('utf-8').rstrip()
        w1, w2, sc = line.split()
        goldsc[w1+'#'+w2] = float(sc)
    methsc = {}
    for line in open(methfile):
        line = line.decode('utf-8').rstrip()
        w1, w2, sc = line.split()
        methsc[w1+'#'+w2] = float(sc)
    scores = [(goldsc[w1w2], sc) for w1w2, sc in methsc.iteritems()]
    scorr, spval = Eval.spear_corr([s1 for s1, s2 in scores], [s2 for s1, s2 in scores])
    pcorr, ppval = Eval.pears_corr([s1 for s1, s2 in scores], [s2 for s1, s2 in scores])
    kcorr, kpval = Eval.ktau_corr([s1 for s1, s2 in scores], [s2 for s1, s2 in scores])
    print "%f\t%f\t%f\t%f\t%f\t%f" % (scorr, spval, pcorr, ppval, kcorr, kpval)
Code example #7
 def addStyles(self, a):
     # print "addStyle ", a
     """
     adds to this.styles
     """
     for r in self.ruleChains:
         if not self.selzooms:
             self.selzooms = [r.minZoom, r.maxZoom]
         else:
             self.selzooms[0] = min(self.selzooms[0], r.minZoom)
             self.selzooms[1] = max(self.selzooms[1], r.maxZoom)
         self.compatible_types.update(r.get_compatible_types())
     rb = []
     for r in a:
         ra = {}
         for a, b in r.iteritems():
             a = a.strip()
             b = b.strip()
             if a == "casing-width":
                 "josm support"
                 if b[0] == "+":
                     try:
                         b = str(float(b) / 2)
                     except:
                         pass
             if "text" == a[-4:]:
                 if b[:5] != "eval(":
                     b = "eval(tag(\"" + b + "\"))"
             if b[:5] == "eval(":
                 b = Eval(b)
                 self.has_evals = True
             ra[a] = b
         ra = make_nice_style(ra)
         rb.append(ra)
     self.styles = self.styles + rb
Code example #8
 def __init__(self, scalepair):
   self.ruleChains = [[],]
   self.styles = []
   self.eval_type = type(Eval())
   self.scalepair = scalepair
   self.rcpos=0
   self.stylepos=0
Code example #9
 def addStyles(self, a):
   """
   adds to this.styles
   """
   rb = []
   for r in a:
     ra = {}
     for a,b in r.iteritems():
       a = a.strip()
       b = b.strip()
       if a == "casing-width":
         "josm support"
         if b[0] == "+":
           try:
             b = str(float(b)/2)
           except:
             pass
       if "text" == a[-4:]:
         if b[:5] != "eval(":
           b = "eval(tag(\""+b+"\"))"
       if b[:5] == "eval(":
         b = Eval(b)
       ra[a] = b
     rb.append(ra)
    # print rb
   self.styles = self.styles + rb
Code example #10
def make_nice_style(r):
    ra = {}
    for a, b in r.iteritems():
        "checking and nicifying style table"
        if type(b) == type(Eval()):
            ra[a] = b
        elif "color" in a:
            "parsing color value to 3-tuple"
            # print "res:", b
            if b and (type(b) != tuple):
                # if not b:
                #    print sl, ftype, tags, zoom, scale, zscale
                # else:
                ra[a] = colorparser(b)
            elif b:
                ra[a] = b
        elif any(x in a for x in ("width", "z-index", "opacity", "offset",
                                  "radius", "extrude")):
            "these things are float's or not in table at all"
            try:
                ra[a] = float(b)
            except ValueError:
                pass
        elif "dashes" in a and type(b) != list:
            "these things are arrays of float's or not in table at all"
            try:
                b = b.split(",")
                b = [float(x) for x in b]
                ra[a] = b
            except ValueError:
                ra[a] = []
        else:
            ra[a] = b
    return ra
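For illustration, a hypothetical call to make_nice_style (assuming the colorparser and Eval imports shown in code example #32 below):

# Illustrative input/output only; colorparser and Eval come from code example #32.
style = {"width": "2.5", "dashes": "5,2", "casing-color": "red"}
nice = make_nice_style(style)
# nice == {"width": 2.5, "dashes": [5.0, 2.0], "casing-color": colorparser("red")}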
Code example #11
 def __init__(self, scalepair):
     self.ruleChains = []
     self.styles = []
     self.eval_type = type(Eval())
     self.scalepair = scalepair
     self.selzooms = None
     self.compatible_types = set()
     self.has_evals = False
Code example #12
 def Eval(self, test):
     Y_pred = self.PredictLabel(test.X,0,True)
     recall_pos = []
     recall_neg = []
     precision_pos = []
     precision_neg =[]
     ev = Eval(Y_pred, test.Y)
     accy = ev.Accuracy()
     #print(ev.EvalRecall())
     #print(ev.EvalPrecision())
     probThresh = [0.2,0.4,0.6,0.8]
     for i in range(len(probThresh)):
         pred = self.PredictLabel(test.X,probThresh[i],False) 
         ev = Eval(pred, test.Y)
         #ev.EvalRecall()
         #print('Threshold Value %f'%probThresh[i])
         recallP,recallN=ev.EvalRecall()
         recall_pos.append(recallP)
         recall_neg.append(recallN)
         precisionP,precisionN=ev.EvalPrecision()
         precision_pos.append(precisionP)
         precision_neg.append(precisionN)
     plt.pause(0.1)
     plt.xlabel('Recall')
     plt.ylabel('Precision')
     fig=plt.figure()
     plt.title('For +1 Label')
     plt.plot(recall_pos,precision_pos,'r')
     fig.savefig('Pos_class.png')
     plt.title('For -1 Label')
     fig1=plt.figure(1)
     plt.plot(recall_neg,precision_neg,'b')
     fig1.savefig('Neg_class.png')
        
     return accy
Code example #13
File: engine.py Project: godsonhyl/ssdb
    def _compile(self, srcfile, base_dir, output_dir):
        head, tail = os.path.split(srcfile)

        dstfile = os.path.normpath(output_dir + "/" + tail.split(".")[0] + ".py")
        if os.path.exists(dstfile):
            src_mtime = os.path.getmtime(srcfile)
            dst_mtime = os.path.getmtime(dstfile)
            # print src_mtime, dst_mtime
            if src_mtime < dst_mtime:
                return dstfile
                # print 'compile: %-30s=> %s' % (srcfile, dstfile)
                # print 'compile: %-30s=> %s' % (srcfile[len(base_dir)+1:], dstfile[len(base_dir)+1:])

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        if not os.path.exists(output_dir + "/__init__.py"):
            fp = open(output_dir + "/__init__.py", "w")
            fp.close()

            # fp = codecs.open(sys.argv[1], 'r', 'utf-8')
        fp = open(srcfile, "r")
        char_stream = antlr3.ANTLRInputStream(fp)
        lexer = ExprLexer(char_stream)
        tokens = antlr3.CommonTokenStream(lexer)

        parser = ExprParser(tokens)
        r = parser.prog()

        # this is the root of the AST
        root = r.tree
        # print (root.toStringTree())
        # print '-------'

        nodes = antlr3.tree.CommonTreeNodeStream(root)
        nodes.setTokenStream(tokens)
        from Eval import Eval

        eval = Eval(nodes)

        #######################################

        cpy = CpyBuilder(dstfile, base_dir, output_dir)
        eval.prog(cpy)
        return dstfile
Code example #14
    def plotLimitGraph(self, test):
        x_axis = []
        accuracy = []
        for i in range(9):
            Y_pred = self.PredictLabel(test.X, (i + 1) / 10)
            ev = Eval(Y_pred, test.Y)
            accuracy.append(ev.Accuracy())
            x_axis.append((i + 1) / 10)

            # Y_pred = self.PredictLabel(test.X, (i+1)/10)
            # ev = Eval(Y_pred, test.Y)
            # accuracy.append(ev.Accuracy())
            # #Y_pred1 = np.array(Y_pred)
            # #recall_pos.append(recall_score(test.Y,Y_pred1))
            # #precision_pos.append( precision_score(test.Y,Y_pred1))
            # #Y_pred_neg = np.array([1 if i == -1 else -1 for i in Y_pred])
            # #Y_test_neg = np.array([1 if i == -1 else -1 for i in test.Y])
            # #recall_neg.append(recall_score(Y_test_neg,Y_pred_neg))
            # #precision_neg.append(precision_score(Y_test_neg,Y_pred_neg))
            # x_axis.append((i+1)/10)
            # # print(i,recall_pos,precision_pos)

        plt.title('Recall Positive Graph.')
        plt.plot(x_axis, self.pos_recall, label="Recall Positive")
        plt.plot(x_axis, self.neg_recall, label="Recall Negative")
        plt.xlabel('Threshold')
        plt.ylabel('Recall')
        plt.title('Recall Plot')
        plt.legend()
        plt.show()

        plt.plot(x_axis, self.pos_precision, label="Precision Positive")
        plt.plot(x_axis, self.neg_precision, label="Precision Negative")
        plt.xlabel('Threshold')
        plt.ylabel('Precision')
        plt.title('Precision Plot')
        plt.legend()
        plt.show()
Code example #15
    def Precision_Recall_curve(self, test, positive_probs, negative_probs,
                               indexes):
        positive_precision_recall_points = []
        negative_precision_recall_points = []
        positive_F1 = []
        negative_F1 = []
        for threshold in np.arange(0, 1, 0.01):
            positive_prediction_based_on_threshold = []
            negative_prediction_based_on_threshold = []
            for prob in range(len(indexes)):
                if positive_probs[prob] > threshold:
                    positive_prediction_based_on_threshold.append(1)
                else:
                    positive_prediction_based_on_threshold.append(-1)
            ev = Eval(positive_prediction_based_on_threshold, test.Y)
            ev.ComputeConfusionMatrix()
            positive_precision_recall_points.append(
                (ev.Recall(), ev.Precision()))
            #positive_F1.append((2*ev.Recall()*ev.Precision()) / (ev.Recall()+ev.Precision()))

            for prob in range(len(indexes)):
                if negative_probs[prob] > threshold:
                    negative_prediction_based_on_threshold.append(-1)
                else:
                    negative_prediction_based_on_threshold.append(1)
            ev = Eval(negative_prediction_based_on_threshold, test.Y)
            ev.ComputeConfusionMatrix()
            negative_precision_recall_points.append(
                (ev.Recall(), ev.Precision()))
            #negative_F1.append((2*ev.Recall()*ev.Precision()) / (ev.Recall()+ev.Precision()))
        #print(positive_F1, negative_F1)
        #plot each recall and precision points based on threshold
        fig, ax = plt.subplots()
        ax.plot(*zip(*negative_precision_recall_points))
        plt.title('Precision versus Recall curve')
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.show()
Code example #16
 def Eval(self, X_test, Y_test):
     Y_pred = self.Predict(X_test)
     ev = Eval(Y_pred, Y_test)
     return ev.Accuracy()
Code example #17
X_test=X_test.drop('defensive_work_rate',1)
X_train=X_train.drop('defensive_work_rate',1)
X_test=X_test.drop('attacking_work_rate',1)
X_train=X_train.drop('attacking_work_rate',1)
X_test=X_test.drop('preferred_foot',1)
X_train=X_train.drop('preferred_foot',1)

from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
model= gnb.fit(X_train, y_train)
y_pred = gnb.fit(X_train, y_train).predict(X_test)
y_pred.dtype
model.score(X_test,y_test)
print("Number of mislabeled points out of a total %d points : %d"  % (X_test.shape[0],(y_test != y_pred).sum()))
from Eval import Eval
eval1 = Eval(y_pred, np.array(y_test))

print("Positive Class:")
print("Accuracy: ",eval1.Accuracy())
from sklearn.metrics import recall_score,precision_score,accuracy_score
print(recall_score(y_test,y_pred,average=None))
print(precision_score(y_test,y_pred,average=None))
print(accuracy_score(y_test,y_pred))

import pickle
filename = 'finalized_model.sav'
pickle.dump(model, open(filename, 'wb'))
loaded_model = pickle.load(open(filename, 'rb'))
loaded_model.score(X_test,y_test)
#X_test.to_csv('sample_players1.csv')
#y_test.to_csv('pred.csv')
Code example #18
 def Eval(self, test):
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     print("For Positive Class:")
     print("Test Accuracy: ",ev.Accuracy())
     print("Test Recall: ",ev.Recall())
     print("Test Precision: ",ev.Precision())
     print("\n")
     print("For Negative Class:")
     ev_neg = Eval([1 if i == -1 else -1 for i in Y_pred], [1 if i == -1 else -1 for i in test.Y])
     print("Test Accuracy: ",ev_neg.Accuracy())
     print("Test Recall: ",ev_neg.Recall())
     print("Test Precision: ",ev_neg.Precision())
     probality_threshold=[0.2,0.4,0.6,0.8]
     Precision=[]
     Recall=[]
     Precision.append(ev.Precision())
     Recall.append(ev.Recall())
     length=len(probality_threshold)
     for i in range(0,length):
         Y_pred = self.PredictLabel(test.X,probality_threshold[i])
         ev = Eval(Y_pred, test.Y)
         Precision.append(ev.Precision())
         Recall.append(ev.Recall())
     plt.plot(Precision,Recall)
     plt.ylabel('Recall')
     plt.xlabel('Precision')
     #plt.ylim([0.0, 1.05])
     #plt.xlim([0.0, 1.0])
     plt.title('2-class Precision-Recall curve')
     plt.show()
Code example #19
 def EvalProbabilities(self, test, indexes):
     Y_pred = self.PredictProb(test, indexes)
     
     ev = Eval(Y_pred, test.Y[indexes])
     return ev.Accuracy()
Code example #20
    def EvalLabels(self, test):
        Y_pred = self.PredictLabel(test.X)

        ev = Eval(Y_pred, test.Y)
        return ev.Accuracy()
Code example #21
 def test_addition_or_subtraction(self):
     new_eval = Eval()
     ans = new_eval.addition_or_subtraction([1, '+', 2, '-', 3, '+', 4])
     assert ans == [3, '-', 3, '+', 4]
     ans = new_eval.addition_or_subtraction([1, '-', 2, '-', 3, '+', 4])
     assert ans == [-1, '-', 3, '+', 4]
Code example #22
 def Eval(self, test):
     y_pred = self.predictlabel(test.X)
     ev = Eval(y_pred, test.Y)
     return ev.Accuracy()
Code example #23
    def test_expression_string_to_list(self):
        evaluator = Eval()
        evaluator.expression_string = "1+2+3+4"
        evaluator._get_expression_list()
        assert evaluator.expression_list == [1, '+', 2, '+', 3, '+', 4]

        evaluator = Eval()
        evaluator.expression_string = "12+23+34+45"
        evaluator._get_expression_list()
        assert evaluator.expression_list == [12, '+', 23, '+', 34, '+', 45]

        evaluator = Eval()
        evaluator.expression_string = "12*23/34^45-8"
        evaluator._get_expression_list()
        expected_result = [12, '*', 23, '/', 34, '^', 45, '-', 8]
        assert evaluator.expression_list == expected_result
Code example #24
 def Eval(self, test):
     # Compare the predicted labels with the true labels to obtain the accuracy
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     return ev.Accuracy()
Code example #25
File: OnePlyEngine.py Project: Kash009/SlyMlego
import Const
from Eval import Eval
import chess.uci
import os
import sys

#if len(sys.argv)>3:
#    print("Usage: "+sys.argv[0]+" [<modelfile>]")
#    exit(1)

#if len(sys.argv)>=2:
#    eval = Eval(sys.argv[1])
#    print("Using model "+sys.argv[1])
#else:
eval = Eval()

board = chess.Board()

while True:
    # cycle extracted from https://github.com/flok99/feeks

    line = sys.stdin.readline()

    if line == '':  # readline() returns '' at EOF
        break

    line = line.rstrip('\n')

    if len(line) == 0:
        continue
Code example #26
 def test_exponents(self):
     new_eval = Eval()
     ans = new_eval.exponents([1, '*', 2, '^', 3, '+', 4])
     assert ans == [1, '*', 8, '+', 4]
Code example #27
X_test = X_test.drop('defensive_work_rate', 1)
X_train = X_train.drop('defensive_work_rate', 1)
X_test = X_test.drop('attacking_work_rate', 1)
X_train = X_train.drop('attacking_work_rate', 1)
X_test = X_test.drop('preferred_foot', 1)
X_train = X_train.drop('preferred_foot', 1)

from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred = gnb.fit(reduced_data_pca, y_train).predict(reduced_data_pca1)
y_pred.dtype
print("Number of mislabeled points out of a total %d points : %d" %
      (X_test.shape[0], (y_test != y_pred).sum()))
from Eval import Eval
eval1 = Eval(y_pred, np.array(y_test))

print("Positive Class:")
print("Accuracy: ", eval1.Accuracy())
from sklearn.metrics import recall_score, precision_score, accuracy_score
print(recall_score(y_test, y_pred, average=None))
print(precision_score(y_test, y_pred, average=None))
print(accuracy_score(y_test, y_pred))
print("Recall: ", eval1.Recall())
print("Precision: ", eval1.Precision())

means = pd.DataFrame(columns=['Feature', 'diff_abs_mean', 'diff_var'])
diff = []
ad = []
for i in range(0, gnb.theta_.shape[1]):
    diff.append(abs(gnb.theta_[0, i] - gnb.theta_[1, i]))
Code example #28
 def test_multiplication_or_division(self):
     new_eval = Eval()
     ans = new_eval.multiplication_or_division([1, '*', 2, '-', 3, '+', 4])
     assert ans == [2, '-', 3, '+', 4]
     ans = new_eval.multiplication_or_division([1, '/', 2, '-', 3, '+', 4])
     assert ans == [0.5, '-', 3, '+', 4]
Code example #29
# glob, pickle, Const and Eval are used below but were missing from this excerpt's imports.
import glob
import os
import pickle
import sys
import time

import Const
from Eval import Eval

if len(sys.argv) < 2:
    print("Plot the confusion graph using validation samples")
    print("Usage: " + sys.argv[0] + " [<numsamples> [<modelfile>]]")
    exit(1)

if len(sys.argv) >= 2:
    numsamples = int(sys.argv[1])
else:
    numsamples = 999999999

if len(sys.argv) >= 3:
    eval = Eval(sys.argv[2])
    print("Using model " + sys.argv[2])
else:
    eval = Eval()

xcoords = []
ycoords = []
starttime = time.time()
for file in glob.glob(Const.VALIDATIONDATADIR + "/*.pickle"):
    numsamples -= 1
    if numsamples <= 0: break
    (epd, X, Y) = pickle.load(open(file, "rb"))
    val = eval.EvaluatePosition(epd)[0]  # model evaluation
    ycoords.append(val)
    if Y[0] < -Const.INFINITECP:
        xcoords.append(-Const.INFINITECP)
Code example #30
 def test_order_emdas(self):
     new_eval = Eval()
     ans = new_eval.order_emdas([1, '+', 2, '-', 3, '+', 4])
     assert ans == 4
     ans = new_eval.order_emdas([12, '*', 23, '/', 34, '^', 45, '-', 8])
     assert ans == -8.0
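Code examples #21, #23, #26, #28 and #30 all test the same small arithmetic Eval class, whose implementation is not included here. A sketch consistent with those assertions (an assumption, not the original code) could look like this:

import re

# Hypothetical sketch of the arithmetic Eval class exercised by these tests;
# the real implementation is not part of this collection.
class Eval:
    def __init__(self):
        self.expression_string = ""
        self.expression_list = []

    def _get_expression_list(self):
        # Tokenise e.g. "12*23/34^45-8" into [12, '*', 23, '/', 34, '^', 45, '-', 8].
        tokens = re.findall(r"\d+|[+\-*/^]", self.expression_string)
        self.expression_list = [int(t) if t.isdigit() else t for t in tokens]

    def _reduce_once(self, expr, ops):
        # Apply the leftmost operator in `ops` once and return the shortened list.
        for i, tok in enumerate(expr):
            if tok in ops:
                a, b = expr[i - 1], expr[i + 1]
                if tok == '^':
                    val = a ** b
                elif tok == '*':
                    val = a * b
                elif tok == '/':
                    val = a / float(b)
                elif tok == '+':
                    val = a + b
                else:
                    val = a - b
                return expr[:i - 1] + [val] + expr[i + 2:]
        return expr

    def exponents(self, expr):
        return self._reduce_once(expr, ('^',))

    def multiplication_or_division(self, expr):
        return self._reduce_once(expr, ('*', '/'))

    def addition_or_subtraction(self, expr):
        return self._reduce_once(expr, ('+', '-'))

    def order_emdas(self, expr):
        # Exponents, then multiplication/division, then addition/subtraction,
        # one reduction at a time, until a single value is left.
        for step, ops in ((self.exponents, ('^',)),
                          (self.multiplication_or_division, ('*', '/')),
                          (self.addition_or_subtraction, ('+', '-'))):
            while any(op in expr for op in ops):
                expr = step(expr)
        return expr[0]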
Code example #31
 def Eval(self, test):
     Y_pred = self.PredictLabel(test.X)
     ev = Eval(Y_pred, test.Y)
     print("For Positive Class:")
     print("Test Accuracy: ", ev.Accuracy())
     print("Test Recall: ", ev.Recall())
     print("Test Precision: ", ev.Precision())
     print("\n")
     print("For Negative Class:")
     ev_neg = Eval([1 if i == -1 else -1 for i in Y_pred],
                   [1 if i == -1 else -1 for i in test.Y])
     print("Test Accuracy: ", ev_neg.Accuracy())
     print("Test Recall: ", ev_neg.Recall())
     print("Test Precision: ", ev_neg.Precision())
     ev.PvRcurve()
Code example #32
#   kothic is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.

#   You should have received a copy of the GNU General Public License
#   along with kothic.  If not, see <http://www.gnu.org/licenses/>.

from Rule import Rule
from webcolors.webcolors import whatever_to_cairo as colorparser
from webcolors.webcolors import cairo_to_hex
from Eval import Eval
from Condition import *

TYPE_EVAL = type(Eval())


def make_nice_style(r):
    ra = {}
    for a, b in r.iteritems():
        "checking and nicifying style table"
        if type(b) == TYPE_EVAL:
            ra[a] = b
        elif "color" in a:
            "parsing color value to 3-tuple"
            # print "res:", b
            if b and (type(b) != tuple):
                # if not b:
                #    print sl, ftype, tags, zoom, scale, zscale
                # else:
Code example #33
File: Test.py Project: clothbot/eda
import sys
import antlr3
import antlr3.tree
from ExprLexer import ExprLexer
from ExprParser import ExprParser
from Eval import Eval

char_stream = antlr3.ANTLRInputStream(sys.stdin)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()

# this is the root of the AST
root = r.tree

nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
eval = Eval(nodes)
eval.prog()