Example #1
def main():
    # REPL loop
    env = Env.Environment()
    print("Welcome to the lisp interpreter")
    while True:
        text = input()
        if text == "!quit":
            print("Quitting lisp interpreter")
            break

        if text.startswith("!load"):
            print("loading file")
            filename = text[6:]  # everything after "!load "
            with open(filename) as f:
                for line in f:
                    ev.eval(par.parse(par.tokenize(line)), env)

        else:
            token_str = par.tokenize(text)
            syntax_tree = par.parse(token_str)
            #insert_quote(syntax_tree)
            out = ev.eval(syntax_tree, env)
            if out is not None:
                # need to check if the return value is a list;
                # if it is, recursively turn the list into a symbol instead of returning a list
                print(out)
Example #2
def driver_loop():
    print '*** SCHEME RUDIMENTARY AND INEFFICIENT INTERPRETER IN PYTHON ***'
    print '-- Load [addr] to load and execute file : load test.scm'
    print '-- EOF to quit'

    while True:
        try:
            read_input = raw_input(in_prompt)
        except (EOFError, KeyboardInterrupt):
            print 'Good bye'
            break

        read_input = read_input.lower()
        #print 'READ :', read_input
        if read_input == 'log':
            logging.basicConfig(filename='details.log',
                                filemode='w',
                                format='INFO : %(message)s',
                                level=logging.INFO)
            print 'Logging enabled, check details.log file'
            continue

        if read_input.startswith("load"):
            load_exec_file(read_input[len("load"):].strip())
            continue

        read_input = transform_input(read_input)
        if type(read_input) == tuple:
            for exp in read_input:
                result = Eval.Eval(exp, global_env)
        else:
            result = Eval.Eval(read_input, global_env)
        user_print(result)
Example #3
def max_value(state, alpha, beta, depth, start_time):
    print "Depth =", depth
    global globaldepth
    if globaldepth == 2 or time.time() - start_time > 2:
        # out of depth or over the 2-second time budget: fall back to the heuristic
        # return Eval.utilGen2(state)
        return Eval.utilGen(state)
    if terminal(state):
        # game over: score the final position
        # return Eval.utilGen2(state)
        return Eval.utilGen(state)
    v = -float("inf")

    for x in range(0, 15):
        for y in range(0, 15):
            # only consider empty cells adjacent to an existing board piece
            if state[x][y] is None and check_neighbor(board, x, y):
                state[x][y] = 1
                v = max(v, min_value(state, alpha, beta, depth - 1,
                                     start_time))
                state[x][y] = None
            if v >= beta:
                return v
            alpha = max(alpha, v)
    globaldepth += 1
    return v
Example #4
def execute(fn):
    confM.loadconf(fn)
    conf.isPlot = False
    Predict.main()
    Eval.main()
    return 0
Example #5
def ex42(f):
    s = ""
    if Eval.satisfiable(f):
        s += "satisfiable "
    if Eval.unsatisfiable(f):
        s += "unsatisfiable "
    if Eval.tautology(f):
        s += "tautology"

    return s + "\n(satisfiable: %s, unsatisfiable: %s, tautology: %s)" % (Eval.satisfiable(f), Eval.unsatisfiable(f), Eval.tautology(f))
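A hedged usage sketch: calling ex42 on one of the formulas exercised in test_exercise42 further down; the expected output follows from those assertions.

# Hypothetical call; per test_exercise42, "Smoke => Smoke" is satisfiable and a tautology:
print(ex42("Smoke => Smoke"))
# satisfiable tautology
# (satisfiable: True, unsatisfiable: False, tautology: True)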
Example #6
    def eval_proc(self, args):
        if len(args) != self.num_args:
            raise SyntaxError("Wrong number of arguments for procedure call")
        env = Environment(self.env)
        # add arguments to the environment
        for i in range(self.num_args):
            val = Eval.eval(args[i], env)
            env.add_symbol(self.args[i].data, val)

        return Eval.eval(self.body, env)
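For context, a minimal sketch of the Environment class this method assumes. It is hypothetical: only add_symbol and the parent-chaining constructor are actually used above; lookup is a plausible companion.

class Environment:
    # Hypothetical minimal environment: a symbol table chained to a parent scope.
    def __init__(self, parent=None):
        self.symbols = {}
        self.parent = parent

    def add_symbol(self, name, value):
        self.symbols[name] = value

    def lookup(self, name):
        if name in self.symbols:
            return self.symbols[name]
        if self.parent is not None:
            return self.parent.lookup(name)
        raise NameError("unbound symbol: %s" % name)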
Example #7
def terminal(board):
    # A position is terminal when either side has reached a winning score...
    util = Eval.utilGen(board)
    if util >= 10000 or util <= -10000:
        return True
    # ...or when the board is full (no empty cells left)
    for row in board:
        for cell in row:
            if cell is None:
                return False
    return True
Example #8
    def test_all_valuations(self):
        self.assertEqual(list(Eval.all_valuations(["a", "b"])), [
            {"a": False, "b": False},
            {"a": False, "b": True},
            {"a": True, "b": False},
            {"a": True, "b": True},
            ])

        self.assertEqual(len(list(Eval.all_valuations(["a", "b"]))), 2 ** 2)
        self.assertEqual(len(list(Eval.all_valuations(["a", "b", "c"]))), 2 ** 3)
        self.assertEqual(len(list(Eval.all_valuations(["a", "b", "c", "d"]))), 2 ** 4)
Example #9
def main():
    data = dict(x=[[], []], y=[[], []])
    val_data = dict(x=[[], []], y=[[], []])
    Pred_True = [0] * class_number
    pred_num = [0, 0]
    L = [0, 0]
    last_j = float('-inf')

    datas, L[0], L[1], Lall = pd.read_datas(file_name)
    data_block = pd.split_datas(datas, cross_validation_number)
    # i = 0
    for cla, val in data_block:
        # print(i)
        # i += 1
        for j in range(len(cla)):
            (data['x'][j], data['y'][j], val_data['x'][j], val_data['y'][j]) = (
                cla[j][:, :-1], cla[j][:, -1:], val[j][:, :-1], val[j][:, -1:])
        myfisher = Fisher(L[0], L[1], Lall, data)

        W = myfisher.W_Direction()  # projection direction
        # evaluate the projection quality and keep the best projection vector so far
        J = Eval.simple_Fisher_Friterion(W, myfisher.between_class_scatter_matrix())
        if J > last_j:
            last_j = J
            W_great = W
        W0 = myfisher.OneKey_W0(W)  # threshold
        # %%
        for j in range(class_number):
            predict_y = myfisher.Pred_result(W, val_data['x'][j], W0)
            # print(predict_y)
            pred_num[j] += len(val_data['x'][j])
            Pred_True[j] += Eval.Comp_as1(predict_y, val_data['y'][j][0][0])
            # if j==1:
            #     print(Eval.Comp_as1(predict_y, val_data['y'][j][0][0]))

            # print(W,W0)
            # print(S_W)
            # print(time.time()-t0)
            # break
    print('  Predicted    Class 1   Class 2   Total')
    for j in range(class_number):
        print('Actual class {0}    {1}     {2}      {3}'.format(j + 1, Pred_True[j], pred_num[j] - Pred_True[j], pred_num[j]))
    print('mixed_accuracy: %f' % ((Pred_True[0] + pred_num[1] - Pred_True[1]) / (pred_num[0] + pred_num[1])))

    # print(os.path.isfile(Eval.w_filename),W_great)
    # if os.path.isfile(Eval.w_filename)==False and W_great:
    print('The W with the largest criterion value is saved at', os.path.abspath(Eval.w_filename))
    Eval.save_par(W_great)
Example #10
    def writeResult(self, filename, bests=Bee):
        f = open(filename, 'w')
        f.write(str("ObjValue\t").rjust(5))
        f.write("Accuracy" + "\t")

        f.write("\n")
        for i in range(len(bests)):
            total_acertos = 0
            total_predicts = 0
            count = 0
            f.write("%12.10f" % (bests[i].objvalue) + "\t")
            for j in self.parameters.Z:
                output = Eval.think(j, bests[i].weights, self.parameters.dim,
                                    bests[i].bias)
                total_predicts += 1
                print("output:", numpy.around(output[-1]).astype(int))
                print("test:", self.parameters.Z2[count].astype(int))
                if ((numpy.around(
                        self.parameters.Z2[count]).astype(int) == numpy.around(
                            output[-1]).astype(int)).all()):
                    total_acertos += 1
                count += 1

            accuracy = (total_acertos / total_predicts) * 100
            f.write("Accuracy: %.2f" % (accuracy))
            f.write("\n")

        # end by closing the file
        f.close()
Example #11
    def execute(self, sourceCode):
        tokens = Tokenize.tokenize(sourceCode, 0)
        for status, exp in parse(tokens):
            if status != ParseError.OK:
                print "Error occurs:", status
                return
            return Eval.eval(exp, self.glob)[1]
Example #12
def DB_SCAN_Bisect():
    bins, n_cluster, db = DB_SCAN()
    while n_cluster < 6:
        matrixes_index = []
        matrixes = []
        SSEs = []
        for i in range(n_cluster):
            matrixes_index.append(bins.loc[bins['DBSCAN'] == i].index)
            matrix = bins.loc[bins['DBSCAN'] == i]
            matrix = matrix.drop(['Bin_label', 'DBSCAN'], axis=1)
            matrix = matrix.to_numpy()
            SSEs.append(Eval.SSE(matrix))
            matrixes.append(matrix)
        SSEs = np.array(SSEs)
        max_cluster = np.where(SSEs == np.max(SSEs))
        km = KMeans(
            n_clusters=2,      # number of clusters
            init='k-means++',  # how initial centers are chosen (default: 'k-means++')
            n_init=10,         # number of center seedings; the run with the smallest SSE is kept (default: 10)
            max_iter=300,      # maximum number of k-means iterations per run (default: 300)
            tol=1e-04,         # convergence tolerance (default: 1e-04)
            random_state=0)
        bisect = km.fit_predict(matrixes[max_cluster[0][0]])
        for i in range(len(matrixes_index[max_cluster[0][0]])):
            if bisect[i] == 0:
                a = matrixes_index[max_cluster[0][0]][i]
                bins.loc[a, 'DBSCAN'] = max_cluster[0][0]
            elif bisect[i] == 1:
                a = matrixes_index[max_cluster[0][0]][i]
                bins.loc[a, 'DBSCAN'] = n_cluster
        n_cluster += 1
    return bins
Example #13
def load_exec_file(addr):
    buf = ''
    with open(addr, 'r') as f:
        for line in f:
            if line.strip() == '' and buf != '':
                #print 'LINE :', line
                #print 'BUF :', buf
                buf = transform_input(buf)
                if type(buf) == tuple:
                    for exp in buf:
                        result = Eval.Eval(exp, global_env)
                else:
                    result = Eval.Eval(buf, global_env)
                user_print(result)
                buf = ''
            else:
                buf = buf + ' ' + line.strip()
Example #14
def Apply(proc, args):
    if isinstance(proc, Env.Procedure):
        body, newEnv = proc.getValues(args)
        return Eval.eval_seq(body, newEnv)
    else:
        # Primitive functions
        #print 'Proc :', proc
        #print 'Args :', args
        return proc(args)
Example #15
def evaluate(text):
    expr = text
    print("Expression to be evaluated:", expr)
    if '"' in expr:
        # a quoted string literal: strip the surrounding quotes and return it as-is
        expr = expr[1:-1]
        return expr
    else:
        return Eval.evaluate(expr)
Example #17
    def test_eval_complex(self):
        self.assertFalse(Eval.eval(Parse.parse("a /\\ ~(a <=> a)"), {"a": False}))
        self.assertFalse(Eval.eval(Parse.parse("a /\\ ~(a <=> a)"), {"a": True}))

        self.assertTrue(Eval.eval(Parse.parse("(Smoke => Fire) => (~Smoke => ~Fire)"), {"Smoke": False, "Fire": False}))
        self.assertFalse(Eval.eval(Parse.parse("(Smoke => Fire) => (~Smoke => ~Fire)"), {"Smoke": False, "Fire": True}))
        self.assertTrue(Eval.eval(Parse.parse("(Smoke => Fire) => (~Smoke => ~Fire)"), {"Smoke": True, "Fire": False}))
        self.assertTrue(Eval.eval(Parse.parse("(Smoke => Fire) => (~Smoke => ~Fire)"), {"Smoke": True, "Fire": True}))

        self.assertTrue(Eval.eval(Parse.parse("a /\\ (b \\/ c)"), {"a": True, "b": False, "c": True}))
        self.assertFalse(Eval.eval(Parse.parse("(a /\\ b) \\/ c"), {"a": True, "b": False, "c": False}))
Example #18
def writePanels(cursor):
    # data = json.dumps(Morning.sendMorningPanel())
    data = json.dumps(Eval.sendEvalPanel())
    ts = current_time
    unixt = formatUnixt(current_time.timestamp())

    query = "insert into lnews.panel (unixt, ts, panel) values(%s,%s,%s)"

    print("Write points: {0}".format(query))
    cursor.execute(query, (unixt, ts, data))
Example #19
def evaluate(value):
    char_stream = ANTLRStringStream(value)
    lexer = ExprLexer.ExprLexer(char_stream)
    tokens = CommonTokenStream(lexer)
    parser = ExprParser.ExprParser(tokens)
    r = parser.prog()
    t = r.tree  # get tree from parser
    nodes = CommonTreeNodeStream(t)
    walker = Eval.Eval(nodes)  # create a tree parser
    return walker.prog()
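Hypothetical usage; the exact input language depends on the Expr grammar, but assuming it accepts simple arithmetic:

# lex, parse, and walk the expression; the result is whatever walker.prog() computes
print(evaluate("1+2"))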
Example #20
def min_value(state, alpha, beta, depth, start_time):

    global globaldepth
    if globaldepth == 2 or time.time() - start_time > 2:
        # out of depth or over the 2-second time budget: fall back to the heuristic
        # return Eval.utilGen2(state)
        return Eval.utilGen(state)
    if terminal(state):
        # game over: score the final position
        # return Eval.utilGen2(state)
        return Eval.utilGen(state)
    v = float("inf")

    for x in range(0, 15):
        for y in range(0, 15):
            if state[x][y] is None and check_neighbor(board, x, y):
                state[x][y] = 0
                v = min(v, max_value(state, alpha, beta, depth, start_time))
                state[x][y] = None
            if v <= alpha:
                return v
            beta = min(beta, v)
        globaldepth += 1
    return v
Example #21
    def __init__(self, parameter, body, env):
        self.parameter = parameter
        self.param_list = Eval._lispList2PythonList(parameter)
        self.body = copy.copy(body)
        self.env = LISP.Enviroment(env)
        self.env.setParameterSymbols(self.param_list)
        print "-------- ---------"
        print "body: %s" % self.body
        # if not self.body.first == new(LispSymbol, "lambda"):
        self.optcode = LISP.OptCoder.getOptCode(body, self.env, self.param_list)
        self.bytecode, self.literals = LISP.ByteCoder.getByteCode(LISP.ByteCoder.Bytecode(), LISP.ByteCoder.Literals(), self.optcode, self.env)
        print "optcode: %s" % self.optcode
        print "bytecode: %s" % self.bytecode
        print "-----------------"
Example #22
def minValue(state, a, b, d):
    if d >= 0:
        return Eval.utility(state.board, 1)
    v = 9999
    for action in state.actions:
        temp_state = copy.deepcopy(state)
        update(temp_state, action[0], action[1], 2)
        vchild = maxValue(temp_state, a, b, d + 1)
        if v > vchild:
            state.bestAction = action
            v = vchild
        if v <= a:
            return v
        b = min(b, v)
    return v
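minValue calls a maxValue counterpart that is not shown. A mirrored sketch, under the assumption that it is the exact dual (player 1 moves, v starts low, and pruning happens against beta); this is an illustration, not the author's original:

def maxValue(state, a, b, d):
    # assumed dual of minValue above, reusing the same Eval.utility,
    # update, and state interface
    if d >= 0:
        return Eval.utility(state.board, 1)
    v = -9999
    for action in state.actions:
        temp_state = copy.deepcopy(state)
        update(temp_state, action[0], action[1], 1)  # player 1's move
        vchild = minValue(temp_state, a, b, d + 1)
        if vchild > v:
            state.bestAction = action
            v = vchild
        if v >= b:
            return v
        a = max(a, v)
    return v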
Example #23
def jobsPOST(version):
    """
    Nimmt den Body eines /jobs post request entgegen. Wichtig: Startet ihn NICHT!
    :returns:
        jsonify(data): HTTP Statuscode für Erfolg (?)
    """

    # Todo: Funktion schreiben die auswertet was im JSON steht...
    if (version == "v1"):
        dataFromPost = request.get_json()  # Todo: JSON Evaluieren
        ev = Eval.evalTaskAndQueue(dataFromPost, Datastore)
        if (ev[0]):
            resp = Response()
            resp.headers["Location"] = "localhost/api/v1/jobs/" + str(ev[1])
            resp.headers["OpenEO-Identifier"] = str(ev[1])
            return resp
        else:
            data = {
                "id":
                "",  # Todo: ID Generieren bzw. Recherchieren
                "code":
                "400",
                "message":
                "Unbekannter Job Typ.",
                "links": [{
                    "href":
                    "https://example.openeo.org/docs/errors/SampleError",
                    # Todo: Passenden Link Recherchieren & Einfügen
                    "rel": "about"
                }]
            }
            return jsonify(data)
    else:
        data = {
            "id":
            "",  # Todo: ID Generieren bzw. Recherchieren
            "code":
            "404",
            "message":
            "Ungültiger API Aufruf.",
            "links": [{
                "href": "https://example.openeo.org/docs/errors/SampleError",
                # Todo: Passenden Link Recherchieren & Einfügen
                "rel": "about"
            }]
        }
        return jsonify(data)
Example #24
    def test_exercise43(self):
        bsays = "b <=> (a <=> ~a)"
        csays = "c <=> ~b"
        kb = "(%s) /\\ (%s) " % (bsays, csays)

        self.assertFalse(Eval.entails(kb, "a"))
        self.assertFalse(Eval.entails(kb, "~a"))
        self.assertFalse(Eval.entails(kb, "b"))
        self.assertTrue(Eval.entails(kb, "~b"))
        self.assertTrue(Eval.entails(kb, "c"))
        self.assertFalse(Eval.entails(kb, "~c"))
Example #25
def parse():
    char_stream = antlr3.ANTLRInputStream(sys.stdin, encoding='utf-8')
    lexer = ExprLexer(char_stream)
    tokens = antlr3.CommonTokenStream(lexer)
    parser = ExprParser(tokens)
    r = parser.prog()
    root = r.tree
    nodes = antlr3.tree.CommonTreeNodeStream(root)
    walker = Eval.Eval(nodes)

    try:
        walker.prog()
    except ReturnValue, v:
        if isinstance(v.getValue(), str) or isinstance(v.getValue(), unicode):
            print v.getValue().encode('utf-8')
        else:
            print v.getValue()
Example #26
def patchFromID(version, id):
    """
    Nimmt den Body einer Patch request mit einer ID entgegen
    Todo: Queue Implementieren welche Jobs nacheinander Abarbeitet. Fehler antwort senden wenn job bereits Prozessiert wird
    :parameter:
        id (int): Nimmt die ID aus der URL entgegen
    :returns:
        jsonify(data): HTTP Statuscode für Erfolg (?)
    """
    dataFromPatch = request.get_json()
    if (version == "v1"):
        if Eval.evalTask(dataFromPatch):
            Datastore[uuid.UUID(id)] = dataFromPatch
        else:
            data = {
                "id":
                "",  # Todo: ID Generieren bzw. Recherchieren
                "code":
                "404",
                "message":
                "Ungültiger API Aufruf.",
                "links": [{
                    "href":
                    "https://example.openeo.org/docs/errors/SampleError",
                    # Todo: Passenden Link Recherchieren & Einfügen
                    "rel": "about"
                }]
            }
            return jsonify(data)

    else:
        data = {
            "id":
            "",  # Todo: ID Generieren bzw. Recherchieren
            "code":
            "404",
            "message":
            "Ungültiger API Aufruf.",
            "links": [{
                "href": "https://example.openeo.org/docs/errors/SampleError",
                # Todo: Passenden Link Recherchieren & Einfügen
                "rel": "about"
            }]
        }
        return jsonify(data)
Example #27
    def retrieveSystem(self):
        if not self.connected:
            self.connectToServer()
        # self.getAllStars()
        print "attempting to retrieve and launch system: "
        print self.sysName
        self.xfile = self.server.retrieveSystem(self.sysName)
        print "unpacking system"
        self.mySystem = cPickle.loads(self.xfile)
        print "launching evaluator"
        self.Evaluator = Eval(self.mySystem, 1000)
        print "calculating score"
        self.score = self.Evaluator.evaluate()
        print "system stability score = "
        print self.score
        print "launching planetarium.. .  .    .        ."
        import planetarium
        self.planetWindow = planetarium.Universe(self.Evaluator)
        run()
Example #28
def main():  # , ref_file_name, read_file_name, answer_file_name, allowable_errors
    ref_file_path = 'ref_genomeWALU.txt'
    reads_file_path = 'reads_genomeWALU.txt'
    ans_file_path = 'naive_ans_genomeWALU.txt'
    ans_key_path = 'ans_genomeWALU.txt'
    #with open(ref_file_path, 'r') as ref_file:
    #    with open(reads_file_path, 'r') as read_file:
    #        with open(ans_file_path, 'w') as answer_file:
    #            start = time.time()
    #            naive = MultiReSequencer(2, 10, ref_file, read_file, answer_file)
    #            naive.ref_genome_map_str(6)
    #            naive.process_reads()
    #            naive.create_answer_file()
    #            print 'Sequencing time: ' + str(time.time() - start)
    #sort_file(ans_file_path)
    #ref_check(ans_file_path, ref_file_path)
    with open(ans_file_path, 'r') as answer_file:
        with open(ans_key_path, 'r') as answer_key:
            ans_dict = Eval.Eval(answer_key, answer_file)
            for key in ans_dict:
                print key + ' ' + str(ans_dict[key])
Example #29
    def evalModel(self, k=None, beta=None):
        """
        Evaluate the model self, an IRModel.

        :type k: float
        :param k: parameter k for the evaluations

        :type beta: float
        :param beta: parameter beta for FMesureAtK
        """

        if k is not None:
            self.k = k

        if beta is not None:
            self.beta = beta

        evaluation = [
            Eval.PrecisionAtK(self.k),
            Eval.RappelAtK(self.k),
            Eval.FMesureAtK(self.k, self.beta),
            Eval.AvgP(),
            Eval.reciprocalRank(),
            Eval.Ndcg()
        ]
        resultat = [[] for _ in range(len(evaluation))]

        for query in self.collectionQry:
            self.print_verbose('query =', self.collectionQry[query].getTexte())
            liste = [
                ranking[0] for ranking in self.model.getRanking(
                    self.collectionQry[query].getTexte())
            ]
            self.print_verbose(liste)
            for i in range(len(evaluation)):
                resultat[i].append(evaluation[i].evalQuery(
                    liste, self.collectionQry[query]))
        self.print_verbose(resultat)
        return [(np.mean(l), np.std(l)) for l in resultat]
Example #30
def spatial_groups_rf(idw_example_grid, loc_dict, Cvar_dict, shapefile, blocknum, nfolds,\
                      replacement, dictionary_Groups, file_path_elev, idx_list, expand_area):
    '''Stratified shuffle-split cross-validation procedure

    Parameters
    ----------
         idw_example_grid  : ndarray
              used for reference of study area grid size
         loc_dict : dictionary
              the latitude and longitudes of the daily/hourly stations
         Cvar_dict : dictionary
              dictionary of weather variable values for each station
         shapefile : string
              path to the study area shapefile
         blocknum : int
              number of blocks/clusters
         nfolds : int
              number of folds to create (essentially repetitions)
         replacement : bool
              whether or not to use replacement between folds, should usually be true
         dictionary_Groups : dictionary
              dictionary of what groups (clusters) the stations belong to
         expand_area : bool
              function will expand the study area so that more stations are taken into account (200 km)
              
    Returns
    ----------
         float
              - the average absolute error across all folds (each fold's stations withheld in turn)
    '''
    # If not using replacement, keep a record of what we have done
    station_list_used = []
    count = 1
    error_dictionary = {}

    na_map = gpd.read_file(shapefile)
    bounds = na_map.bounds
    if expand_area:
        xmax = bounds['maxx'] + 200000
        xmin = bounds['minx'] - 200000
        ymax = bounds['maxy'] + 200000
        ymin = bounds['miny'] - 200000
    else:
        xmax = bounds['maxx']
        xmin = bounds['minx']
        ymax = bounds['maxy']
        ymin = bounds['miny']

    while count <= nfolds:
        x_origin_list = []
        y_origin_list = []

        absolute_error_dictionary = {}
        projected_lat_lon = {}

        station_list = Eval.select_random_station(dictionary_Groups, blocknum,
                                                  replacement,
                                                  station_list_used).values()

        if not replacement:
            station_list_used.append(list(station_list))
        # print(station_list_used)

        for station_name in Cvar_dict.keys():

            if station_name in loc_dict.keys():

                loc = loc_dict[station_name]
                latitude = loc[0]
                longitude = loc[1]
                Plat, Plon = pyproj.Proj('esri:102001')(longitude, latitude)
                Plat = float(Plat)
                Plon = float(Plon)
                # Filter out stations outside of grid
                proj_coord = pyproj.Proj('esri:102001')(longitude, latitude)
                if (proj_coord[1] <= float(ymax[0])
                        and proj_coord[1] >= float(ymin[0])
                        and proj_coord[0] <= float(xmax[0])
                        and proj_coord[0] >= float(xmin[0])):
                    projected_lat_lon[station_name] = [Plat, Plon]

        lat = []
        lon = []
        Cvar = []
        for station_name in sorted(Cvar_dict.keys()):
            if station_name in loc_dict.keys():
                if station_name not in station_list:  # This is the step where we hold back the fold
                    loc = loc_dict[station_name]
                    latitude = loc[0]
                    longitude = loc[1]
                    cvar_val = Cvar_dict[station_name]

                    # Filter out stations outside of grid
                    proj_coord = pyproj.Proj('esri:102001')(longitude,
                                                            latitude)
                    if (proj_coord[1] <= float(ymax[0])
                            and proj_coord[1] >= float(ymin[0])
                            and proj_coord[0] <= float(xmax[0])
                            and proj_coord[0] >= float(xmin[0])):
                        lat.append(float(latitude))
                        lon.append(float(longitude))
                        Cvar.append(cvar_val)
                else:
                    pass  # Skip the station

        y = np.array(lat)
        x = np.array(lon)
        z = np.array(Cvar)

        pixelHeight = 10000
        pixelWidth = 10000
        num_col = int((xmax - xmin) / pixelHeight) + 1
        num_row = int((ymax - ymin) / pixelWidth) + 1

        # We need to project to a projected system before making distance matrix
        source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
        xProj, yProj = pyproj.Proj('esri:102001')(x, y)

        df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})

        if expand_area:

            yProj_extent = np.append(
                yProj, [bounds['maxy'] + 200000, bounds['miny'] - 200000])
            xProj_extent = np.append(
                xProj, [bounds['maxx'] + 200000, bounds['minx'] - 200000])
        else:
            yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
            xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])

        Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent),
                         num_row + 1)
        Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent),
                         num_col + 1)

        Xi, Yi = np.meshgrid(Xi, Yi)
        Xi, Yi = Xi.flatten(), Yi.flatten()

        maxmin = [
            np.min(yProj_extent),
            np.max(yProj_extent),
            np.max(xProj_extent),
            np.min(xProj_extent)
        ]

        # Elevation
        # Preparing the coordinates to send to the function that will get the elevation grid
        concat = np.array((Xi.flatten(), Yi.flatten())).T
        send_to_list = concat.tolist()
        # The elevation function takes a tuple
        send_to_tuple = [tuple(x) for x in send_to_list]

        Xi1_grd = []
        Yi1_grd = []
        elev_grd = []
        # Get the elevations from the lookup file
        elev_grd_dict = GD.finding_data_frm_lookup(send_to_tuple,
                                                   file_path_elev, idx_list)

        for keys in elev_grd_dict.keys():  # The keys are each lat lon pair
            x = keys[0]
            y = keys[1]
            Xi1_grd.append(x)
            Yi1_grd.append(y)
            # Append the elevation data to the empty list
            elev_grd.append(elev_grd_dict[keys])

        elev_array = np.array(elev_grd)  # make an elevation array

        elev_dict = GD.finding_data_frm_lookup(
            zip(xProj, yProj), file_path_elev,
            idx_list)  # Get the elevations for the stations

        xProj_input = []
        yProj_input = []
        e_input = []

        # Repeat the process for just the stations, not the whole grid
        for keys in zip(xProj, yProj):
            x = keys[0]
            y = keys[1]
            xProj_input.append(x)
            yProj_input.append(y)
            e_input.append(elev_dict[keys])

        source_elev = np.array(e_input)

        Xi1_grd = np.array(Xi1_grd)
        Yi1_grd = np.array(Yi1_grd)

        df_trainX = pd.DataFrame({
            'xProj': xProj,
            'yProj': yProj,
            'elevS': source_elev,
            'var': z
        })

        df_testX = pd.DataFrame({
            'Xi': Xi1_grd,
            'Yi': Yi1_grd,
            'elev': elev_array
        })

        reg = RandomForestRegressor(n_estimators=100,
                                    max_features='sqrt',
                                    random_state=1)

        y = np.array(df_trainX['var']).reshape(-1, 1)
        X_train = np.array(df_trainX[['xProj', 'yProj', 'elevS']])
        X_test = np.array(df_testX[['Xi', 'Yi', 'elev']])

        reg.fit(X_train, y)

        Zi = reg.predict(X_test)

        rf_grid = Zi.reshape(num_row + 1, num_col + 1)

        # Compare at a certain point
        for statLoc in station_list:

            coord_pair = projected_lat_lon[statLoc]

            x_orig = int((coord_pair[0] - float(xmin)) / pixelHeight)  # lon
            y_orig = int((coord_pair[1] - float(ymin)) / pixelWidth)  # lat
            x_origin_list.append(x_orig)
            y_origin_list.append(y_orig)

            interpolated_val = rf_grid[y_orig][x_orig]

            original_val = Cvar_dict[statLoc]
            absolute_error = abs(interpolated_val - original_val)
            absolute_error_dictionary[statLoc] = absolute_error

        # average over all the withheld stations in this fold
        error_dictionary[count] = sum(
            absolute_error_dictionary.values()) / len(absolute_error_dictionary)
        # print(absolute_error_dictionary)
        count += 1
    overall_error = sum(error_dictionary.values()) / nfolds  # average of all the runs
    # print(overall_error)
    return overall_error
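A hypothetical invocation; every argument below is a placeholder for data prepared elsewhere in the pipeline (station dictionaries, cluster groupings, elevation lookup):

# Hypothetical call with placeholder inputs:
overall_mae = spatial_groups_rf(idw_example_grid, loc_dict, Cvar_dict,
                                'study_area.shp', blocknum=25, nfolds=10,
                                replacement=True, dictionary_Groups=groups,
                                file_path_elev='elev_lookup.csv',
                                idx_list=idx_list, expand_area=False)
print('average absolute error across folds:', overall_mae)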
Example #31
def TestModel(new_train,
              new_test,
              texts_train_path,
              texts_test_path,
              train_pos,
              test_pos,
              new_train_ngrams,
              new_test_ngrams,
              nFeaturesList,
              subsample=False,
              removeCenter=True,
              BoW=True,
              charNgrams=False,
              POS=False,
              features=False,
              POSgrams=False,
              tfidf=False,
              binary=False,
              statistics=False):

    names = []
    train, test, train_labels, test_labels, feature_names = FeaturesReader.readFeatures(
        new_train, new_test)

    labels_train, texts_train, nominates_train = Eval.readDataSet(
        texts_train_path, 0)
    labels_test, texts_test, nominates_test = Eval.readDataSet(
        texts_test_path, 0)
    """
    train_pos_3gram_file = open(train_pos_3gram, 'r')
    train_pos_3gram_file_list = train_pos_3gram_file.readlines() 
    pos_3gram_names = train_pos_3gram_file_list[0].split(",")
    train_pos_3gram_file_list.pop(0)
    test_pos_3gram_file = open(test_pos_3gram,'r')
    test_pos_3gram_file_list = test_pos_3gram_file.readlines()
    test_pos_3gram_file_list.pop(0)
    """

    if features:
        print("Reading feature files")
        train_matrix = np.matrix(train)
        test_matrix = np.matrix(test)

        names = names + feature_names

    if POS:
        print("Reading POS files")

        train_pos_file = open(train_pos, 'r')
        train_pos_file_list = train_pos_file.readlines()
        pos_names = train_pos_file_list[0].split(",")
        train_pos_file_list.pop(0)
        test_pos_file = open(test_pos, 'r')
        test_pos_file_list = test_pos_file.readlines()
        test_pos_file_list.pop(0)

        pos_train_rows = []
        for line in train_pos_file_list:
            line = line.replace("\n", '')
            pos_train_rows.append([int(r) for r in line.split(',')])
        train_pos_file.close()

        pos_test_rows = []
        for line in test_pos_file_list:
            line = line.replace("\n", '')
            pos_test_rows.append([int(r) for r in line.split(',')])
        test_pos_file.close()

        if features:
            train_matrix = np.concatenate(
                [train_matrix, np.matrix(pos_train_rows)], axis=1)
            test_matrix = np.concatenate(
                [test_matrix, np.matrix(pos_test_rows)], axis=1)
        else:
            train_matrix = np.matrix(pos_train_rows)
            test_matrix = np.matrix(pos_test_rows)
        names = names + pos_names

    if charNgrams:
        print("Reading ngram files")
        train_ngram_file = open(new_train_ngrams, 'r')
        train_ngram_file_list = train_ngram_file.readlines()
        ngram_names = train_ngram_file_list[0].split(",")
        train_ngram_file_list.pop(0)
        test_ngram_file = open(new_test_ngrams, 'r')
        test_ngram_file_list = test_ngram_file.readlines()
        test_ngram_file_list.pop(0)

        lines = train_ngram_file_list
        ngram_train_rows = []
        for line in lines:
            line = line.replace("\n", '')
            ngram_train_rows.append([int(r) for r in line.split(',')])
        train_ngram_file.close()

        lines = test_ngram_file_list
        ngram_test_rows = []
        for line in lines:
            line = line.replace("\n", '')

            ngram_test_rows.append([int(r) for r in line.split(',')])
        test_ngram_file.close()

        if (features or POS):
            train_matrix = np.concatenate(
                [train_matrix, np.matrix(ngram_train_rows)], axis=1)
            test_matrix = np.concatenate(
                [test_matrix, np.matrix(ngram_test_rows)], axis=1)
        else:
            train_matrix = np.matrix(ngram_train_rows)
            test_matrix = np.matrix(ngram_test_rows)
        names = names + ngram_names

    if POSgrams:
        print("Reading POS n gram files")

        train_pos_gram_file = open(train_pos_gram, 'r')
        train_pos_gram_file_list = train_pos_gram_file.readlines()
        pos_gram_names = train_pos_gram_file_list[0].split(",")
        train_pos_gram_file_list.pop(0)
        test_pos_gram_file = open(test_pos_gram, 'r')
        test_pos_gram_file_list = test_pos_gram_file.readlines()
        test_pos_gram_file_list.pop(0)

        pos_gram_train_rows = []
        for line in train_pos_gram_file_list:
            line = line.replace("\n", '')
            pos_gram_train_rows.append([int(r) for r in line.split(',')])
        train_pos_gram_file.close()

        pos_gram_test_rows = []
        for line in test_pos_gram_file_list:
            line = line.replace("\n", '')
            pos_gram_test_rows.append([int(r) for r in line.split(',')])
        test_pos_gram_file.close()

        if (features or POS or charNgrams):
            train_matrix = np.concatenate(
                [train_matrix, np.matrix(pos_gram_train_rows)], axis=1)
            test_matrix = np.concatenate(
                [test_matrix, np.matrix(pos_gram_test_rows)], axis=1)
        else:
            train_matrix = np.matrix(pos_gram_train_rows)
            test_matrix = np.matrix(pos_gram_test_rows)
        names = names + pos_gram_names
        """
        pos_3gram_train_rows = []
        for line in train_pos_3gram_file_list:
            line = line.replace("\n",'')
            pos_3gram_train_rows.append([int(r) for r in line.split(',')])
        train_pos_3gram_file.close()
        
        
        pos_3gram_test_rows = []
        for line in test_pos_3gram_file_list:
            line = line.replace("\n",'')
            pos_3gram_test_rows.append([int(r) for r in line.split(',')])
        test_pos_3gram_file.close()
            

        train_matrix = np.concatenate([train_matrix, np.matrix(pos_gram_train_rows)], axis = 1)
        test_matrix = np.concatenate([test_matrix, np.matrix(pos_gram_test_rows)], axis = 1)
        
        names = names + pos_3gram_names
        """

    if BoW:
        print("Generating Bag of Words")

        #vocab_f = open(vocab_path, 'r')
        #vocab = vocab_f.readline().split(',')
        vectorizer = CountVectorizer(token_pattern='[a-zA-Z]+',
                                     stop_words='english')
        bow_train = vectorizer.fit_transform(texts_train)
        bow_test = vectorizer.transform(texts_test)
        if (features or POS or charNgrams or POSgrams):
            train_matrix = hstack((bow_train, train_matrix))
            test_matrix = hstack((bow_test, test_matrix))
        else:
            train_matrix = bow_train
            test_matrix = bow_test
        bow_names = vectorizer.get_feature_names()
        names = bow_names + names

    if tfidf:
        print("Generating TFIDF")

        #vocab_f = open(vocab_path, 'r')
        #vocab = vocab_f.readline().split(',')
        vectorizer = TfidfVectorizer()
        bow_train = vectorizer.fit_transform(texts_train)
        bow_test = vectorizer.transform(texts_test)
        if (features or POS or charNgrams or POSgrams or BoW):
            train_matrix = hstack((bow_train, train_matrix))
            test_matrix = hstack((bow_test, test_matrix))
        else:
            train_matrix = bow_train
            test_matrix = bow_test
        bow_names = vectorizer.get_feature_names()
        names = bow_names + names

    if not BoW or not tfidf:
        train_matrix = sparse.csc_matrix(train_matrix)
        test_matrix = sparse.csc_matrix(test_matrix)

    if binary:
        transformer = Binarizer().fit(train_matrix)
        train_matrix = transformer.transform(train_matrix)

        transformer = Binarizer().fit(test_matrix)
        test_matrix = transformer.transform(test_matrix)

    if removeCenter:
        extreme_indexes = []
        for i in range(0, len(texts_train)):
            if (nominates_train[i] > 0.2 or nominates_train[i] < -0.2):
                extreme_indexes.append(i)
        train_matrix = train_matrix.tocsr()[extreme_indexes, :]
        labels_train = [labels_train[i] for i in extreme_indexes]

    pos_train = []
    neg_train = []
    for i in range(0, len(labels_train)):
        if labels_train[i] == -1.0:
            neg_train.append(i)
        else:
            pos_train.append(i)

    pos_matrix = train_matrix.tocsr()[pos_train, :]
    neg_matrix = train_matrix.tocsr()[neg_train, :]
    diff = [
        abs(x - y) for x, y in zip(
            pos_matrix.mean(axis=0).tolist()[0],
            neg_matrix.mean(axis=0).tolist()[0])
    ]

    indexes = []

    indexes_sorted = [
        i[0] for i in sorted(enumerate(diff), key=lambda x: x[1])
    ]
    names_sorted = [names[i] for i in indexes_sorted]

    ac_nb_list = []
    ac_log_list = []

    train_matrix_original = train_matrix
    test_matrix_original = test_matrix

    for nFeatures in nFeaturesList:
        indexes = indexes_sorted[len(indexes_sorted) -
                                 nFeatures:len(indexes_sorted)]
        names = names_sorted[len(indexes_sorted) -
                             nFeatures:len(indexes_sorted)]
        train_matrix = train_matrix_original.tocsr()[:, indexes]
        test_matrix = test_matrix_original.tocsr()[:, indexes]

        print("Training the Naive Bayes classifier")
        clf = MultinomialNB()
        clf.fit(train_matrix, labels_train)
        pred = clf.predict(test_matrix)

        print("Naive Bayes")
        print("Accuracy:  " + str(Eval.Accuracy(labels_test, pred.tolist())))
        print("Precision: " + str(Eval.Precision(labels_test, pred.tolist())))
        print("Recall: " + str(+Eval.Recall(labels_test, pred.tolist())))
        ac_nb = Eval.Accuracy(labels_test, pred.tolist())
        ac_nb_list.append(float(ac_nb))

        if statistics:
            Eval.histogram(nominates_test, labels_test, pred.tolist(), 10,
                           'Naive Bayes', 'blue')

            a = clf.feature_log_prob_[0] - clf.feature_log_prob_[1]
            b = [
                x * y for x, y in zip(a,
                                      train_matrix.mean(axis=0).tolist()[0])
            ]
            coefs_with_fns = sorted(zip(b, names))
            top = zip(coefs_with_fns[:20], coefs_with_fns[:-(20 + 1):-1])
            for (coef_1, fn_1), (coef_2, fn_2) in top:
                print("\t%.4f\t%-15s\t\t%.4f\t%-15s" %
                      (coef_2, fn_2, coef_1, fn_1))

        clf = LogisticRegression(solver='saga', max_iter=2000)
        clf.fit(train_matrix, labels_train)
        pred = clf.predict(test_matrix)

        print("Logistic Regression")
        print("Accuracy: " + str(Eval.Accuracy(labels_test, pred.tolist())))
        print("Precision: " + str(Eval.Precision(labels_test, pred.tolist())))
        print("Recall: " + str(Eval.Recall(labels_test, pred.tolist())))
        ac_log = Eval.Accuracy(labels_test, pred.tolist())
        ac_log_list.append(float(ac_log))

        if statistics:
            Eval.histogram(nominates_test, labels_test, pred.tolist(), 10,
                           'Logistic Regression', 'orange')

            plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),
                       loc=3,
                       ncol=2,
                       mode="expand",
                       borderaxespad=0.)

            b = [
                x * y for x, y in zip(clf.coef_[0],
                                      train_matrix.mean(axis=0).tolist()[0])
            ]
            coefs_with_fns = sorted(zip(b, names))
            top = zip(coefs_with_fns[:20], coefs_with_fns[:-(20 + 1):-1])
            for (coef_1, fn_1), (coef_2, fn_2) in top:
                print("\t%.4f\t%-15s\t\t%.4f\t%-15s" %
                      (coef_1, fn_1, coef_2, fn_2))

    return ac_nb_list, ac_log_list
Example #32
    def test_question423(self):
        self.assertFalse(Eval.tautology("~(kb /\\ ~A) => (kb /\\ A)"))
Example #33
            orient = orient[argmax, :]
            cos = orient[0]
            sin = orient[1]
            
            #img = Batch2Image(batch)
            theta = np.arctan2(sin, cos) / np.pi * 180
            theta = theta + centerAngle[argmax] / np.pi * 180

            orientation_estimate = pydriver.common.functions.pyNormalizeAngle(np.radians(360 - info['ThetaRay'] - theta))
            orientation_estimate = orientation_estimate / np.pi * 180
            error = abs(orientation_estimate - info['Ry'])
            if error > 180:
                error = abs(360 - error)
            error_lst.append(error)

            Translation = Eval.GetTranslation(P, box_2D, orientation_estimate, dim)
            distance_lst.append(np.linalg.norm(Translation - info['Location']))
            #Translation = Eval.GetTranslation(P, box_2D, info['Ry'], np.array(dimGT))
            #print box_2D, info['Ry'], dimGT
            #print Translation
            #print info['ID']
            #print info['Location']
            #print error
            #print dimGT
            #print dim
            #sys.exit()
            #error = abs(theta - info['LocalAngle'])
            if i % 40 == 0:
                print '===='
                print info['Ry']
                print info['ThetaRay']
Example #34
    if len(sys.argv) == 5:
        print("\n\t\t\t\t>>>>>>>>>> TEST <<<<<<<<<<\n")
        parametersTest, Xtest, ytest = createData(sys.argv[4], True)

        ypredict = predictLabels(Xtest, tree)

        success = 0
        if ytest != []:
            for i in range(len(ytest)):
                if ytest[i] == ypredict[i]:
                    success += 1

            success = float(success * 100) / len(ytest)
            print("\nSuccess: " + "%.2f" % success + " %")

    print(ytest)
    print(ypredict)

    evaluation = Eval(ytest, ypredict, labels)
    evaluation.computeErrors()
    print("TP: " + evaluation.tp)
    print("FP: " + evaluation.fp)
    print("TN: " + evaluation.tn)
    print("FN: " + evaluation.fn)

    evaluation.confusionMatrix()
    evaluation.ROC_curve()
    evaluation.precision_recall_curve()
    evaluation.fmeasure(1)
    evaluation.DET_curve()
Example #35
    def test_exercise42(self):
        f1 = "Smoke => Smoke"
        self.assertTrue(Eval.satisfiable(f1))
        self.assertFalse(Eval.unsatisfiable(f1))
        self.assertTrue(Eval.tautology(f1))

        f1 = "(Smoke => Fire) => (~Smoke => ~Fire)"
        self.assertTrue(Eval.satisfiable(f1))
        self.assertFalse(Eval.unsatisfiable(f1))
        self.assertFalse(Eval.tautology(f1))

        f1 = "Smoke \\/ Fire\\/ ~Fire"
        self.assertTrue(Eval.satisfiable(f1))
        self.assertFalse(Eval.unsatisfiable(f1))
        self.assertTrue(Eval.tautology(f1))

        f1 = "(Fire => Smoke) /\\ Fire /\\ ~Smoke"
        self.assertFalse(Eval.satisfiable(f1))
        self.assertTrue(Eval.unsatisfiable(f1))
        self.assertFalse(Eval.tautology(f1))
Example #36
import Parameters, Bee, Cycles, Eval

print("teste4")
p = Parameters.Params("par")
#c = Cycles.Cycles(p)
b = Bee.Bee(p)

#for i in range(0,p.SN):
#print(b.weights)

#c.employedCycle(b)

print(Eval.error2([[1, 1, 1], [1, 0, 1]], [[1], [0]], b.weights, b.bias,
                  p.dim))

#def error(self, training_inputs, training_outputs, weights, bias, param = Parameters):
Example #37
    def execute(self, env, *unEvalArgs):
        arg = Eval.evall(unEvalArgs[0], env)
        if arg == new(LispSymbol, "mainloop"):
            self.value.mainloop()
Example #38
    def test_satisfiable(self):
        self.assertTrue(Eval.satisfiable("a /\\ b"))
        self.assertFalse(Eval.satisfiable("a /\\ ~a"))
Example #39
    USE_CUDA = torch.cuda.is_available()
    print('\n\nUSE_CUDA = {}\n\n'.format(USE_CUDA))

    img_size = 128
    radial_lines = 44

    Params_dict = {
        'img_size': img_size,  # length of image
        'batchsize': 1,        # number of samples in batch
        'grad_steps': 1,       # number of concatenated gradients
        'train_steps': 1,      # number of optimization steps
        'theta': 0.005,        # weighting of regularizer
        'mask': GetKMask.createkSpaceMask(np.array([img_size, img_size]),
                                          radial_lines),  # mask of radial lines
        'optimizer_net': Architectures.UNet,  # optimizer network architecture
        'load_model': True     # whether to start new or resume from last saved point
    }

    solver = Eval.Learnable_Solver(Params_dict)
    solver.run()
Example #40
def run_single_fold_train_test(df, phys_target, run_params, pre, curr_fold_num):
    """
    Train, predict, and calculate eval metrics, for model. on a single data fold.
    This function receives the data, takes care of splitting it to folds, trains the model and returns the results
    for the fold (index) it ran on.
    :param df: dataframe or list of dataframes. all columns are those that will be used for training
    :param phys_target: series with the physical model of the target data (for eval purposes)
    :param run_params: instance of type TestInstanceParams class, holds relevant model configurations
    :param pre: instance of type Process - for data preprocessing
    :param curr_fold_num: The index of the relevant fold number. has to be an int between (and including)
    0 and run_params.k -1.
    :return: a dictionary with the model, the predicitons and ground truth for the test, validation and train datasets,
    and for the validation and test also the physical model predictions
    and a dataframe summarizing the evaluation metrics for the fold, for the train, validation and test sets
    """
    fold_dict = {}
    fold_dict["fold_num"] = curr_fold_num
    train, val, test, phys_val, phys_test = Split.kfold_split_train_test(df, curr_fold_num,
                                                                         k=run_params.k, phys_target=phys_target)
    pre.fit(*get_feature_and_target_data(
        train, run_params.target_col, run_params.is_target_in_input))
    fold_dict["preprocess"] = pre
    X_train, y_train, dates_y_train = pre.transform(
        *get_feature_and_target_data(train, run_params.target_col, run_params.is_target_in_input))
    X_val, y_val, dates_y_val = pre.transform(
        *get_feature_and_target_data(val, run_params.target_col, run_params.is_target_in_input))
    X_test, y_test, dates_y_test = pre.transform(
        *get_feature_and_target_data(test, run_params.target_col, run_params.is_target_in_input))
    input_dim = X_train.shape[2]
    model_structure_args = {"look_back": run_params.train_steps, "input_dimension": input_dim,
                            "build_config_description": run_params.desc_str + "_f{}".format(curr_fold_num)}

    fold_dict["train"] = {}
    fold_dict["val"] = {}
    fold_dict["test"] = {}

    fold_dict["train"]["dates"] = dates_y_train
    fold_dict["val"]["dates"] = dates_y_val
    fold_dict["test"]["dates"] = dates_y_test

    with tf.device("/cpu:0"):
        curr_model = run_params.model_class(**model_structure_args)

    with tf.device("/cpu:0"):
        # train model (and save it, if this was implemented in model class)
        curr_model = curr_model.fit(X_train, y_train, val_data=(X_val, y_val), **run_params.model_args)

    fold_dict["model"] = curr_model

    fold_dict["test"]["pred"] = pre.inverse_scale_target(fold_dict["model"].predict(X_test))
    fold_dict["test"]["true"] = pre.inverse_scale_target(y_test.reshape(-1, 1))
    fold_dict["test"]["ww3"] = phys_test.iloc[run_params.train_steps + run_params.pred_forward:].values.reshape(-1, 1)

    fold_dict["val"]["pred"] = pre.inverse_scale_target(fold_dict["model"].predict(X_val))
    fold_dict["val"]["true"] = pre.inverse_scale_target(y_val.reshape(-1, 1))
    fold_dict["val"]["ww3"] = phys_val.iloc[run_params.train_steps + run_params.pred_forward:].values.reshape(-1, 1)

    fold_dict["train"]["pred"] = pre.inverse_scale_target(fold_dict["model"].predict(X_train))
    fold_dict["train"]["true"] = pre.inverse_scale_target(y_train.reshape(-1, 1))

    fold_dict["results_test"] = Eval.eval_pred_phys_const(fold_dict["test"], pre)
    fold_dict["results_val"] = Eval.eval_pred_phys_const(fold_dict["val"], pre)
    # for train we don't look at ww3 model or const guess. these metrics are interesting
    # only for checking overfit in training
    train_eval = Eval.eval_model(
        fold_dict["train"]["true"], fold_dict["train"]["pred"])
    fold_dict["results_train"] = pd.Series(train_eval, name="ML")
    return fold_dict
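run_single_fold_train_test handles one fold at a time; a hypothetical driver that collects all k folds (names other than the function itself are placeholders):

# Hypothetical k-fold driver; df, phys_target, run_params, and pre are
# assumed to be prepared as described in the docstring above.
fold_results = [
    run_single_fold_train_test(df, phys_target, run_params, pre, fold)
    for fold in range(run_params.k)
]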
Example #41
                [dev_x1, l_dev_x1, r_dev_x1, dev_pos, dev_ner], batch_size=200)
            test_res = model.predict(
                [test_x1, l_test_x1, r_test_x1, test_pos, test_ner],
                batch_size=200)
        else:
            train_res = model.predict([train_token, train_pos, train_ner],
                                      batch_size=200)
            dev_res = model.predict([dev_token, dev_pos, dev_ner],
                                    batch_size=200)
            test_res = model.predict([test_token, test_pos, test_ner],
                                     batch_size=200)
    elif not s['pos_fea'] and not s['ner_fea']:
        if s['model'] == 'RCNN':
            train_res = model.predict([train_x1, l_train_x1, r_train_x1],
                                      batch_size=200)
            dev_res = model.predict([dev_x1, l_dev_x1, r_dev_x1],
                                    batch_size=200)
            test_res = model.predict([test_x1, l_test_x1, r_test_x1],
                                     batch_size=200)
        else:
            train_res = model.predict(train_token, batch_size=200)
            dev_res = model.predict(dev_token, batch_size=200)
            test_res = model.predict(test_token, batch_size=200)
    F = Eval.eval_mulclass(train_y, train_res, True, True)
    F = Eval.eval_mulclass(dev_y, dev_res, True, True)
    F = Eval.eval_mulclass(test_y, test_res, True, True)
    if s['model_save']:
        FileUtil.writeFloatMatrix(train_res, train_result_file)
        FileUtil.writeFloatMatrix(dev_res, dev_result_file)
        FileUtil.writeFloatMatrix(test_res, test_result_file)
Example #42
    def test_eval_simple(self):
        self.assertTrue(Eval.eval(Parse.parse("true"), {}))
        self.assertFalse(Eval.eval(Parse.parse("false"), {}))
Example #43
    def name(self):
        return "TreeSearch"
    def version(self):
        return "1.0"
    def pick_move(self, color):
        x, y = choose_move_alphabeta(self.board, self.policy, self.value, depth=3)
        return Move(x,y)
    def get_position_eval(self):
        return self.value.evaluate(self.board)

if __name__ == '__main__':
    import GTP
    fclient = GTP.redirect_all_output("log_engine.txt")

    import Policy
    import MoveModels
    import Eval
    import EvalModels

    #policy = Policy.AllPolicy()
    policy = Policy.TFPolicy(model=MoveModels.Conv12PosDepELU(N=19, Nfeat=21), threshold_prob=0.8, softmax_temp=1.0)
    value = Eval.TFEval(EvalModels.Conv11PosDepFC1ELU(N=19, Nfeat=21))

    engine = TreeSearchEngine(policy, value)
    
    gtp = GTP.GTP(engine, fclient)
    gtp.loop()
Example #44
    def test_eval_by_def(self):
        self.assertFalse(Eval.eval(Parse.parse("a \\/ b"), {"a": False, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("a \\/ b"), {"a": False, "b": True}))
        self.assertTrue(Eval.eval(Parse.parse("a \\/ b"), {"a": True, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("a \\/ b"), {"a": True, "b": True}))
        self.assertFalse(Eval.eval(Parse.parse("a \\/ a"), {"a": False}))
        self.assertTrue(Eval.eval(Parse.parse("a \\/ a"), {"a": True}))

        self.assertFalse(Eval.eval(Parse.parse("a /\\ b"), {"a": False, "b": False}))
        self.assertFalse(Eval.eval(Parse.parse("a /\\ b"), {"a": False, "b": True}))
        self.assertFalse(Eval.eval(Parse.parse("a /\\ b"), {"a": True, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("a /\\ b"), {"a": True, "b": True}))
        self.assertFalse(Eval.eval(Parse.parse("a /\\ a"), {"a": False}))
        self.assertTrue(Eval.eval(Parse.parse("a /\\ a"), {"a": True}))

        self.assertFalse(Eval.eval(Parse.parse("a => b"), {"a": True, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("a => b"), {"a": True, "b": True}))
        self.assertTrue(Eval.eval(Parse.parse("a => b"), {"a": False, "b": True}))
        self.assertTrue(Eval.eval(Parse.parse("a => b"), {"a": False, "b": False}))

        self.assertTrue(Eval.eval(Parse.parse("a <=> b"), {"a": False, "b": False}))
        self.assertFalse(Eval.eval(Parse.parse("a <=> b"), {"a": False, "b": True}))
        self.assertFalse(Eval.eval(Parse.parse("a <=> b"), {"a": True, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("a <=> b"), {"a": True, "b": True}))

        self.assertTrue(Eval.eval(Parse.parse("~a"), {"a": False}))

        self.assertTrue(Eval.eval(Parse.parse("~a \\/ b"), {"a": False, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("~a \\/ b"), {"a": False, "b": True}))
        self.assertFalse(Eval.eval(Parse.parse("~a \\/ b"), {"a": True, "b": False}))
        self.assertTrue(Eval.eval(Parse.parse("~a \\/ b"), {"a": True, "b": True}))
Example #45
    def test_exercise41(self):
        self.assertTrue(Eval.entails("False", "True"))
        self.assertFalse(Eval.entails("True", "False"))
        self.assertTrue(Eval.entails("a /\\ b", "a <=> b"))
        self.assertFalse(Eval.entails("a <=> b", "a \\/ b"))
        self.assertTrue(Eval.entails("a <=> b", "~a \\/ b"))
Example #46
    def test_atoms(self):
        parser = Parse.ParseTreeGenertor()
        self.assertEqual(Eval.atoms(parser.parse("~((a => ((b /\\ c) \\/ d \\/ e) <=> f))")), {'a', 'b', 'c', 'd', 'e', 'f'})
        self.assertEqual(Eval.atoms(parser.parse("~((a => ((b /\\ c) \\/ d \\/ e) <=> true))")), {'a', 'b', 'c', 'd', 'e'})
Example #47
def main():
    # Problem 4.1.
    print('''
Problem 4.1. Use the function entails to check whether the following
entailment is true or not.
1. False |= True
2. True |= False
3. (A ∧ B) |= (A ⇔ B)
4. (A ⇔ B) |= A ∨ B
5. (A ⇔ B) |= ¬A ∨ B

Solutions:
1. %s
2. %s
3. %s
4. %s
5. %s
''' % (
        Eval.entails("False", "True"),  # 1. False |= True
        Eval.entails("True", "False"),  # 2. True |= False
        Eval.entails("a /\\ b", "a <=> b"),  # 3. (A ∧ B) |= (A ⇔ B)
        Eval.entails("a <=> b", "a \\/ b"),  # 4. (A ⇔ B) |= A ∨ B
        Eval.entails("a <=> b", "~a \\/ b")  # 5. (A ⇔ B) |= ¬A ∨ B
    ))


    # Problem 4.2.2
    f1 = "Smoke => Smoke"
    f2 = "(Smoke => Fire) => (~Smoke => ~Fire)"
    f3 = "Smoke \\/ Fire \\/ ~Fire"
    f4 = "(Fire => Smoke) /\\ Fire /\\ ~Smoke"


    print(
    '''
Problem 4.2.2. Use the functions tautology, satisfiable, and unsatisfiable
to check whether each of the following formulae is a tautology, satisfiable,
or unsatisfiable. Compare the output with the result from your
pencil-and-paper derivation.
1. Smoke ⇒ Smoke
2. (Smoke ⇒ Fire) ⇒ (¬Smoke ⇒ ¬Fire)
3. Smoke ∨ Fire ∨ ¬Fire
4. (Fire ⇒ Smoke) ∧ Fire ∧ ¬Smoke

Solutions:
1. %s

2. %s

3. %s

4. %s
    ''' % (
        ex42(f1),
        ex42(f2),
        ex42(f3),
        ex42(f4)
        ))

    # Problem 4.2.3
    bsays = "b <=> (a <=> ~a)"
    csays = "c <=> ~b"
    kb = "(%s) /\\ (%s) " % (bsays, csays)

    print(
'''
Problem 4.2.3
Represent what B says with your parser.

    bsays = parse "b <=> (a <=> ~a)"

where parse is your parser implementation. Do the same with what C says.

    csays = parse "c <=> ~b"

Construct a knowledge base, kb of type Formula, as the conjunction of
what B and C say. By using the function entails, check whether the
knowledge base entails that A is a knight. Check also whether it
entails that A is a knave. Perform these checks for B and C as well.

Solutions:
%s
%s
%s
%s
%s
%s
'''
        % (
    "KB |= a  = " + str(Eval.entails(kb, "a")),
    "KB |= ~a = " + str(Eval.entails(kb, "~a")),
    "KB |= b  = " + str(Eval.entails(kb, "b")),
    "KB |= ~b = " + str(Eval.entails(kb, "~b")),
    "KB |= c  = " + str(Eval.entails(kb, "c")),
    "KB |= ~c = " + str(Eval.entails(kb, "~c"))
    ))

    print("number of satisfying valuations: %d" % Eval.num_valid_valuations(
        "((S \\/ ~A) /\\ (~N \\/ ~T \\/ A) /\\ (~C \\/ L \\/ A) /\\ (~U \\/ ~S) /\\ (E))"))
Example #48
    def evaluateN(self, somebodies):
        # Wrap the candidate bodies in a fresh System, score it with the
        # soPhysics evaluator, and return the accumulated fitness.
        tempSys = System()
        tempSys.bodies = somebodies
        tempEval = Eval.soPhysics(tempSys, 1000000, 0.01)
        return tempEval.sumFit
Example #49
File: LDA.py Project: edurra/TFM
from sklearn.feature_extraction.text import CountVectorizer
import datetime
import matplotlib.pyplot as plt
from gensim.models.coherencemodel import CoherenceModel
from nltk.stem import PorterStemmer
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import numpy as np
import Eval  # project-local module providing readDataSet, used below

train_n = 110
test_n = 112
texts_train_path = "dir/speeches_" + str(train_n) + "_dwnominate_nonames.txt"
texts_test_path = "dir/speeches_" + str(test_n) + "_dwnominate_nonames.txt"

labels_train, texts_train, nominates_train = Eval.readDataSet(
    texts_train_path, 0)
labels_test, texts_test, nominates_test = Eval.readDataSet(texts_test_path, 0)


def preprocess(text, stop, stemmer):

    result = []
    for w in text.split(" "):
        if len(w) > 3 and not stop.get(w):
            result.append(stemmer.stem(w.lower()))
    return result

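preprocess expects stop to be a dict-like stopword table (it calls
stop.get) and stemmer to be anything with a stem method; it drops
stopwords and words of three characters or fewer, then lowercases and
stems the rest. A hypothetical usage with the PorterStemmer imported
above (the stopword dict here is a toy example):

stemmer = PorterStemmer()
stop = {"the": True, "and": True, "that": True}
print(preprocess("The senators debated the amendment", stop, stemmer))
# roughly: ['senat', 'debat', 'amend']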

def saveHTML(lda_model, path, dictionary, corpus):
    import pyLDAvis.gensim
    vis = pyLDAvis.gensim.prepare(lda_model,
Example #50
class solarClient(object):
    def __init__(self, xString='http://bamdastard.kicks-ass.net:8000',
                 mode="normal", userName="******"):
        self.connected = False
        self.xString = xString
        self.username = userName
        self.sysName = "aSystem"
        self.mode = mode
        self.scoreThreshold = 1
        self.score = 1000
        self.galaxy = Galaxy()
        if xString == "local":
            self.runLocal()

    def clientLoop(self):
        try:
#            self.retrieveGalaxy()
#            self.logIn()
            self.generateSystem()
#            self.insertSystem()
            self.launchSystem()
        except:
            self.runLocal()
            
    def logIn(self):
        print "Please type your username and password"
        print "user: "
        uname = raw_input()  # this line was masked as ****** in the source
        if uname == "":
            uname = "Default"
        print "pass: "
        raw_input()  # password read was masked as ****** in the source

    def getAllStars(self):
        print "getting all star names"
        allStars = self.server.getAllStars()
        for star in allStars:
            print star
        return

    def connectToServer(self):
        print "connecting to server ", self.xString
        self.server = xmlrpclib.Server(self.xString)
        self.connected = True
        
        
    def retrieveGalaxy(self):
        if not self.connected:
            self.connectToServer()
        print "retrieving galaxy"
        xfile = self.server.getGalaxy()
        print "loading galaxy"
        self.galaxy = cPickle.loads(xfile)

    def retrieveSystem(self):
        if not self.connected:
            self.connectToServer()
        #self.getAllStars()
        print "attempting to retrieve and launch system: "
        print self.sysName
        self.xfile = self.server.retrieveSystem(self.sysName)
        print "unpacking system"
        self.mySystem = cPickle.loads(self.xfile)
        print "launching evaluator"
        self.Evaluator = Eval(self.mySystem, 1000)
        print "calculating score"
        self.score = self.Evaluator.evaluate()
        print "system stability score = "
        print self.score
        print "launching planetarium.. .  .    .        ."
        import planetarium
        self.planetWindow = planetarium.Universe(self.Evaluator)
        run()
        
    def runSol(self):
        print "earthSun"
        sysCount = 0
        self.mySystem = System(sysCount)
        self.Evaluator = soPhysics(self.mySystem, 100, 0.2)
        print "number of bodies:"
        print len(self.mySystem.bodies)
        print "launching planetarium.. .  .    .        ."
        import planetarium
        self.planetWindow = planetarium.Universe(self.Evaluator)
        run()

    def generateSystem(self):
        sysCount = 1
        self.mySystem = System(sysCount)
        self.scoreThreshold = 1
        self.score = 1000
        starcount = 1

        # bodycount = raw_input()
        # if bodycount == "":
        bodycount = 32
        bodyDistance = 0.5
        bodySpeed = 0.03
        self.mySystem = System(sysCount, starcount,
                               bodycount, bodyDistance,
                               bodySpeed)
        #try:
  #          print "trying to get a star"
 #           self.connectToServer()
 #           self.mySystem.star = cPickle.loads(self.server.getNextStar())
 #           print "got one"
            
        #except:
#        print "couldn't get one"
        # no effect as written: the indexed star is never assigned or used
        self.galaxy.stars[4*len(self.galaxy.stars)/5]
        self.Evaluator = soPhysics(self.mySystem, 10000, 0.02)
        print "number of bodies:"
        print len(self.mySystem.bodies)
        
    def launchSystem(self):
        print "launching planetarium.. .  .    .        ."
        import planetarium        
 #       import interactiveConsole.interactiveConsole
 #       from interactiveConsole.interactiveConsole import(
 #           pandaConsole, INPUT_CONSOLE, INPUT_GUI,
 #           OUTPUT_PYTHON, OUTPUT_IRC)
        
 #       self.console = pandaConsole( INPUT_CONSOLE|INPUT_GUI
 #                                    |OUTPUT_PYTHON|OUTPUT_IRC,
 #                                    locals() )
        self.planetWindow = planetarium.Universe(self.Evaluator,
                                                 self.galaxy.stars)
 #                                                self.console)
        run()
        
    def runLocal(self, sofigs=""):
        print "run sol? y/n"
        genvar = 'n'  # raw_input()
        if genvar == 'y':
            self.runSol()
        else:
            self.generateSystem()
        self.launchSystem()

Example #51
             [dev_token, dev_pos, dev_ner], batch_size=200)
         test_res = model.predict(
             [test_token, test_pos, test_ner],
             batch_size=200)
 elif s['pos_fea'] == False and s['ner_fea'] == False:
     if s['model'] == 'RCNN':
         dev_res = model.predict(
             [dev_x1, l_dev_x1, r_dev_x1], batch_size=200)
         test_res = model.predict(
             [test_x1, l_test_x1, r_test_x1],
             batch_size=200)
     else:
         dev_res = model.predict(dev_token, batch_size=200)
         test_res = model.predict(test_token,
                                  batch_size=200)
 F = Eval.eval_mulclass(dev_y, dev_res, False, True)
 if F[2] > max_f[2]:
     test_F = Eval.eval_mulclass(test_y, test_res, False,
                                 True)
     max_f[0] = F[0]
     max_f[1] = F[1]
     max_f[2] = F[2]
     max_f[3] = F[3]
     max_f[4] = epoch
     test_f[0] = test_F[0]
     test_f[1] = test_F[1]
     test_f[2] = test_F[2]
     test_f[3] = test_F[3]
     max_res = test_res
     max_minibatch = minibatch
     if s['model_save'] == True:
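
The tail of this (truncated) example does early model selection:
Eval.eval_mulclass scores the dev predictions, the epoch with the best
dev F1 is remembered, and the test metrics recorded at that same epoch
are what gets reported. A self-contained sketch of the pattern; the
names here are illustrative, not from the project:

def select_best_epoch(dev_f1_per_epoch, test_f1_per_epoch):
    # Maximize dev F1 and report the matching test F1, so the test
    # set never influences model selection.
    best = max(range(len(dev_f1_per_epoch)), key=lambda e: dev_f1_per_epoch[e])
    return best, test_f1_per_epoch[best]

# e.g. dev F1 peaks at epoch 2, so its test F1 (0.58) is reported:
print(select_best_epoch([0.41, 0.55, 0.61, 0.60], [0.40, 0.52, 0.58, 0.59]))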
Example #52
    def test_tautology(self):
        self.assertFalse(Eval.tautology("a /\\ b"))
        self.assertTrue(Eval.tautology("a <=> a"))
        self.assertFalse(Eval.tautology("a <=> b"))
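
As with entailment, tautology reduces to brute-force checking of every
valuation. A sketch reusing the hypothetical all_valuations helper from
the entailment sketch earlier:

def tautology(formula):
    tree = Parse.parse(formula)
    names = sorted(Eval.atoms(tree))
    return all(Eval.eval(tree, v) for v in all_valuations(names))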