Code Example #1
import csv

from py_linq import Enumerable
from sortedcontainers import SortedSet

# getParams(), theOpenFile and PositionTesterLogItem are defined elsewhere in the project.
def main():
    getParams()
    x = 1

    # irange_key is used further down, which requires a key function;
    # the positionId attribute name is an assumption about PositionTesterLogItem
    allLog = SortedSet(key=lambda item: item.positionId)
    errorNo = 0
    for line in theOpenFile:
        if line.startswith("#"):
            continue
        if line.startswith("result, logSet"):
            continue
        if len(line) > 20:
            x += 1
            if x % 10000 == 0:
                print(f'{int(x / 1000):,}')
            try:
                p: PositionTesterLogItem = PositionTesterLogItem(line, 1)
                if p not in allLog:
                    allLog.add(p)
                # duplicate log lines are simply ignored

            except Exception as e:
                print(f'error {errorNo}:{e}')
                errorNo += 1

    theOpenFile.close()
    agList = []
    # for each position id:
    #     remove duplicates
    #     build a (positionId, number of networks that found it, sum of agrees across all nets) tuple
    for k in range(1, 200000):
        l = list(allLog.irange_key(k, k))
        if len(l) > 0:
            # if you're going to save the entire list or compare other features, do a clean-up here
            enu = Enumerable(l)
            agList.append((k, enu.count(), enu.sum(lambda x: x.agree)))
            print(agList[-1])

    with open('ur frequencyEasy20k.csv', 'w') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['positionID', 'count', 'agree'])
        for row in agList:
            csv_out.writerow(row)
    # group the (positionID, count, agree) rows by their agree value
    groups = Enumerable(agList).group_by(key_names=['id', 'count', 'agree'], key=lambda x: x[2])

    countList = []
    for g in groups:
        countList.append((g.count(), g.first()[2]))

    with open('countlist.csv', 'w') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['howManyPositions', 'AgreeCount'])
        for row in countList:
            csv_out.writerow(row)

if __name__ == '__main__':
    main()
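This snippet leans on a project-specific PositionTesterLogItem class that is not shown. As a rough, hypothetical sketch of what the surrounding code expects (the field names, line format and constructor arguments are all assumptions), the class only needs to parse a line, expose positionId and agree, and be hashable so the SortedSet de-duplication works:

# Hypothetical stand-in for the project's PositionTesterLogItem; the real line
# format and fields are unknown. It only illustrates what the code above needs:
# a positionId for the SortedSet key / irange_key lookups, an agree value for
# the per-position sum, and __eq__/__hash__ for the duplicate check.
class PositionTesterLogItem:
    def __init__(self, line, version):
        fields = line.strip().split(',')   # assumed comma-separated log line
        self.positionId = int(fields[0])
        self.agree = int(fields[1])
        self.version = version

    def __eq__(self, other):
        return (self.positionId, self.agree) == (other.positionId, other.agree)

    def __hash__(self):
        return hash((self.positionId, self.agree))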
Code Example #2
from sortedcontainers import SortedDict, SortedSet

# Nod and genereaza_succesori are defined elsewhere in the project.
def astar(stare_initiala, stare_finala, euristica, lista_chei):
    nod_initial = Nod(stare_initiala, None, None)
    deschise = SortedSet([nod_initial])
    scor_optim = SortedDict({tuple(stare_initiala): 0})

    # states are lists such as [1, 1, 1, 1, 1]; they are stored as tuples
    # such as (1, 1, 1, 1, 1) so they can be used as dictionary keys

    while len(deschise) > 0:
        # extract the node with the smallest f
        nod = deschise[0]
        deschise.pop(0)

        # stop once the final state has been reached
        if nod.stare == stare_finala:
            return nod

        # generate the successors and run the checks
        lista_succesori = genereaza_succesori(nod, lista_chei, euristica)
        for succesor in lista_succesori:
            if tuple(succesor.stare) not in scor_optim:
                # the successor's state has not been seen before, so record it
                scor_optim[tuple(succesor.stare)] = succesor.g
                deschise.add(succesor)
            elif succesor.g < scor_optim[tuple(succesor.stare)]:
                # the state was already reached, but with a higher cost:
                # rebuild the old open-set entry (identified by its old f score)
                # so it can be removed, then insert the cheaper successor
                succesor_fals = Nod(succesor.stare, None, None)
                succesor_fals.f = scor_optim[tuple(succesor.stare)] + euristica(succesor.stare)

                if succesor_fals in deschise:
                    deschise.discard(succesor_fals)
                deschise.add(succesor)
                # the state was reached with a lower cost, so update its best known score
                scor_optim[tuple(succesor.stare)] = succesor.g

    return None
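Nod and genereaza_succesori belong to the rest of the assignment and are not shown. The loop above only works if the open set is ordered by f and if membership tests can find the stored entry; a hypothetical minimal Nod satisfying that contract (its constructor arguments beyond the three used above, and how f gets set, are assumptions) could look like this:

# Hypothetical minimal Nod, only to make the ordering contract explicit:
# SortedSet keeps nodes sorted by f, so deschise[0] / pop(0) returns the node
# with the lowest estimated total cost, and __eq__/__hash__ let succesor_fals
# (same state, old f) match the stale entry that has to be discarded.
class Nod:
    def __init__(self, stare, parinte, cost_muchie):
        self.stare = stare
        self.parinte = parinte
        self.g = parinte.g + cost_muchie if parinte is not None else 0
        self.f = self.g  # callers set f to g + heuristic where needed

    def __lt__(self, other):
        return self.f < other.f

    def __eq__(self, other):
        return self.f == other.f and self.stare == other.stare

    def __hash__(self):
        return hash((self.f, tuple(self.stare)))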