def ba3(agent, r, c):
    # Bayesian update after a failed search of (r, c): the probability of a
    # failed search is P(not in cell) + P(in cell) * P(false negative).
    scale = 1.0 - agent.map[r][c].probability + \
        agent.map[r][c].probability * agent.map[r][c].falseNegativeProbability
    # Renormalize the searched cell as well, so the belief still sums to 1.
    agent.map[r][c].probability = agent.map[r][c].falseNegativeProbability * \
        agent.map[r][c].probability / scale
    minScore = float('inf')
    new_r, new_c = r, c
    for i in range(agent.dim):
        for j in range(agent.dim):
            if i == r and j == c:
                continue
            # Renormalize every other cell, then rescore it: travel distance
            # per unit of true-positive probability (lower is better).
            agent.map[i][j].probability /= scale
            dist = manhattanDistance((r, c), (i, j))
            agent.map[i][j].score = float(dist) / (
                agent.map[i][j].probability *
                (1 - agent.map[i][j].falseNegativeProbability))
            if agent.map[i][j].score < minScore:
                minScore = agent.map[i][j].score
                new_r, new_c = i, j
    return (new_r, new_c)
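These snippets call several helpers defined elsewhere in the project (`manhattanDistance`, `targetInRange`, `minInRange`, `minOutRange`, `numActions`). For reference, a minimal sketch of `manhattanDistance`, assuming it takes two `(row, col)` tuples as the call sites suggest:

def manhattanDistance(a, b):
    # |r1 - r2| + |c1 - c2| between two (row, col) positions
    return abs(a[0] - b[0]) + abs(a[1] - b[1])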
def improvedMAMTWithClue(agent, target):
    # Start at the cell with the best chance of a true positive.
    highest = 0
    r = c = 0
    for row in agent.map:
        for cell in row:
            cell.score = cell.probability * \
                (1 - cell.falseNegativeProbability)
            if cell.score > highest:
                highest = cell.score
                r, c = cell.row, cell.col

    while not agent.hasFoundTarget:
        prevr, prevc = r, c

        # Search a cell repeatedly in proportion to its false-negative rate,
        # but always at least once so the loop makes progress.
        numChecks = max(1, int(agent.map[r][c].falseNegativeProbability * 10.0))
        for check in range(numChecks):
            searchResult = agent.searchCell(r, c, target)
            if not searchResult:
                target.move()
                withinFive = targetInRange(agent, target, prevr, prevc)
                # Bayesian update after the failed search; renormalize the
                # searched cell too, so the belief still sums to 1.
                scale = 1 - agent.map[r][c].probability + \
                    agent.map[r][c].probability * \
                    agent.map[r][c].falseNegativeProbability
                agent.map[r][c].probability = agent.map[r][c].probability * \
                    agent.map[r][c].falseNegativeProbability / scale
                for i in range(agent.dim):
                    for j in range(agent.dim):
                        if prevr == i and prevc == j:
                            continue
                        # Renormalize and rescore every other cell.
                        agent.map[i][j].probability /= scale
                        dist = manhattanDistance((prevr, prevc), (i, j))
                        agent.map[i][j].score = (1 + float(dist)) / (
                            agent.map[i][j].probability *
                            (1 - agent.map[i][j].falseNegativeProbability))
                if check == numChecks - 1:
                    # Use the clue: pick the next cell from within range of
                    # the searched cell when the clue says the target is
                    # close, and from outside that range otherwise.
                    if withinFive:
                        minScore, r, c = minInRange(agent, prevr, prevc)
                    else:
                        minScore, r, c = minOutRange(agent, prevr, prevc)
                    numActions((prevr, prevc), (r, c), agent)
            else:
                break
    return agent.numMoves
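`targetInRange`, `minInRange`, and `minOutRange` are not shown here. Judging by the variable name `withinFive`, the clue appears to report whether the target is within Manhattan distance 5 of the searched cell, and the two min helpers appear to return a `(score, row, col)` triple restricted to cells inside or outside that radius. A plausible sketch under those assumptions only:

def targetInRange(agent, target, r, c):
    # Assumed clue: is the target within Manhattan distance 5 of (r, c)?
    return manhattanDistance((r, c), target.position) <= 5

def minInRange(agent, r, c):
    # Assumed helper: lowest-scored cell within distance 5 of (r, c).
    best = (float('inf'), r, c)
    for i in range(agent.dim):
        for j in range(agent.dim):
            if (i, j) != (r, c) and manhattanDistance((r, c), (i, j)) <= 5:
                best = min(best, (agent.map[i][j].score, i, j))
    return best

def minOutRange(agent, r, c):
    # Assumed helper: lowest-scored cell farther than distance 5 from (r, c).
    best = (float('inf'), r, c)
    for i in range(agent.dim):
        for j in range(agent.dim):
            if manhattanDistance((r, c), (i, j)) > 5:
                best = min(best, (agent.map[i][j].score, i, j))
    return best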
def improvedMAMTWithoutClue(agent, target):
    # Start at the cell with the best chance of a true positive.
    highest = 0
    r = c = 0
    for row in agent.map:
        for cell in row:
            cell.score = cell.probability * (1 - cell.falseNegativeProbability)
            if cell.score > highest:
                highest = cell.score
                r, c = cell.row, cell.col

    while not agent.hasFoundTarget:
        minScore = float('inf')
        prevr, prevc = r, c
        # Search repeatedly in proportion to the false-negative rate, but at
        # least once so the loop makes progress.
        numChecks = max(1, int(agent.map[r][c].falseNegativeProbability * 10.0))
        for check in range(numChecks):
            searchResult = agent.searchCell(r, c, target)
            if not searchResult:
                target.move()
                # Bayesian update after the failed search; renormalize the
                # searched cell too, so the belief still sums to 1.
                scale = 1 - agent.map[r][c].probability + \
                    agent.map[r][c].probability * \
                    agent.map[r][c].falseNegativeProbability
                agent.map[r][c].probability = agent.map[r][c].falseNegativeProbability * \
                    agent.map[r][c].probability / scale
                for i in range(agent.dim):
                    for j in range(agent.dim):
                        if i == prevr and j == prevc:
                            continue
                        agent.map[i][j].probability /= scale
                        dist = manhattanDistance((prevr, prevc), (i, j))
                        agent.map[i][j].score = (1 + float(dist)) / (
                            agent.map[i][j].probability *
                            (1 - agent.map[i][j].falseNegativeProbability))
                        # Only commit to the next cell on the final check.
                        if check == numChecks - 1 and \
                                agent.map[i][j].score < minScore:
                            minScore = agent.map[i][j].score
                            r, c = i, j
                if check == numChecks - 1:
                    numActions((prevr, prevc), (r, c), agent)
            else:
                break

    return agent.numMoves
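`numActions` is also defined elsewhere. Since the cost here is travel plus searches and the functions return `agent.numMoves`, it plausibly charges one action per Manhattan step moved plus one for the search itself. A sketch under that assumption:

def numActions(prev, nxt, agent):
    # Assumed accounting: one move per Manhattan step, plus one search action.
    agent.numMoves += manhattanDistance(prev, nxt) + 1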
def rule3MAST(agent, target):
    highest = 0
    r = c = 0
    # initialize the first scores from the prior probabilities
    for row in agent.map:
        for cell in row:
            cell.score = cell.probability * (1 - cell.falseNegativeProbability)
            if cell.score > highest:
                highest = cell.score
                r, c = cell.row, cell.col

    # while the target has not been found, keep searching cells
    while not agent.hasFoundTarget:
        minScore = float('inf')
        prevr, prevc = r, c
        searchResult = agent.searchCell(r, c, target)
        if not searchResult:
            # update the probability of the cell we just searched, and
            # renormalize it as well so the belief still sums to 1
            scale = 1 - agent.map[r][c].probability + \
                agent.map[r][c].probability * \
                agent.map[r][c].falseNegativeProbability
            agent.map[r][c].probability = agent.map[r][c].falseNegativeProbability * \
                agent.map[r][c].probability / scale
            for i in range(agent.dim):
                for j in range(agent.dim):
                    # skip the cell we just updated; at distance zero it
                    # would otherwise always look like the best candidate
                    if i == prevr and j == prevc:
                        continue
                    # renormalize each of the other cells on the board
                    agent.map[i][j].probability /= scale
                    # rescore: distance travelled per unit of true-positive
                    # probability (lower is better)
                    dist = manhattanDistance((prevr, prevc), (i, j))
                    agent.map[i][j].score = (1 + float(dist)) / (
                        agent.map[i][j].probability *
                        (1 - agent.map[i][j].falseNegativeProbability))
                    if agent.map[i][j].score < minScore:
                        minScore = agent.map[i][j].score
                        r, c = i, j
            # increment the number of actions between the previous cell and
            # the one we will explore next
            numActions((prevr, prevc), (r, c), agent)

    return agent.numMoves
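None of the snippets show the `agent` or `target` objects themselves. A minimal, hypothetical harness that matches the attributes used above (`map`, `dim`, `hasFoundTarget`, `searchCell`, `numMoves`; cells with `row`, `col`, `probability`, `falseNegativeProbability`, `score`), intended only to make the functions runnable for experimentation:

import random

class Cell:
    def __init__(self, row, col, dim):
        self.row, self.col = row, col
        self.probability = 1.0 / (dim * dim)  # uniform prior over the board
        self.falseNegativeProbability = random.choice([0.1, 0.3, 0.7, 0.9])
        self.score = 0.0

class Target:
    def __init__(self, dim):
        self.dim = dim
        self.position = (random.randrange(dim), random.randrange(dim))

    def move(self):
        # Step to a random in-bounds neighbor (moving-target variants only).
        r, c = self.position
        moves = [(r + dr, c + dc)
                 for dr, dc in ((0, 1), (0, -1), (1, 0), (-1, 0))
                 if 0 <= r + dr < self.dim and 0 <= c + dc < self.dim]
        self.position = random.choice(moves)

class Agent:
    def __init__(self, dim):
        self.dim = dim
        self.map = [[Cell(r, c, dim) for c in range(dim)] for r in range(dim)]
        self.hasFoundTarget = False
        self.numMoves = 0

    def searchCell(self, r, c, target):
        # True positive only when the target is here and no false negative fires.
        if (r, c) == target.position and \
                random.random() > self.map[r][c].falseNegativeProbability:
            self.hasFoundTarget = True
            return True
        return False

# Example run of the stationary-target rule:
# agent, target = Agent(10), Target(10)
# print(rule3MAST(agent, target))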