Code Example #1
from pathlib import Path  # needed for Path(...).mkdir below

# Game, Knowledge, Experiment as well as XSize, YSize and the knowledge settings
# (treeknowlege, rolloutknowledge, smarttreecount, smarttreevalue) are
# module-level names defined elsewhere in the project.

def MultiExperiment(args):
    # Unpack the single packed argument tuple (one job per worker call).
    occlusionInd = args[0][0]
    occlusions = args[0][1]
    predatorInd = args[1][0]
    predatorHome = args[1][1]
    visualRange = args[2]
    simulationInd = args[3]
    directory = args[4]

    # Two Game instances: the real environment and the simulator handed to Experiment.
    real = Game(XSize, YSize, occlusions=occlusions)
    simulator = Game(XSize, YSize, occlusions=occlusions)

    # Configure the tree-search / rollout knowledge levels for this run.
    knowledge = Knowledge()
    knowledge.TreeLevel = treeknowlege
    knowledge.RolloutLevel = rolloutknowledge
    knowledge.SmartTreeCount = smarttreecount
    knowledge.SmartTreeValue = smarttreevalue

    experiment = Experiment(real, simulator)

    # Create a per-simulation output directory: <directory>/Data/Simulation_<n>.
    simulationDirectory = directory + '/Data/Simulation_%d' % (simulationInd)
    Path(simulationDirectory).mkdir(parents=True, exist_ok=True)

    # Run the discounted-return experiment for this (occlusion, predator, simulation)
    # combination; the return value is not used here.
    _ = experiment.DiscountedReturn(occlusions,
                                    predatorHome,
                                    knowledge,
                                    occlusionInd,
                                    predatorInd,
                                    simulationDirectory,
                                    visualRange=visualRange)
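
MultiExperiment takes a single packed args tuple, which is the usual shape for fanning jobs out over worker processes. The following is a minimal dispatch sketch, assuming a standard multiprocessing.Pool; RunAllExperiments, occlusionSets, predatorHomes and nSimulations are hypothetical names that do not appear in the original code.

from multiprocessing import Pool

def RunAllExperiments(occlusionSets, predatorHomes, visualRange, nSimulations, directory):
    # Build one packed args tuple per (occlusion, predator, simulation) combination,
    # matching the unpacking at the top of MultiExperiment above.
    jobs = []
    for occlusionInd, occlusions in enumerate(occlusionSets):
        for predatorInd, predatorHome in enumerate(predatorHomes):
            for simulationInd in range(nSimulations):
                jobs.append(((occlusionInd, occlusions),
                             (predatorInd, predatorHome),
                             visualRange,
                             simulationInd,
                             directory))
    with Pool() as pool:
        pool.map(MultiExperiment, jobs)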
Code Example #2
# Variant with a simpler argument packing; it relies on the same imports and
# module-level settings as Code Example #1.
def MultiExperiment(args):
    # Unpack the packed argument tuple: (simulationInd, predatorHome) plus the output directory.
    simulationInd = args[0][0]
    predatorHome = args[0][1]
    directory = args[1]

    real = Game(XSize, YSize)
    simulator = Game(XSize, YSize)

    knowledge = Knowledge()
    knowledge.TreeLevel = treeknowlege
    knowledge.RolloutLevel = rolloutknowledge
    knowledge.SmartTreeCount = smarttreecount
    knowledge.SmartTreeValue = smarttreevalue

    experiment = Experiment(real, simulator)

    simulationDirectory = directory + '/Data/Simulation_%d' % (simulationInd)
    Path(simulationDirectory).mkdir(parents=True, exist_ok=True)

    _ = experiment.DiscountedReturn(predatorHome, simulationDirectory,
                                    knowledge)
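
The dispatch pattern is the same as for Code Example #1; only the packing of args changes. A hedged sketch follows, where RunAllExperiments, predatorHomes and nSimulations are again placeholder names rather than part of the original.

from multiprocessing import Pool

def RunAllExperiments(predatorHomes, nSimulations, directory):
    # Each job matches the unpacking above: args[0] = (simulationInd, predatorHome), args[1] = directory.
    jobs = [((simulationInd, predatorHome), directory)
            for predatorHome in predatorHomes
            for simulationInd in range(nSimulations)]
    with Pool() as pool:
        pool.map(MultiExperiment, jobs)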
Code Example #3
                # Snippet from inside the (occlusionInd, simulationInd, predatorInd) loops;
                # real, experiment, occlusions, policies, predatorHomes, environmentPolicies
                # and the habit* result arrays are defined in the enclosing scope, and
                # numpy is imported as np at module level.
                # If the current predator's home is not visible from the agent's home,
                # pool the policies of every predator whose home is likewise hidden.
                if not real.Grid.VisualRay(
                    (real.AgentHome).Copy(),
                    (predatorHome).Copy(), occlusions)[0]:
                    aggregatePolicyLibrary = []
                    for predatorInd2 in range(5):

                        # Skip entries recorded as NaN; np.isnan can raise when the
                        # stored entry is not a plain float, so treat that as valid.
                        try:
                            if np.isnan(predatorHomes[predatorInd2,
                                                      simulationInd,
                                                      occlusionInd]):
                                continue
                        except Exception:
                            pass

                        # Add this predator's policies only if its home is also
                        # hidden from the agent's home position.
                        if not real.Grid.VisualRay(
                            (real.AgentHome).Copy(),
                            (predatorHomes[predatorInd2, simulationInd,
                                           occlusionInd]).Copy(),
                                occlusions)[0]:
                            aggregatePolicyLibrary.extend(
                                environmentPolicies[simulationInd]
                                [occlusionInd][predatorInd2])

                    policies = aggregatePolicyLibrary

                # Evaluate the discounted return (sr, gDist) with the possibly
                # aggregated policy library and store the results per
                # predator/simulation/occlusion index.
                sr, gDist = experiment.DiscountedReturn(
                    policies, predatorHome, occlusions)

                habitSurvivalRate[predatorInd, simulationInd,
                                  occlusionInd] = sr
                habitGDist[predatorInd, simulationInd, occlusionInd] = gDist