def __call__(self, parameters):
        print(parameters)
        numWolves = parameters['numWolves']
        numSheep = parameters['numSheep']
        softParameterInInference = parameters['inferenceSoft']
        softParameterInPlanning = parameters['wolfPolicySoft']
        otherCompeteRate = parameters['otherCompeteRate']
        competeDetectionRate = parameters['competeDetectionRate']
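        # Illustrative call (hypothetical values): parameters = {'numWolves': 2, 'numSheep': 1,
        # 'inferenceSoft': 1.0, 'wolfPolicySoft': 2.5, 'otherCompeteRate': 0.5,
        # 'competeDetectionRate': 0.5}. competeDetectionRate must be 0, 0.5, or 1 (it indexes
        # selfPossibleIntentionSpaces below), and the intention spaces in this example are
        # hard-coded for two wolves and one sheep.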

        ## MDP Env
        # state is the joint multi-agent state; action is the joint multi-agent action
        xBoundary = [0, 600]
        yBoundary = [0, 600]
        numOfAgent = numWolves + numSheep
        reset = Reset(xBoundary, yBoundary, numOfAgent)

        possibleSheepIds = list(range(numSheep))
        possibleWolvesIds = list(range(numSheep, numSheep + numWolves))
        getSheepStatesFromAll = lambda state: np.array(state)[possibleSheepIds]
        getWolvesStatesFromAll = lambda state: np.array(state)[
            possibleWolvesIds]
        killzoneRadius = 50
        isTerminal = IsTerminal(killzoneRadius, getSheepStatesFromAll,
                                getWolvesStatesFromAll)

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(
            xBoundary, yBoundary)
        interpolateOneFrame = InterpolateOneFrame(
            stayInBoundaryByReflectVelocity)
        numFramesToInterpolate = 3
        transit = TransitWithTerminalCheckOfInterpolation(
            numFramesToInterpolate, interpolateOneFrame, isTerminal)

        maxRunningSteps = 61
        timeCost = 1 / maxRunningSteps
        terminalBonus = 1
        rewardFunction = RewardFunctionByTerminal(timeCost, terminalBonus,
                                                  isTerminal)

        forwardOneStep = ForwardOneStep(transit, rewardFunction)
        sampleTrajectory = SampleTrajectory(maxRunningSteps, isTerminal, reset,
                                            forwardOneStep)

        ## MDP Policy
        # Sheep Part

        # Sheep Policy Function
        numSheepPolicyStateSpace = 2 * (numWolves + 1)
        sheepActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0),
                            (-7, -7), (0, -10), (7, -7), (0, 0)]
        preyPowerRatio = 12
        sheepIndividualActionSpace = list(
            map(tuple,
                np.array(sheepActionSpace) * preyPowerRatio))
        numSheepActionSpace = len(sheepIndividualActionSpace)
        regularizationFactor = 1e-4
        generateSheepModel = GenerateModel(numSheepPolicyStateSpace,
                                           numSheepActionSpace,
                                           regularizationFactor)
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        sheepNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
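        # sharedWidths * sheepNNDepth = [128] * 9 specifies nine shared 128-unit trunk layers;
        # the action and value heads get their own widths, with resBlockSize and dropoutRate
        # configuring the residual blocks.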
        initSheepModel = generateSheepModel(sharedWidths * sheepNNDepth,
                                            actionLayerWidths,
                                            valueLayerWidths, resBlockSize,
                                            initializationMethod, dropoutRate)
        sheepModelPath = os.path.join(
            '..', '..', 'data', 'preTrainModel',
            'agentId=0.' + str(numWolves) +
            '_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=110_trainSteps=50000'
        )
        sheepNNModel = restoreVariables(initSheepModel, sheepModelPath)
        sheepPolicy = ApproximatePolicy(sheepNNModel,
                                        sheepIndividualActionSpace)

        # Sheep Generate Action
        softParameterInPlanningForSheep = 2.0
        softPolicyInPlanningForSheep = SoftDistribution(
            softParameterInPlanningForSheep)
        softenSheepPolicy = lambda relativeAgentsStatesForSheepPolicy: softPolicyInPlanningForSheep(
            sheepPolicy(relativeAgentsStatesForSheepPolicy))

        sheepChooseActionMethod = sampleFromDistribution
        sheepSampleActions = [
            SampleActionOnFixedIntention(selfId, possibleWolvesIds,
                                         softenSheepPolicy,
                                         sheepChooseActionMethod)
            for selfId in possibleSheepIds
        ]

        # Wolves Part

        # Percept Action For Inference
        perceptAction = lambda action: action

        # Policy Likelihood function: Wolf Central Control NN Policy Given Intention
        numWolvesStateSpaces = [
            2 * (numInWe + 1) for numInWe in range(2, numWolves + 1)
        ]
        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7)]
        predatorPowerRatio = 8
        wolfIndividualActionSpace = list(
            map(tuple,
                np.array(actionSpace) * predatorPowerRatio))
        wolvesCentralControlActionSpaces = [
            list(it.product(wolfIndividualActionSpace, repeat=numInWe))
            for numInWe in range(2, numWolves + 1)
        ]
        numWolvesCentralControlActionSpaces = [
            len(wolvesCentralControlActionSpace) for
            wolvesCentralControlActionSpace in wolvesCentralControlActionSpaces
        ]
        regularizationFactor = 1e-4
        generateWolvesCentralControlModels = [
            GenerateModel(numStateSpace, numActionSpace, regularizationFactor)
            for numStateSpace, numActionSpace in zip(
                numWolvesStateSpaces, numWolvesCentralControlActionSpaces)
        ]
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        wolfNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initWolvesCentralControlModels = [
            generateWolvesCentralControlModel(sharedWidths * wolfNNDepth,
                                              actionLayerWidths,
                                              valueLayerWidths, resBlockSize,
                                              initializationMethod,
                                              dropoutRate)
            for generateWolvesCentralControlModel in
            generateWolvesCentralControlModels
        ]
        NNNumSimulations = 250
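        # The agentId in each path encodes the We-group size: 8 * sum(10**k for k in range(numInWe))
        # yields 88 for two wolves and 888 for three, i.e. the digit 8 (the individual wolf
        # action-space size) repeated once per wolf in the group.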
        wolvesModelPaths = [
            os.path.join(
                '..', '..', 'data', 'preTrainModel',
                'agentId=' + str(8 * np.sum([10**_ for _ in range(numInWe)])) +
                '_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations='
                + str(NNNumSimulations) + '_trainSteps=50000')
            for numInWe in range(2, numWolves + 1)
        ]
        print(wolvesModelPaths)
        wolvesCentralControlNNModels = [
            restoreVariables(initWolvesCentralControlModel, wolvesModelPath)
            for initWolvesCentralControlModel, wolvesModelPath in zip(
                initWolvesCentralControlModels, wolvesModelPaths)
        ]
        wolvesCentralControlPolicies = [
            ApproximatePolicy(NNModel, actionSpace) for NNModel, actionSpace in
            zip(wolvesCentralControlNNModels, wolvesCentralControlActionSpaces)
        ]

        centralControlPolicyListBasedOnNumAgentsInWe = wolvesCentralControlPolicies  # 0 for two agents in We, 1 for three agents...
        softPolicyInInference = SoftDistribution(softParameterInInference)
        policyForCommittedAgentsInInference = PolicyForCommittedAgent(
            centralControlPolicyListBasedOnNumAgentsInWe,
            softPolicyInInference, getStateOrActionThirdPersonPerspective)
        concernedAgentsIds = [2]
        calCommittedAgentsPolicyLikelihood = CalCommittedAgentsPolicyLikelihood(
            concernedAgentsIds, policyForCommittedAgentsInInference)

        getGoalStateForIndividualHeatseeking = lambda statesRelative: np.array(
            statesRelative)[0]
        getSelfStateForIndividualHeatseeking = lambda statesRelative: np.array(
            statesRelative)[1]
        heatseekingPrecesion = 1.83
        heatSeekingDiscreteStochasticPolicy = HeatSeekingDiscreteStochasticPolicy(
            heatseekingPrecesion, wolfIndividualActionSpace,
            getSelfStateForIndividualHeatseeking,
            getGoalStateForIndividualHeatseeking)
        policyForUncommittedAgentsInInference = PolicyForUncommittedAgent(
            possibleWolvesIds, heatSeekingDiscreteStochasticPolicy,
            softPolicyInInference, getStateOrActionFirstPersonPerspective)
        calUncommittedAgentsPolicyLikelihood = CalUncommittedAgentsPolicyLikelihood(
            possibleWolvesIds, concernedAgentsIds,
            policyForUncommittedAgentsInInference)

        # Joint Likelihood
        calJointLikelihood = lambda intention, state, perceivedAction: calCommittedAgentsPolicyLikelihood(intention, state, perceivedAction) * \
                calUncommittedAgentsPolicyLikelihood(intention, state, perceivedAction)
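        # The joint likelihood of a perceived action given an intention factors into a committed
        # part (wolves inside the hypothesized We, scored by the central-control NN policy) and an
        # uncommitted part (wolves outside the We, scored by the heat-seeking policy), as the class
        # names above suggest.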

        wolvesValueListBasedOnNumAgentsInWe = [
            ApproximateValue(NNModel)
            for NNModel in wolvesCentralControlNNModels
        ]
        calIntentionValueGivenState = CalIntentionValueGivenState(
            wolvesValueListBasedOnNumAgentsInWe)
        softParamterForValue = 0.01
        softValueToBuildDistribution = SoftMax(softParamterForValue)
        adjustIntentionPriorGivenValueOfState = AdjustIntentionPriorGivenValueOfState(
            calIntentionValueGivenState, softValueToBuildDistribution)

        # Sample and Save Trajectory
        trajectoriesWithIntentionDists = []
        for trajectoryId in range(self.numTrajectories):

            # Intention Prior For inference
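            # An intention reads as (goalSheepId, weWolfIds): (0, (1, 2)) = chase sheep 0 jointly
            # with wolves 1 and 2; (0, ()) = chase sheep 0 with an empty We-group, i.e. compete
            # alone (interpretation based on the surrounding variable names).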
            otherWolfPossibleIntentionSpaces = {0: [(0, (1, 2))], 1: [(0, ())]}
            otherIntentionType = np.random.choice(
                [1, 0], p=[otherCompeteRate, 1 - otherCompeteRate])
            otherWolfIntentionSpace = otherWolfPossibleIntentionSpaces[
                otherIntentionType]
            selfPossibleIntentionSpaces = {
                0: [(0, (1, 2))],
                0.5: [(0, (1, 2)), (0, ())],
                1: [(0, ())]
            }
            selfWolfIntentionSpace = selfPossibleIntentionSpaces[
                competeDetectionRate]
            intentionSpacesForAllWolves = [
                selfWolfIntentionSpace, otherWolfIntentionSpace
            ]
            wolvesIntentionPriors = [{
                tuple(intention): 1 / len(allPossibleIntentionsOneWolf)
                for intention in allPossibleIntentionsOneWolf
            } for allPossibleIntentionsOneWolf in intentionSpacesForAllWolves]
            # Infer and update Intention
            variablesForAllWolves = [[
                intentionSpace
            ] for intentionSpace in intentionSpacesForAllWolves]
            jointHypothesisSpaces = [
                pd.MultiIndex.from_product(variables, names=['intention'])
                for variables in variablesForAllWolves
            ]
            concernedHypothesisVariable = ['intention']
            priorDecayRate = 1
            softPrior = SoftDistribution(priorDecayRate)
            inferIntentionOneStepList = [
                InferOneStep(jointHypothesisSpace, concernedHypothesisVariable,
                             calJointLikelihood, softPrior)
                for jointHypothesisSpace in jointHypothesisSpaces
            ]

            chooseIntention = sampleFromDistribution
            valuePriorEndTime = -100
            updateIntentions = [
                UpdateIntention(intentionPrior, valuePriorEndTime,
                                adjustIntentionPriorGivenValueOfState,
                                perceptAction, inferIntentionOneStep,
                                chooseIntention)
                for intentionPrior, inferIntentionOneStep in zip(
                    wolvesIntentionPriors, inferIntentionOneStepList)
            ]

            # tools to reset intention and adjust intention-prior attributes across multiple trajectories
            intentionResetAttributes = [
                'timeStep', 'lastState', 'lastAction', 'intentionPrior',
                'formerIntentionPriors'
            ]
            intentionResetAttributeValues = [
                dict(
                    zip(intentionResetAttributes,
                        [0, None, None, intentionPrior, [intentionPrior]]))
                for intentionPrior in wolvesIntentionPriors
            ]
            resetIntentions = ResetObjects(intentionResetAttributeValues,
                                           updateIntentions)
            returnAttributes = ['formerIntentionPriors']
            getIntentionDistributions = GetObjectsValuesOfAttributes(
                returnAttributes, updateIntentions)
            attributesToRecord = ['lastAction']
            recordActionForUpdateIntention = RecordValuesForObjects(
                attributesToRecord, updateIntentions)

            # Wolves Generate Action
            softPolicyInPlanning = SoftDistribution(softParameterInPlanning)
            policyForCommittedAgentInPlanning = PolicyForCommittedAgent(
                centralControlPolicyListBasedOnNumAgentsInWe,
                softPolicyInPlanning, getStateOrActionThirdPersonPerspective)

            policyForUncommittedAgentInPlanning = PolicyForUncommittedAgent(
                possibleWolvesIds, heatSeekingDiscreteStochasticPolicy,
                softPolicyInPlanning, getStateOrActionFirstPersonPerspective)

            wolfChooseActionMethod = sampleFromDistribution
            getSelfActionThirdPersonPerspective = lambda weIds, selfId: list(
                weIds).index(selfId)
            chooseCommittedAction = GetActionFromJointActionDistribution(
                wolfChooseActionMethod, getSelfActionThirdPersonPerspective)
            chooseUncommittedAction = sampleFromDistribution
            wolvesSampleIndividualActionGivenIntentionList = [
                SampleIndividualActionGivenIntention(
                    selfId, policyForCommittedAgentInPlanning,
                    policyForUncommittedAgentInPlanning, chooseCommittedAction,
                    chooseUncommittedAction) for selfId in possibleWolvesIds
            ]

            wolvesSampleActions = [
                SampleActionOnChangableIntention(
                    updateIntention,
                    wolvesSampleIndividualActionGivenIntention)
                for updateIntention, wolvesSampleIndividualActionGivenIntention
                in zip(updateIntentions,
                       wolvesSampleIndividualActionGivenIntentionList)
            ]
            allIndividualSampleActions = sheepSampleActions + wolvesSampleActions
            sampleActionMultiAgent = SampleActionMultiagent(
                allIndividualSampleActions, recordActionForUpdateIntention)
            trajectory = sampleTrajectory(sampleActionMultiAgent)
            intentionDistributions = getIntentionDistributions()
            trajectoryWithIntentionDists = [
                tuple(list(SASRPair) + list(intentionDist)) for SASRPair,
                intentionDist in zip(trajectory, intentionDistributions)
            ]
            trajectoriesWithIntentionDists.append(
                tuple(trajectoryWithIntentionDists))
            resetIntentions()
            #print(intentionDistributions[-1], otherCompeteRate)
        trajectoryFixedParameters = {
            'sheepPolicySoft': softParameterInPlanningForSheep,
            'wolfPolicySoft': softParameterInPlanning,
            'maxRunningSteps': maxRunningSteps,
            'competePolicy': 'heatseeking',
            'NNNumSimulations': NNNumSimulations,
            'heatseekingPrecesion': heatseekingPrecesion
        }
        self.saveTrajectoryByParameters(trajectoriesWithIntentionDists,
                                        trajectoryFixedParameters, parameters)
        print(np.mean([len(tra) for tra in trajectoriesWithIntentionDists]))
    def __call__(self, parameters):
        print(parameters)
        numWolves = parameters['numWolves']
        numSheep = 1
        
        ## MDP Env
        # state is the joint multi-agent state; action is the joint multi-agent action
        xBoundary = [0, 600]
        yBoundary = [0, 600]
        numOfAgent = numWolves + numSheep
        reset = Reset(xBoundary, yBoundary, numOfAgent)

        possibleSheepIds = list(range(numSheep))
        possibleWolvesIds = list(range(numSheep, numSheep + numWolves))
        getSheepStatesFromAll = lambda state: np.array(state)[possibleSheepIds]
        getWolvesStatesFromAll = lambda state: np.array(state)[possibleWolvesIds]
        killzoneRadius = 50
        isTerminal = IsTerminal(killzoneRadius, getSheepStatesFromAll, getWolvesStatesFromAll)

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(xBoundary, yBoundary)
        interpolateOneFrame = InterpolateOneFrame(stayInBoundaryByReflectVelocity)
        numFramesToInterpolate = 3
        transit = TransitWithTerminalCheckOfInterpolation(numFramesToInterpolate, interpolateOneFrame, isTerminal)

        maxRunningSteps = 52
        timeCost = 1/maxRunningSteps
        terminalBonus = 1
        rewardFunction = RewardFunctionByTerminal(timeCost, terminalBonus, isTerminal)

        forwardOneStep = ForwardOneStep(transit, rewardFunction)
        sampleTrajectory = SampleTrajectory(maxRunningSteps, isTerminal, reset, forwardOneStep)

        ## MDP Policy
        # Sheep Part

        # Sheep Policy Function
        numSheepPolicyStateSpace = 2 * (numWolves + 1)
        sheepActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7),
                       (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
        preyPowerRatio = 12
        sheepIndividualActionSpace = list(map(tuple, np.array(sheepActionSpace) * preyPowerRatio))
        numSheepActionSpace = len(sheepIndividualActionSpace)
        regularizationFactor = 1e-4
        generateSheepModel = GenerateModel(numSheepPolicyStateSpace, numSheepActionSpace, regularizationFactor)
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        sheepNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initSheepModel = generateSheepModel(sharedWidths * sheepNNDepth, actionLayerWidths, valueLayerWidths, 
                resBlockSize, initializationMethod, dropoutRate)
        sheepModelPath = os.path.join('..', '..', 'data', 'preTrainModel',
                'agentId=0.'+str(numWolves)+'_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=110_trainSteps=50000')
        sheepNNModel = restoreVariables(initSheepModel, sheepModelPath)
        sheepPolicy = ApproximatePolicy(sheepNNModel, sheepIndividualActionSpace)

        # Sheep Generate Action
        softParameterInPlanningForSheep = 2.5
        softPolicyInPlanningForSheep = SoftDistribution(softParameterInPlanningForSheep)
        softenSheepPolicy = lambda relativeAgentsStatesForSheepPolicy: softPolicyInPlanningForSheep(sheepPolicy(relativeAgentsStatesForSheepPolicy))

        sheepChooseActionMethod = sampleFromDistribution
        sheepSampleActions = [SampleActionOnFixedIntention(selfId, possibleWolvesIds, sheepPolicy, sheepChooseActionMethod) for selfId in possibleSheepIds]

        # Wolves Part

        # Policy Likelihood function: Wolf Central Control NN Policy Given Intention
        numWolvesStateSpaces = [2 * (numInWe + 1) 
                for numInWe in range(2, numWolves + 1)]
        actionSpace = [(10, 0), (0, 10), (-10, 0), (0, -10)]
        predatorPowerRatio = 8
        wolfIndividualActionSpace = list(map(tuple, np.array(actionSpace) * predatorPowerRatio))
        wolvesCentralControlActionSpaces = [list(it.product(wolfIndividualActionSpace, repeat = numInWe)) 
                for numInWe in range(2, numWolves + 1)]
        numWolvesCentralControlActionSpaces = [len(wolvesCentralControlActionSpace)
                for wolvesCentralControlActionSpace in wolvesCentralControlActionSpaces]
        regularizationFactor = 1e-4
        generateWolvesCentralControlModels = [GenerateModel(numStateSpace, numActionSpace, regularizationFactor) 
            for numStateSpace, numActionSpace in zip(numWolvesStateSpaces, numWolvesCentralControlActionSpaces)]
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        wolfNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initWolvesCentralControlModels = [generateWolvesCentralControlModel(sharedWidths * wolfNNDepth, actionLayerWidths, valueLayerWidths, 
                resBlockSize, initializationMethod, dropoutRate) for generateWolvesCentralControlModel in generateWolvesCentralControlModels] 
        NNNumSimulations = 250
        wolvesModelPaths = [os.path.join('..', '..', 'data', 'preTrainModel', 
                'agentId='+str(len(actionSpace) * np.sum([10**_ for _ in
                range(numInWe)]))+'_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations='+str(NNNumSimulations)+'_trainSteps=50000') 
                for numInWe in range(2, numWolves + 1)]
        print(wolvesModelPaths)
        wolvesCentralControlNNModels = [restoreVariables(initWolvesCentralControlModel, wolvesModelPath) 
                for initWolvesCentralControlModel, wolvesModelPath in zip(initWolvesCentralControlModels, wolvesModelPaths)]
        wolvesCentralControlPolicies = [ApproximatePolicy(NNModel, actionSpace) 
                for NNModel, actionSpace in zip(wolvesCentralControlNNModels, wolvesCentralControlActionSpaces)] 

        centralControlPolicyListBasedOnNumAgentsInWe = wolvesCentralControlPolicies # 0 for two agents in We, 1 for three agents...
        softParameterInInference = 1
        softPolicyInInference = SoftDistribution(softParameterInInference)
        policyForCommittedAgentsInInference = PolicyForCommittedAgent(centralControlPolicyListBasedOnNumAgentsInWe, softPolicyInInference,
                getStateThirdPersonPerspective)
        calCommittedAgentsPolicyLikelihood = CalCommittedAgentsPolicyLikelihood(policyForCommittedAgentsInInference)
        
        wolfLevel2ActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7),
                       (-10, 0), (-7, -7), (0, -10), (7, -7)]
        wolfLevel2IndividualActionSpace = list(map(tuple, np.array(wolfLevel2ActionSpace) * predatorPowerRatio))
        wolfLevel2CentralControlActionSpace = list(it.product(wolfLevel2IndividualActionSpace))
        numWolfLevel2ActionSpace = len(wolfLevel2CentralControlActionSpace)
        regularizationFactor = 1e-4
        generatewolfLevel2Models = [GenerateModel(numStateSpace, numWolfLevel2ActionSpace, regularizationFactor) for numStateSpace in numWolvesStateSpaces]
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        wolfLevel2NNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initwolfLevel2Models = [generatewolfLevel2Model(sharedWidths * wolfLevel2NNDepth, actionLayerWidths, valueLayerWidths, 
                resBlockSize, initializationMethod, dropoutRate) for generatewolfLevel2Model in generatewolfLevel2Models]
        wolfLevel2ModelPaths = [os.path.join('..', '..', 'data', 'preTrainModel', 
                'agentId=1.'+str(numInWe)+'_depth=9_hierarchy=2_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations='+str(NNNumSimulations)+'_trainSteps=50000') 
                for numInWe in range(2, numWolves + 1)]
        wolfLevel2NNModels = [restoreVariables(initwolfLevel2Model, wolfLevel2ModelPath)
                for initwolfLevel2Model, wolfLevel2ModelPath in zip(initwolfLevel2Models, wolfLevel2ModelPaths)]
        wolfLevel2Policies = [ApproximatePolicy(wolfLevel2NNModel, wolfLevel2CentralControlActionSpace) 
                for wolfLevel2NNModel in wolfLevel2NNModels]
        level2PolicyListBasedOnNumAgentsInWe = wolfLevel2Policies # 0 for two agents in We, 1 for three agents...

        softPolicy = SoftDistribution(2.5)
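        # Evaluation loop: compare the joint central-control policy with the level-2 hierarchical
        # policy. For each sampled state, a wolf's hierarchical action counts as "in range" if it
        # lies within 8 * predatorPowerRatio of its joint-policy action (pairs containing a (0, 0)
        # action are excluded); the mean rate over all trials is returned.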
        totalInSmallRangeFlags = []
        for trial in range(self.numTrajectories):
            state = reset()
            while isTerminal(state):
                state = reset()

            jointActions = sampleFromDistribution(softPolicy(wolvesCentralControlPolicies[numWolves - 2](state)))

            hierarchyActions = []
            weIds = [list(range(numSheep, numWolves + numSheep)) for _ in range(numWolves)]
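            # For each wolf, build an egocentric id ordering: move its own id to the front of the
            # We-group and prepend the sheep id (0); the level-2 policy is queried on the state
            # restricted to this reordered id list.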
            for index in range(numWolves):
                weId = weIds[index].copy()
                weId.insert(0, weId.pop(index))
                relativeId = [0] + weId
                action = sampleFromDistribution(softPolicy(wolfLevel2Policies[numWolves - 2](state[relativeId])))
                hierarchyActions.append(action)

            reasonableActionRange = [int(np.linalg.norm(np.array(jointAction) - np.array(hierarchyAction)) <= 8 * predatorPowerRatio)
                    for jointAction, hierarchyAction in zip(jointActions, hierarchyActions) if jointAction != (0, 0) and hierarchyAction != (0, 0)]
            totalInSmallRangeFlags = totalInSmallRangeFlags + reasonableActionRange
        inSmallRangeRateMean = np.mean(totalInSmallRangeFlags)
        return inSmallRangeRateMean
Example #3
def iterateTrainOneCondition(parameterOneCondition):

    numTrainStepEachIteration = int(
        parameterOneCondition['numTrainStepEachIteration'])
    numTrajectoriesPerIteration = int(
        parameterOneCondition['numTrajectoriesPerIteration'])
    dirName = os.path.dirname(__file__)

    numOfAgent = 2
    agentIds = list(range(numOfAgent))

    maxRunningSteps = 50
    numSimulations = 250
    killzoneRadius = 50
    fixedParameters = {
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations,
        'killzoneRadius': killzoneRadius
    }
    # env MDP
    sheepsID = [0]
    wolvesID = [1, 2]
    blocksID = []

    numSheeps = len(sheepsID)
    numWolves = len(wolvesID)
    numBlocks = len(blocksID)

    numAgents = numWolves + numSheeps
    numEntities = numAgents + numBlocks

    sheepSize = 0.05
    wolfSize = 0.075
    blockSize = 0.2

    sheepMaxSpeed = 1.3 * 1
    wolfMaxSpeed = 1.0 * 1
    blockMaxSpeed = None

    entitiesSizeList = [sheepSize] * numSheeps + [wolfSize] * numWolves + [
        blockSize
    ] * numBlocks
    entityMaxSpeedList = [sheepMaxSpeed] * numSheeps + [
        wolfMaxSpeed
    ] * numWolves + [blockMaxSpeed] * numBlocks
    entitiesMovableList = [True] * numAgents + [False] * numBlocks
    massList = [1.0] * numEntities

    centralControlId = 1
    centerControlIndexList = [centralControlId]
    reshapeAction = UnpackCenterControlAction(centerControlIndexList)
    getCollisionForce = GetCollisionForce()
    applyActionForce = ApplyActionForce(wolvesID, sheepsID,
                                        entitiesMovableList)
    applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList,
                                          entitiesSizeList, getCollisionForce,
                                          getPosFromAgentState)
    integrateState = IntegrateState(numEntities, entitiesMovableList, massList,
                                    entityMaxSpeedList, getVelFromAgentState,
                                    getPosFromAgentState)
    interpolateState = TransitMultiAgentChasing(numEntities, reshapeAction,
                                                applyActionForce,
                                                applyEnvironForce,
                                                integrateState)

    numFramesToInterpolate = 1

    def transit(state, action):
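        # apply the joint action on the first interpolated frame only; later frames advance with
        # a zero action (numFramesToInterpolate = 1 here, so this is a single physics step)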
        for frameIndex in range(numFramesToInterpolate):
            nextState = interpolateState(state, action)
            action = np.array([(0, 0)] * numAgents)
            state = nextState
        return nextState

    isTerminal = lambda state: False

    isCollision = IsCollision(getPosFromAgentState)
    collisonRewardWolf = 1
    punishForOutOfBound = PunishForOutOfBound()
    rewardWolf = RewardCentralControlPunishBond(
        wolvesID, sheepsID, entitiesSizeList, getPosFromAgentState,
        isCollision, punishForOutOfBound, collisonRewardWolf)
    collisonRewardSheep = -1
    rewardSheep = RewardCentralControlPunishBond(
        sheepsID, wolvesID, entitiesSizeList, getPosFromAgentState,
        isCollision, punishForOutOfBound, collisonRewardSheep)

    resetState = ResetMultiAgentChasing(numAgents, numBlocks)

    observeOneAgent = lambda agentID: Observe(agentID, wolvesID, sheepsID,
                                              blocksID, getPosFromAgentState,
                                              getVelFromAgentState)
    observe = lambda state: [
        observeOneAgent(agentID)(state) for agentID in range(numAgents)
    ]

    # policy
    actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                   (0, -10), (7, -7), (0, 0)]
    wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7), (0, 0)]

    preyPowerRatio = 0.5
    sheepActionSpace = list(map(tuple, np.array(actionSpace) * preyPowerRatio))

    predatorPowerRatio = 0.5
    wolfActionOneSpace = list(
        map(tuple,
            np.array(wolfActionSpace) * predatorPowerRatio))
    wolfActionTwoSpace = list(
        map(tuple,
            np.array(wolfActionSpace) * predatorPowerRatio))

    wolvesActionSpace = list(it.product(wolfActionOneSpace,
                                        wolfActionTwoSpace))

    actionSpaceList = [sheepActionSpace, wolvesActionSpace]

    # neural network init
    numStateSpace = 4 * numEntities
    numSheepActionSpace = len(sheepActionSpace)
    numWolvesActionSpace = len(wolvesActionSpace)

    regularizationFactor = 1e-4
    sharedWidths = [128]
    actionLayerWidths = [128]
    valueLayerWidths = [128]

    generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace,
                                       regularizationFactor)
    generateWolvesModel = GenerateModel(numStateSpace, numWolvesActionSpace,
                                        regularizationFactor)
    generateModelList = [generateSheepModel, generateWolvesModel]

    sheepDepth = 9
    wolfDepth = 9
    depthList = [sheepDepth, wolfDepth]
    resBlockSize = 2
    dropoutRate = 0.0
    initializationMethod = 'uniform'
    multiAgentNNmodel = [
        generateModel(sharedWidths * depth, actionLayerWidths,
                      valueLayerWidths, resBlockSize, initializationMethod,
                      dropoutRate)
        for depth, generateModel in zip(depthList, generateModelList)
    ]

    # replay buffer
    bufferSize = 20000
    saveToBuffer = SaveToBuffer(bufferSize)

    def getUniformSamplingProbabilities(buffer):
        return [(1 / len(buffer)) for _ in buffer]

    miniBatchSize = 512
    sampleBatchFromBuffer = SampleBatchFromBuffer(
        miniBatchSize, getUniformSamplingProbabilities)

    # pre-process the trajectory for replayBuffer
    rewardMultiAgents = [rewardSheep, rewardWolf]
    decay = 1
    accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay)

    addMultiAgentValuesToTrajectory = AddValuesToTrajectory(
        accumulateMultiAgentRewards)
    actionIndex = 1

    def getTerminalActionFromTrajectory(trajectory):
        return trajectory[-1][actionIndex]

    removeTerminalTupleFromTrajectory = RemoveTerminalTupleFromTrajectory(
        getTerminalActionFromTrajectory)

    # pre-process the trajectory for NNTraining
    sheepActionToOneHot = ActionToOneHot(sheepActionSpace)
    wolvesActionToOneHot = ActionToOneHot(wolvesActionSpace)
    actionToOneHotList = [sheepActionToOneHot, wolvesActionToOneHot]
    processTrajectoryForPolicyValueNets = [
        ProcessTrajectoryForPolicyValueNetMultiAgentReward(
            actionToOneHotList[agentId], agentId) for agentId in agentIds
    ]

    # function to train NN model
    terminalThreshold = 1e-6
    lossHistorySize = 10
    initActionCoeff = 1
    initValueCoeff = 1
    initCoeff = (initActionCoeff, initValueCoeff)
    afterActionCoeff = 1
    afterValueCoeff = 1
    afterCoeff = (afterActionCoeff, afterValueCoeff)

    terminalController = TrainTerminalController(lossHistorySize,
                                                 terminalThreshold)
    coefficientController = CoefficientCotroller(initCoeff, afterCoeff)

    reportInterval = 10000
    trainStepsIntervel = 1  # 10000

    trainReporter = TrainReporter(numTrainStepEachIteration, reportInterval)
    learningRateDecay = 1
    learningRateDecayStep = 1
    learningRate = 0.0001
    learningRateModifier = LearningRateModifier(learningRate,
                                                learningRateDecay,
                                                learningRateDecayStep)

    trainNN = Train(numTrainStepEachIteration, miniBatchSize, sampleData,
                    learningRateModifier, terminalController,
                    coefficientController, trainReporter)

    # load save dir

    trajectorySaveExtension = '.pickle'
    NNModelSaveExtension = ''
    trajectoriesSaveDirectory = os.path.join(
        dirName, '..', '..', 'data', 'iterTrain2wolves1sheepMADDPGEnv',
        'trajectories')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)

    NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data',
                                        'iterTrain2wolves1sheepMADDPGEnv',
                                        'NNModelRes')
    if not os.path.exists(NNModelSaveDirectory):
        os.makedirs(NNModelSaveDirectory)

    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)
    generateNNModelSavePath = GetSavePath(NNModelSaveDirectory,
                                          NNModelSaveExtension,
                                          fixedParameters)

    startTime = time.time()

    sheepDepth = 9
    wolfDepth = 9
    depthList = [sheepDepth, wolfDepth]
    resBlockSize = 2
    dropoutRate = 0.0
    initializationMethod = 'uniform'
    multiAgentNNmodel = [
        generateModel(sharedWidths * depth, actionLayerWidths,
                      valueLayerWidths, resBlockSize, initializationMethod,
                      dropoutRate)
        for depth, generateModel in zip(depthList, generateModelList)
    ]

    preprocessMultiAgentTrajectories = PreprocessTrajectoriesForBuffer(
        addMultiAgentValuesToTrajectory, removeTerminalTupleFromTrajectory)
    numTrajectoriesToStartTrain = 1024

    trainOneAgent = TrainOneAgent(numTrainStepEachIteration,
                                  numTrajectoriesToStartTrain,
                                  processTrajectoryForPolicyValueNets,
                                  sampleBatchFromBuffer, trainNN)

    # restorePretrainModel
    sheepPreTrainModelPath = os.path.join(
        dirName, '..', '..', 'data', 'MADDPG2wolves1sheep',
        'trainSheepWithPretrrainWolves', 'trainedResNNModels',
        'agentId=0_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000'
    )

    wolvesPreTrainModelPath = os.path.join(
        dirName, '..', '..', 'data', 'MADDPG2wolves1sheep',
        'trainWolvesTwoCenterControlAction', 'trainedResNNModels',
        'agentId=1_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000'
    )

    pretrainModelPathList = [sheepPreTrainModelPath, wolvesPreTrainModelPath]

    sheepId, wolvesId = [0, 1]
    trainableAgentIds = [sheepId, wolvesId]
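    # Both trainable agents (sheep = 0, wolves central control = 1) are initialized from the
    # pretrained checkpoints above and re-saved as iteration 0 of this training run.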
    for agentId in trainableAgentIds:

        restoredNNModel = restoreVariables(multiAgentNNmodel[agentId],
                                           pretrainModelPathList[agentId])
        multiAgentNNmodel[agentId] = restoredNNModel

        NNModelPathParameters = {
            'iterationIndex': 0,
            'agentId': agentId,
            'numTrajectoriesPerIteration': numTrajectoriesPerIteration,
            'numTrainStepEachIteration': numTrainStepEachIteration
        }
        NNModelSavePath = generateNNModelSavePath(NNModelPathParameters)
        saveVariables(multiAgentNNmodel[agentId], NNModelSavePath)

    fuzzySearchParameterNames = ['sampleIndex']
    loadTrajectoriesForParallel = LoadTrajectories(generateTrajectorySavePath,
                                                   loadFromPickle,
                                                   fuzzySearchParameterNames)
    loadTrajectoriesForTrainBreak = LoadTrajectories(
        generateTrajectorySavePath, loadFromPickle)

    # initRreplayBuffer
    replayBuffer = []
    trajectoryBeforeTrainIndex = 0
    trajectoryBeforeTrainPathParamters = {
        'iterationIndex': trajectoryBeforeTrainIndex
    }
    trajectoriesBeforeTrain = loadTrajectoriesForParallel(
        trajectoryBeforeTrainPathParamters)
    preProcessedTrajectoriesBeforeTrain = preprocessMultiAgentTrajectories(
        trajectoriesBeforeTrain)
    replayBuffer = saveToBuffer(replayBuffer,
                                preProcessedTrajectoriesBeforeTrain)

    # delete used model for disk space
    fixedParametersForDelete = {
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations,
        'killzoneRadius': killzoneRadius,
        'numTrajectoriesPerIteration': numTrajectoriesPerIteration,
        'numTrainStepEachIteration': numTrainStepEachIteration
    }
    toDeleteNNModelExtensionList = ['.meta', '.index', '.data-00000-of-00001']
    generatetoDeleteNNModelPathList = [
        GetSavePath(NNModelSaveDirectory, toDeleteNNModelExtension,
                    fixedParametersForDelete)
        for toDeleteNNModelExtension in toDeleteNNModelExtensionList
    ]

    # restore model
    restoredIteration = 0
    for agentId in trainableAgentIds:
        modelPathForRestore = generateNNModelSavePath({
            'iterationIndex':
            restoredIteration,
            'agentId':
            agentId,
            'numTrajectoriesPerIteration':
            numTrajectoriesPerIteration,
            'numTrainStepEachIteration':
            numTrainStepEachIteration
        })
        restoredNNModel = restoreVariables(multiAgentNNmodel[agentId],
                                           modelPathForRestore)
        multiAgentNNmodel[agentId] = restoredNNModel


    # restore buffer
    bufferTrajectoryPathParameters = {
        'numTrajectoriesPerIteration': numTrajectoriesPerIteration,
        'numTrainStepEachIteration': numTrainStepEachIteration
    }
    restoredIterationIndexRange = range(restoredIteration)
    restoredTrajectories = loadTrajectoriesForTrainBreak(
        parameters=bufferTrajectoryPathParameters,
        parametersWithSpecificValues={
            'iterationIndex': list(restoredIterationIndexRange)
        })
    preProcessedRestoredTrajectories = preprocessMultiAgentTrajectories(
        restoredTrajectories)
    print(len(preProcessedRestoredTrajectories))
    replayBuffer = saveToBuffer(replayBuffer, preProcessedRestoredTrajectories)

    modelMemorySize = 5
    modelSaveFrequency = 50
    deleteUsedModel = DeleteUsedModel(modelMemorySize, modelSaveFrequency,
                                      generatetoDeleteNNModelPathList)
    numIterations = 10000
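    # Main training loop: generate trajectories in parallel, add them to the replay buffer, train
    # each agent's network in turn, save the updated models, and delete stale checkpoints.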
    for iterationIndex in range(restoredIteration + 1, numIterations):
        print('iterationIndex: ', iterationIndex)

        numCpuToUseWhileTrain = int(16)
        numCmdList = min(numTrajectoriesPerIteration, numCpuToUseWhileTrain)
        sampleTrajectoryFileName = 'sampleMultiMCTSAgentCenterControlResNetTrajCondtion.py'

        generateTrajectoriesParallelWhileTrain = GenerateTrajectoriesParallel(
            sampleTrajectoryFileName, numTrajectoriesPerIteration, numCmdList)
        trajectoryPathParameters = {
            'iterationIndex': iterationIndex,
            'numTrajectoriesPerIteration': numTrajectoriesPerIteration,
            'numTrainStepEachIteration': numTrainStepEachIteration
        }
        cmdList = generateTrajectoriesParallelWhileTrain(
            trajectoryPathParameters)

        trajectories = loadTrajectoriesForParallel(trajectoryPathParameters)
        trajectorySavePath = generateTrajectorySavePath(
            trajectoryPathParameters)
        saveToPickle(trajectories, trajectorySavePath)

        preProcessedTrajectories = preprocessMultiAgentTrajectories(
            trajectories)
        updatedReplayBuffer = saveToBuffer(replayBuffer,
                                           preProcessedTrajectories)

        for agentId in trainableAgentIds:

            updatedAgentNNModel = trainOneAgent(agentId, multiAgentNNmodel,
                                                updatedReplayBuffer)

            NNModelPathParameters = {
                'iterationIndex': iterationIndex,
                'agentId': agentId,
                'numTrajectoriesPerIteration': numTrajectoriesPerIteration,
                'numTrainStepEachIteration': numTrainStepEachIteration
            }
            NNModelSavePath = generateNNModelSavePath(NNModelPathParameters)
            saveVariables(updatedAgentNNModel, NNModelSavePath)
            multiAgentNNmodel[agentId] = updatedAgentNNModel
            replayBuffer = updatedReplayBuffer

            deleteUsedModel(iterationIndex, agentId)

    endTime = time.time()
    print("Time taken for {} iterations: {} seconds".format(
        numIterations, (endTime - startTime)))
Example #4
def trainOneCondition(manipulatedVariables):
    depth = int(manipulatedVariables['depth'])
    # Get dataset for training
    dirName = os.path.dirname(__file__)
    dataSetDirectory = os.path.join(dirName, '..', '..', '..', '..', 'data',
                                    'NoPhysics2wolves1sheep',
                                    'trainWolvesTwoCenterControlAction88',
                                    'trajectories')

    if not os.path.exists(dataSetDirectory):
        os.makedirs(dataSetDirectory)

    dataSetExtension = '.pickle'
    dataSetMaxRunningSteps = 50
    dataSetNumSimulations = 250
    killzoneRadius = 150
    agentId = 1
    wolvesId = 1
    dataSetFixedParameters = {
        'agentId': agentId,
        'maxRunningSteps': dataSetMaxRunningSteps,
        'numSimulations': dataSetNumSimulations,
        'killzoneRadius': killzoneRadius
    }

    getDataSetSavePath = GetSavePath(dataSetDirectory, dataSetExtension,
                                     dataSetFixedParameters)
    print("DATASET LOADED!")

    numOfAgent = 3
    # accumulate rewards for trajectories
    decay = 1
    accumulateRewards = AccumulateRewards(decay)
    addValuesToTrajectory = AddValuesToTrajectory(accumulateRewards)

    # pre-process the trajectories
    actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                   (0, -10), (7, -7), (0, 0)]
    preyPowerRatio = 10
    sheepActionSpace = list(map(tuple, np.array(actionSpace) * preyPowerRatio))

    predatorPowerRatio = 8
    wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7)]
    wolfActionOneSpace = list(
        map(tuple,
            np.array(wolfActionSpace) * predatorPowerRatio))
    wolfActionTwoSpace = list(
        map(tuple,
            np.array(wolfActionSpace) * predatorPowerRatio))
    wolvesActionSpace = list(it.product(wolfActionOneSpace,
                                        wolfActionTwoSpace))

    numActionSpace = len(wolvesActionSpace)

    actionIndex = 1
    actionToOneHot = ActionToOneHot(wolvesActionSpace)
    getTerminalActionFromTrajectory = lambda trajectory: trajectory[-1][
        actionIndex]
    removeTerminalTupleFromTrajectory = RemoveTerminalTupleFromTrajectory(
        getTerminalActionFromTrajectory)
    processTrajectoryForNN = ProcessTrajectoryForPolicyValueNet(
        actionToOneHot, wolvesId)

    preProcessTrajectories = PreProcessTrajectories(
        addValuesToTrajectory, removeTerminalTupleFromTrajectory,
        processTrajectoryForNN)

    fuzzySearchParameterNames = ['sampleIndex']
    loadTrajectories = LoadTrajectories(getDataSetSavePath, loadFromPickle,
                                        fuzzySearchParameterNames)
    loadedTrajectories = loadTrajectories(parameters={})
    print(loadedTrajectories[0])

    filterState = lambda timeStep: (timeStep[0][:numOfAgent], timeStep[1],
                                    timeStep[2], timeStep[3])
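    # filterState truncates the state (element 0 of each time step) to the first numOfAgent rows
    # and passes the remaining elements of the time step through unchanged.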
    trajectories = [[filterState(timeStep) for timeStep in trajectory]
                    for trajectory in loadedTrajectories]
    print(len(trajectories))

    preProcessedTrajectories = np.concatenate(
        preProcessTrajectories(trajectories))

    trainData = [list(varBatch) for varBatch in zip(*preProcessedTrajectories)]
    valuedTrajectories = [addValuesToTrajectory(tra) for tra in trajectories]

    # neural network init and save path
    numStateSpace = 6
    regularizationFactor = 1e-4
    sharedWidths = [128]
    actionLayerWidths = [128]
    valueLayerWidths = [128]

    generateModel = GenerateModel(numStateSpace, numActionSpace,
                                  regularizationFactor)

    resBlockSize = 2
    dropoutRate = 0.0
    initializationMethod = 'uniform'
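    # Note: despite the name, this network is trained as the wolves' central-control model
    # (numActionSpace is the joint wolves action space and the data is processed with wolvesId).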
    sheepNNModel = generateModel(sharedWidths * depth, actionLayerWidths,
                                 valueLayerWidths, resBlockSize,
                                 initializationMethod, dropoutRate)

    initTimeStep = 0
    valueIndex = 3
    trainDataMeanAccumulatedReward = np.mean(
        [tra[initTimeStep][valueIndex] for tra in valuedTrajectories])
    print(trainDataMeanAccumulatedReward)

    # function to train NN model
    terminalThreshold = 1e-10
    lossHistorySize = 10
    initActionCoeff = 1
    initValueCoeff = 1
    initCoeff = (initActionCoeff, initValueCoeff)
    afterActionCoeff = 1
    afterValueCoeff = 1
    afterCoeff = (afterActionCoeff, afterValueCoeff)
    terminalController = lambda evalDict, numSteps: False
    coefficientController = CoefficientCotroller(initCoeff, afterCoeff)
    reportInterval = 10000
    trainStepsIntervel = 10000
    trainReporter = TrainReporter(trainStepsIntervel, reportInterval)
    learningRateDecay = 1
    learningRateDecayStep = 1
    learningRateModifier = lambda learningRate: LearningRateModifier(
        learningRate, learningRateDecay, learningRateDecayStep)
    getTrainNN = lambda batchSize, learningRate: Train(
        trainStepsIntervel, batchSize, sampleData,
        learningRateModifier(learningRate), terminalController,
        coefficientController, trainReporter)

    # get path to save trained models
    NNModelFixedParameters = {
        'agentId': agentId,
        'maxRunningSteps': dataSetMaxRunningSteps,
        'numSimulations': dataSetNumSimulations
    }

    NNModelSaveDirectory = os.path.join(dirName, '..', '..', '..', '..',
                                        'data', 'NoPhysics2wolves1sheep',
                                        'trainWolvesTwoCenterControlAction88',
                                        'trainedResNNModels')
    if not os.path.exists(NNModelSaveDirectory):
        os.makedirs(NNModelSaveDirectory)
    NNModelSaveExtension = ''
    getNNModelSavePath = GetSavePath(NNModelSaveDirectory,
                                     NNModelSaveExtension,
                                     NNModelFixedParameters)

    # function to train models
    numOfTrainStepsIntervel = 6
    trainIntervelIndexes = list(range(numOfTrainStepsIntervel))
    trainModelForConditions = TrainModelForConditions(trainIntervelIndexes,
                                                      trainStepsIntervel,
                                                      trainData, sheepNNModel,
                                                      getTrainNN,
                                                      getNNModelSavePath)
    trainModelForConditions(manipulatedVariables)
Example #5
def main():
    parametersForTrajectoryPath = json.loads(sys.argv[1])
    startSampleIndex = int(sys.argv[2])
    endSampleIndex = int(sys.argv[3])

    # parametersForTrajectoryPath['sampleOneStepPerTraj']=1 #0
    # parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex)

    trainSteps = int(parametersForTrajectoryPath['trainSteps'])
    depth = int(parametersForTrajectoryPath['depth'])
    dataSize = int(parametersForTrajectoryPath['dataSize'])

    # parametersForTrajectoryPath = {}
    # depth = 5
    # dataSize = 5000
    # trainSteps = 50000
    # startSampleIndex = 0
    # endSampleIndex = 100

    killzoneRadius = 25
    numSimulations = 200
    maxRunningSteps = 100

    fixedParameters = {
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations,
        'killzoneRadius': killzoneRadius
    }
    trajectorySaveExtension = '.pickle'
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(
        dirName, '..', '..', '..', 'data', 'evaluateSupervisedLearning',
        'multiMCTSAgentResNetNoPhysicsCenterControl',
        'evaluateCenterControlTrajByCondition')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)
    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)

    trajectorySavePath = generateTrajectorySavePath(
        parametersForTrajectoryPath)
    if not os.path.isfile(trajectorySavePath):

        numOfAgent = 3
        sheepId = 0
        wolvesId = 1

        wolfOneId = 1
        wolfTwoId = 2
        xPosIndex = [0, 1]
        xBoundary = [0, 600]
        yBoundary = [0, 600]
        reset = Reset(xBoundary, yBoundary, numOfAgent)

        getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
        getWolfOneXPos = GetAgentPosFromState(wolfOneId, xPosIndex)
        getWolfTwoXPos = GetAgentPosFromState(wolfTwoId, xPosIndex)

        isTerminalOne = IsTerminal(getWolfOneXPos, getSheepXPos,
                                   killzoneRadius)
        isTerminalTwo = IsTerminal(getWolfTwoXPos, getSheepXPos,
                                   killzoneRadius)
        isTerminal = lambda state: isTerminalOne(state) or isTerminalTwo(state)
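        # the episode terminates once either wolf's IsTerminal check fires, i.e. (reading the
        # constructor arguments) when that wolf comes within killzoneRadius of the sheep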

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(
            xBoundary, yBoundary)
        transit = TransiteForNoPhysics(stayInBoundaryByReflectVelocity)

        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7), (0, 0)]
        preyPowerRatio = 3
        sheepActionSpace = list(
            map(tuple,
                np.array(actionSpace) * preyPowerRatio))

        predatorPowerRatio = 2
        wolfActionOneSpace = list(
            map(tuple,
                np.array(actionSpace) * predatorPowerRatio))
        wolfActionTwoSpace = list(
            map(tuple,
                np.array(actionSpace) * predatorPowerRatio))
        wolvesActionSpace = list(
            it.product(wolfActionOneSpace, wolfActionTwoSpace))

        # neural network init
        numStateSpace = 6
        numSheepActionSpace = len(sheepActionSpace)
        numWolvesActionSpace = len(wolvesActionSpace)

        regularizationFactor = 1e-4
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace,
                                           regularizationFactor)

        # load save dir
        NNModelSaveExtension = ''
        NNModelSaveDirectory = os.path.join(
            dirName, '..', '..', '..', 'data',
            'evaluateEscapeMultiChasingNoPhysics',
            'trainedResNNModelsMultiStillAction')
        NNModelFixedParameters = {
            'agentId': 0,
            'maxRunningSteps': 150,
            'numSimulations': 200,
            'miniBatchSize': 256,
            'learningRate': 0.0001
        }
        getNNModelSavePath = GetSavePath(NNModelSaveDirectory,
                                         NNModelSaveExtension,
                                         NNModelFixedParameters)

        if not os.path.exists(NNModelSaveDirectory):
            os.makedirs(NNModelSaveDirectory)

        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initSheepNNModel = generateSheepModel(sharedWidths * 5,
                                              actionLayerWidths,
                                              valueLayerWidths, resBlockSize,
                                              initializationMethod,
                                              dropoutRate)

        sheepTrainedModelPath = getNNModelSavePath({
            'trainSteps': 50000,
            'depth': 5
        })
        sheepTrainedModel = restoreVariables(initSheepNNModel,
                                             sheepTrainedModelPath)
        sheepPolicy = ApproximatePolicy(sheepTrainedModel, sheepActionSpace)

        generateWolvesModel = GenerateModel(numStateSpace,
                                            numWolvesActionSpace,
                                            regularizationFactor)
        initWolvesNNModel = generateWolvesModel(sharedWidths * depth,
                                                actionLayerWidths,
                                                valueLayerWidths, resBlockSize,
                                                initializationMethod,
                                                dropoutRate)
        NNModelSaveDirectory = os.path.join(
            dirName, '..', '..', '..', 'data', 'evaluateSupervisedLearning',
            'multiMCTSAgentResNetNoPhysicsCenterControl', 'trainedResNNModels')
        wolfId = 1
        NNModelFixedParametersWolves = {
            'agentId': wolfId,
            'maxRunningSteps': maxRunningSteps,
            'numSimulations': numSimulations,
            'miniBatchSize': 256,
            'learningRate': 0.0001,
        }

        getNNModelSavePath = GetSavePath(NNModelSaveDirectory,
                                         NNModelSaveExtension,
                                         NNModelFixedParametersWolves)
        wolvesTrainedModelPath = getNNModelSavePath({
            'trainSteps': trainSteps,
            'depth': depth,
            'dataSize': dataSize
        })
        wolvesTrainedModel = restoreVariables(initWolvesNNModel,
                                              wolvesTrainedModelPath)
        wolfPolicy = ApproximatePolicy(wolvesTrainedModel, wolvesActionSpace)

        from exec.evaluateNoPhysicsEnvWithRender import Render
        import pygame as pg
        from pygame.color import THECOLORS
        screenColor = THECOLORS['black']
        circleColorList = [
            THECOLORS['green'], THECOLORS['red'], THECOLORS['orange']
        ]
        circleSize = 10

        saveImage = False
        saveImageDir = os.path.join(dirName, '..', '..', '..', 'data',
                                    'demoImg')
        if not os.path.exists(saveImageDir):
            os.makedirs(saveImageDir)
        renderOn = False
        render = None
        if renderOn:
            screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
            render = Render(numOfAgent, xPosIndex, screen, screenColor,
                            circleColorList, circleSize, saveImage,
                            saveImageDir)
        chooseActionList = [chooseGreedyAction, chooseGreedyAction]
        sampleTrajectory = SampleTrajectoryWithRender(maxRunningSteps, transit,
                                                      isTerminal, reset,
                                                      chooseActionList, render,
                                                      renderOn)
        # All agents' policies
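        # the sheep acts from its pretrained escape network; the two wolves act jointly through
        # the center-control network under evaluation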
        policy = lambda state: [sheepPolicy(state), wolfPolicy(state)]
        trajectories = [
            sampleTrajectory(policy)
            for sampleIndex in range(startSampleIndex, endSampleIndex)
        ]

        saveToPickle(trajectories, trajectorySavePath)
Example #6
def main():
    DEBUG = 0
    renderOn = 0
    if DEBUG:
        parametersForTrajectoryPath = {}
        startSampleIndex = 5
        endSampleIndex = 7
        agentId = 1
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)
    else:
        parametersForTrajectoryPath = json.loads(sys.argv[1])
        startSampleIndex = int(sys.argv[2])
        endSampleIndex = int(sys.argv[3])
        agentId = int(parametersForTrajectoryPath['agentId'])
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)
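    # Expected invocation outside DEBUG mode (a sketch of the usual launch convention
    # for these sampling scripts; the JSON must at least contain 'agentId'):
    #   python thisScript.py '{"agentId": 1}' <startSampleIndex> <endSampleIndex>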

    # check file exists or not
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(
        dirName, '..', '..', '..', '..', 'data', 'NoPhysics2wolves1sheep',
        'trainWolvesTwoCenterControlAction88', 'trajectories')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)

    trajectorySaveExtension = '.pickle'
    maxRunningSteps = 50
    numSimulations = 250
    killzoneRadius = 150
    fixedParameters = {
        'agentId': agentId,
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations,
        'killzoneRadius': killzoneRadius
    }

    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)

    trajectorySavePath = generateTrajectorySavePath(
        parametersForTrajectoryPath)

    if not os.path.isfile(trajectorySavePath):
        numOfAgent = 3
        xBoundary = [0, 600]
        yBoundary = [0, 600]
        resetState = Reset(xBoundary, yBoundary, numOfAgent)

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(
            xBoundary, yBoundary)
        interpolateOneFrame = InterpolateOneFrame(
            stayInBoundaryByReflectVelocity)

        chooseInterpolatedNextState = lambda interpolatedStates: interpolatedStates[
            -1]

        sheepId = 0
        wolvesId = 1
        centerControlIndexList = [wolvesId]
        unpackCenterControlAction = UnpackCenterControlAction(
            centerControlIndexList)

        numFramesToInterpolate = 0
        transit = TransitWithInterpolation(numFramesToInterpolate,
                                           interpolateOneFrame,
                                           chooseInterpolatedNextState,
                                           unpackCenterControlAction)
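        # numFramesToInterpolate = 0 here, so TransitWithInterpolation presumably applies
        # a single full step with no intermediate frames; a finer 3-frame interpolation is
        # used separately below when computing rewards.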

        # NNGuidedMCTS init
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7), (0, 0)]
        wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0),
                           (-7, -7), (0, -10), (7, -7)]

        preyPowerRatio = 10
        sheepActionSpace = list(
            map(tuple,
                np.array(actionSpace) * preyPowerRatio))

        predatorPowerRatio = 8
        wolfActionOneSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))
        wolfActionTwoSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))

        wolvesActionSpace = list(
            product(wolfActionOneSpace, wolfActionTwoSpace))
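        # wolfActionSpace has 8 directions (no stay action), so the centrally controlled
        # joint space holds 8 * 8 = 64 actions, each a pair of scaled moves, e.g. the
        # first joint action pairs (80, 0) with (80, 0) after predatorPowerRatio = 8.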

        actionSpaceList = [sheepActionSpace, wolvesActionSpace]

        # neural network init
        numStateSpace = 2 * numOfAgent
        numSheepActionSpace = len(sheepActionSpace)
        numWolvesActionSpace = len(wolvesActionSpace)

        regularizationFactor = 1e-4
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace,
                                           regularizationFactor)

        # load save dir
        NNModelSaveExtension = ''
        sheepNNModelSaveDirectory = os.path.join(
            dirName, '..', '..', '..', '..', 'data', 'NoPhysics2wolves1sheep',
            'trainSheepWithTwoHeatSeekingWolves', 'trainedResNNModels')
        sheepNNModelFixedParameters = {
            'agentId': 0,
            'maxRunningSteps': 50,
            'numSimulations': 110,
            'miniBatchSize': 256,
            'learningRate': 0.0001,
        }
        getSheepNNModelSavePath = GetSavePath(sheepNNModelSaveDirectory,
                                              NNModelSaveExtension,
                                              sheepNNModelFixedParameters)

        depth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initSheepNNModel = generateSheepModel(sharedWidths * depth,
                                              actionLayerWidths,
                                              valueLayerWidths, resBlockSize,
                                              initializationMethod,
                                              dropoutRate)

        sheepTrainedModelPath = getSheepNNModelSavePath({
            'trainSteps': 50000,
            'depth': depth
        })
        sheepTrainedModel = restoreVariables(initSheepNNModel,
                                             sheepTrainedModelPath)
        sheepPolicy = ApproximatePolicy(sheepTrainedModel, sheepActionSpace)

        wolfOneId = 1
        wolfTwoId = 2
        xPosIndex = [0, 1]
        getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
        getWolfOneXPos = GetAgentPosFromState(wolfOneId, xPosIndex)
        speed = 120
        #sheepPolicy = HeatSeekingContinuesDeterministicPolicy(getWolfOneXPos, getSheepXPos, speed)

        # MCTS
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        # prior
        getActionPrior = lambda state: {
            action: 1 / len(wolvesActionSpace)
            for action in wolvesActionSpace
        }

        # load chase nn policy
        chooseActionInMCTS = sampleFromDistribution

        def wolvesTransit(state, action):
            return transit(state,
                           [chooseActionInMCTS(sheepPolicy(state)), action])
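        # Inside the wolves' MCTS the sheep is folded into the environment: its action is
        # sampled from the pretrained sheep policy and combined with the candidate joint
        # wolf action before calling the real transition.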

        # reward function
        wolfOneId = 1
        wolfTwoId = 2
        xPosIndex = [0, 1]
        getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
        getWolfOneXPos = GetAgentPosFromState(wolfOneId, xPosIndex)
        getWolfTwoXPos = GetAgentPosFromState(wolfTwoId, xPosIndex)
        isCollidedOne = IsTerminal(getWolfOneXPos, getSheepXPos,
                                   killzoneRadius)
        isCollidedTwo = IsTerminal(getWolfTwoXPos, getSheepXPos,
                                   killzoneRadius)

        calCollisionTimes = lambda state: np.sum([
            isCollidedOne(state), isCollidedTwo(state)
        ])  # collisionTimeByAddingCollisionInAllWolves
        #calCollisionTimes = lambda state: np.max([isCollidedOne(state), isCollidedTwo(state)]) # collisionTimeByBooleanCollisionForAnyWolf

        calTerminationSignals = calCollisionTimes
        chooseInterpolatedStateByEarlyTermination = ChooseInterpolatedStateByEarlyTermination(
            calTerminationSignals)

        numFramesToInterpolateInReward = 3
        interpolateStateInReward = TransitWithInterpolation(
            numFramesToInterpolateInReward, interpolateOneFrame,
            chooseInterpolatedStateByEarlyTermination,
            unpackCenterControlAction)

        aliveBonus = -1 / maxRunningSteps * 10
        deathPenalty = 1
        rewardFunction = RewardFunctionCompeteWithStateInterpolation(
            aliveBonus, deathPenalty, calCollisionTimes,
            interpolateStateInReward)

        # initialize children; expand
        initializeChildren = InitializeChildren(wolvesActionSpace,
                                                wolvesTransit, getActionPrior)
        isTerminal = lambda state: False
        expand = Expand(isTerminal, initializeChildren)

        # random rollout policy
        def rolloutPolicy(state):
            return [
                sampleFromDistribution(sheepPolicy(state)),
                wolvesActionSpace[np.random.choice(
                    range(numWolvesActionSpace))]
            ]

        # rollout
        #rolloutHeuristicWeight = 0
        #minDistance = 400
        #rolloutHeuristic1 = HeuristicDistanceToTarget(
        #    rolloutHeuristicWeight, getWolfOneXPos, getSheepXPos, minDistance)
        #rolloutHeuristic2 = HeuristicDistanceToTarget(
        #    rolloutHeuristicWeight, getWolfTwoXPos, getSheepXPos, minDistance)

        #rolloutHeuristic = lambda state: (rolloutHeuristic1(state) + rolloutHeuristic2(state)) / 2

        rolloutHeuristic = lambda state: 0
        maxRolloutSteps = 15
        rollout = RollOut(rolloutPolicy, maxRolloutSteps, transit,
                          rewardFunction, isTerminal, rolloutHeuristic)

        wolfPolicy = MCTS(numSimulations, selectChild, expand, rollout, backup,
                          establishSoftmaxActionDist)
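        # Wolf planner: MCTS with a uniform action prior, 250 simulations per decision,
        # 15-step rollouts (sheep follows its NN policy, wolves act randomly), a zero
        # heuristic at the rollout cutoff, and an action distribution presumably built
        # as a softmax over the root statistics.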

        # All agents' policies
        policy = lambda state: [sheepPolicy(state), wolfPolicy(state)]
        chooseActionList = [maxFromDistribution, maxFromDistribution]

        def sampleAction(state):
            actionDists = [sheepPolicy(state), wolfPolicy(state)]
            action = [
                chooseAction(actionDist) for actionDist, chooseAction in zip(
                    actionDists, chooseActionList)
            ]
            return action

        render = None
        if renderOn:
            import pygame as pg
            from pygame.color import THECOLORS
            screenColor = THECOLORS['black']
            circleColorList = [
                THECOLORS['green'], THECOLORS['yellow'], THECOLORS['red']
            ]
            circleSize = 10
            saveImage = False
            saveImageDir = os.path.join(dirName, '..', '..', '..', '..',
                                        'data', 'demoImg')
            if not os.path.exists(saveImageDir):
                os.makedirs(saveImageDir)
            screen = pg.display.set_mode([max(xBoundary), max(yBoundary)])
            render = Render(numOfAgent, xPosIndex, screen, screenColor,
                            circleColorList, circleSize, saveImage,
                            saveImageDir)

        forwardOneStep = ForwardOneStep(transit, rewardFunction)
        sampleTrajectory = SampleTrajectoryWithRender(maxRunningSteps,
                                                      isTerminal, resetState,
                                                      forwardOneStep, render,
                                                      renderOn)

        trajectories = [
            sampleTrajectory(sampleAction)
            for sampleIndex in range(startSampleIndex, endSampleIndex)
        ]
        print([len(traj) for traj in trajectories])
        saveToPickle(trajectories, trajectorySavePath)
Example No. 7
def main():
    DEBUG = 1
    renderOn = 1

    if DEBUG:
        parametersForTrajectoryPath = {}
        startSampleIndex = 1
        endSampleIndex = 2
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex)
        iterationIndex = 2
        numTrainStepEachIteration = 1
        numTrajectoriesPerIteration = 1

    else:
        parametersForTrajectoryPath = json.loads(sys.argv[1])
        startSampleIndex = int(sys.argv[2])
        endSampleIndex = int(sys.argv[3])
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex)
        iterationIndex = int(parametersForTrajectoryPath['iterationIndex'])
        numTrainStepEachIteration = int(parametersForTrajectoryPath['numTrainStepEachIteration'])
        numTrajectoriesPerIteration = int(parametersForTrajectoryPath['numTrajectoriesPerIteration'])

    # check file exists or not
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(dirName, '..', '..',  'data', 'iterTrain2wolves1sheepMADDPGEnv', 'trajectories')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)

    trajectorySaveExtension = '.pickle'

    maxRunningSteps = 50
    numSimulations = 250
    killzoneRadius = 50
    numTree = 2
    fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius}
    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters)
    trajectorySavePath = generateTrajectorySavePath(parametersForTrajectoryPath)

    if not os.path.isfile(trajectorySavePath):
        # env MDP
        sheepsID = [0]
        wolvesID = [1, 2]
        blocksID = []

        numSheeps = len(sheepsID)
        numWolves = len(wolvesID)
        numBlocks = len(blocksID)

        numAgents = numWolves + numSheeps
        numEntities = numAgents + numBlocks

        sheepSize = 0.05
        wolfSize = 0.075
        blockSize = 0.2

        sheepMaxSpeed = 1.3 * 1
        wolfMaxSpeed = 1.0 * 1
        blockMaxSpeed = None

        entitiesSizeList = [sheepSize] * numSheeps + [wolfSize] * numWolves + [blockSize] * numBlocks
        entityMaxSpeedList = [sheepMaxSpeed] * numSheeps + [wolfMaxSpeed] * numWolves + [blockMaxSpeed] * numBlocks
        entitiesMovableList = [True] * numAgents + [False] * numBlocks
        massList = [1.0] * numEntities

        centralControlId = 1
        centerControlIndexList = [centralControlId]
        reshapeAction = UnpackCenterControlAction(centerControlIndexList)
        getCollisionForce = GetCollisionForce()
        applyActionForce = ApplyActionForce(wolvesID, sheepsID, entitiesMovableList)
        applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList, entitiesSizeList,
                                              getCollisionForce, getPosFromAgentState)
        integrateState = IntegrateState(numEntities, entitiesMovableList, massList,
                                        entityMaxSpeedList, getVelFromAgentState, getPosFromAgentState)
        interpolateState = TransitMultiAgentChasing(numEntities, reshapeAction, applyActionForce, applyEnvironForce, integrateState)

        numFramesToInterpolate = 1

        def transit(state, action):
            for frameIndex in range(numFramesToInterpolate):
                nextState = interpolateState(state, action)
                action = np.array([(0, 0)] * numAgents)
                state = nextState
            return nextState
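        # With numFramesToInterpolate = 1 the loop body runs exactly once; the action is
        # zeroed after the first frame, so any extra frames would only let entities drift
        # under the environment forces.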

        isTerminal = lambda state: False

        isCollision = IsCollision(getPosFromAgentState)
        collisonRewardWolf = 1
        punishForOutOfBound = PunishForOutOfBound()
        rewardWolf = RewardCentralControlPunishBond(wolvesID, sheepsID, entitiesSizeList, getPosFromAgentState, isCollision, punishForOutOfBound, collisonRewardWolf)
        collisonRewardSheep = -1
        rewardSheep = RewardCentralControlPunishBond(sheepsID, wolvesID, entitiesSizeList, getPosFromAgentState, isCollision, punishForOutOfBound, collisonRewardSheep)
        terminalRewardList = [collisonRewardSheep, collisonRewardWolf]
        rewardMultiAgents = [rewardSheep, rewardWolf]

        resetState = ResetMultiAgentChasing(numAgents, numBlocks)

        observeOneAgent = lambda agentID: Observe(agentID, wolvesID, sheepsID, blocksID, getPosFromAgentState, getVelFromAgentState)
        observe = lambda state: [observeOneAgent(agentID)(state) for agentID in range(numAgents)]

        # policy
        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
        wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]

        preyPowerRatio = 0.5
        sheepActionSpace = list(map(tuple, np.array(actionSpace) * preyPowerRatio))

        predatorPowerRatio = 0.5
        wolfActionOneSpace = list(map(tuple, np.array(wolfActionSpace) * predatorPowerRatio))
        wolfActionTwoSpace = list(map(tuple, np.array(wolfActionSpace) * predatorPowerRatio))

        wolvesActionSpace = list(product(wolfActionOneSpace, wolfActionTwoSpace))

        actionSpaceList = [sheepActionSpace, wolvesActionSpace]

        # neural network init
        numStateSpace = 4 * numEntities
        numSheepActionSpace = len(sheepActionSpace)
        numWolvesActionSpace = len(wolvesActionSpace)

        regularizationFactor = 1e-4
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace, regularizationFactor)
        generateWolvesModel = GenerateModel(numStateSpace, numWolvesActionSpace, regularizationFactor)
        generateModelList = [generateSheepModel, generateWolvesModel]

        sheepDepth = 9
        wolfDepth = 9
        depthList = [sheepDepth, wolfDepth]
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        sheepId, wolvesId = [0, 1]
        trainableAgentIds = [sheepId, wolvesId]

        multiAgentNNmodel = [generateModel(sharedWidths * depth, actionLayerWidths, valueLayerWidths, resBlockSize, initializationMethod, dropoutRate) for depth, generateModel in zip(depthList, generateModelList)]

        otherAgentApproximatePolicy = [lambda NNmodel, : ApproximatePolicy(NNmodel, sheepActionSpace), lambda NNmodel, : ApproximatePolicy(NNmodel, wolvesActionSpace)]
        # NNGuidedMCTS init
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        getApproximatePolicy = [lambda NNmodel, : ApproximatePolicy(NNmodel, sheepActionSpace), lambda NNmodel, : ApproximatePolicy(NNmodel, wolvesActionSpace)]
        getApproximateValue = [lambda NNmodel: ApproximateValue(NNmodel), lambda NNmodel: ApproximateValue(NNmodel)]

        def getStateFromNode(node): return list(node.id.values())[0]

        chooseActionInMCTS = sampleFromDistribution

        composeMultiAgentTransitInSingleAgentMCTS = ComposeMultiAgentTransitInSingleAgentMCTS(chooseActionInMCTS)
        composeSingleAgentGuidedMCTS = ComposeSingleAgentGuidedMCTS(numTree, numSimulations, actionSpaceList, terminalRewardList, selectChild, isTerminal, transit, getStateFromNode, getApproximatePolicy, getApproximateValue, composeMultiAgentTransitInSingleAgentMCTS)
        prepareMultiAgentPolicy = PrepareMultiAgentPolicy(composeSingleAgentGuidedMCTS, otherAgentApproximatePolicy, trainableAgentIds)
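        # Iterative self-play setup: for each trainable agent, ComposeSingleAgentGuidedMCTS
        # presumably builds an NN-guided search (numTree = 2 trees, numSimulations = 250)
        # for that agent while the other agent acts through its current approximate NN
        # policy (otherAgentApproximatePolicy).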

        # load model
        NNModelSaveExtension = ''
        NNModelSaveDirectory = os.path.join(dirName, '..', '..',  'data', 'iterTrain2wolves1sheepMADDPGEnv', 'NNModelRes')
        if not os.path.exists(NNModelSaveDirectory):
            os.makedirs(NNModelSaveDirectory)

        generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters)

        for agentId in trainableAgentIds:
            modelPath = generateNNModelSavePath({'iterationIndex': iterationIndex - 1, 'agentId': agentId, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration})
            restoredNNModel = restoreVariables(multiAgentNNmodel[agentId], modelPath)
            multiAgentNNmodel[agentId] = restoredNNModel
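        # Models are restored from the previous iteration (iterationIndex - 1), so
        # checkpoints for that iteration must already exist on disk before this sampling
        # job runs.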

        multiAgentPolicy = prepareMultiAgentPolicy(multiAgentNNmodel)
        chooseActionList = [maxFromDistribution, maxFromDistribution]

        def sampleAction(state):
            actionDists = multiAgentPolicy(state)
            action = [chooseAction(actionDist) for actionDist, chooseAction in zip(actionDists, chooseActionList)]
            return action

        render = lambda state: None
        forwardOneStep = ForwardMultiAgentsOneStep(transit, rewardMultiAgents)
        sampleTrajectory = SampleTrajectoryWithRender(maxRunningSteps, isTerminal, resetState, forwardOneStep, render, renderOn)

        trajectories = [sampleTrajectory(sampleAction) for sampleIndex in range(startSampleIndex, endSampleIndex)]
        print([len(traj) for traj in trajectories])
        saveToPickle(trajectories, trajectorySavePath)
Example No. 8
def main():
    startTime = time.time()

    DEBUG = 1
    renderOn = 1
    if DEBUG:
        parametersForTrajectoryPath = {}
        startSampleIndex = 5
        endSampleIndex = 8
        agentId = 1
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)
    else:
        parametersForTrajectoryPath = json.loads(sys.argv[1])
        startSampleIndex = int(sys.argv[2])
        endSampleIndex = int(sys.argv[3])
        agentId = int(parametersForTrajectoryPath['agentId'])
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)

    # check file exists or not
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(
        dirName, '..', '..', '..', '..', 'data', 'MADDPG2wolves1sheep',
        'trainWolvesTwoCenterControlAction', 'trajectories')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)

    trajectorySaveExtension = '.pickle'
    maxRunningSteps = 50
    numSimulations = 250
    fixedParameters = {
        'agentId': agentId,
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations
    }

    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)

    trajectorySavePath = generateTrajectorySavePath(
        parametersForTrajectoryPath)

    if not os.path.isfile(trajectorySavePath):

        # env MDP
        sheepsID = [0]
        wolvesID = [1, 2]
        blocksID = []

        numSheeps = len(sheepsID)
        numWolves = len(wolvesID)
        numBlocks = len(blocksID)

        numAgents = numWolves + numSheeps
        numEntities = numAgents + numBlocks

        sheepSize = 0.05
        wolfSize = 0.075
        blockSize = 0.2

        sheepMaxSpeed = 1.3 * 1
        wolfMaxSpeed = 1.0 * 1
        blockMaxSpeed = None

        entitiesSizeList = [sheepSize] * numSheeps + [wolfSize] * numWolves + [
            blockSize
        ] * numBlocks
        entityMaxSpeedList = [sheepMaxSpeed] * numSheeps + [
            wolfMaxSpeed
        ] * numWolves + [blockMaxSpeed] * numBlocks
        entitiesMovableList = [True] * numAgents + [False] * numBlocks
        massList = [1.0] * numEntities

        centralControlId = 1
        centerControlIndexList = [centralControlId]
        reshapeAction = UnpackCenterControlAction(centerControlIndexList)
        getCollisionForce = GetCollisionForce()
        applyActionForce = ApplyActionForce(wolvesID, sheepsID,
                                            entitiesMovableList)
        applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList,
                                              entitiesSizeList,
                                              getCollisionForce,
                                              getPosFromAgentState)
        integrateState = IntegrateState(numEntities, entitiesMovableList,
                                        massList, entityMaxSpeedList,
                                        getVelFromAgentState,
                                        getPosFromAgentState)
        interpolateState = TransitMultiAgentChasing(numEntities, reshapeAction,
                                                    applyActionForce,
                                                    applyEnvironForce,
                                                    integrateState)

        numFramesToInterpolate = 1

        def transit(state, action):
            for frameIndex in range(numFramesToInterpolate):
                nextState = interpolateState(state, action)
                action = np.array([(0, 0)] * numAgents)
                state = nextState
            return nextState

        isTerminal = lambda state: False

        isCollision = IsCollision(getPosFromAgentState)
        collisonRewardWolf = 1
        punishForOutOfBound = PunishForOutOfBound()
        rewardWolf = RewardCentralControlPunishBond(
            wolvesID, sheepsID, entitiesSizeList, getPosFromAgentState,
            isCollision, punishForOutOfBound, collisonRewardWolf)
        collisonRewardSheep = -1
        rewardSheep = RewardCentralControlPunishBond(
            sheepsID, wolvesID, entitiesSizeList, getPosFromAgentState,
            isCollision, punishForOutOfBound, collisonRewardSheep)

        resetState = ResetMultiAgentChasing(numAgents, numBlocks)

        observeOneAgent = lambda agentID: Observe(
            agentID, wolvesID, sheepsID, blocksID, getPosFromAgentState,
            getVelFromAgentState)
        observe = lambda state: [
            observeOneAgent(agentID)(state) for agentID in range(numAgents)
        ]

        # policy
        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7), (0, 0)]
        wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0),
                           (-7, -7), (0, -10), (7, -7), (0, 0)]

        preyPowerRatio = 0.5
        sheepActionSpace = list(
            map(tuple,
                np.array(actionSpace) * preyPowerRatio))

        predatorPowerRatio = 0.5
        wolfActionOneSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))
        wolfActionTwoSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))

        wolvesActionSpace = list(
            product(wolfActionOneSpace, wolfActionTwoSpace))

        actionSpaceList = [sheepActionSpace, wolvesActionSpace]

        # neural network init
        numStateSpace = 4 * numEntities
        numSheepActionSpace = len(sheepActionSpace)
        numWolvesActionSpace = len(wolvesActionSpace)

        regularizationFactor = 1e-4
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace,
                                           regularizationFactor)

        sheepPolicy = lambda state: {(0, 0): 1}
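        # The sheep is frozen here: it always takes action (0, 0) with probability 1, so
        # the wolves plan against a stationary target; the sheep network generated above
        # is never restored or used in this snippet.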

        # MCTS
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        # prior
        getActionPrior = lambda state: {
            action: 1 / len(wolvesActionSpace)
            for action in wolvesActionSpace
        }
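        # Uniform prior over joint wolf actions: wolfActionSpace has 9 entries (including
        # the stay action), so each joint action, e.g. ((5.0, 0.0), (5.0, 0.0)) after
        # scaling by predatorPowerRatio = 0.5, gets prior 1 / (9 * 9) = 1 / 81.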

        # load chase nn policy
        chooseActionInMCTS = sampleFromDistribution

        def wolvesTransit(state, action):
            return transit(state,
                           [chooseActionInMCTS(sheepPolicy(state)), action])

        # initialize children; expand
        initializeChildren = InitializeChildren(wolvesActionSpace,
                                                wolvesTransit, getActionPrior)
        isTerminal = lambda state: False
        expand = Expand(isTerminal, initializeChildren)

        # random rollout policy
        def rolloutPolicy(state):
            return [
                sampleFromDistribution(sheepPolicy(state)),
                wolvesActionSpace[np.random.choice(
                    range(numWolvesActionSpace))]
            ]

        rolloutHeuristic = lambda state: 0
        maxRolloutSteps = 15
        rollout = RollOut(rolloutPolicy, maxRolloutSteps, transit, rewardWolf,
                          isTerminal, rolloutHeuristic)

        wolfPolicy = MCTS(numSimulations, selectChild, expand, rollout, backup,
                          establishSoftmaxActionDist)

        # All agents' policies
        policy = lambda state: [sheepPolicy(state), wolfPolicy(state)]
        chooseActionList = [maxFromDistribution, maxFromDistribution]

        def sampleAction(state):
            actionDists = [sheepPolicy(state), wolfPolicy(state)]
            action = [
                chooseAction(actionDist) for actionDist, chooseAction in zip(
                    actionDists, chooseActionList)
            ]
            return action

        render = None
        forwardOneStep = ForwardOneStep(transit, rewardWolf)
        sampleTrajectory = SampleTrajectoryWithRender(maxRunningSteps,
                                                      isTerminal, resetState,
                                                      forwardOneStep, render,
                                                      renderOn)

        trajectories = [
            sampleTrajectory(sampleAction)
            for sampleIndex in range(startSampleIndex, endSampleIndex)
        ]
        print([len(traj) for traj in trajectories])
        saveToPickle(trajectories, trajectorySavePath)

    endTime = time.time()
    # report elapsed wall-clock time (startTime is recorded at the top of main)
    print('sampling took {:.2f} seconds'.format(endTime - startTime))
Example No. 9
def main():
    numWolves = 2
    numSheep = 1
    numWolvesStateSpaces = [
        2 * (numInWe + 1) for numInWe in range(2, numWolves + 1)
    ]
    actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                   (0, -10), (7, -7)]
    #actionSpace = [(10, 0), (0, 10), (-10, 0), (0, -10)]
    predatorPowerRatio = 8
    wolfIndividualActionSpace = list(
        map(tuple,
            np.array(actionSpace) * predatorPowerRatio))
    wolvesCentralControlActionSpaces = [
        list(it.product(wolfIndividualActionSpace, repeat=numInWe))
        for numInWe in range(2, numWolves + 1)
    ]
    numWolvesCentralControlActionSpaces = [
        len(wolvesCentralControlActionSpace)
        for wolvesCentralControlActionSpace in wolvesCentralControlActionSpaces
    ]
    regularizationFactor = 1e-4
    generateWolvesCentralControlModels = [
        GenerateModel(numStateSpace, numActionSpace, regularizationFactor)
        for numStateSpace, numActionSpace in zip(
            numWolvesStateSpaces, numWolvesCentralControlActionSpaces)
    ]
    sharedWidths = [128]
    actionLayerWidths = [128]
    valueLayerWidths = [128]
    wolfNNDepth = 9
    resBlockSize = 2
    dropoutRate = 0.0
    initializationMethod = 'uniform'
    initWolvesCentralControlModels = [
        generateWolvesCentralControlModel(sharedWidths * wolfNNDepth,
                                          actionLayerWidths, valueLayerWidths,
                                          resBlockSize, initializationMethod,
                                          dropoutRate) for
        generateWolvesCentralControlModel in generateWolvesCentralControlModels
    ]
    NNNumSimulations = 250
    wolvesModelPaths = [
        os.path.join(
            '..', '..', 'data', 'preTrainModel', 'agentId=' +
            str(len(actionSpace) * np.sum([10**_ for _ in range(numInWe)])) +
            '_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations='
            + str(NNNumSimulations) + '_trainSteps=50000')
        for numInWe in range(2, numWolves + 1)
    ]
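    # The agentId embedded in the file name encodes the individual action-space size and
    # the coalition size: len(actionSpace) * sum(10**k for k in range(numInWe)), i.e.
    # 8 * (1 + 10) = 88 for the two-wolf model loaded here.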
    print(wolvesModelPaths)
    wolvesCentralControlNNModels = [
        restoreVariables(initWolvesCentralControlModel, wolvesModelPath)
        for initWolvesCentralControlModel, wolvesModelPath in zip(
            initWolvesCentralControlModels, wolvesModelPaths)
    ]
    wolvesValueFunctionListBasedOnNumAgentsInWe = [
        ApproximateValue(NNModel) for NNModel in wolvesCentralControlNNModels
    ]
    valueFunction = wolvesValueFunctionListBasedOnNumAgentsInWe[numWolves - 2]

    xBoundary = [0, 600]
    yBoundary = [0, 600]
    reset = Reset(xBoundary, yBoundary, numWolves)

    numGridX = 120
    numGridY = 120
    xInterval = (xBoundary[1] - xBoundary[0]) / numGridX
    yInterval = (yBoundary[1] - yBoundary[0]) / numGridY
    sheepXPosition = [(gridIndex + 0.5) * xInterval
                      for gridIndex in range(numGridX)]
    sheepYPosition = [(gridIndex + 0.5) * yInterval
                      for gridIndex in range(numGridY)]

    wolvesState = reset()
    wolvesState = np.array([[300, 350], [550, 400]])
    print(wolvesState)
    levelValues = [sheepXPosition, sheepYPosition]
    levelNames = ["sheepXPosition", "sheepYPosition"]

    modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)

    toSplitFrame = pd.DataFrame(index=modelIndex)
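    # The heatmap sweeps a 120 x 120 grid of hypothetical sheep positions (cell centres at
    # (gridIndex + 0.5) * interval) while the wolves stay fixed at the hard-coded state
    # [[300, 350], [550, 400]]; each cell is scored by the wolves' central-control value
    # network and the result is plotted and saved as 'valueMap2'.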

    evaluate = lambda df: evaluateValue(df, valueFunction, wolvesState)
    valueResultDf = toSplitFrame.groupby(levelNames).apply(evaluate)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    drawHeatmapPlot(valueResultDf, ax)

    fig.savefig('valueMap2', dpi=300)
    plt.show()
Example No. 10
    def __call__(self, parameters):
        print(parameters)
        numWolves = parameters['numWolves']
        numSheep = parameters['numSheep']
        softParamterForValue = parameters['valuePriorSoftMaxBeta']
        valuePriorEndTime = parameters['valuePriorEndTime']
        
        ## MDP Env
        # state is all multi agent state # action is all multi agent action
        xBoundary = [0, 600]
        yBoundary = [0, 600]
        numOfAgent = numWolves + numSheep
        reset = Reset(xBoundary, yBoundary, numOfAgent)

        possibleSheepIds = list(range(numSheep))
        possibleWolvesIds = list(range(numSheep, numSheep + numWolves))
        getSheepStatesFromAll = lambda state: np.array(state)[possibleSheepIds]
        getWolvesStatesFromAll = lambda state: np.array(state)[possibleWolvesIds]
        killzoneRadius = 25
        isTerminal = IsTerminal(killzoneRadius, getSheepStatesFromAll, getWolvesStatesFromAll)

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(xBoundary, yBoundary)
        interpolateOneFrame = InterpolateOneFrame(stayInBoundaryByReflectVelocity)
        numFramesToInterpolate = 5
        transit = TransitWithTerminalCheckOfInterpolation(numFramesToInterpolate, interpolateOneFrame, isTerminal)

        maxRunningSteps = 52
        timeCost = 1/maxRunningSteps
        terminalBonus = 1
        rewardFunction = RewardFunctionByTerminal(timeCost, terminalBonus, isTerminal)

        forwardOneStep = ForwardOneStep(transit, rewardFunction)
        sampleTrajectory = SampleTrajectory(maxRunningSteps, isTerminal, reset, forwardOneStep)

        ## MDP Policy
        # Sheep Part

        # Sheep Policy Function
        numSheepPolicyStateSpace = 2 * (numWolves + 1)
        sheepActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7),
                       (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
        preyPowerRatio = 12
        sheepIndividualActionSpace = list(map(tuple, np.array(sheepActionSpace) * preyPowerRatio))
        numSheepActionSpace = len(sheepIndividualActionSpace)
        regularizationFactor = 1e-4
        generateSheepModel = GenerateModel(numSheepPolicyStateSpace, numSheepActionSpace, regularizationFactor)
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        sheepNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initSheepModel = generateSheepModel(sharedWidths * sheepNNDepth, actionLayerWidths, valueLayerWidths, 
                resBlockSize, initializationMethod, dropoutRate)
        sheepModelPath = os.path.join('..', '..', 'data', 'preTrainModel',
                'agentId=0.'+str(numWolves)+'_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=110_trainSteps=50000')
        sheepNNModel = restoreVariables(initSheepModel, sheepModelPath)
        sheepPolicy = ApproximatePolicy(sheepNNModel, sheepIndividualActionSpace)

        # Sheep Generate Action
        softParameterInPlanningForSheep = 2.5
        softPolicyInPlanningForSheep = SoftDistribution(softParameterInPlanningForSheep)
        softenSheepPolicy = lambda relativeAgentsStatesForSheepPolicy: softPolicyInPlanningForSheep(sheepPolicy(relativeAgentsStatesForSheepPolicy))

        sheepChooseActionMethod = sampleFromDistribution
        sheepSampleActions = [SampleActionOnFixedIntention(selfId, possibleWolvesIds, softenSheepPolicy, sheepChooseActionMethod) for selfId in possibleSheepIds]

        # Wolves Part

        # Policy Likelihood function: Wolf Centrol Control NN Policy Given Intention
        numWolvesStateSpaces = [2 * (numInWe + numSheep) 
                for numInWe in range(2, numWolves + 1)]
        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7),
                       (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
        predatorPowerRatio = 8
        wolfIndividualActionSpace = list(map(tuple, np.array(actionSpace) * predatorPowerRatio))
        wolvesCentralControlActionSpaces = [list(it.product(wolfIndividualActionSpace, repeat = numInWe)) 
                for numInWe in range(2, numWolves + 1)]
        numWolvesCentralControlActionSpaces = [len(wolvesCentralControlActionSpace)
                for wolvesCentralControlActionSpace in wolvesCentralControlActionSpaces]
        regularizationFactor = 1e-4
        generateWolvesCentralControlModels = [GenerateModel(numStateSpace, numActionSpace, regularizationFactor) 
            for numStateSpace, numActionSpace in zip(numWolvesStateSpaces, numWolvesCentralControlActionSpaces)]
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        wolfNNDepth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initWolvesCentralControlModels = [generateWolvesCentralControlModel(sharedWidths * wolfNNDepth, actionLayerWidths, valueLayerWidths, 
                resBlockSize, initializationMethod, dropoutRate) for generateWolvesCentralControlModel in generateWolvesCentralControlModels] 
        NNNumSimulations = 250
        wolvesModelPaths = [os.path.join('..', '..', 'data', 'preTrainModel', 
                'agentId=.'+str(len(actionSpace) * np.sum([10**_ for _ in
                range(numInWe)]))+'_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations='+str(NNNumSimulations)+'_trainSteps=50000') 
                for numInWe in range(2, numWolves + 1)]
        print(wolvesModelPaths)
        wolvesCentralControlNNModels = [restoreVariables(initWolvesCentralControlModel, wolvesModelPath) 
                for initWolvesCentralControlModel, wolvesModelPath in zip(initWolvesCentralControlModels, wolvesModelPaths)]
        wolvesCentralControlPolicies = [ApproximatePolicy(NNModel, actionSpace) 
                for NNModel, actionSpace in zip(wolvesCentralControlNNModels, wolvesCentralControlActionSpaces)] 
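        # One central-control policy per coalition size numInWe (2 .. numWolves); each maps
        # the joint state to a distribution over the matching joint wolf action space, and
        # the policy for the full pack is selected below via index numWolves - 2.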


        # Wolves Generate Action
        softParameterInPlanning = 2.5
        softPolicyInPlanning = SoftDistribution(softParameterInPlanning)
        
        wolvesPolicy = lambda state: wolvesCentralControlPolicies[numWolves - 2](state) 
        wolfChooseActionMethod = sampleFromDistribution
        wolvesSampleAction = lambda state: wolfChooseActionMethod(softPolicyInPlanning(wolvesPolicy(state))) 
        
        def sampleAction(state):
            action = list(wolvesSampleAction(state)) + [sheepSampleAction(state) for sheepSampleAction in sheepSampleActions]
            return action
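        # Caution: the composed action list is wolves-first (the joint wolf action unpacked,
        # then the sheep actions), while the state indexing above puts the sheep first
        # (possibleSheepIds = [0]); if the transition expects actions in state order, this
        # list may need reordering.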

        # Sample and Save Trajectory
        trajectories = [sampleTrajectory(sampleAction) for _ in range(self.numTrajectories)]
        
        wolfType = 'sharedReward'
        trajectoryFixedParameters = {'sheepPolicySoft': softParameterInPlanningForSheep, 'wolfPolicySoft': softParameterInPlanning,
                'maxRunningSteps': maxRunningSteps, 'hierarchy': 0, 'NNNumSimulations':NNNumSimulations, 'wolfType': wolfType}
        self.saveTrajectoryByParameters(trajectories, trajectoryFixedParameters, parameters)
        print(np.mean([len(tra) for tra in trajectories]))
Example No. 11
def main():
    DEBUG = 0
    renderOn = 0
    if DEBUG:
        parametersForTrajectoryPath = {}
        startSampleIndex = 0
        endSampleIndex = 10
        agentId = 1
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)
    else:
        parametersForTrajectoryPath = json.loads(sys.argv[1])
        startSampleIndex = int(sys.argv[2])
        endSampleIndex = int(sys.argv[3])
        agentId = int(parametersForTrajectoryPath['agentId'])
        parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex,
                                                      endSampleIndex)

    # check file exists or not
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(
        dirName, '..', '..', '..', '..', 'data', '2wolves1sheep',
        'trainWolvesTwoCenterControlMultiTrees', 'trajectories')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)

    trajectorySaveExtension = '.pickle'
    maxRunningSteps = 50
    numSimulations = 500
    killzoneRadius = 50
    fixedParameters = {
        'agentId': agentId,
        'maxRunningSteps': maxRunningSteps,
        'numSimulations': numSimulations,
        'killzoneRadius': killzoneRadius
    }

    generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)

    trajectorySavePath = generateTrajectorySavePath(
        parametersForTrajectoryPath)

    if not os.path.isfile(trajectorySavePath):
        numOfAgent = 3
        sheepId = 0
        wolvesId = 1

        wolfOneId = 1
        wolfTwoId = 2

        xPosIndex = [0, 1]
        xBoundary = [0, 600]
        yBoundary = [0, 600]

        getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
        getWolfOneXPos = GetAgentPosFromState(wolfOneId, xPosIndex)
        getWolfTwoXPos = GetAgentPosFromState(wolfTwoId, xPosIndex)

        reset = Reset(xBoundary, yBoundary, numOfAgent)

        isTerminalOne = IsTerminal(getWolfOneXPos, getSheepXPos,
                                   killzoneRadius)
        isTerminalTwo = IsTerminal(getWolfTwoXPos, getSheepXPos,
                                   killzoneRadius)

        isTerminal = lambda state: isTerminalOne(state) or isTerminalTwo(state)

        stayInBoundaryByReflectVelocity = StayInBoundaryByReflectVelocity(
            xBoundary, yBoundary)

        centerControlIndexList = [wolvesId]
        unpackCenterControlAction = UnpackCenterControlAction(
            centerControlIndexList)
        transitionFunction = TransiteForNoPhysicsWithCenterControlAction(
            stayInBoundaryByReflectVelocity)

        numFramesToInterpolate = 3
        transit = TransitWithInterpolateStateWithCenterControlAction(
            numFramesToInterpolate, transitionFunction, isTerminal,
            unpackCenterControlAction)

        # NNGuidedMCTS init
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7),
                       (0, -10), (7, -7), (0, 0)]
        wolfActionSpace = actionSpace
        # wolfActionSpace = [(10, 0), (0, 10), (-10, 0), (0, -10), (0, 0)]

        preyPowerRatio = 12
        sheepActionSpace = list(
            map(tuple,
                np.array(actionSpace) * preyPowerRatio))

        predatorPowerRatio = 8
        wolfActionOneSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))
        wolfActionTwoSpace = list(
            map(tuple,
                np.array(wolfActionSpace) * predatorPowerRatio))

        wolvesActionSpace = list(
            product(wolfActionOneSpace, wolfActionTwoSpace))

        actionSpaceList = [sheepActionSpace, wolvesActionSpace]

        # neural network init
        numStateSpace = 2 * numOfAgent
        numSheepActionSpace = len(sheepActionSpace)
        numWolvesActionSpace = len(wolvesActionSpace)

        regularizationFactor = 1e-4
        sharedWidths = [128]
        actionLayerWidths = [128]
        valueLayerWidths = [128]
        generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace,
                                           regularizationFactor)

        # load save dir
        NNModelSaveExtension = ''
        sheepNNModelSaveDirectory = os.path.join(
            dirName, '..', '..', '..', '..', 'data', '2wolves1sheep',
            'trainSheepWithTwoHeatSeekingWolves', 'trainedResNNModels')
        sheepNNModelFixedParameters = {
            'agentId': 0,
            'maxRunningSteps': 50,
            'numSimulations': 110,
            'miniBatchSize': 256,
            'learningRate': 0.0001,
        }
        getSheepNNModelSavePath = GetSavePath(sheepNNModelSaveDirectory,
                                              NNModelSaveExtension,
                                              sheepNNModelFixedParameters)

        depth = 9
        resBlockSize = 2
        dropoutRate = 0.0
        initializationMethod = 'uniform'
        initSheepNNModel = generateSheepModel(sharedWidths * depth,
                                              actionLayerWidths,
                                              valueLayerWidths, resBlockSize,
                                              initializationMethod,
                                              dropoutRate)

        sheepTrainedModelPath = getSheepNNModelSavePath({
            'trainSteps': 50000,
            'depth': depth
        })
        sheepTrainedModel = restoreVariables(initSheepNNModel,
                                             sheepTrainedModelPath)
        sheepPolicy = ApproximatePolicy(sheepTrainedModel, sheepActionSpace)

        # MCTS
        cInit = 1
        cBase = 100
        calculateScore = ScoreChild(cInit, cBase)
        selectChild = SelectChild(calculateScore)

        # prior
        getActionPrior = lambda state: {
            action: 1 / len(wolvesActionSpace)
            for action in wolvesActionSpace
        }

        # load chase nn policy
        temperatureInMCTS = 1
        chooseActionInMCTS = SampleAction(temperatureInMCTS)

        def wolvesTransit(state, action):
            return transit(state,
                           [chooseActionInMCTS(sheepPolicy(state)), action])

        # reward function
        aliveBonus = -1 / maxRunningSteps
        deathPenalty = 1
        rewardFunction = reward.RewardFunctionCompete(aliveBonus, deathPenalty,
                                                      isTerminal)

        # initialize children; expand
        initializeChildren = InitializeChildren(wolvesActionSpace,
                                                wolvesTransit, getActionPrior)
        expand = Expand(isTerminal, initializeChildren)

        # random rollout policy
        def rolloutPolicy(state):
            return wolvesActionSpace[np.random.choice(
                range(numWolvesActionSpace))]

        # rollout
        rolloutHeuristicWeight = 0
        minDistance = 400
        rolloutHeuristic1 = reward.HeuristicDistanceToTarget(
            rolloutHeuristicWeight, getWolfOneXPos, getSheepXPos, minDistance)
        rolloutHeuristic2 = reward.HeuristicDistanceToTarget(
            rolloutHeuristicWeight, getWolfTwoXPos, getSheepXPos, minDistance)

        rolloutHeuristic = lambda state: (rolloutHeuristic1(state) +
                                          rolloutHeuristic2(state)) / 2
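        # With rolloutHeuristicWeight = 0 both distance heuristics presumably return 0, so
        # the averaged heuristic is effectively disabled and rollout values come from the
        # reward function alone.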

        maxRolloutSteps = 15
        rollout = RollOut(rolloutPolicy, maxRolloutSteps, wolvesTransit,
                          rewardFunction, isTerminal, rolloutHeuristic)

        numTree = 4
        numSimulationsPerTree = int(numSimulations / numTree)
        wolfPolicy = StochasticMCTS(
            numTree, numSimulationsPerTree, selectChild, expand, rollout,
            backup, establishSoftmaxActionDistFromMultipleTrees)
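        # The 500-simulation budget is split across numTree = 4 independent trees
        # (125 simulations each); the root action distribution is presumably a softmax
        # pooled over all trees (establishSoftmaxActionDistFromMultipleTrees).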

        # All agents' policies
        policy = lambda state: [sheepPolicy(state), wolfPolicy(state)]
        chooseActionList = [chooseGreedyAction, chooseGreedyAction]

        render = None
        if renderOn:
            import pygame as pg
            from pygame.color import THECOLORS
            screenColor = THECOLORS['black']
            circleColorList = [
                THECOLORS['green'], THECOLORS['red'], THECOLORS['red']
            ]
            circleSize = 10

            saveImage = False
            saveImageDir = os.path.join(dirName, '..', '..', '..', '..',
                                        'data', 'demoImg')
            if not os.path.exists(saveImageDir):
                os.makedirs(saveImageDir)

            screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
            render = Render(numOfAgent, xPosIndex, screen, screenColor,
                            circleColorList, circleSize, saveImage,
                            saveImageDir)

        sampleTrajectory = SampleTrajectoryWithRender(maxRunningSteps, transit,
                                                      isTerminal, reset,
                                                      chooseActionList, render,
                                                      renderOn)
        trajectories = [
            sampleTrajectory(policy)
            for sampleIndex in range(startSampleIndex, endSampleIndex)
        ]
        print([len(traj) for traj in trajectories])
        saveToPickle(trajectories, trajectorySavePath)