Example #1
0
        def __call__(self, oneConditionDf):
            """Return mean/std of the out-of-bound flag over every state.

            Loads all trajectories for this condition, flags each state with
            a CheckStateOfofBound predicate built from the condition's wall
            size, and summarizes the boolean flags as a pd.Series.
            """
            trajectories = self.getTrajectories(oneConditionDf)
            # Wall half-width plus a small tolerance defines the boundary.
            boundary = readParametersFromDf(oneConditionDf)['hasWalls'] + 0.05

            isOutOfBound = CheckStateOfofBound(boundary)
            outOfBoundFlags = np.array([
                isOutOfBound(state)
                for trajectory in trajectories
                for state in trajectory
            ])
            return pd.Series({
                'mean': np.mean(outOfBoundFlags),
                'std': np.std(outOfBoundFlags),
            })
def main():
    """Sweep MuJoCo contact-solver parameters (solimp/solref) and plot the
    MSE of position and of speed against a baseline trajectory set.

    NOTE(review): relies on module-level helpers defined elsewhere in the
    file (simuliateOneParameter, GetSavePath, LoadTrajectories,
    ComputeStatistics, drawPerformanceLine, readParametersFromDf,
    loadFromPickle) and on module-level imports (np, pd, plt, it, os,
    OrderedDict).
    """
    manipulatedVariables = OrderedDict()
    #solimp
    manipulatedVariables['dmin'] = [0.9]
    manipulatedVariables['dmax'] = [0.9999]
    manipulatedVariables['width'] = [0.001]
    manipulatedVariables['midpoint'] = [0.5]  #useless
    manipulatedVariables['power'] = [1]  #useless
    #solref
    manipulatedVariables['timeconst'] = [
        0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0
    ]
    manipulatedVariables['dampratio'] = [0.2, 0.4, 0.6, 0.8, 1.0]

    # Parameters shown as subplot rows/columns vs. drawn as line series;
    # everything else is dropped from the plot legends.
    eavluateParmetersList = ['dampratio', 'dmax']
    lineParameter = ['timeconst']
    prametersToDrop = list(
        set(manipulatedVariables.keys()) -
        set(eavluateParmetersList + lineParameter))
    print(prametersToDrop, manipulatedVariables)

    # timeconst: 0.1-0.2
    # dmin: 0.5-dimax
    # dmax:0.9-0.9999
    # dampratio:0.3-0.8
    # Cartesian product of all variable values -> one dict per condition.
    productedValues = it.product(
        *[[(key, value) for value in values]
          for key, values in manipulatedVariables.items()])
    parametersAllCondtion = [
        dict(list(specificValueParameter))
        for specificValueParameter in productedValues
    ]

    evalNum = 500
    randomSeed = 1542
    dt = 0.05  #0.05

    dirName = os.path.dirname(__file__)
    dataFolderName = os.path.join(dirName, '..')
    trajectoryDirectory = os.path.join(dataFolderName, 'trajectory',
                                       'variousCollsiondt={}'.format(dt))
    if not os.path.exists(trajectoryDirectory):
        os.makedirs(trajectoryDirectory)

    # Run the simulation once per condition; results are written to disk
    # under trajectoryDirectory and read back below.
    for parameterOneCondiiton in parametersAllCondtion:
        # print(parameterOneCondiiton,evalNum,randomSeed)
        simuliateOneParameter(parameterOneCondiiton, evalNum, randomSeed, dt)

    # Empty DataFrame whose MultiIndex enumerates every condition; groupby
    # over it drives one statistics computation per condition.
    levelNames = list(manipulatedVariables.keys())
    levelValues = list(manipulatedVariables.values())
    modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
    toSplitFrame = pd.DataFrame(index=modelIndex)
    # save evaluation trajectories

    trajectoryExtension = '.pickle'
    trajectoryFixedParameters = {
        'isMujoco': 1,
        'isCylinder': 1,
        'evalNum': evalNum,
        'randomSeed': randomSeed
    }

    getTrajectorySavePath = GetSavePath(trajectoryDirectory,
                                        trajectoryExtension,
                                        trajectoryFixedParameters)
    # NOTE(review): getTrajectorySavePathFromDf is defined but never used
    # in this function.
    getTrajectorySavePathFromDf = lambda df: getTrajectorySavePath(
        readParametersFromDf(df))

    # compute statistics on the trajectories

    fuzzySearchParameterNames = []
    loadTrajectories = LoadTrajectories(getTrajectorySavePath, loadFromPickle,
                                        fuzzySearchParameterNames)
    loadTrajectoriesFromDf = lambda df: loadTrajectories(
        readParametersFromDf(df))

    def compareXPos(traj, baselineTraj):
        """Per-step squared error of the first agent's xy position against
        the baseline trajectory.

        NOTE(review): assumes state layout s[0][0][:2] == (x, y) — confirm
        against the simulator's state format.
        """
        SE = [
            np.linalg.norm(s1[0][0][:2] - s2[0][0][:2])**2
            for s1, s2 in zip(baselineTraj, traj)
        ]

        return SE

    originEnvTrajSavePath = os.path.join(
        dirName, '..', 'trajectory', 'variousCollsion',
        'collisionMoveOriginBaseLine_evalNum={}_randomSeed={}.pickle'.format(
            evalNum, randomSeed))
    baselineTrajs = loadFromPickle(originEnvTrajSavePath)

    class CompareTrajectories():
        """Apply calculateStatiscs to each (trajectory, baseline) pair and
        summarize as per-step mean/std across trajectories."""

        def __init__(self, baselineTrajs, calculateStatiscs):
            self.baselineTrajs = baselineTrajs
            self.calculateStatiscs = calculateStatiscs

        def __call__(self, trajectorys):

            allMeasurements = np.array([
                self.calculateStatiscs(traj, baselineTraj)
                for traj, baselineTraj in zip(trajectorys, self.baselineTrajs)
            ])
            # print()
            # axis=0: average over trajectories, keeping one value per step.
            measurementMean = allMeasurements.mean(axis=0)
            measurementStd = allMeasurements.std(axis=0)
            return pd.Series({'mean': measurementMean, 'std': measurementStd})

    measurementFunction = CompareTrajectories(baselineTrajs, compareXPos)

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction)
    statisticsDf = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf)

    # Figure 0: position MSE, one subplot per (row, column) parameter pair.
    fig = plt.figure(0)

    rowName, columnName = eavluateParmetersList
    numRows = len(manipulatedVariables[rowName])
    numColumns = len(manipulatedVariables[columnName])
    plotCounter = 1
    # NOTE(review): selfId is assigned but never used in this function.
    selfId = 0
    for rowVar, grp in statisticsDf.groupby(rowName):
        grp.index = grp.index.droplevel(rowName)

        for columVar, group in grp.groupby(columnName):
            group.index = group.index.droplevel(columnName)

            axForDraw = fig.add_subplot(numRows, numColumns, plotCounter)
            # Label only the leftmost column (ylabel) and the top row (title).
            if (plotCounter % numColumns == 1) or numColumns == 1:
                axForDraw.set_ylabel(rowName + ': {}'.format(rowVar))
            if plotCounter <= numColumns:
                axForDraw.set_title(columnName + ': {}'.format(columVar))

            axForDraw.set_ylim(0, 0.02)
            drawPerformanceLine(group, axForDraw, lineParameter,
                                prametersToDrop)
            plotCounter += 1

    plt.suptitle('MSEforPos,dt={}'.format(dt))
    plt.legend(loc='best')

    # plt.show()

    def compareV(traj, baselineTraj):
        """Per-step squared error of the first agent's velocity components
        (s[0][0][2:], presumably vx/vy — TODO confirm) against baseline."""
        # [print(s1[0][0]-s2[0][0],np.linalg.norm(s1[0][0][2:]-s2[0][0][2:]),np.linalg.norm(s1[0][0][:2]-s2[0][0][:2])) for s1,s2 in zip (baselineTraj,traj) ]
        SE = [
            np.linalg.norm(s1[0][0][2:] - s2[0][0][2:])**2
            for s1, s2 in zip(baselineTraj, traj)
        ]
        return SE

    measurementFunction2 = CompareTrajectories(baselineTrajs, compareV)

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction2)
    statisticsDf2 = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf2)

    # Figure 1: speed MSE, same subplot grid as figure 0.
    fig = plt.figure(1)

    numRows = len(manipulatedVariables[rowName])
    numColumns = len(manipulatedVariables[columnName])
    plotCounter = 1
    for rowVar, grp in statisticsDf2.groupby(rowName):
        grp.index = grp.index.droplevel(rowName)

        for columVar, group in grp.groupby(columnName):
            group.index = group.index.droplevel(columnName)

            axForDraw = fig.add_subplot(numRows, numColumns, plotCounter)
            if (plotCounter % numColumns == 1) or numColumns == 1:
                axForDraw.set_ylabel(rowName + ': {}'.format(rowVar))
            if plotCounter <= numColumns:
                axForDraw.set_title(columnName + ': {}'.format(columVar))

            axForDraw.set_ylim(0, 0.4)
            drawPerformanceLine(group, axForDraw, lineParameter,
                                prametersToDrop)
            plotCounter += 1

    plt.suptitle('MSEforSpeed,dt={}'.format(dt))
    plt.legend(loc='best')
    plt.show()
Example #3
0
def main():
    """Sweep wall sizes ('hasWalls') and report the fraction of states in
    which any agent is outside the wall boundary, plus per-condition
    simulation times.

    NOTE(review): relies on module-level helpers defined elsewhere in the
    file (simuliateOneParameter, GetSavePath, LoadTrajectories,
    readParametersFromDf, loadFromPickle) and on module-level imports
    (np, pd, it, os, OrderedDict).
    """

    manipulatedVariables = OrderedDict()

    manipulatedVariables['numWolves'] = [3]
    manipulatedVariables['numSheeps'] = [1]
    manipulatedVariables['numBlocks'] = [2]
    manipulatedVariables['maxTimeStep'] = [25]
    manipulatedVariables['sheepSpeedMultiplier'] = [1.0]
    manipulatedVariables['individualRewardWolf'] = [0]
    manipulatedVariables['timeconst'] = [0.5]
    manipulatedVariables['dampratio'] = [0.2]
    manipulatedVariables['hasWalls'] = [1.0, 1.5, 2.0]
    manipulatedVariables['dt'] = [0.05]

    # Parameters shown as plot rows/columns vs. drawn as line series;
    # everything else is dropped from the legends.
    eavluateParmetersList = ['hasWalls', 'dt']
    lineParameter = ['sheepSpeedMultiplier']
    prametersToDrop = list(
        set(manipulatedVariables.keys()) -
        set(eavluateParmetersList + lineParameter))

    print(prametersToDrop, manipulatedVariables)

    # Cartesian product of all variable values -> one dict per condition.
    productedValues = it.product(
        *[[(key, value) for value in values]
          for key, values in manipulatedVariables.items()])
    parametersAllCondtion = [
        dict(list(specificValueParameter))
        for specificValueParameter in productedValues
    ]

    evalNum = 500
    randomSeed = 1542

    dirName = os.path.dirname(__file__)

    trajectoryDirectory = os.path.join(dirName, '..', 'trajectory',
                                       'evluateWall')
    if not os.path.exists(trajectoryDirectory):
        os.makedirs(trajectoryDirectory)

    # Simulate each condition once, recording its returned sample time.
    # (Renamed from 'time' to avoid shadowing the stdlib module name.)
    sampleTimes = []
    for parameterOneCondiiton in parametersAllCondtion:
        sampletime = simuliateOneParameter(parameterOneCondiiton, evalNum,
                                           randomSeed)
        sampleTimes.append(sampletime)

    # Empty DataFrame whose MultiIndex enumerates every condition; groupby
    # over it drives one statistics computation per condition.
    levelNames = list(manipulatedVariables.keys())
    levelValues = list(manipulatedVariables.values())
    modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
    toSplitFrame = pd.DataFrame(index=modelIndex)

    # Load the saved evaluation trajectories back, keyed by condition.
    trajectoryExtension = '.pickle'
    trajectoryFixedParameters = {'randomSeed': randomSeed, 'evalNum': evalNum}

    getTrajectorySavePath = GetSavePath(trajectoryDirectory,
                                        trajectoryExtension,
                                        trajectoryFixedParameters)

    # compute statistics on the trajectories

    fuzzySearchParameterNames = []
    loadTrajectories = LoadTrajectories(getTrajectorySavePath, loadFromPickle,
                                        fuzzySearchParameterNames)
    loadTrajectoriesFromDf = lambda df: loadTrajectories(
        readParametersFromDf(df))

    class CheckStateOfofBound(object):
        """Predicate: is any agent outside the +/-minDistance square?

        NOTE(review): state[0][:-2] assumes the last two entries of the
        agent list are static blocks — confirm against the state layout.
        """

        def __init__(self, minDistance):
            self.minDistance = minDistance

        def __call__(self, state):
            absloc = np.abs([agent[:2] for agent in state[0][:-2]])
            return np.any(absloc > self.minDistance)

    class ComputeOutOfBounds:
        """Per-condition statistic: mean/std of the out-of-bound flag over
        every state of every trajectory for that condition."""

        def __init__(self, getTrajectories, CheckStateOfofBound):
            self.getTrajectories = getTrajectories
            self.CheckStateOfofBound = CheckStateOfofBound

        def __call__(self, oneConditionDf):
            allTrajectories = self.getTrajectories(oneConditionDf)
            # Wall half-width plus a small tolerance.
            minDistance = readParametersFromDf(
                oneConditionDf)['hasWalls'] + 0.05

            # Fixed: use the injected predicate class; the original
            # referenced the enclosing-scope name, silently bypassing the
            # constructor argument.
            checkStateOfofBound = self.CheckStateOfofBound(minDistance)
            allMeasurements = np.array([
                checkStateOfofBound(state) for traj in allTrajectories
                for state in traj
            ])
            measurementMean = np.mean(allMeasurements)
            measurementStd = np.std(allMeasurements)
            return pd.Series({'mean': measurementMean, 'std': measurementStd})

    computeStatistics = ComputeOutOfBounds(loadTrajectoriesFromDf,
                                           CheckStateOfofBound)
    statisticsDf = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf)

    for i, parameterOneCondiiton in enumerate(parametersAllCondtion):
        print(parameterOneCondiiton, sampleTimes[i])
def main():
    """Evaluate rope-environment trajectories on several measures: mean
    agent speed, caught ratio, wolf-sheep and wolf-master distances, and
    the wolf's heading deviation from the sheep direction.

    NOTE(review): relies on module-level helpers defined elsewhere in the
    file (generateSingleCondition, GetSavePath, LoadTrajectories,
    ComputeStatistics, DarwStatisticsDf, IsTerminal, getPosFromAgentState,
    getVelFromAgentState, readParametersFromDf, loadFromPickle) and on
    module-level imports (np, pd, plt, it, os, OrderedDict).
    """

    manipulatedVariables = OrderedDict()

    manipulatedVariables['damping'] = [0.0]
    manipulatedVariables['frictionloss'] = [0.0]
    manipulatedVariables['masterForce'] = [0.0]

    # Parameters shown as plot rows/columns vs. drawn as line series;
    # everything else is dropped from the legends.
    eavluateParmetersList = ['frictionloss', 'damping']
    lineParameter = ['masterForce']
    prametersToDrop = list(
        set(manipulatedVariables.keys()) -
        set(eavluateParmetersList + lineParameter))

    # Cartesian product of all variable values -> one dict per condition.
    productedValues = it.product(
        *[[(key, value) for value in values]
          for key, values in manipulatedVariables.items()])
    conditions = [
        dict(list(specificValueParameter))
        for specificValueParameter in productedValues
    ]
    evalNum = 500
    randomSeed = 133

    # Generate trajectories for each condition (written to disk).
    for condition in conditions:
        print(condition)
        generateSingleCondition(condition, evalNum, randomSeed)

    # Empty DataFrame whose MultiIndex enumerates every condition; groupby
    # over it drives one statistics computation per condition.
    levelNames = list(manipulatedVariables.keys())
    levelValues = list(manipulatedVariables.values())
    modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
    toSplitFrame = pd.DataFrame(index=modelIndex)

    # Fixed: define dirName locally — the original read an undefined name
    # (a NameError unless a module-level global happened to exist).
    dirName = os.path.dirname(__file__)
    trajectoriesSaveDirectory = os.path.join(dirName, '..', 'trajectory',
                                             'evluateRopeFixedEnv2')
    if not os.path.exists(trajectoriesSaveDirectory):
        os.makedirs(trajectoriesSaveDirectory)
    trajectoryExtension = '.pickle'
    trajectoryFixedParameters = {'randomSeed': randomSeed, 'evalNum': evalNum}

    getTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory,
                                        trajectoryExtension,
                                        trajectoryFixedParameters)

    # compute statistics on the trajectories

    fuzzySearchParameterNames = []
    loadTrajectories = LoadTrajectories(getTrajectorySavePath, loadFromPickle,
                                        fuzzySearchParameterNames)
    loadTrajectoriesFromDf = lambda df: loadTrajectories(
        readParametersFromDf(df))

    def computeV(traj):
        """Mean speed over all agents and all states of one trajectory.

        NOTE(review): assumes agentState[2:] holds velocity — confirm.
        """
        vel = [
            np.linalg.norm(agentState[2:]) for state in traj
            for agentState in state[0]
        ]
        return np.mean(vel)

    measurementFunction = computeV
    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction)
    statisticsDf = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf)

    darwStatisticsDf = DarwStatisticsDf(manipulatedVariables,
                                        eavluateParmetersList, lineParameter)
    subtitle = 'velocity'
    figIndex = 0
    ylimMax = 1.2
    darwStatisticsDf(statisticsDf, subtitle, figIndex, ylimMax)

    class FliterMeasurement():
        """Apply splitMeasurement to every sliding window of splitLength
        states. Currently unused below; kept for parity with the original.
        """

        def __init__(self, splitLength, splitMeasurement):
            self.splitLength = splitLength
            self.splitMeasurement = splitMeasurement

        def __call__(self, traj):
            # Fixed: the original referenced the bare name
            # 'splitMeasurement' (a NameError if ever called) and
            # discarded the resulting list instead of returning it.
            return [
                self.splitMeasurement(traj[i:i + self.splitLength])
                for i in range(len(traj) - self.splitLength)
            ]

    # Shared state accessors (defined once; the original redefined them
    # before every measure). Agent 0 = wolf, 1 = sheep, 2 = master —
    # presumably; verify against the state layout.
    getWolfPos = lambda state: getPosFromAgentState(state[0][0])
    getSheepfPos = lambda state: getPosFromAgentState(state[0][1])
    getMasterfPos = lambda state: getPosFromAgentState(state[0][2])
    getWolfVel = lambda state: getVelFromAgentState(state[0][0])

    # Caught ratio: fraction of states with wolf-sheep distance below
    # minDistance.
    minDistance = 0.35
    isCaught = IsTerminal(minDistance, getWolfPos, getSheepfPos)
    measurementFunction2 = lambda traj: np.mean(
        [isCaught(state) for state in traj])

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction2)
    statisticsDf2 = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf2)
    subtitle = 'caughtRatio(minDistance={})'.format(minDistance)
    figIndex = figIndex + 1
    ylimMax = 0.2
    darwStatisticsDf(statisticsDf2, subtitle, figIndex, ylimMax)

    # Mean wolf-sheep distance per trajectory.
    calculateWolfSheepDistance = lambda state: np.linalg.norm(
        getWolfPos(state) - getSheepfPos(state))
    measurementFunction3 = lambda traj: np.mean(
        [calculateWolfSheepDistance(state) for state in traj])

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction3)
    statisticsDf3 = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf3)
    subtitle = 'WolfSheepDistance'
    figIndex = figIndex + 1
    ylimMax = 1.6
    darwStatisticsDf(statisticsDf3, subtitle, figIndex, ylimMax)

    # Mean wolf-master distance per trajectory.
    calculateWolfMasterDistance = lambda state: np.linalg.norm(
        getWolfPos(state) - getMasterfPos(state))
    measurementFunction4 = lambda traj: np.mean(
        [calculateWolfMasterDistance(state) for state in traj])

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction4)
    statisticsDf4 = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf4)
    subtitle = 'WolfMasterDistance'
    figIndex = figIndex + 1
    ylimMax = 0.6
    darwStatisticsDf(statisticsDf4, subtitle, figIndex, ylimMax)

    def calculateCrossAngle(vel1, vel2):
        """Absolute angle in degrees (0-180) between two 2D vectors,
        computed via the argument of the complex-number quotient."""
        vel1complex = complex(vel1[0], vel1[1])
        vel2complex = complex(vel2[0], vel2[1])
        return np.abs(np.angle(vel2complex / vel1complex)) / np.pi * 180

    # Deviation of the wolf's velocity from the sheep->wolf direction.
    calculateDevation = lambda state: calculateCrossAngle(
        getWolfPos(state) - getSheepfPos(state), getWolfVel(state))
    measurementFunction5 = lambda traj: np.mean(
        [calculateDevation(state) for state in traj])

    computeStatistics = ComputeStatistics(loadTrajectoriesFromDf,
                                          measurementFunction5)
    statisticsDf5 = toSplitFrame.groupby(levelNames).apply(computeStatistics)
    print(statisticsDf5)
    subtitle = 'Devation'
    figIndex = figIndex + 1
    # NOTE(review): no ylimMax passed here — assumes DarwStatisticsDf
    # provides a default; confirm.
    darwStatisticsDf(statisticsDf5, subtitle, figIndex)

    plt.show()