Example #1
# read all parameters through the alias of the alias and print them
aliasAlias = myData[...].aliasAlias
print('initial aliasAlias\n', aliasAlias, '\n\n')

# modify one row of the array returned by the alias
aliasAlias[1] = [5, 5, 3, 5, 5]

# write the modified row back through the alias
myData[1].aliasAlias = aliasAlias[1]

# print all parameters one more time
print('updated parameters\n', myData[...].parameters, '\n\n')

# print all contexts
print('updated context\n', myData[...].contexts, '\n\n')

# print the updated parameters as seen through the alias of the alias
print('updated aliasAlias\n', myData[...].aliasAlias, '\n\n')

# print the vertically stacked alias
print('vertical Alias\n', myData[...].parametersAliasVertical, '\n\n')

# double all values through the vertical alias
myData[...].parametersAliasVertical = myData[...].parametersAliasVertical * 2

print('vertical Alias after resetting values\n', myData[...].parametersAliasVertical, '\n\n')
print('Original Parameters after resetting values\n', myData[...].parameters, '\n\n')

print('Num Dim Vertical Parameters\n', dataManager.getNumDimensions('parametersAliasVertical'), '\n\n')
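
The write-through behaviour exercised above (updating the vertical alias also changes the original parameters) is analogous to NumPy views; a minimal plain-NumPy sketch of the idea, independent of the DataManager API:

import numpy as np

params = np.zeros((3, 5))      # stands in for the 'parameters' entry
alias = params[1]              # a view of row 1, analogous to an alias
alias[:] = [5, 5, 3, 5, 5]     # writing through the view ...
print(params[1])               # ... updates the underlying array: [5. 5. 3. 5. 5.]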
Example #2
print('Timing:', t1 - t0)

t0 = time.time()
print('Next states:', myData[...].nextStates)
t1 = time.time()
print('Timing:', t1 - t0)

t0 = time.time()
print('Previous states:', myData[..., slice(0, 10)].lastStates)
t1 = time.time()
print('Timing:', t1 - t0)

t0 = time.time()
print('All states:', myData[...].allStates)
t1 = time.time()
print('Timing:', t1 - t0)

t0 = time.time()
print('History states:', myData[...].historyStates.shape)
t1 = time.time()
print('Timing:', t1 - t0)

t0 = time.time()
# get history states for each episode
for i in range(0, 100):
    print('History states ', i, ' shape:', myData[i].historyStates.shape)
t1 = time.time()
print('Timing:', t1 - t0)

print('History Dimensions:', dataManager.getNumDimensions('historyStates'))
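
The repeated t0/t1 bookkeeping in this example can be factored into a small helper so that the elapsed time is always end minus start; a generic standard-library sketch (the myData access in the usage note is hypothetical):

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # print the wall-clock time spent inside the with-block
    start = time.perf_counter()
    yield
    print('Timing (%s): %.6f s' % (label, time.perf_counter() - start))

# example usage with the data object from above:
# with timed('next states'):
#     print('Next states:', myData[...].nextStates)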
Example #3
# assumed: the omitted head of this example imports tensorflow as tf and
# numpy as np, and defines num_cpu; tf_config caps TensorFlow at num_cpu threads
tf_config = tf.ConfigProto(inter_op_parallelism_threads=num_cpu,
                           intra_op_parallelism_threads=num_cpu)
session = tf.Session(config=tf_config)
session.__enter__()  # make this session the default session without a with-block

dataManager = DataManager('data')
dataManager.addDataEntry('states', 4)
dataManager.addDataEntry('actions', 6)

settings = getDefaultSettings()

gaussian = LinearFullGaussian(dataManager, ['states'], ['actions'])

data = dataManager.createDataObject([10000])
data[...].states = np.random.normal(0, 1, data[...].states.shape)

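# build a random positive definite covariance matrix and set the Gaussian's
# stdmat parameter to its Cholesky factor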
A = np.random.normal(0, 1, (dataManager.getNumDimensions('actions'),
                            dataManager.getNumDimensions('actions')))
covMat = A.dot(A.transpose())
gaussian.param_stdmat = np.linalg.cholesky(covMat)
gaussian.param_final_w = np.random.normal(0, 1, gaussian.param_final_w.shape)
gaussian.param_final_b = np.random.normal(0, 1, gaussian.param_final_b.shape)

# Sample from Gaussian and write back in data
data[...] >> gaussian >> data

# Learn a second Gaussian from the sampled data by maximum likelihood
gaussianLearned = LinearFullGaussian(dataManager, ['states'], ['actions'])
learner = LinearGaussianMLLearner(dataManager, gaussianLearned)

data[...] >> learner
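
The covariance construction above (covMat = A.dot(A.T), stdmat = its Cholesky factor) can be checked in isolation with plain NumPy; a small sketch assuming the 6-dimensional action space used in this example:

import numpy as np

dim = 6                                  # dimensionality of 'actions' above
A = np.random.normal(0, 1, (dim, dim))
covMat = A.dot(A.T)                      # symmetric and (almost surely) positive definite
L = np.linalg.cholesky(covMat)           # lower-triangular factor with L.dot(L.T) = covMat
print(np.allclose(L.dot(L.T), covMat))   # expected: True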