Example #1
print("testICOWithFilters")

# make sure the results file starts empty
open('test_bp_filt_py.csv', 'wb').close()

with open('test_bp_filt_py.csv', 'ab') as csvfile:
    # two input neurons, two hidden ones and one output neuron
    # five filters per layer; filter durations range from minT to maxT time steps
    nFiltersInput = 5
    nFiltersHidden = 5
    # nFiltersHidden = 0 means that the layer is linear without filters
    minT = 3
    maxT = 15
    net = deep_feedback_learning.DeepFeedbackLearning(2, [2], 1, nFiltersInput,
                                                      nFiltersHidden, minT,
                                                      maxT)
    # init the weights
    net.initWeights(0.001, 0, deep_feedback_learning.Neuron.MAX_OUTPUT_CONST)
    net.setBias(0)
    net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.ico)
    net.setLearningRate(0.0001)
    net.setUseDerivative(0)
    #net.random_seed(10)
    # create the input and error arrays in numpy fashion
    inp = np.zeros(2)
    err = np.zeros(1)

    maxstep = 10000
    outp = np.zeros(maxstep)
    a = np.zeros(maxstep)
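    # --- Hedged sketch (not in the original snippet): how the simulation
    # loop might continue. It assumes the library's doStep(input, error)
    # call advances the network by one time step and getOutput(i) reads
    # output neuron i; the pulse input and error signal are made up here.
    for step in range(maxstep):
        inp[0] = 1.0 if (step % 1000) < 10 else 0.0   # hypothetical pulse
        inp[1] = inp[0]
        err[0] = inp[0] - outp[step - 1] if step > 0 else 0.0
        net.doStep(inp, err)
        outp[step] = net.getOutput(0)
        csvfile.write(("%d %f\n" % (step, outp[step])).encode())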
Example #2
# Enables engine output to console.
#game.set_console_enabled(True)

# deep_feedback_learning is imported, and widthNet/heightNet (the input
# image dimensions) are defined, earlier in the full script
nFiltersInput = 0
nFiltersHidden = 0
minT = 2
maxT = 10
nHidden0 = 2
nHidden1 = 2

# learning is disabled here; 0.001 is a typical non-zero rate
learningRate = 0

net = deep_feedback_learning.DeepFeedbackLearning(
    widthNet * heightNet, [nHidden0 * nHidden0, nHidden1 * nHidden1], 1,
    nFiltersInput, nFiltersHidden, minT, maxT)
# give each layer a 2D (width x height) layout for convolutional connectivity
net.getLayer(0).setConvolution(widthNet, heightNet)
net.getLayer(1).setConvolution(nHidden0, nHidden0)
net.getLayer(2).setConvolution(nHidden1, nHidden1)
net.initWeights(1, 0, deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop)
net.setLearningRate(learningRate)
net.setUseDerivative(0)
net.setMomentum(0.5)
net.setBias(0)
net.setLearningRateDiscountFactor(1)
net.getLayer(0).setActivationFunction(deep_feedback_learning.Neuron.TANH)
net.getLayer(1).setActivationFunction(deep_feedback_learning.Neuron.TANH)
epoch = 200
reflexGain = 1.0
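# --- Hedged sketch (not in the original snippet): a closed-loop training
# step in the style this setup suggests. The reflex signal and the
# doStep()/getOutput() stepping calls are assumptions; widthNet and
# heightNet are assumed to be defined earlier, as noted above.
import numpy as np

frame = np.zeros(widthNet * heightNet)  # flattened input image
err = np.zeros(1)

for e in range(epoch):
    reflex = 0.0                    # placeholder reflex signal
    err[0] = reflexGain * reflex    # the reflex error drives the learning
    net.doStep(frame, err)
    action = net.getOutput(0)       # single output neuron steers the agent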
Example #3
# game is a vizdoom DoomGame instance created earlier in the full script.
# Sets the living reward (applied on every move) to -1
game.set_living_reward(-1)

# Sets the ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR or ASYNC_SPECTATOR; PLAYER is the default)
game.set_mode(Mode.PLAYER)

# Enables engine output to console.
#game.set_console_enabled(True)

nFiltersInput = 0
nFiltersHidden = 0
minT = 3
maxT = 30
nHidden0 = 4
# 307200 = 640 * 480, i.e. one input neuron per screen pixel
net = deep_feedback_learning.DeepFeedbackLearning(307200,
                                                  [nHidden0 * nHidden0], 1,
                                                  nFiltersInput,
                                                  nFiltersHidden, minT, maxT)
#net.enableDebugOutput()
#net.getLayer(0).setConvolution(widthNet,heightNet)
#net.getLayer(1).setConvolution(nHidden0,nHidden0)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop)
net.setLearningRate(0.0001)
net.setMomentum(0.5)
net.initWeights(0.001, 1, deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM)
net.setUseDerivative(0)
net.setBias(1)

# Initialize the game. Further configuration won't have any effect from now on.
game.init()

# Run this many episodes
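# --- Hedged sketch (not in the original snippet): the episode loop the
# comment above introduces. The episode count, the action vector and the
# way the net would be driven are assumptions; only new_episode(),
# get_state(), make_action(), is_episode_finished() and
# get_total_reward() are ViZDoom's standard stepping API.
episodes = 10  # hypothetical; the original value is cut off

for i in range(episodes):
    game.new_episode()
    while not game.is_episode_finished():
        state = game.get_state()
        frame = state.screen_buffer           # raw pixels for the 307200 inputs
        # ... feed the frame and an error signal into net.doStep() here ...
        reward = game.make_action([0, 0, 1])  # hypothetical action vector
    print("Episode %d finished, total reward: %f" %
          (i, game.get_total_reward()))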