Code Example #1
File: autoencoder.py Project: balab23/Demalifier-2.0
#First layer
L1 = LayerNN(size_in=inputSize,
             size_out=numberOfFeatures,
             sparsity=0.1,
             beta=3,
             weightDecay=3e-3,
             activation=FunctionModel.Sigmoid)

#Second layer
L2 = LayerNN(size_in=numberOfFeatures,
             size_out=inputSize,
             weightDecay=3e-3,
             activation=FunctionModel.Sigmoid)

#Compile all together
AE = TheanoNNclass(options, (L1, L2))

#Compile train and predict functions
AE.trainCompile()
AE.predictCompile()

#Normalise CV data from 0..255 to 0..1
X_CV = DATA_CV[:, 1:].T / 255.0

#Empty list to collect CV errors
CV_error = []

#Let's iterate!
for i in xrange(iterations):

    #Get miniBatch of defined size from whole DATA
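    #(The excerpt is cut off here. What follows is a minimal sketch of the
    #presumed loop body, modelled on Code Example #3; the column-wise DATA
    #layout, batchSize, and the numpy import are assumptions, not excerpt code.)
    sampleIndex = numpy.random.choice(DATA.shape[1], batchSize, replace=False)
    X = DATA[:, sampleIndex] / 255.0

    #One training step: the autoencoder reconstructs its own input
    AE.trainCalc(X, X, iteration=1, debug=True)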
Code Example #2
File: SimplestExample.py Project: oeminaga/TNNF
#Let's try to train the NN and see how it solves such a task
#NN part

#Common options for whole NN
options = OptionsStore(learnStep=0.05,
                       minibatch_size=dataSize,
                       CV_size=dataSize)

#Layer architecture
#We will use only one layer with 2 neurons on input and 1 on output
L1 = LayerNN(size_in=dataFeatures,
             size_out=1,
             activation=FunctionModel.Linear)

#Compile NN
NN = TheanoNNclass(options, (L1, ))

#Compile NN train
NN.trainCompile()

#Compile NN predict
NN.predictCompile()

#Number of iterations (cycles of training)
iterations = 1000

#Set step to draw
drawEveryStep = 100
draw = False

#CV error accumulator (for network estimation)
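#(The excerpt ends here. Below is a minimal sketch of the presumed
#continuation, reusing the trainCalc pattern from Code Example #3; the names
#X and Y for the training inputs and targets are assumptions.)
CV_error = []

for i in xrange(iterations):
    #One full-batch training step (minibatch_size equals dataSize in the options above)
    NN.trainCalc(X, Y, iteration=1, debug=True)

    #Drawing stays off while draw is False
    if draw and i % drawEveryStep == 0:
        pass  #plotting code is not shown in the excerpt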
Code Example #3
File: fSimple.py Project: oeminaga/TNNF
trainData.X = DataMutate.deNormalizer(trainData.X, afterzero=8)
print trainData.X.shape, trainData.Y.shape

L1 = LayerNN(size_in=trainData.input,
             size_out=numberOfFeatures,
             sparsity=0.05,
             beta=7,
             weightDecay=3e-7,
             activation=FunctionModel.Sigmoid)

L2 = LayerNN(size_in=numberOfFeatures,
             size_out=trainData.input,
             weightDecay=3e-7,
             activation=FunctionModel.Sigmoid)

AE = TheanoNNclass(options, (L1, L2))

AE.trainCompile()

for i in xrange(iterations):
    X, index = trainData.miniBatch(batchSize)
    AE.trainCalc(X, X, iteration=1, debug=True)
    print i

if os.path.exists(datasetFolder + "out/"):
    shutil.rmtree(datasetFolder + "out/")

os.makedirs(datasetFolder + "out/")
os.makedirs(datasetFolder + "out/w/")

AE.modelSaver(datasetFolder + "out/AE_NEW_VERSION_MNIST.txt")
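
Not part of the excerpt: restoring the saved model for later use. A minimal sketch, assuming TheanoNNclass exposes a modelLoader counterpart to modelSaver (the method name is an assumption, not confirmed by these excerpts):

#Rebuild the same architecture, then load the trained weights back (assumed API)
AE2 = TheanoNNclass(options, (L1, L2))
AE2.modelLoader(datasetFolder + "out/AE_NEW_VERSION_MNIST.txt")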
Code Example #4
File: SimpleAutoEncoder.py Project: vwvolodya/TNNF
# First layer
L1 = LayerNN(
    size_in=inputSize,
    size_out=numberOfFeatures,
    sparsity=0.1,
    beta=3,
    weightDecay=3e-3,
    activation=FunctionModel.Sigmoid,
)

# Second layer
L2 = LayerNN(
    size_in=numberOfFeatures,
    size_out=inputSize,
    weightDecay=3e-3,
    activation=FunctionModel.Sigmoid,
)

# Compile all together
AE = TheanoNNclass(options, (L1, L2))

# Compile train and predict functions
AE.trainCompile()
AE.predictCompile()

# Normalise CV data from 0..255 to 0..1
X_CV = DATA_CV[:, 1:].T / 255.0

# Empty list to collect CV errors
CV_error = []

# Let's iterate!
for i in xrange(iterations):

    # Get miniBatch of defined size from whole DATA
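    # (The excerpt is cut off here. What follows is a minimal sketch of the
    # presumed loop body, modelled on Code Example #3; the column-wise DATA
    # layout, batchSize, the numpy import, and a predictCalc method returning
    # an object with an .out field are all assumptions, not excerpt code.)
    sampleIndex = numpy.random.choice(DATA.shape[1], batchSize, replace=False)
    X = DATA[:, sampleIndex] / 255.0

    # One training step: the autoencoder reconstructs its own input
    AE.trainCalc(X, X, iteration=1, debug=True)

    # Periodically record mean squared reconstruction error on the CV set
    if i % 100 == 0:
        R = AE.predictCalc(X_CV, debug=False).out
        CV_error.append(numpy.mean((R - X_CV) ** 2))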