Code example #1
def get_Networks(data_size, predict_size):
    ReLU_net = seq.Sequential()
    ReLU_net.add(linear.Linear(data_size, 100))
    ReLU_net.add(r.ReLU())
    ReLU_net.add(linear.Linear(100, 50))
    ReLU_net.add(r.ReLU())
    ReLU_net.add(linear.Linear(50, predict_size))
    ReLU_net.add(softMax.SoftMax())

    ELU_net = seq.Sequential()
    ELU_net.add(linear.Linear(data_size, 35))
    ELU_net.add(elu.ELU())
    ELU_net.add(linear.Linear(35, predict_size))
    ELU_net.add(softMax.SoftMax())

    LeakyReLU_net = seq.Sequential()
    LeakyReLU_net.add(linear.Linear(data_size, 40))
    LeakyReLU_net.add(leaky.LeakyReLU())
    LeakyReLU_net.add(linear.Linear(40, predict_size))
    LeakyReLU_net.add(softMax.SoftMax())

    SoftPlus_net = seq.Sequential()
    SoftPlus_net.add(linear.Linear(data_size, 30))
    SoftPlus_net.add(softPlus.SoftPlus())
    SoftPlus_net.add(linear.Linear(30, predict_size))
    SoftPlus_net.add(softMax.SoftMax())

    return ReLU_net, ELU_net, LeakyReLU_net, SoftPlus_net
Code example #2
def get_Networks_with_batch(data_size, predict_size):
    ReLU_net = seq.Sequential()
    ReLU_net.add(linear.Linear(data_size, 100))
    ReLU_net.add(batch.BatchNormalization(0.3))
    ReLU_net.add(batch.ChannelwiseScaling(100))
    ReLU_net.add(r.ReLU())
    ReLU_net.add(linear.Linear(100, predict_size))
    ReLU_net.add(softMax.SoftMax())

    ELU_net = seq.Sequential()
    ELU_net.add(linear.Linear(data_size, predict_size))
    ELU_net.add(batch.BatchNormalization())
    ELU_net.add(batch.ChannelwiseScaling(predict_size))
    ELU_net.add(elu.ELU())
    ELU_net.add(softMax.SoftMax())

    return ReLU_net, ELU_net
Code example #3
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense


def generate_vgg16():
    input_shape = (224, 224, 3)  # input: 224x224 RGB (3-channel) image
    model = Sequential([
        # Block 1: conv layer with 64 filters (kernels) of size 3x3;
        # arguments: input shape, padding, activation function
        Conv2D(64, (3, 3), input_shape=input_shape, padding='same', activation='relu'),
        Conv2D(64, (3, 3), padding='same', activation='relu'),
        # layers after the first do not need an explicit input shape
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

        # Block 2
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

        # Block 3
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

        # Block 4
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

        # Block 5
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

        # fully connected layers
        Flatten(),
        Dense(4096, activation='relu'),
        Dense(4096, activation='relu'),
        Dense(1000, activation='softmax')
        # the final softmax normalizes the outputs into class probabilities
    ])
    return model
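A quick way to sanity-check the architecture above is to compile the returned model and print its layer summary. This is only an illustrative sketch; the optimizer and loss shown here are assumptions, not values from the original snippet.

model = generate_vgg16()
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # prints every layer's output shape and parameter count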
Code example #4
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.preprocessing import sequence
from keras.datasets import imdb

# set a few hyperparameters
max_features = 20000  # maximum number of words to keep
max_len = 80          # sequences are truncated/padded to this length
batch_size = 32

# load and prepare the data
(trainX, trainY), (testX, testY) = imdb.load_data(num_words=max_features)
trainX = sequence.pad_sequences(trainX, maxlen=max_len)
testX = sequence.pad_sequences(testX, maxlen=max_len)

# build the model
model = Sequential()
model.add(
    Embedding(max_features, 128)
)
model.add(
    LSTM(128, dropout=0.2, recurrent_dropout=0.2)
)
model.add(
    Dense(1, activation='sigmoid')
)

# configure the loss function and optimizer
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY, batch_size=batch_size, epochs=15, validation_data=(testX, testY))
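After training, the held-out test split can be scored with evaluate(); this short follow-up is a sketch and is not part of the original snippet.

score, acc = model.evaluate(testX, testY, batch_size=batch_size)
print('Test loss:', score)
print('Test accuracy:', acc)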
Code example #5
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D


def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
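A hypothetical usage sketch follows; the weights file name and the zero-valued input are placeholders rather than part of the original code, and the channels-first array matches the (3, 224, 224) input shape used above.

import numpy as np

model = VGG_16('vgg16_weights.h5')  # assumed file name for pretrained weights
model.compile(optimizer='sgd', loss='categorical_crossentropy')
im = np.zeros((1, 3, 224, 224), dtype='float32')  # placeholder batch of one image
preds = model.predict(im)
print(preds.argmax())  # index of the highest-probability class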
Code example #6
    def __str__(self):
        self.report2 = (
            "After " + str(self.number_of_rounds) + " games, the winner is "
            + self.winner + "! The score was " + str(self.points[0]) + " - "
            + str(self.points[1]) + " to " + self.winner + " over " + self.loser
        )
        return self.report2




sekvens = Sequential("Sekvensielt")
rand = Random("Tilfeldig")
common = MostCommon("Mest Vanlig")
hist = Historian("Historiker", 2)


game1 = ManyGames(sekvens, rand, 100)  # ends up roughly 50/50
game1.play_many_games_with_graphics()
print(game1)

game2 = ManyGames(sekvens, hist, 100)  # the historian wins clearly
game2.play_many_games_with_graphics()
print(game2)

game1 = ManyGames(common, hist, 100)  # the historian wins clearly
Code example #7
    exit()


def Zipper(A, B):
    C = [[A[i], B[i]] for i in range(len(A))]
    return C


def Unzipper(C):
    A = [C[i][0] for i in range(len(C))]
    B = [C[i][1] for i in range(len(C))]
    return A, B


C = Zipper(A, B)
if kind == 'r':
    shuffle(C)
    A, B = Unzipper(C)
    T = Sequential.Memorize(num, A, B)
    if er == 'y':
        T.StartM(ki)
    elif er == 'n':
        T.StartN(ki)
elif kind == 's':
    A, B = Unzipper(C)
    T = Sequential.Memorize(num, A, B)
    if er == 'y':
        T.StartM(ki)
    elif er == 'n':
        T.StartN(ki)
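As a side note, the Zipper and Unzipper helpers above re-implement what Python's built-in zip() already offers; the two lines below are only a comparison sketch, not part of the original script.

pairs = list(map(list, zip(A, B)))   # equivalent to Zipper(A, B)
A2, B2 = map(list, zip(*pairs))      # equivalent to Unzipper(pairs)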
Code example #8
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, Flatten, Dense

# map each raw label in Y to its index in the sorted label array a
a = np.sort(a)

for i in range(Y.shape[0]):
    for j in range(a.shape[0]):
        if Y[i] == a[j]:
            Y[i] = j

# convert class labels to one-hot encoding
Y = np_utils.to_categorical(Y, num_classes)

# shuffle the dataset and split it into train and test sets
x, y = shuffle(X, Y, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)

# Defining the model (AlexNet-style stack of convolution and pooling layers)
input_shape = X[0].shape
print(input_shape)
model = Sequential()
model.add(Conv2D(96, (11, 11), strides=(4, 4), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D(padding=(2, 2)))
model.add(Conv2D(256, (5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(Conv2D(384, (3, 3)))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(Conv2D(384, (3, 3)))
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(2048))
model.add(Dense(2048, name="dense_layer"))
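The snippet ends before the classification head; under the assumption of a softmax output over num_classes (defined earlier in the truncated script), a typical continuation would be:

model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_test, y_test))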
Code example #9
        elif self.action1 < self.action2:
            self.points = [0, 1]
            self.winner = str(self.player2) + ' is the winner'

        self.player1.recieve_results(self.player2, self.action2)
        self.player2.recieve_results(self.player1, self.action1)

    def __str__(self):
        self.report = str(self.player1) + ": " + str(
            self.action1) + "\n" + str(self.player2) + ": " + str(
                self.action2) + "\n" + str(self.winner) + "\n"
        return self.report


a = Random("Frida")
b = Sequential("Simone")

spill = SingleGame(a, b)
spill.play_game()
print(spill)

d = Random("Frida")
b = Sequential("Simone")

spill = SingleGame(d, b)
spill.play_game()
print(spill)

c = Random("Frida")
b = Sequential("Simone")
Code example #10
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout


def build_classifier():
    classifier = Sequential()
    classifier.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    classifier.add(Conv2D(64, (3, 3), activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    classifier.add(Flatten())
    classifier.add(Dense(64, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(1, activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
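Factory functions like build_classifier are commonly handed to Keras' scikit-learn wrapper for cross-validation. The sketch below is only an illustration under assumptions: the standalone-Keras wrapper module and the image arrays X_train / y_train are not part of the original snippet.

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score

clf = KerasClassifier(build_fn=build_classifier, epochs=10, batch_size=32)
accuracies = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=5)
print(accuracies.mean(), accuracies.std())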
Code example #11
File: sequential.py  Project: chiragob/keras
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import plot_model
model = Sequential([Dense(2, input_dim=1), Dense(1)])
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.utils import plot_model
# model = Sequential()
# model.add(Dense(2, input_dim=1))
# model.add(Dense(1))
# summarize layers
print(model.summary())
# plot graph
plot_model(model, to_file='multilayer_perceptron_graph.png')
Code example #12
File: WordTest.py  Project: hiilynn/Eleden
num = raw_input('>')
num = int(num)
A = locals()['A' + str(num - 1)]
B = locals()['B' + str(num - 1)]


def Zipper(A, B):
    C = [[A[i], B[i]] for i in range(len(A))]
    return C


def Unzipper(C):
    for i in range(len(C)):
        A[i] = C[i][0]
        B[i] = C[i][1]
    return A, B


if kind == 's':
    T = Sequential.Test(num, A, B)
    T.Start()
elif kind == 'r':
    C = Zipper(A, B)
    shuffle(C)
    A, B = Unzipper(C)
    T = Sequential.Test(num, A, B)
    T.Start()
else:
    print "Please input correct text."
    exit()
Code example #13
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution
classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Adding a second convolutional layer
classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))

# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# Part 2 - Fitting the CNN to the images
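The snippet stops at the "Part 2" comment; a typical continuation, shown only as an assumed sketch (the directory names and step counts are placeholders, not taken from the original code), feeds the network with Keras' ImageDataGenerator at the 64x64 input size used above.

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary')

classifier.fit_generator(training_set, steps_per_epoch=8000, epochs=25, validation_data=test_set, validation_steps=2000)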
Code example #14
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
(hand,field,action,next_field)
Code example #15
# create train and test lists. X - patterns, Y - intents
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Training data created")

"""# 4. Build machine learning model

After creating training data, build a deep neaural network that has 3 layers. The following code uses Keras' sequential API. 

The model is trained for 200 epochs, achieving 100% accuracy on the model. 

"""

# Create the model - 3 layers: first layer 128 neurons, second layer 64 neurons, and the 3rd (output) layer
# contains as many neurons as there are intents, so the output intent is predicted with softmax
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# Compile the model. Stochastic gradient descent with Nesterov accelerated gradient gives good results here
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# fit and save the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')

print("model created")
Code example #16
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import TensorBoard

import os
import numpy as np
import random


model = Sequential()

model.add(Conv2D(32, (3, 3), padding = 'same', input_shape = (176, 200, 3), activation = 'relu'))
model.add(Conv2D(32, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3), padding = 'same', activation = 'relu'))
model.add(Conv2D(64, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3), padding = 'same', activation = 'relu'))
model.add(Conv2D(128, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))

#   fully-connected dense layer
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))