Example #1
    # Requires: from keras.models import Sequential
    #           from keras.layers import Conv2D, Activation, MaxPooling2D, Dropout, Flatten, Dense
    #           from keras.optimizers import Adam
    # `env` is the game environment defined in the surrounding code.
    def create_model(self):
        model = Sequential()
        model.add(Conv2D(256, (3, 3),
                         input_shape=env.OBSERVATION_SPACE_VALUES))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))

        model.add(Conv2D(256, (3, 3)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))

        model.add(Flatten())
        model.add(Dense(64))

        model.add(Dense(env.ACTION_SPACE_SIZE, activation="linear"))
        model.compile(loss="mse",
                      optimizer=Adam(lr=0.001),
                      metrics=['accuracy'])
        return model
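A hedged usage sketch of querying this network for Q-values; `agent`, `state`, the `/255` image scaling, and numpy imported as `np` are assumptions from the surrounding DQN code, not part of the snippet:

model = agent.create_model()
# add a batch dimension and scale pixel values to [0, 1]
qs = model.predict(np.array(state).reshape(-1, *state.shape) / 255.0)[0]
action = np.argmax(qs)  # greedy action for this state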
Example #2
from keras.models import Sequential, Model
from keras.layers import (Dense, Reshape, UpSampling2D, Conv2D, Input,
                          Embedding, Flatten, multiply)


def build_generator(latent_size):
    cnn = Sequential()
    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    # channels-last layout: 7x7 feature maps with 128 channels
    cnn.add(Reshape((7, 7, 128)))
    cnn.add(UpSampling2D(size=(2, 2)))  # 7x7 -> 14x14
    cnn.add(
        Conv2D(256, (5, 5),
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))

    cnn.add(UpSampling2D(size=(2, 2)))  # 14x14 -> 28x28
    cnn.add(
        Conv2D(128, (5, 5),
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))

    # single-channel 28x28 output image
    cnn.add(
        Conv2D(1, (2, 2),
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))

    latent = Input(shape=(latent_size,))

    # condition on the class label by embedding it into the latent space
    image_class = Input(shape=(1,), dtype='int32')
    cls = Flatten()(Embedding(10, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))
    h = multiply([latent, cls])
    fake_image = cnn(h)

    return Model(inputs=[latent, image_class], outputs=fake_image)
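A hedged usage sketch sampling one fake image from the two-input generator; the latent size of 100 and the class id are placeholder assumptions:

import numpy as np

generator = build_generator(100)
noise = np.random.normal(size=(1, 100))
label = np.array([[3]])                   # hypothetical class id in [0, 10)
fake = generator.predict([noise, label])  # shape: (1, 28, 28, 1)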
Example #3
from keras.models import Model
from keras.layers import Input, Dropout, Dense, Embedding, LSTM, add
from keras.utils import plot_model


def define_model(vocab_size, max_length):
    # feature extractor model
    inputs1 = Input(shape=(4096, ))
    fe1 = Dropout(0.5)(inputs1)
    fe2 = Dense(256, activation='relu')(fe1)
    # sequence model
    inputs2 = Input(shape=(max_length, ))
    se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
    se2 = Dropout(0.5)(se1)
    se3 = LSTM(256)(se2)
    # decoder model
    decoder1 = add([fe2, se3])
    decoder2 = Dense(256, activation='relu')(decoder1)
    outputs = Dense(vocab_size, activation='softmax')(decoder2)
    # tie it together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    # compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # summarize model
    model.summary()
    plot_model(model, to_file='model.png', show_shapes=True)
    return model
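A hedged usage sketch; the sizes below are placeholders (in practice they come from the caption tokenizer), and plot_model additionally needs pydot and graphviz installed:

model = define_model(vocab_size=5000, max_length=30)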
Example #4
# Requires: from keras.models import Model
#           from keras.layers import Input, LSTM, Dropout, Dense, Activation
# pretrained_embedding_layer() is defined elsewhere in the surrounding assignment code.
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
    sentence_indices = Input(input_shape, dtype='int32')
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
    embeddings = embedding_layer(sentence_indices)
    X = LSTM(128, return_sequences=True)(embeddings)
    X = Dropout(0.5)(X)
    X = LSTM(128, return_sequences=False)(X)
    X = Dropout(0.5)(X)
    X = Dense(5)(X)
    X = Activation('softmax')(X)
    model = Model(inputs=sentence_indices, outputs=X)

    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    return model
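A hedged usage sketch; maxLen, word_to_vec_map, and word_to_index come from the surrounding assignment code and are assumptions here:

model = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)
model.summary()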
Example #5
from keras.models import Sequential
from keras.layers import Dense, Embedding, Flatten  # Embedding and Flatten are used further below

import numpy as np
x = np.array([1,2,3,4,5])
y = np.array([1,2,3,4,5])

model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=100, batch_size=1)

loss = model.evaluate(x, y, batch_size=1)  # compile() set no metrics, so only the loss is returned
print("loss : ", loss)
f.close()

print('Found %s word vectors.' % len(embeddings_index))

embedding_dim = 100
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=32,
                    validation_data=(x_val, y_val))
model.save_weights('pretrained_glove_model.h5')

acc = history.history['acc']
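The snippet is cut off after reading the accuracy history; a typical continuation (a sketch, not the original code) plots the training curves:

import matplotlib.pyplot as plt

val_acc = history.history['val_acc']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.legend()
plt.show()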
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD

import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000,1)), num_classes=10)
x_test = np.random.random((100,20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100,1)), num_classes=10)

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))  # 10 output units to match the one-hot targets

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=20, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
from sklearn.preprocessing import StandardScaler

# x_tr / x_ts / y_tr come from an earlier train/test split that was cut off here.
stsc = StandardScaler()
x_tr = stsc.fit_transform(x_tr)
x_ts = stsc.transform(x_ts)  # scale the test set with the statistics fitted on the training set

# ANN coding
import keras
from keras.models import Sequential  # to initialize the neural network
from keras.layers import Dense  # to add the layers of the ANN
# Two ways of defining a model: as a sequence of layers, or by graph
# (see the functional-API sketch after this snippet). We are doing it in sequence.
classifier = Sequential()
# Input layer
# We have 11 independent variables, so 11 input nodes.

classifier.add(
    Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11)
)  # first hidden layer; kernel_initializer='uniform' spreads the initial weights uniformly
classifier.add(
    Dense(units=6, kernel_initializer='uniform', activation='relu')
)  # second hidden layer; input_dim is not required since the previous layer's output size is known
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid')
)  # sigmoid because this is the output layer of a binary classifier; with more than two classes use softmax
# Compiling

classifier.compile(
    optimizer='adam', loss='binary_crossentropy',
    metrics=['accuracy'])  # the optimizer updates the weights; adam is a form of stochastic gradient descent
classifier.fit(x_tr, y_tr, batch_size=10,
               epochs=100)  # epochs is the number of passes over the training set
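As the comments note, the same network can also be defined by graph with the functional API; a minimal sketch of that alternative:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(11,))
h = Dense(6, kernel_initializer='uniform', activation='relu')(inp)
h = Dense(6, kernel_initializer='uniform', activation='relu')(h)
out = Dense(1, kernel_initializer='uniform', activation='sigmoid')(h)
graph_classifier = Model(inputs=inp, outputs=out)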
Example #9
File: 1.py Project: ha191180/Signate
# Requires: from keras.models import Sequential
#           from keras.layers import LSTM, Dense, Dropout
num_l1 = 20  # units of the first LSTM layer (its definition was cut off; this value is an assumption)
num_l2 = 20
num_output = 1

# Define the dropout rate
dropout_rate = 0.4

# Build the network
model = Sequential()
# Layer 1
model.add(
    LSTM(units=num_l1,
         activation='tanh',
         batch_input_shape=(None, X_train_t.shape[1], X_train_t.shape[2])))
model.add(Dropout(dropout_rate))
# Layer 2
model.add(Dense(num_l2, activation='relu'))
model.add(Dropout(dropout_rate))
# Output layer
model.add(Dense(num_output, activation='sigmoid'))
# Compile the network
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model (training takes from a few seconds to a few tens of seconds to complete)
result = model.fit(x=X_train_t,
                   y=y_train_t,
                   epochs=80,
                   batch_size=24,
                   validation_data=(X_val_t, y_val_t))
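batch_input_shape=(None, timesteps, features) means the model expects 3-D input; a hedged sketch with dummy shapes (the real X_train_t comes from earlier, truncated preprocessing):

import numpy as np

# 100 hypothetical samples, 10 time steps, 3 features per step
X_dummy = np.random.random((100, 10, 3))
y_dummy = np.random.randint(0, 2, size=(100, 1))
# a model built with batch_input_shape=(None, 10, 3) could then fit:
# model.fit(x=X_dummy, y=y_dummy, epochs=1, batch_size=24)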
Example #10
# Author: Vivek Singh
# Check this for more information : https://keras.io/getting-started/sequential-model-guide/
# Purpose: Sample Keras code to build first Sequential model

# import packages
from keras.layers import Dense, Activation
from keras.models import Sequential

# create a sequential model
model = Sequential()

# add first layer with RELU activation function
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))

from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dropout, Dense

# `data` and `target` (the image array and labels) come from earlier code that was cut off.

model = Sequential()

model.add(Conv2D(200, (3, 3), input_shape=data.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(100, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dropout(0.5))

model.add(Dense(50, activation='relu'))

model.add(Dense(2, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

from sklearn.model_selection import train_test_split

train_data, test_data, train_target, test_target = train_test_split(
    data, target, test_size=0.1)

checkpoint = ModelCheckpoint('model-{epoch:03d}.model',
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True)  # the original call was cut off here; save_best_only is an assumption

# Initialize the CNN
classifier = Sequential()

# Step 1 -- Convolution
classifier.add(
    Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

# Step 2 -- Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 -- Flattening
classifier.add(Flatten())

# Step 4 -- Connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))

# Step 5 -- Compiling the CNN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# ---- PART 2 ----

# Step 6 -- Fitting the CNN to images
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)  # the original was cut off here; horizontal_flip is an assumption
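The generator definition above was cut off; a typical continuation in this style (paths, sizes, and step counts are assumptions) streams directory batches into the classifier:

training_set = train_datagen.flow_from_directory('dataset/training_set',  # assumed path
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
classifier.fit_generator(training_set, steps_per_epoch=250, epochs=25)  # counts assumed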
Example #13
# Builds the action recognizer neural network
#
# University of California, Santa Barbara
# 2019

from keras.models import Sequential
from keras.layers import Dense, Dropout

# x_train / y_train / x_test are assumed to be defined by earlier, truncated code.
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dropout(0.5))
model.add(Dense(units=10, activation='softmax'))  # output layer; no dropout after the softmax

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5, batch_size=32)

classes = model.predict(x_test, batch_size=128)
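model.predict returns a row of class probabilities per sample; a one-line sketch to recover hard labels:

predicted_labels = classes.argmax(axis=1)  # index of the most probable class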
Example #14
# Problem : deriving a dependent variable from an unsupervised model
# Solution : augment the fraud outcomes from the SOM to generate the dependent variable
"""
    `frauds` contains the customer IDs of suspected frauds. These IDs can be used to find each
    customer's index in the `customers` matrix, letting us place a 1 in the dependent-variable
    vector at the location of each suspected fraud customer.
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense

# `dataset`, `frauds`, and `customers` come from the earlier SOM code.
is_fraud = np.zeros(len(dataset))
# Update suspected frauds
for i in range(len(dataset)):
    if dataset.iloc[i, 0] in frauds:
        is_fraud[i] = 1

# Feature scaling
sc = StandardScaler()
customers = sc.fit_transform(customers)

classifier = Sequential()
classifier.add(Dense(units=2, kernel_initializer='uniform',
                     activation='relu', input_dim=15))
classifier.add(
    Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy', metrics=['accuracy'])
classifier.fit(customers, is_fraud, batch_size=1, epochs=2)
# Predicting the probability of fraud for each customer
y_pred = classifier.predict(customers)
y_pred = np.concatenate((dataset.iloc[:,0:1].values, y_pred), axis=1)
# Sort y_pred by column 1 (the predicted fraud probability)
y_pred = y_pred[y_pred[:, 1].argsort()]
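With y_pred sorted by predicted fraud probability, the most suspicious customers sit at the end of the array; a short hedged sketch:

# Ten customer IDs with the highest predicted fraud probability
top_suspects = y_pred[-10:, 0]
print(top_suspects)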