Ejemplo n.º 1
0
def _get_keras_vgg16():
    """
    Build the VGG16 network with Keras.

    Return
        the VGG16 model (a Sequential ending in a 1000-way softmax,
        i.e. the ImageNet classification head)
    """

    # Standard VGG16 input size: 224x224 RGB.
    input_shape = (224, 224, 3)
    model = Sequential([
        # Block 1: two 64-filter convs + 2x2 max-pool.
        Conv2D(64, (3, 3),
               input_shape=input_shape,
               padding='same',
               activation='relu'),
        Conv2D(64, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Block 2: two 128-filter convs + pool.
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Block 3: three 256-filter convs + pool.
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        Conv2D(256, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Block 4: three 512-filter convs + pool.
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Block 5: three 512-filter convs + pool.
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        Conv2D(512, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Classifier head: two 4096-unit FC layers + 1000-way softmax.
        Flatten(),
        Dense(units=4096, activation='relu'),
        Dense(units=4096, activation='relu'),
        Dense(units=1000, activation='softmax')
    ])
    return model
Ejemplo n.º 2
0
# Bug fix: Sequential is a model class exported from keras.models,
# not keras.layers -- the original import raised ImportError.
from keras.models import Sequential
from keras.layers import Dense, Activation

# Simple MLP: 100 inputs -> 64 ReLU units -> 10-way softmax.
model = Sequential()
model.add(Dense(units=64, input_dim=100))
model.add(Activation("relu"))
model.add(Dense(units=10))
model.add(Activation("softmax"))
Ejemplo n.º 3
0
def calibrateModel(logitExtractor, valImages, valLabels, classes, epochs):
    """Fit a small calibration network on validation-set logits.

    Args:
        logitExtractor: model whose predict() yields the raw logits.
        valImages: validation inputs fed to logitExtractor.
        valLabels: integer class labels for the validation set.
        classes: number of classes (width of the logit vector).
        epochs: number of calibration training epochs.

    Returns:
        The fitted calibration model.
    """
    logits = logitExtractor.predict(valImages)
    targets = keras.utils.to_categorical(valLabels)

    calibrate = Sequential()
    # Identity layer -- does nothing, but gives the model its input shape.
    # TODO: find a cleaner way to declare the input.
    calibrate.add(Activation('linear', input_shape=(classes, )))
    # Custom layer (defined outside this function) that divides the
    # logits by a learnable parameter.
    calibrate.add(LayerMDivide())
    # Softmax over the scaled logits.
    calibrate.add(Activation('softmax'))

    calibrate.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.RMSprop(),
                      metrics=['accuracy'])

    calibrate.fit(logits,
                  targets,
                  batch_size=128,
                  epochs=epochs,
                  shuffle=True,
                  verbose=0)

    return calibrate
Ejemplo n.º 4
0
def modelMaker(ROW, COL, CHANNEL, CLASSES):
    """Build and compile a VGG-style CNN classifier.

    Args:
        ROW, COL: spatial dimensions of the input images.
        CHANNEL: number of input channels. Bug fix: this parameter was
            previously ignored and the channel count hard-coded to 3.
        CLASSES: number of output classes.

    Returns:
        A compiled Keras Sequential model (Adam, categorical crossentropy).
    """
    model = Sequential()
    optimizer = Adam(lr=0.0001)

    # Conv block 1: two 3x3 convs (32 filters) + max-pool.
    model.add(
        Conv2D(32, (3, 3),
               input_shape=(ROW, COL, CHANNEL),  # was hard-coded (ROW, COL, 3)
               padding='same',
               activation='relu',
               name='input',
               kernel_regularizer=regularizers.l2()))
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_1',
               kernel_regularizer=regularizers.l2()))
    model.add(
        MaxPooling2D(pool_size=2,
                     data_format='channels_last',
                     name='maxpool2d_1'))

    # Conv block 2: two 3x3 convs (64 filters) + max-pool.
    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_2',
               kernel_regularizer=regularizers.l2()))
    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_3',
               kernel_regularizer=regularizers.l2()))
    model.add(
        MaxPooling2D(pool_size=2,
                     data_format='channels_last',
                     name='maxpool2d_2'))

    # Conv block 3: two 3x3 convs (128 filters) + max-pool.
    model.add(
        Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_4',
               kernel_regularizer=regularizers.l2()))
    model.add(
        Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_5',
               kernel_regularizer=regularizers.l2()))
    model.add(
        MaxPooling2D(pool_size=2,
                     data_format='channels_last',
                     name='maxpool2d_3'))

    # Conv block 4: two 3x3 convs (256 filters) + max-pool.
    model.add(
        Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_6',
               kernel_regularizer=regularizers.l2()))
    model.add(
        Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               name='conv2d_7',
               kernel_regularizer=regularizers.l2()))
    model.add(
        MaxPooling2D(pool_size=2,
                     data_format='channels_last',
                     name='maxpool2d_4'))

    # Classifier head: flatten, two 256-unit FC layers with dropout,
    # then a CLASSES-way softmax output.
    model.add(Flatten(name='flatten_1'))
    model.add(Dense(units=256, activation='relu', name='fc_1'))
    model.add(Dropout(rate=0.5, name='drop_1'))
    model.add(Dense(units=256, activation='relu', name='fc_2'))
    model.add(Dropout(rate=0.5, name='drop_2'))
    model.add(Dense(units=CLASSES, name='output'))
    model.add(Activation(activation='softmax', name='activation_out'))

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Ejemplo n.º 5
0
# Fill missing values: embarkation port defaults to 'S', age to the median.
df['Embarked'].fillna('S', inplace=True)
df['Age'].fillna(df['Age'].median(), inplace=True)
# Derive a Title column from Name (get_title / shorter_titles are
# defined elsewhere in the project -- presumably they extract and
# normalize honorifics; verify against their definitions).
df['Title'] = df['Name'].map(lambda x: get_title(x))
df['Title'] = df.apply(shorter_titles, axis=1)
# Drop columns that are not used as model features.
df.drop(['Name', 'Cabin', 'Ticket'], axis=1, inplace=True)
df.drop('PassengerId', axis=1, inplace=True)
# Encode categorical columns as small integers.
df.Sex.replace(('male', 'female'), (0, 1), inplace=True)
df.Embarked.replace(('S', 'C', 'Q'), (0, 1, 2), inplace=True)
df.Title.replace(
    ('Mr', 'Miss', 'Mrs', 'Master', 'Dr', 'Rev', 'Officer', 'Royalty'),
    (0, 1, 2, 3, 4, 5, 6, 7),
    inplace=True)

###################################################################

# Binary classifier: 8 input features -> survival probability (sigmoid).
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(8, )))
model.add(Dropout(rate=0.2))
model.add(Dense(64, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dropout(rate=0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(rate=0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(rate=0.2))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Feature matrix: everything except the target column.
x = df.drop('Survived', axis=1)
# @author: karnika
#importing keras libraries and packages
#initialize (layers/graph)
# Bug fix: Sequential is exported from keras.models, not keras.layers.
from keras.models import Sequential
#image type+basic step
from keras.layers import Convolution2D
#adding pooling layers, feature maps
from keras.layers import MaxPooling2D
#input for fully connected layers
from keras.layers import Flatten
#add fully connected layers
from keras.layers import Dense
#choosing no of feature detectors we create (hence feature maps)
#initialising
classifier = Sequential()

#step1 : convolution
classifier.add(
    Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))
#(no of feature maps = filter use) (stride 1)
#increase filters, input shape when on gpu, 3x3 dimensions
#converting all images in the same format: input_shape argument; expected format
#b&w : 2d array, colored: 3d array(blue, green, red)

#step 2: max pooling : reduced featured map
#(stride 2)
classifier.add(MaxPooling2D(pool_size=(2, 2)))

#step 3: flattening (pooled feature map into one single vector)
#mid steps imp to provide info about how pixels are connected to each other along w individual info
from keras.utils import np_utils
import matplotlib.pyplot as plt
from keras.utils import plot_model
# Bug fix: Sequential lives in keras.models, not keras.layers; the
# duplicate MaxPooling2D entry on this line was also dropped.
from keras.layers import MaxPooling2D, Dense, Activation, Flatten, Dropout, BatchNormalization, Conv2D
from keras.models import Sequential
from keras import optimizers
from keras.models import Model
from keras.models import load_model


def load_pickle(filename):
    """Deserialize and return the object stored in *filename*.

    Args:
        filename: path to a file written with pickle.dump.

    Returns:
        The unpickled Python object.
    """
    # Local stdlib import: `pickle` is not imported anywhere in this
    # snippet, so the original raised NameError when called.
    # NOTE: only unpickle trusted files -- pickle can execute arbitrary code.
    import pickle
    with open(filename, 'rb') as f:
        data = pickle.load(f)
    return data


# Small CNN for 64x64 RGB inputs.
# NOTE(review): this snippet is truncated by the scrape -- the model has
# no visible output layer or compile call after the last Conv2D.
model = Sequential()
# Conv block 1: two 32-filter convs + pool + 25% dropout.
model.add(Conv2D(32, (3, 3), padding='same', input_shape=(64, 64, 3)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Conv block 2: two 64-filter convs + pool + 50% dropout.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
# Conv block 3: 128-filter convs (cut off mid-definition).
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3)))
Ejemplo n.º 8
0
import keras
# Bug fix: Sequential is exported from keras.models, not keras.layers.
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# Binary image classifier: conv -> pool -> flatten -> FC -> sigmoid.
classifier = Sequential()
classifier.add(
    Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Bug fix: Flatten must be instantiated; adding the class object
# (classifier.add(Flatten)) raised a TypeError.
classifier.add(Flatten())
classifier.add(Dense(128, activation='relu'))
classifier.add(Dense(1, activation='sigmoid'))

# Bug fixes: the method is `compile` (not `compiler`), and `metrics`
# must be a list of metric names.
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)
# Bug fix: the generator method is flow_from_directory, not flow_directory.
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
Ejemplo n.º 9
0
# Reshape the target variables (drop the first 4 samples).
y_train_t = y_train[4:]
y_val_t = y_val[4:]
y_test_t = y_test[4:]

# Sizes of each network layer.
num_l1 = 100
num_l2 = 20
num_output = 1

# Dropout rate.
dropout_rate = 0.4

# Build the network below.
model = Sequential()
# Layer 1: LSTM over (timesteps, features) taken from X_train_t's shape.
model.add(
    LSTM(units=num_l1,
         activation='tanh',
         batch_input_shape=(None, X_train_t.shape[1], X_train_t.shape[2])))
model.add(Dropout(dropout_rate))
# Layer 2: fully connected.
model.add(Dense(num_l2, activation='relu'))
model.add(Dropout(dropout_rate))
# Output layer: single sigmoid unit (binary classification).
model.add(Dense(num_output, activation='sigmoid'))
# Compile the network.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
Ejemplo n.º 10
0
#TODO: writing a program to get predictive results from a linear input dataset

# Bug fix: Sequential lives in keras.models, not keras.layers.
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=20))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

#dataset with independent vector ranging 1 to 20 and dependent vector 2 to 41 with help of numpy

# NOTE(review): x_train is a flat vector of 21 scalars while the model
# declares input_dim=20 (20 features per sample), so model.fit below will
# fail on shape; the data needs reshaping to (n_samples, 20) or the model
# input_dim changed to 1. Left as written pending the author's intent.
x_train = numpy.arange(0, 21, 1)
y_train = numpy.arange(0, 41, 2)

x_test = numpy.arange(0, 50, 5)

#fitting the model
model.fit(x_train, y_train, epochs=10, batch_size=32)
Ejemplo n.º 11
0
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# Bug fix: Sequential is in keras.models, not keras.layers.
from keras.models import Sequential
from PreProcessing import a, b, c


def selectFeatures(a):
    """Select the features to keep from dataset *a*.

    NOTE(review): still a stub -- `N_features` is never defined, so every
    call below raises NameError. Needs a real implementation.
    """
    #this will be a final dataset for
    return N_features


Dataset1 = selectFeatures(a)
Dataset2 = selectFeatures(b)
Dataset3 = selectFeatures(c)

#number of neurons will be changed later depending on size of datasets
N = 0
model = Sequential()
model.add(Dense(N))
model.add(Dense(N))

#final NN will be built later
#keras includes all functions that are needed to train, test, etc. so not need for implementation
Ejemplo n.º 12
0
print('nb sequences ', len(sentences))


# Initialize the input and output tensors.
# Bug fix: the original x = np.zeros(...) line had unbalanced parentheses
# (a stray "(" before len(chars) and no closing ")" for the shape tuple).
# np.bool was also deprecated and removed from modern NumPy; plain bool
# is the supported spelling.
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    # One-hot encode each character position of the sequence.
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    # Target: one-hot of the character that follows the sequence.
    y[i, char_indices[next_chars[i]]] = 1


# Model: single LSTM layer + softmax over the character vocabulary.
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation='softmax'))

optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)

def sample(preds, temperature=1.0):
    """Draw a class index from *preds* after temperature scaling.

    Args:
        preds: probability distribution over classes (array-like).
        temperature: softmax temperature; lower values sharpen the
            distribution, higher values flatten it.

    Returns:
        The sampled index as an int (argmax of a single multinomial draw).
    """
    scaled = np.asarray(preds).astype('float64')
    # Re-weight in log space, then renormalize with a softmax.
    scaled = np.log(scaled) / temperature
    weights = np.exp(scaled)
    weights = weights / np.sum(weights)
    # One multinomial draw; the winning index is the sample.
    draw = np.random.multinomial(1, weights, 1)
    return np.argmax(draw)

    
# Training hyper-parameters.
nb_epochs = 10
nb_fc_neurons = 512
nb_filter = 32
nb_conv2d = 3

# Input image size.
img_width, img_height = 128, 128

log_name = 'keras_model_training.log'
model_name = 'keras_model.h5'

# Channel ordering depends on the configured Keras backend.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = Sequential()

# Conv block 1: nb_filter (32) filters.
model.add(Conv2D(nb_filter, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Conv block 2: 2*nb_filter (64) filters.
model.add(Conv2D(nb_filter*2, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Conv block 3: 4*nb_filter (128) filters.
model.add(Conv2D(nb_filter*4, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Classifier head. NOTE(review): truncated here by the scrape -- no
# output layer or compile call is visible after the Dense below.
model.add(Flatten())
model.add(Dense(nb_fc_neurons))
# Scale the training series into [0, 1] for the LSTM.
min_max_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_trainingset = min_max_scaler.fit_transform(trainingset)

# Build sliding windows: the previous 40 values predict the next one.
xTrain = []
yTrain = []

for i in range(40, 1250):
    xTrain.append(scaled_trainingset[i - 40:i, 0])
    yTrain.append(scaled_trainingset[i, 0])

# Bug fix: np.arrayx is not a NumPy function -- typo for np.array,
# which raised AttributeError at runtime.
xTrain = np.array(xTrain)
yTrain = np.array(yTrain)

# LSTM input must be 3-D: (samples, timesteps, features).
xTrain = np.reshape(xTrain, (xTrain.shape[0], xTrain.shape[1], 1))

# Stacked-LSTM regressor with dropout between layers.
model = Sequential()
model.add(
    LSTM(units=100, return_sequences=True, input_shape=(xTrain.shape[1], 1)))
model.add(Dropout(0.5))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=50))
model.add(Dropout(0.3))
model.add(Dense(units=1))

model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(xTrain, yTrain, epochs=100, batch_size=32)

# Prepare test inputs: last 40 training values + the test series.
datasetTotal = pd.concat(
    (pricesDatasetTrain['adj_close'], pricesDatasetTest['adj_close']), axis=0)
inputs = datasetTotal[len(datasetTotal) - len(pricesDatasetTest) - 40:].values