Example #1
def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=WEIGHT_DECAY):
    model = Sequential()
    model.add(
        Embedding(
            NB_WORDS,
            128,
            embeddings_regularizer=l2(weight_decay),
            input_length=MAXLEN))  # , batch_input_shape=(batch_size, maxlen)))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay),
              activation='sigmoid'))
    optimizer = Adam(1e-3)
    model.compile(loss='binary_crossentropy',
                  metrics=["binary_accuracy"],
                  optimizer=optimizer)
    return model
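A minimal usage sketch for this builder. It expects module-level constants that are not shown above; the values here are assumptions and must exist before get_model is defined:

NB_WORDS = 20000      # assumed vocabulary size
MAXLEN = 80           # assumed padded sequence length
WEIGHT_DECAY = 1e-4   # assumed l2 strength

model = get_model(idrop=0.2, odrop=0.25)
# x_train: (n_samples, MAXLEN) integer word indices; y_train: (n_samples,) 0/1
# model.fit(x_train, y_train, batch_size=32, epochs=3, validation_split=0.1)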
Example #2
def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=1e-4,
              lr=1e-3):
    model = Sequential()
    model.add(
        Embedding(NB_WORDS,
                  128,
                  embeddings_regularizer=l2(weight_decay),
                  input_length=MAXLEN))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay)))
    optimizer = Adam(lr)
    model.compile(loss='mse', metrics=["mse"], optimizer=optimizer)
    return model
Example #3
def fit_lstm(train, batch_size, nb_epoch, neurons):
	X, y = train[:, 0:-1], train[:, -1]
	X = X.reshape(X.shape[0], 1, X.shape[1])
	model = Sequential()
	model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
	model.add(Dense(1))
	model.compile(loss='mean_squared_error', optimizer='adam')
	for i in range(nb_epoch):
		model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
		model.reset_states()
	return model
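A hedged usage sketch: `train` is a 2-D supervised matrix whose last column is the target. The toy lag-1 series and hyperparameters below are assumptions:

import numpy as np

series = np.sin(np.linspace(0, 20, 201))             # toy univariate series
train = np.stack([series[:-1], series[1:]], axis=1)  # columns: [X_lag1, y]
lstm_model = fit_lstm(train, batch_size=1, nb_epoch=10, neurons=4)
# batch_size=1 keeps the stateful batch_input_shape compatible with any length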
Example #4
def add_softmax(model: Sequential) -> Sequential:
    """ Append the softmax layers to the frontend or frontend + context net. """
    # The softmax layer doesn't work on the (width, height, channel)
    # shape, so we reshape to (width*height, channel) first.
    # https://github.com/fchollet/keras/issues/1169
    _, curr_width, curr_height, curr_channels = model.layers[-1].output_shape

    model.add(Reshape((curr_width * curr_height, curr_channels)))
    model.add(Activation('softmax'))
    # Technically, we need another Reshape here to get back to 2d, but TF
    # then complains when batch_size > 1. We're just going to reshape in numpy.
    # model.add(Reshape((curr_width, curr_height, curr_channels)))

    return model
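A hedged sketch of the numpy reshape mentioned in the comment, using get_frontend from Example #23 below (the 512x512 input size is an assumption):

import numpy as np

net = get_frontend(512, 512)
_, w, h, c = net.layers[-1].output_shape         # spatial shape, pre-Reshape
net = add_softmax(net)
probs = net.predict(np.zeros((1, 512, 512, 3)))  # (1, w*h, c)
probs = probs.reshape((1, w, h, c))              # back to spatial, in numpy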
Example #5
def build_network(row, col, channels, num_class):
    model = Sequential()
    model.add(
        Conv2D(32, (8, 8), (4, 4),
               activation='relu',
               input_shape=(row, col, channels)))
    model.add(Conv2D(64, (4, 4), (2, 2), activation='relu'))
    model.add(Conv2D(64, (3, 3), (1, 1), activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))  # softmax to match the loss below

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #6
def grad_cam(input_model, image, category_index):
    """
    Args: model to make predictions, image to predict, index of categories and
    their predicted probabilities.
    
    Constructs a colour map showing where the classifier puts the highest weight
    for a given image in making its prediction.
    
    Returns: numpy array of same dimension as image but instead displaying colours
    according to where the classifier puts the most weight.
    """
    model = Sequential()
    model.add(input_model)
    nb_classes = 10
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer))
    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].layers[29].output  # this index needs to be changed depending on the network structure
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input],
                                   [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255.0 * cam / np.max(cam)
    return np.uint8(cam)
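A hypothetical call, assuming a CNN whose layer 29 is its last convolutional layer (as the comment notes, that index depends on the architecture):

# preds = cnn.predict(preprocessed_img)              # (1, nb_classes)
# cam_image = grad_cam(cnn, preprocessed_img, np.argmax(preds))
# cv2.imwrite('gradcam.jpg', cam_image)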
Example #7
def create_two_stream_classifier(
        num_fc_neurons,
        dropout_rate,
        num_classes=24):  # classifier_weights_path=None
    classifier = Sequential()
    classifier.add(
        Dense(num_fc_neurons, name='fc7', input_shape=(num_fc_neurons * 2, )))
    #classifier.add(BatchNormalization(axis=1, name='fc7_bn'))
    classifier.add(Activation('relu', name='fc7_ac'))
    classifier.add(Dropout(dropout_rate))
    classifier.add(Dense(num_classes, activation='softmax',
                         name='predictions'))
    return classifier
Example #8
    def _build_model(self):
        model = Sequential()
        model.add(Dense(3, input_dim=2, activation='tanh'))
        model.add(Dense(3, activation='tanh'))
        model.add(Dense(self.env.action_space.n, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.alpha, decay=self.alpha_decay))
        return model
Example #9
class TinyYoloV2(NetworkSkeleton):
    """
    This class handles the building and the loss of the
    tiny yolo v2 network.
    """
    def __init__(self, config):
        """
        Initializes class variables.
        :param config: Contains the networks hyperparameters
        """
        super().__init__()
        self.config = config
        self.network = None
        self.loss = None
        self.model = None
        self.input_shape = config.input_shape

    def _build(self, input):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(
            Lambda(lambda x: x / 127.5 - 1., input_shape=self.input_shape))
        self.model.add(Convolution2D())

        return logits

    def _loss(self):
        """
        Calculates the loss of the network.
        :return: loss
        """
        raise NotImplementedError

    def __load_pretrained_network(self):
        """
Example #10
    def lstm(self):
        """Build a simple LSTM network. We pass the extracted features from
        our CNN to this model predomenently."""
        # Model.
        model = Sequential()
        model.add(
            LSTM(2048,
                 return_sequences=False,
                 input_shape=self.input_shape,
                 dropout=0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Example #11
def create_VGG16(num_fc_neurons,
                 dropout_rate,
                 num_classes=24,
                 top_model_weights_path=None,
                 img_height=224,
                 img_width=224,
                 include_loc='all',
                 activation='softmax'):
    # load pre-trained convolutional model
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=get_input_shape(img_height, img_width))

    # build a classifier model to put on top of the convolutional model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    for i in range(6, 8):
        top_model.add(Dense(num_fc_neurons, name='fc' + str(i)))
        #top_model.add(BatchNormalization(axis=1, name='fc'+str(i)+'_bn'))
        top_model.add(Activation('relu', name='fc' + str(i) + '_ac'))
        top_model.add(Dropout(dropout_rate))
    top_model.add(Dense(num_classes, activation=activation,
                        name='predictions'))
    if top_model_weights_path is not None:
        top_model.load_weights(top_model_weights_path)

    if include_loc == 'base':
        model = base_model
    elif include_loc == 'top':
        model = top_model
    elif include_loc == 'all':  # add the model on top of the convolutional base
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
    else:
        raise ValueError('Only "base", "top" and "all" can be included.')
    return model
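A hedged usage sketch: build the full stack and fine-tune only the top, freezing the VGG16 base (the hyperparameters are assumptions; get_input_shape comes from the same module as create_VGG16):

model = create_VGG16(num_fc_neurons=4096, dropout_rate=0.5,
                     include_loc='all', activation='softmax')
for layer in model.layers[:-1]:  # every layer except the stacked top model
    layer.trainable = False
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])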
Example #12
    def _build_model(self):

        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
Example #13
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Activation

model = Sequential([
    Dense(32, input_shape=(784, )),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

model.summary()
"""
You can also simply add layers via the `.add()` method:
"""

model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()
"""
## Specifying the input shape

The first layer needs to receive information about its input shape.
- input_shape:
	- batch_dim is not included,
	- input_shape=(interger,) or
	- input_shape= None, but it seems not working

"""
Example #14
def generator_containing_discriminator(g, d):
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model
Example #15
def discriminator_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Example #16
def generator_model():
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128)))  # input_shape is inferred on non-first layers
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model
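A hedged sketch of how Examples #14-#16 combine in a single GAN training step (the batch size, optimizer, and real image batch are assumptions):

import numpy as np

g = generator_model()
d = discriminator_model()
d.compile(loss='binary_crossentropy', optimizer='sgd')
gan = generator_containing_discriminator(g, d)  # freezes d inside gan
gan.compile(loss='binary_crossentropy', optimizer='sgd')

noise = np.random.uniform(-1, 1, size=(32, 100))
fake = g.predict(noise)
# real_batch: (32, 28, 28, 1) images from the training set (not defined here)
# d.train_on_batch(np.concatenate([real_batch, fake]), [1] * 32 + [0] * 32)
gan.train_on_batch(noise, [1] * 32)  # train g to fool the frozen d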
Example #17
def create_model():
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
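Factory functions like this one are the shape the scikit-learn wrapper expects. A hedged sketch (the wrapper import path mirrors this document's tensorflow.contrib style and is an assumption, as are X and Y):

from tensorflow.contrib.keras.python.keras.wrappers.scikit_learn import KerasClassifier

clf = KerasClassifier(build_fn=create_model, epochs=10, batch_size=16, verbose=0)
# from sklearn.model_selection import cross_val_score
# scores = cross_val_score(clf, X, Y, cv=5)   # X: (n, 8) features, Y: (n,) 0/1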
Example #18
n_train_hours = 365 * 24  # 1 year
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
train_X = train[:, :-1]
train_y = train[:, -1]
test_X = test[:, :-1]
test_y = test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

# fit network
history = model.fit(train_X,
                    train_y,
                    epochs=50,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2,
                    shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='Training Loss')
pyplot.plot(history.history['val_loss'], label='Validation Loss')
pyplot.legend()
print("Accuracy = {:.2f}".format(lr.score(test_X, test_y)))  # get metrics


Example #19
def one_hot_encode_object_array(arr):
    '''One hot encode a numpy array of objects (e.g. strings)'''
    uniques, ids = np.unique(
        arr, return_inverse=True)  # convert 3 words into 0, 1, 2
    return to_categorical(ids, len(uniques))  # convert 0, 1, 2 to one-hot
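A quick sanity check of the helper (toy labels):

# one_hot_encode_object_array(np.array(['setosa', 'virginica', 'setosa']))
# -> [[1, 0], [0, 1], [1, 0]]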


train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)

model = Sequential()

model.add(Dense(16, input_shape=(4, )))  # each sample has 4 features
model.add(Activation('sigmoid'))  # add non-linearity to hidden layer 1

model.add(Dense(3))  # add another 3 neuron final layer
model.add(Activation('softmax'))  # give it non-linearity as output
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=["accuracy"])

model.fit(train_X,
          train_y_ohe,
          validation_split=0.2,
          epochs=10,
          batch_size=1)
Example #20
import numpy as np

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([[0],[1],[1],[0]])
# it is better to be replaced with true target function ^ or xor function
y = X[:, 0] ^ X[:, 1]  # ^ is xor function

#######################################################
# Sequential can split activations into separate layers, just like Model
#######################################################
# check whether the 3 models below are the same or not
model = Sequential()
model.add(Dense(4, input_dim=2,
                name='dense1'))  # 2 nodes reach ~50% accuracy, 8 nodes 100%
model.add(Activation('relu', name='dense1_act'))
model.add(Dense(1, name='dense2'))
model.add(Activation('sigmoid', name='dense2_act'))  # output shaped by sigmoid
model.summary()  # see each layer of a model

# use Model instead of Sequential
input_tensor = Input(shape=(2, ), name='input')
hidden = Dense(4, activation='relu', name='dense1_relu')(input_tensor)
output = Dense(1, activation='sigmoid',
               name='dense2_sigm')(hidden)  # output shaped by sigmoid
model1 = Model(inputs=input_tensor, outputs=output)
model1.summary()  # see each layer of a model
"""
# use Model to split layer and activation
input_tensor = Input(shape=(2,))
Example #21
def mk_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=.1))
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(MaxoutDense(output_dim=256, nb_feature=4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_uniform'))
    model.add(Activation('softmax'))

    return model
Example #22
def mk_model_with_bn():
    model = Sequential()
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_normal'))
    model.add(Activation('softmax'))

    return model
Example #23
def get_frontend(input_width, input_height) -> Sequential:
    model = Sequential()
    # model.add(ZeroPadding2D((1, 1), input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_1', input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(128, 3, activation='relu', name='conv2_1'))
    model.add(Conv2D(128, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(256, 3, activation='relu', name='conv3_1'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_2'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(512, 3, activation='relu', name='conv4_1'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_2'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_3'))

    # Compared to the original VGG16, we skip the next 2 MaxPool layers,
    # and go ahead with dilated convolutional layers instead

    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_1'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_2'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_3'))

    # Compared to VGG16, we replace the FC layers with convolutions

    model.add(Conv2D(4096, 7, dilation_rate=(4, 4), activation='relu', name='fc6'))
    model.add(Dropout(0.5))
    model.add(Conv2D(4096, 1, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    # Note: this layer has linear activations, not ReLU
    model.add(Conv2D(21, 1, activation='linear', name='fc-final'))

    # model.layers[-1].output_shape == (None, 16, 16, 21)
    return model
Example #24
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import LSTM, Dense
import numpy as np

data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32

# Expected input batch shape: (batch_size, timesteps, data_dim)
# Note that we have to provide the full batch_input_shape since the network is stateful:
# the sample at index i in batch k is the follow-up to sample i in batch k-1.
model = Sequential()
model.add(
    LSTM(
        32,
        return_sequences=True,
        stateful=True,  #input(32,8,16)
        batch_input_shape=(batch_size, timesteps, data_dim)))  #output(32,?,32)
model.add(LSTM(32, return_sequences=True, stateful=True))  # output(32,?,32)
model.add(LSTM(32, stateful=True))  # output (32,32)
model.add(Dense(10, activation='softmax'))  # output (32, 10)

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))

# Generate dummy validation data
Example #25
import numpy as np
import pylab as plt

# NN to generate movie frames

from tensorflow.contrib.keras.python.keras.layers import ConvLSTM2D, BatchNormalization, Conv3D
from tensorflow.contrib.keras.python.keras.models import Sequential

seq = Sequential()
seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               input_shape=(None, 40, 40, 1),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())

seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())

seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())
Example #26
def add_context(model: Sequential) -> Sequential:
    """ Append the context layers to the frontend. """
    model.add(ZeroPadding2D(padding=(33, 33)))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_1'))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_2'))
    model.add(Conv2D(84, 3, dilation_rate=(2, 2), activation='relu', name='ct_conv2_1'))
    model.add(Conv2D(168, 3, dilation_rate=(4, 4), activation='relu', name='ct_conv3_1'))
    model.add(Conv2D(336, 3, dilation_rate=(8, 8), activation='relu', name='ct_conv4_1'))
    model.add(Conv2D(672, 3, dilation_rate=(16, 16), activation='relu', name='ct_conv5_1'))
    model.add(Conv2D(672, 3, activation='relu', name='ct_fc1'))
    model.add(Conv2D(21, 1, name='ct_final'))

    return model
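A hedged sketch chaining Examples #23, #26, and #4 into the full dilated segmentation network (the 512x512 input size is an assumption):

model = get_frontend(512, 512)
model = add_context(model)   # context aggregation on top of the frontend
model = add_softmax(model)   # per-pixel class probabilities, flattened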
Example #27
def simple_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(filters=64, kernel_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM))
    model.add(Activation('softmax'))

    return model
Example #28
def main():
    x_train, y_train, x_test, y_test = load_data()

    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(11, 11),
               strides=4,
               padding="same",
               activation='relu',
               input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(5, 5),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(Flatten())  # flatten conv features before the dense layers
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    model.compile(loss='categorical_crossentropy',  # 7-class softmax output
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))

    model.save(expanduser("~/emotion/alex_net.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Example #29

num_classes = np.max(y_train) + 1
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)

print('x_train shape :', x_train.shape)
print('x_test shape :', x_test.shape)
print('y_train shape :', y_train.shape)
print('y_test shape :', y_test.shape)

batch_size = 32
epochs = 5

model = Sequential()
model.add(Dense(512, input_shape=(max_words, )))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=0.1)
Example #30
"""
### MLP for binary classification:
"""
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical
import numpy as np

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', # binary classification
              optimizer='rmsprop',
              metrics=['accuracy'])

hist = model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=1,
          batch_size=128)

hist.history
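hist.history is a plain dict mapping each tracked metric to a per-epoch list, so the curves can be plotted as in Example #18 (the exact key names, e.g. 'acc' vs 'accuracy', depend on the Keras version):

# plt.plot(hist.history['loss'], label='Training Loss')
# plt.plot(hist.history['val_loss'], label='Validation Loss')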