Example #1
def compile(name,
            model: Sequential,
            train_samples: pd.DataFrame,
            validation_samples: pd.DataFrame,
            gen,
            sample_type='img'):  # renamed from `type` to avoid shadowing the builtin

    # model.add(Reshape((-1, num_classes), name=RESHAPED))
    size = 5  # batch size
    steps_per_epoch = len(train_samples) // size
    validation_steps = len(validation_samples) // size
    train_generator = gen(train_samples, sample_type)(size, infinite=True)
    validation_generator = gen(validation_samples, sample_type)(size, infinite=True)

    adam = optimizers.Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy', optimizer=adam)

    history_object = model.fit_generator(train_generator,
                                         validation_data=validation_generator,
                                         epochs=5,
                                         callbacks=None,
                                         validation_steps=validation_steps,
                                         steps_per_epoch=steps_per_epoch)

    model.save_weights(name)
    # model.save('fcn_model.h5')

    print(history_object.history.keys())
    print('Loss')
    print(history_object.history['loss'])

    print('Validation Loss')
    print(history_object.history['val_loss'])
Example #2
 def _build_model(self):
     model = Sequential()
     model.add(Dense(3, input_dim=2, activation='tanh'))
     model.add(Dense(3, activation='tanh'))
     model.add(Dense(self.env.action_space.n, activation='linear'))
     model.compile(loss='mse', optimizer=Adam(lr=self.alpha, decay=self.alpha_decay))
     return model
Example #3
    def _build_model(self):

        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
Example #4
    def _build(self, input):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(
            Lambda(lambda x: x / 127.5 - 1., input_shape=self.input_shape))
        # Placeholder filters/kernel size; the original arguments are not shown.
        self.model.add(Convolution2D(16, (3, 3), padding='same'))

        return self.model.output  # the logits tensor
Example #5
def create_two_stream_classifier(
        num_fc_neurons,
        dropout_rate,
        num_classes=24):  # classifier_weights_path=None
    classifier = Sequential()
    classifier.add(
        Dense(num_fc_neurons, name='fc7', input_shape=(num_fc_neurons * 2, )))
    #classifier.add(BatchNormalization(axis=1, name='fc7_bn'))
    classifier.add(Activation('relu', name='fc7_ac'))
    classifier.add(Dropout(dropout_rate))
    classifier.add(Dense(num_classes, activation='softmax',
                         name='predictions'))
    return classifier
Example #6
 def __init__(self, learning_rate, layers, functions, optimizer_name,
              beta=0.0, dropout=1.0):
     
     self.n_input = layers[0]
     self.n_hidden = layers[1:-1]
     self.n_output = layers[-1]
     
     self.model = Sequential()
     
     if len(self.n_hidden) == 0:
         # single layer
         self.model.add(Dense(self.n_output, activation=functions[0],
                              kernel_regularizer=regularizers.l2(beta),
                              input_shape=(self.n_input,)))
         
     elif len(self.n_hidden) == 1:
         # hidden layer
         self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                              kernel_regularizer=regularizers.l2(beta),
                              input_shape=(self.n_input,)))
         # NOTE: Keras Dropout takes the fraction of units to DROP, so the
         # default dropout=1.0 would zero out every activation.
         self.model.add(Dropout(dropout))
         # output layer
         self.model.add(Dense(self.n_output, activation=functions[1],
                              kernel_regularizer=regularizers.l2(beta)))
         
     else:
         # the first hidden layer
         self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                              kernel_regularizer=regularizers.l2(beta),
                              input_shape=(self.n_input,)))
         self.model.add(Dropout(dropout))
         # the second hidden layer
         self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                              kernel_regularizer=regularizers.l2(beta)))
         self.model.add(Dropout(dropout))
         # the output layer
         self.model.add(Dense(self.n_output, activation=functions[2],
                              kernel_regularizer=regularizers.l2(beta)))
     
     self.model.summary()
     
     if optimizer_name == 'Adam':
         optimizer = Adam(learning_rate)
     else:
         raise ValueError('unsupported optimizer: %s' % optimizer_name)
     
     #self.model.compile(loss='mean_squared_error',
     #                   optimizer=optimizer,
     #                   metrics=['accuracy'])
     
     self.model.compile(loss='categorical_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
Example #7
def add_softmax(model: Sequential) -> Sequential:
    """ Append the softmax layers to the frontend or frontend + context net. """
    # The softmax layer doesn't work on the (width, height, channel)
    # shape, so we reshape to (width*height, channel) first.
    # https://github.com/fchollet/keras/issues/1169
    _, curr_width, curr_height, curr_channels = model.layers[-1].output_shape

    model.add(Reshape((curr_width * curr_height, curr_channels)))
    model.add(Activation('softmax'))
    # Technically, we need another Reshape here to reshape to 2d, but TF
    # then complains when batch_size > 1. We're just going to reshape in numpy.
    # model.add(Reshape((curr_width, curr_height, curr_channels)))

    return model
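A sketch of the numpy reshape that the comment above refers to (the `images` batch and the width/height/channel names are assumed for illustration):

probs = model.predict(images)  # shape: (batch, width*height, channels)
probs = probs.reshape((-1, curr_width, curr_height, curr_channels))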
Example #8
    def lstm(self):
        """Build a simple LSTM network. We pass the extracted features from
        our CNN to this model predomenently."""
        # Model.
        model = Sequential()
        model.add(
            LSTM(2048,
                 return_sequences=False,
                 input_shape=self.input_shape,
                 dropout=0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Example #9
class TinyYoloV2(NetworkSkeleton):
    """
    This class handles the building and the loss of the
    tiny yolo v2 network.
    """
    def __init__(self, config):
        """
        Initializes class variables.
        :param config: Contains the networks hyperparameters
        """
        super(TinyYoloV2, self).__init__()
        self.config = config
        self.network = None
        self.loss = None
        self.model = None
        self.input_shape = config.input_shape

    def _build(self, input):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(
            Lambda(lambda x: x / 127.5 - 1., input_shape=self.input_shape))
        # Placeholder filters/kernel size; the original arguments are not shown.
        self.model.add(Convolution2D(16, (3, 3), padding='same'))

        return self.model.output  # the logits tensor

    def _loss(self):
        """
        Calculates the loss of the network.
        :return: loss
        """
        raise NotImplementedError

    def __load_pretrained_network(self):
        """
Example #10
 def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation):
     print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." % (
     lr, n_layers, n_hidden, rate_dropout))
     self.model = Sequential()
     self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
     for i in range(0, n_layers - 1):
         self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                             recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                             recurrent_initializer='orthogonal', bias_initializer='zeros',
                             dropout=rate_dropout, recurrent_dropout=rate_dropout))
     self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                         recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal', bias_initializer='zeros',
                         dropout=rate_dropout, recurrent_dropout=rate_dropout))
     self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
     # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
     #               moving_variance_initializer=Constant(value=0.25)))
     self.model.add(BatchNormalization(axis=-1))
     self.model.add(Activation("relu(alpha=0., max_value=1.0)"))
     opt = RMSprop(lr=lr)
     self.model.compile(loss=loss,
                        optimizer=opt,
                        metrics=['accuracy'])
Example #11
def grad_cam(input_model, image, category_index):
    """
    Args: model to make predictions, image to predict, index of categories and
    their predicted probabilities.
    
    Constructs a colour map showing where the classifier puts the highest weight
    for a given image in making its prediction.
    
    Returns: numpy array of same dimension as image but instead displaying colours
    according to where the classifier puts the most weight.
    """
    model = Sequential()
    model.add(input_model)
    nb_classes = 10
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer))
    loss = K.sum(model.layers[-1].output)
    # NOTE: the layer index must be changed to match the network structure.
    conv_output = model.layers[0].layers[29].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input],
                                   [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    #Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255.0 * cam / np.max(cam)
    return np.uint8(cam)
Example #12
def add_context(model: Sequential) -> Sequential:
    """ Append the context layers to the frontend. """
    model.add(ZeroPadding2D(padding=(33, 33)))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_1'))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_2'))
    model.add(Conv2D(84, 3, dilation_rate=(2, 2), activation='relu', name='ct_conv2_1'))
    model.add(Conv2D(168, 3, dilation_rate=(4, 4), activation='relu', name='ct_conv3_1'))
    model.add(Conv2D(336, 3, dilation_rate=(8, 8), activation='relu', name='ct_conv4_1'))
    model.add(Conv2D(672, 3, dilation_rate=(16, 16), activation='relu', name='ct_conv5_1'))
    model.add(Conv2D(672, 3, activation='relu', name='ct_fc1'))
    model.add(Conv2D(21, 1, name='ct_final'))

    return model
"""
The `Sequential` model is a linear stack of layers.

You can create a `Sequential` model by passing a list of layer instances to the constructor:
"""
"""
Sequential

(['class Sequential(Model):\n',
  Linear stack of layers.\n',
  '\n',
  '  Arguments:\n',
  '      layers: list of layers to add to the model.\n',
  '\n',
  '  # Note\n',
  '      The first layer passed to a Sequential model\n',
  '      should have a defined input shape. What that\n',
  '      means is that it should have received an `input_shape`\n',
  '      or `batch_input_shape` argument,\n',
  '      or for some type of layers (recurrent, Dense...)\n',
  '      an `input_dim` argument.\n',

  '  def __init__(self, layers=None, name=None):\n',
  '    self.layers = []  # Stack of layers.\n',
  '    self.model = None  # Internal Model instance.\n',
  '    self.inputs = []  # List of input tensors\n',
  '    self.outputs = []  # List of length 1: the output tensor (unique).\n',
  '    self._trainable = True\n',
  '    self._initial_weights = None\n',
  '\n',
  '    # Model attributes.\n',
Example #14
def generator_containing_discriminator(g, d):
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model
Example #15
def discriminator_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Example #16
def generator_model():
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model
Example #17
def create_model():
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #18
values = reframed.values
n_train_hours = 365 * 24  # 1 year
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
train_X = train[:, :-1]
train_y = train[:, -1]
test_X = test[:, :-1]
test_y = test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

# fit network
history = model.fit(train_X,
                    train_y,
                    epochs=50,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2,
                    shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='Training Loss')
pyplot.plot(history.history['val_loss'], label='Validation Loss')
Example #19
lr.fit(train_X, train_y)  # no need to turn words into numbers, or one-hot

print("Accuracy = {:.2f}".format(lr.score(test_X, test_y)))  # get metrics


def one_hot_encode_object_array(arr):
    '''One hot encode a numpy array of objects (e.g. strings)'''
    uniques, ids = np.unique(
        arr, return_inverse=True)  # convert 3 words into 0, 1, 2
    return to_categorical(ids, len(uniques))  # convert 0, 1, 2 to one-hot


train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)

model = Sequential()

model.add(Dense(16, input_shape=(4, )))  # each sample has 4 features
model.add(Activation('sigmoid'))  # add non-linearity to hidden layer 1

model.add(Dense(3))  # add another 3 neuron final layer
model.add(Activation('softmax'))  # give it non-linearity as output
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=["accuracy"])

model.fit(train_X,
          train_y_ohe,
          validation_split=0.2)
Example #20
def mk_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=.1))
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(MaxoutDense(output_dim=256, nb_feature=4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_uniform'))
    model.add(Activation('softmax'))

    return model
Example #21
def mk_model_with_bn():
    model = Sequential()
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_normal'))
    model.add(Activation('softmax'))

    return model
Example #22
"""
### Multilayer Perceptron (MLP) for multi-class softmax classification:
"""

from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical

# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)

model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# specify optimizer
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)  # param adjust
model.compile(
    loss='categorical_crossentropy',  # multi-class
    optimizer=sgd,
    metrics=['accuracy'])
Example #23
def get_frontend(input_width, input_height) -> Sequential:
    model = Sequential()
    # model.add(ZeroPadding2D((1, 1), input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_1', input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(128, 3, activation='relu', name='conv2_1'))
    model.add(Conv2D(128, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(256, 3, activation='relu', name='conv3_1'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_2'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Conv2D(512, 3, activation='relu', name='conv4_1'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_2'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_3'))

    # Compared to the original VGG16, we skip the next 2 MaxPool layers,
    # and go ahead with dilated convolutional layers instead

    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_1'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_2'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu', name='conv5_3'))

    # Compared to the VGG16, we replace the FC layer with a convolution

    model.add(Conv2D(4096, 7, dilation_rate=(4, 4), activation='relu', name='fc6'))
    model.add(Dropout(0.5))
    model.add(Conv2D(4096, 1, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    # Note: this layer has linear activations, not ReLU
    model.add(Conv2D(21, 1, activation='linear', name='fc-final'))

    # model.layers[-1].output_shape == (None, 16, 16, 21)
    return model
Example #24
"""
[You can read more about stateful RNNs in the FAQ.](/getting-started/faq/#how-can-i-use-stateful-rnns)
"""

from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import LSTM, Dense
import numpy as np

data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32

# Expected input batch shape: (batch_size, timesteps, data_dim)
# Note that we have to provide the full batch_input_shape since the network is stateful.
# the sample of index i in batch k is the follow-up for the sample i in batch k-1.
model = Sequential()
model.add(
    LSTM(
        32,
        return_sequences=True,
        stateful=True,  #input(32,8,16)
        batch_input_shape=(batch_size, timesteps, data_dim)))  #output(32,?,32)
model.add(LSTM(32, return_sequences=True, stateful=True))  # output(32,?,32)
model.add(LSTM(32, stateful=True))  # output (32,32)
model.add(Dense(10, activation='softmax'))  # output (32, 10)

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
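A plausible completion of the dummy-data step, using the shapes defined above (batch_size, timesteps, data_dim, num_classes):

x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))

# Generate dummy validation data
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random((batch_size * 3, num_classes))

# shuffle=False keeps the sample order that statefulness relies on, and the
# sample counts stay divisible by batch_size as stateful models require.
model.fit(x_train, y_train,
          batch_size=batch_size, epochs=5, shuffle=False,
          validation_data=(x_val, y_val))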
Example #25
import numpy as np
import pylab as plt

# ConvLSTM network to create a movie

from tensorflow.contrib.keras.python.keras.layers import ConvLSTM2D, BatchNormalization, Conv3D
from tensorflow.contrib.keras.python.keras.models import Sequential

seq = Sequential()
seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               input_shape=(None, 40, 40, 1),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())

seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())

seq.add(
    ConvLSTM2D(filters=40,
               kernel_size=(3, 3),
               padding='same',
               return_sequences=True))
seq.add(BatchNormalization())
Example #26
def main():
    x_train, y_train, x_test, y_test = load_data()

    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(11, 11),
               strides=4,
               padding="same",
               activation='relu',
               input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(5, 5),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(Flatten())  # flatten the conv feature maps before the dense layers
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    model.compile(loss='categorical_crossentropy',  # 7-way softmax output
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))

    model.save(expanduser("~/emotion/alex_net.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Example #27
def simple_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(filters=64, kernel_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM))
    model.add(Activation('softmax'))

    return model
Example #28
"""
### MLP for binary classification:
"""
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical
import numpy as np

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', # binary classification
              optimizer='rmsprop',
              metrics=['accuracy'])

hist = model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=1,
          batch_size=128)
Example #29
num_classes = np.max(y_train) + 1
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)

print('x_train shape :', x_train.shape)
print('x_test shape :', x_test.shape)
print('y_train shape :', y_train.shape)
print('y_test shape :', y_test.shape)

batch_size = 32
epochs = 5

model = Sequential()
model.add(Dense(512, input_shape=(max_words, )))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=0.1)
Example #30
from tensorflow.contrib.keras.python.keras import backend as K
import numpy as np

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([[0],[1],[1],[0]])
# better computed directly with the true target function: xor
y = X[:, 0] ^ X[:, 1]  # ^ is the bitwise xor operator

#######################################################
# With Sequential, activations can be added as separate layers rather than inside the main layers
#######################################################
# check whether 3 models are the same or not
model = Sequential()
model.add(Dense(4, input_dim=2,
                name='dense1'))  # 2 nodes reach 50% accuracy, 8 nodes 100%
model.add(Activation('relu', name='dense1_act'))
model.add(Dense(1, name='dense2'))
model.add(Activation('sigmoid', name='dense2_act'))  # output shaped by sigmoid
model.summary()  # see each layer of a model

# use Model instead of Sequential
input_tensor = Input(shape=(2, ), name='input')
hidden = Dense(4, activation='relu', name='dense1_relu')(input_tensor)
output = Dense(1, activation='sigmoid',
               name='dense2_sigm')(hidden)  # output shaped by sigmoid
model1 = Model(inputs=input_tensor, outputs=output)
model1.summary()  # see each layer of a model
"""