def main(_):
    data_generator = generator_data(FLAGS.batch_size)
    valid_generator = generator_valid(X_valid, y_valid, FLAGS.batch_size)

    # Training Architecture: inspired by NVIDIA architecture #
    input_shape = (64, 64, 3)
    model = Sequential()
    model.add(Lambda(lambda x: x / 255 - 0.5, input_shape=input_shape))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      W_regularizer=l2(0.001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      W_regularizer=l2(0.001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      W_regularizer=l2(0.001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      W_regularizer=l2(0.001)))
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      border_mode='valid',
                      subsample=(2, 2),
                      W_regularizer=l2(0.001)))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(80, W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(40, W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(16, W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(10, W_regularizer=l2(0.001)))
    model.add(Dense(1, W_regularizer=l2(0.001)))
    adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss='mse', metrics=['accuracy'])
    model.summary()
    model.fit_generator(data_generator,
                        samples_per_epoch=math.ceil(len(X_train)),
                        nb_epoch=FLAGS.epochs,
                        validation_data=valid_generator,
                        nb_val_samples=len(X_valid))

    print('Done Training')

    ###Saving Model and Weights###
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("model.h5")
    print("Saved model to disk")
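This example is written against the Keras 1.x layer API (border_mode, subsample, W_regularizer, samples_per_epoch, nb_epoch). As an illustration only, not part of the original example, the first convolution block and the training call would map onto the Keras 2 names roughly as follows, assuming the same generators, FLAGS and data arrays are in scope:

from keras.models import Sequential
from keras.layers import Lambda, Conv2D, Activation
from keras.regularizers import l2

# Keras 2 renames: Convolution2D -> Conv2D, border_mode -> padding,
# subsample -> strides, W_regularizer -> kernel_regularizer.
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(64, 64, 3)))
model.add(Conv2D(24, (5, 5), strides=(2, 2), padding='valid',
                 kernel_regularizer=l2(0.001)))
model.add(Activation('relu'))
# ... remaining layers follow the same pattern ...

# fit_generator now counts steps (batches) per epoch instead of samples:
# model.fit_generator(data_generator,
#                     steps_per_epoch=len(X_train) // FLAGS.batch_size,
#                     epochs=FLAGS.epochs,
#                     validation_data=valid_generator,
#                     validation_steps=len(X_valid) // FLAGS.batch_size)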
Example #2
def densenet169_model(img_rows,
                      img_cols,
                      color_type=1,
                      nb_dense_block=4,
                      growth_rate=32,
                      nb_filter=64,
                      reduction=0.5,
                      dropout_rate=0.0,
                      weight_decay=1e-4,
                      num_classes=None):
    '''
    DenseNet 169 Model for Keras

    Model Schema is based on
    https://github.com/flyyufelix/DenseNet-Keras

    ImageNet Pretrained Weights
    Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfN0d3T1F1MXg0NlU
    TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfSEc5UC1ROUFJdmM

    # Arguments
        img_rows, img_cols: input image dimensions
        color_type: number of input channels (the graph below assumes 3)
        nb_dense_block: number of dense blocks to add to the end of the network
        growth_rate: number of filters added by each layer inside a dense block
        nb_filter: initial number of filters
        reduction: reduction (compression) factor of the transition blocks
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        num_classes: optional number of classes to classify images
    # Returns
        A Keras model instance and the list of intermediate feature tensors.
    '''
    layers = []

    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(img_rows, img_cols, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, img_rows, img_cols), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 64
    nb_layers = [6, 12, 32, 32]  # For DenseNet-169

    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter,
                      7,
                      7,
                      subsample=(2, 2),
                      name='conv1',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)

    layers.append(x)

    # x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='pool1')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        layers.append(x)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)

    # x_fc = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    # x_fc = Dense(1000, name='fc6')(x_fc)
    # x_fc = Activation('softmax', name='prob')(x_fc)

    # model = Model(img_input, x_fc, name='densenet')

    model = Model(img_input, x, name='densenet')

    if K.image_dim_ordering() == 'th':
        # Use pre-trained weights for Theano backend
        weights_path = '../imagenet_models/densenet169_weights_th.h5'
    else:
        # Use pre-trained weights for Tensorflow backend
        weights_path = '../imagenet_models/densenet169_weights_tf.h5'

    model.load_weights(weights_path, by_name=True)

    return model, layers
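A minimal usage sketch (not part of the original example), assuming the helper blocks and layers (Scale, dense_block, transition_block) are defined as in the linked DenseNet-Keras repository and that the pretrained weight file exists at the hard-coded path:

from keras.layers import GlobalAveragePooling2D, Dense
from keras.models import Model

# Build the DenseNet-169 backbone; the function returns the model plus the
# intermediate feature tensors collected in `layers`.
base_model, feature_maps = densenet169_model(img_rows=224, img_cols=224,
                                             color_type=3, num_classes=10)

# Attach a hypothetical 10-class classification head for fine-tuning.
x = GlobalAveragePooling2D(name='pool_final')(base_model.output)
predictions = Dense(10, activation='softmax', name='predictions')(x)
classifier = Model(base_model.input, predictions)
classifier.compile(optimizer='adam', loss='categorical_crossentropy',
                   metrics=['accuracy'])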
Example #3
def VGG_19(weights_path=None, heatmap=False):
    model = Sequential()

    if heatmap:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, None, None)))
    else:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    if heatmap:
        model.add(Convolution2D(4096, 7, 7, activation="relu", name="dense_1"))
        model.add(Convolution2D(4096, 1, 1, activation="relu", name="dense_2"))
        model.add(Convolution2D(1000, 1, 1, name="dense_3"))
        model.add(Softmax4D(axis=1, name="softmax"))
    else:
        model.add(Flatten())
        model.add(Dense(4096, activation='relu', name='dense_1'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='dense_2'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, name='dense_3'))
        model.add(Activation("softmax"))

    if weights_path:
        model.load_weights(weights_path)

    return model
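A hedged usage sketch (illustrative only): the network expects channels-first input of shape (3, 224, 224) unless heatmap=True, and 'vgg19_weights.h5' below is a placeholder path for weights converted to this layer layout:

import numpy as np

model = VGG_19(weights_path=None)          # or VGG_19('vgg19_weights.h5')
model.compile(optimizer='sgd', loss='categorical_crossentropy')

# One dummy channels-first image; real use would apply the usual VGG
# preprocessing (BGR ordering and mean subtraction) expected by the weights.
img = np.random.rand(1, 3, 224, 224).astype('float32')
preds = model.predict(img)
print(preds.shape)                         # (1, 1000) ImageNet class scores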
Example #4
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')

# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

# create model
model = Sequential()
model.add(
    Convolution2D(32,
                  5,
                  5,
                  border_mode='valid',
                  input_shape=(1, 28, 28),
                  activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Fit the model
model.fit(X_train,
          y_train,
Example #5
def SimpleNet(darkNet, yoloNet):
    '''
    Args:
      darkNet: dark net weights, to initialize the weights of the first 13 layers
      yoloNet: yolo net, only need the structure parameters here
    Returns:
      model: A keras model which defines Tiny Yolo Net, with its first 13 layers' weights initialized by darknet
    '''
    model = Sequential()

    #Convolution Layer 2 & Max Pooling Layer 3
    model.add(ZeroPadding2D(padding=(1, 1), input_shape=(3, 448, 448)))
    model.add(
        Convolution2D(
            16,
            3,
            3,
            weights=[darkNet.layers[1].weights, darkNet.layers[1].biases],
            border_mode='valid',
            subsample=(1, 1)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #initialize first 13 layers using weights of darknet
    for i in range(3, 14):
        l = darkNet.layers[i]
        if (l.type == "CONVOLUTIONAL"):
            model.add(ZeroPadding2D(padding=(
                l.size // 2,
                l.size // 2,
            )))
            model.add(
                Convolution2D(l.n,
                              l.size,
                              l.size,
                              weights=[l.weights, l.biases],
                              border_mode='valid',
                              subsample=(1, 1)))
            model.add(LeakyReLU(alpha=0.1))
        elif (l.type == "MAXPOOL"):
            model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid'))

    for i in range(14, yoloNet.layer_number):
        l = yoloNet.layers[i]
        if (l.type == "CONVOLUTIONAL"):
            model.add(ZeroPadding2D(padding=(
                l.size // 2,
                l.size // 2,
            )))
            model.add(
                Convolution2D(l.n,
                              l.size,
                              l.size,
                              init='he_normal',
                              border_mode='valid',
                              subsample=(1, 1)))
            model.add(LeakyReLU(alpha=0.1))
        elif (l.type == "MAXPOOL"):
            model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid'))
        elif (l.type == "FLATTEN"):
            model.add(Flatten())
        elif (l.type == "CONNECTED"):
            model.add(Dense(l.output_size, init='he_normal'))
        elif (l.type == "LEAKY"):
            model.add(LeakyReLU(alpha=0.1))
        elif (l.type == "DROPOUT"):
            model.add(Dropout(0.5))
        else:
            print("Error: Unknown Layer Type", l.type)
        #model.add(Activation('sigmoid'))
    return model
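A brief usage sketch (not from the original source), assuming darkNet and yoloNet objects exposing the layer attributes used above have already been parsed from their weight and config files:

# Build the Tiny YOLO network and inspect it; the first 13 layers carry
# the darknet-initialized weights, the rest start from 'he_normal'.
model = SimpleNet(darkNet, yoloNet)
model.summary()

# The loss below is only a placeholder; YOLO training uses a custom
# detection loss over the CONNECTED output layer.
model.compile(optimizer='adam', loss='mse')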
Example #6
# Create a model
model = Sequential()
model.add(
    BatchNormalization(axis=1,
                       momentum=0.99,
                       epsilon=0.001,
                       center=True,
                       scale=True,
                       beta_initializer='zeros',
                       gamma_initializer='ones',
                       moving_mean_initializer='zeros',
                       moving_variance_initializer='ones',
                       input_shape=(1, 256, 256)))
model.add(
    Convolution2D(32, (3, 3),
                  strides=(2, 2),
                  padding='valid',
                  data_format='channels_first'))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))  #64*64

model.add(
    Convolution2D(64, (3, 3),
                  strides=(1, 1),
                  padding='same',
                  data_format='channels_first'))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))  #32*32

model.add(
    Convolution2D(64, (3, 3),
                  strides=(1, 1),
Example #7
# Setup the model architecture

input_shape = (X_batch.shape[1], X_batch.shape[2], X_batch.shape[3])
pool_size = (2, 2)

model = Sequential()
model.add(
    Lambda(lambda x: x / 127.5 - 1.,
           input_shape=input_shape,
           output_shape=input_shape))

model.add(
    Convolution2D(24,
                  5,
                  5,
                  subsample=(2, 2),
                  border_mode="valid",
                  init="he_normal"))
model.add(ELU())

model.add(
    Convolution2D(36,
                  5,
                  5,
                  subsample=(2, 2),
                  border_mode="valid",
                  init="he_normal"))
model.add(ELU())

model.add(
    Convolution2D(48,
Example #8
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

# Create the model
model = Sequential()
model.add(
    Convolution2D(32,
                  3,
                  3,
                  input_shape=(3, 32, 32),
                  border_mode='same',
                  activation='relu',
                  W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Convolution2D(32,
                  3,
                  3,
                  activation='relu',
                  border_mode='same',
                  W_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
model.add(Dropout(0.5))
Example #9
def create_residual_of_residual(input_dim,
                                nb_classes=100,
                                N=2,
                                dropout=0.0,
                                verbose=1):
    """
    Creates a Residual Network of Residual Networks with the specified parameters.

    Example : To create a RoR-3-110 model for CIFAR-10:
              model = create_residual_of_residual((3, 32, 32), 10, N=2)

              Note : The ResNet 101 model is the RoR-3-110 model

    :param input_dim: Shape of the input tensor
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute n = 6 * (N * 9 - 1) + 8.
              Example 1: For a depth of 56,  N = 1, n = 6 * (1 * 9 - 1) + 8 = 56
              Example 2: For a depth of 110, N = 2, n = 6 * (2 * 9 - 1) + 8 = 110
              Example 3: For a depth of 164, N = 3, n = 6 * (3 * 9 - 1) + 8 = 164
    :param dropout: Adds dropout if the value is greater than 0.0.
                    Note : Generally not used in RoR
    :param verbose: Print debug info describing the created network
    :return: A Keras Model instance
    """
    ip = Input(shape=input_dim)

    x = initial_conv(ip)
    nb_conv = 8

    conv0_level1_shortcut = Convolution2D(64, (1, 1),
                                          kernel_initializer='he_normal',
                                          padding='same',
                                          strides=(4, 4),
                                          name='conv0_level1_shortcut')(x)

    conv1_level2_shortcut = Convolution2D(16, (1, 1),
                                          kernel_initializer='he_normal',
                                          padding='same',
                                          name='conv1_level2_shortcut')(x)
    for i in range(N * 9 - 1):
        initial = (i == 0)
        x = conv1_block(x, dropout, initial=initial)
        nb_conv += 2

    # Add Level 2 shortcut
    x = Add()([x, conv1_level2_shortcut])
    x = Activation('relu')(x)

    x = MaxPooling2D((2, 2))(x)

    conv2_level2_shortcut = Convolution2D(32, (1, 1),
                                          kernel_initializer='he_normal',
                                          padding='same',
                                          name='conv2_level2_shortcut')(x)
    for i in range(N * 9 - 1):
        x = conv2_block(x, dropout)
        nb_conv += 2

    # Add Level 2 shortcut
    x = Add()([x, conv2_level2_shortcut])
    x = Activation('relu')(x)

    x = MaxPooling2D((2, 2))(x)

    conv3_level2_shortcut = Convolution2D(64, (1, 1),
                                          kernel_initializer='he_normal',
                                          padding='same',
                                          name='conv3_level2_shortcut')(x)
    for i in range(N * 9 - 1):
        is_last = (i == N - 1)
        x = conv3_block(x, dropout, is_last=is_last)
        nb_conv += 2

    # Add Level 2 shortcut
    x = Add()([x, conv3_level2_shortcut])

    # Add Level 1 shortcut
    x = Add()([x, conv0_level1_shortcut])
    x = Activation('relu')(x)

    x = AveragePooling2D((8, 8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose: print("Residual-in-Residual-Network-%d created." % (nb_conv))
    return model
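The depth formula quoted in the docstring can be sanity-checked on its own; a small stand-alone helper (not part of the original code):

def ror_depth(N):
    # Total layer count n for the RoR-3 configuration described above.
    return 6 * (N * 9 - 1) + 8

for N in (1, 2, 3):
    print(ror_depth(N))   # 56, 110, 164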
Example #10
def create_model(input_size, weights=False, summary=True):

    # vgg19_model = Sequential()
    input_ = Input(shape=input_size)
    x = input_

    # x =ZeroPadding2D((1, 1),input_shape=(3, 224, 224)))
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(256, 3, 3, activation='relu', name='conv3_4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv4_4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Convolution2D(512, 3, 3, activation='relu', name='conv5_4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Classification layer
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='dense_1')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='dense_2')(x)
    x = Dropout(0.5)(x)
    x = Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)
    # x = Activation("softmax")(x)
    vgg19_model = Model(inputs=input_, outputs=x)

    if weights:
        filepath = download_file('vgg19_weights.h5', _VGG_19_WEIGHTS_URL)
        vgg19_model.load_weights(filepath)

    if summary:
        print(vgg19_model.summary())

    return vgg19_model
Example #11
    preprocess_image(style_reference_image_path))

# this will contain our generated image
combination_image = K.placeholder((1, 3, img_width, img_height))

# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate(
    [base_image, style_reference_image, combination_image], axis=0)

# build the VGG16 network with our 3 images as input
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(3, 3, img_width, img_height))

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(pooling_func())

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(pooling_func())

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
Example #12
X_data, Y_data = shuffle(X_data, Y_data)

X_train = X_data[:n_train, :, :, :]
X_test = X_data[n_train:, :, :, :]
Y_train = Y_data[:n_train, :]
Y_test = Y_data[n_train:, :]

print('X_train: ' + str(X_train.shape))
print('X_test: ' + str(X_test.shape))
print('Y_train: ' + str(Y_train.shape))
print('Y_test: ' + str(Y_test.shape))

# Build a model
input_img = Input(shape=(32, 64, 3))

conv1 = Convolution2D(64, 3, 3, border_mode='same')(input_img)
conv1 = Convolution2D(16, 1, 1, border_mode='same')(conv1)
act1 = Activation('relu')(conv1)
pool1 = MaxPooling2D((3, 3))(act1)
# drop1 = Dropout(0.5)(pool1)

conv2 = Convolution2D(128, 3, 3, border_mode='same')(pool1)
conv2 = Convolution2D(32, 1, 1, border_mode='same')(conv2)
act2 = Activation('relu')(conv2)
pool2 = MaxPooling2D((2, 2))(act2)
# drop2 = Dropout(0.5)(pool2)

conv3 = Convolution2D(256, 3, 3, border_mode='same')(pool2)
conv3 = Convolution2D(64, 1, 1, border_mode='same')(conv3)
act3 = Activation('relu')(conv3)
pool3 = MaxPooling2D((2, 2))(act3)
Example #13
    def AlexNet(self, weights_path=None):
        def crosschannelnormalization(alpha=1e-4,
                                      k=2,
                                      beta=0.75,
                                      n=5,
                                      **kwargs):
            def f(X):
                b, ch, r, c = X._shape
                half = n // 2
                square = K.square(X)
                extra_channels = K.spatial_2d_padding(K.permute_dimensions(
                    square, (0, 2, 3, 1)), (0, half),
                                                      dim_ordering='th')
                extra_channels = K.permute_dimensions(extra_channels,
                                                      (0, 3, 1, 2))
                scale = k
                for i in range(n):
                    scale += alpha * extra_channels[:, i:i + int(ch), :, :]
                scale = scale**beta
                return X / scale

            return Lambda(f,
                          output_shape=lambda input_shape: input_shape,
                          **kwargs)

        def splittensor(axis=1, ratio_split=1, id_split=0, **kwargs):
            def f(X):
                div = int(X._shape[axis]) // ratio_split
                if axis == 0:
                    output = X[id_split * div:(id_split + 1) * div, :, :, :]
                elif axis == 1:
                    output = X[:, id_split * div:(id_split + 1) * div, :, :]
                elif axis == 2:
                    output = X[:, :, id_split * div:(id_split + 1) * div, :]
                elif axis == 3:
                    output = X[:, :, :, id_split * div:(id_split + 1) * div]
                else:
                    raise ValueError('This axis is not possible')
                return output

            def g(input_shape):
                output_shape = list(input_shape)
                output_shape[axis] = output_shape[axis] // ratio_split
                return tuple(output_shape)

            return Lambda(f,
                          output_shape=lambda input_shape: g(input_shape),
                          **kwargs)

        # model = Sequential()
        inputs = Input(shape=(3, 227, 227))
        conv_1 = Convolution2D(nb_filter=96,
                               nb_row=11,
                               nb_col=11,
                               activation='relu',
                               subsample=(4, 4),
                               name='conv_1')(inputs)
        conv_2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(conv_1)
        conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)

        conv_2 = ZeroPadding2D((2, 2))(conv_2)
        conv_2 = merge([
            Convolution2D(
                128, 5, 5, activation='relu', name='conv_2_' + str(i + 1))(
                    splittensor(ratio_split=2, id_split=i)(conv_2))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name='conv_2')

        conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
        conv_3 = crosschannelnormalization()(conv_3)
        conv_3 = ZeroPadding2D((1, 1))(conv_3)
        conv_3 = Convolution2D(384, 3, 3, activation='relu',
                               name='conv_3')(conv_3)

        conv_4 = ZeroPadding2D((1, 1))(conv_3)
        conv_4 = merge([
            Convolution2D(
                192, 3, 3, activation='relu', name='conv_4_' + str(i + 1))(
                    splittensor(ratio_split=2, id_split=i)(conv_4))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name='conv_4')

        conv_5 = ZeroPadding2D((1, 1))(conv_4)
        conv_5 = merge([
            Convolution2D(
                128, 3, 3, activation='relu', name='conv_5_' + str(i + 1))(
                    splittensor(ratio_split=2, id_split=i)(conv_5))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name='conv_5')

        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name='convpool_5')(conv_5)

        dense_1 = Flatten(name='flatten')(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation('softmax', name='softmax')(dense_3)

        model = Model(input=inputs, output=prediction)

        if weights_path:
            model.load_weights(weights_path)
        # print(model.summary())
        return model
Example #14
def main():
    #parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=10, type=int)
    parser.add_argument(
        '--remove_straight_angle',
        default=None,
        type=float,
        help=
        "Remove all training data with steering angle less than this. Useful for getting rid of straight bias"
    )
    parser.add_argument('--save_generated_images',
                        action='store_true',
                        help="Save generated images to the 'generated' directory")
    parser.add_argument(
        '--load_model',
        type=str,
        help="For transfer learning, here's the model to start with")
    parser.add_argument('--directory',
                        type=str,
                        default='data',
                        help="Directory for training data")
    parser.add_argument('--learning_rate', type=float, default=.001)
    args = parser.parse_args()

    driving_log_filename = 'driving_log.csv'

    #Load data from computer into a dictionary
    data = get_driving_log_info(
        driving_log_filename,
        directory=args.directory,
        remove_straight_angle=args.remove_straight_angle)
    labels = np.array(data['steering'])

    train_test = train_test_split(data['center'],
                                  labels,
                                  test_size=.1,
                                  train_size=.4)
    center_train = np.array(train_test[0])
    center_val = np.array(train_test[1])
    y_train = np.array(train_test[2])
    y_val = np.array(train_test[3])

    #Get data set up for image generator
    train_datagen = ImageDataGenerator(width_shift_range=.1,
                                       height_shift_range=.1,
                                       rescale=1. / 255,
                                       fill_mode='constant',
                                       cval=0)

    val_datagen = ImageDataGenerator(rescale=1. / 255)

    center_generator = train_datagen.flow(
        center_train,
        y_train,
        batch_size=128,
        shuffle=True,
        save_to_dir='generated' if args.save_generated_images else None)

    center_val_generator = val_datagen.flow(center_val,
                                            y_val,
                                            batch_size=128,
                                            shuffle=True)

    if args.load_model:
        #Load previous model
        print('loading model')
        model = load_model(args.load_model)
    #Make a new model
    else:
        model = Sequential()

        #Convolution 1. Input: (?, 66, 200, 3) Output: (?, 31, 98, 24)
        model.add(
            Convolution2D(24,
                          5,
                          5,
                          activation='relu',
                          subsample=(2, 2),
                          input_shape=(66, 200, 3)))

        #Convolution 2. Input: (?, 31, 98, 24) Output: (?, 14, 47, 36)
        model.add(Convolution2D(36, 5, 5, activation='relu', subsample=(2, 2)))

        #Convolution 3. Input: (?, 14, 47, 36) Output: (?, 5, 22, 48)
        model.add(Convolution2D(48, 5, 5, activation='relu', subsample=(2, 2)))

        #Convolution 4. Input: (?, 5, 22, 48) Output: (?, 3, 20, 64)
        model.add(Convolution2D(64, 3, 3, activation='relu'))

        #Convolution 5. Input: (?, 3, 20, 64) Output: (?, 1, 18, 64)
        model.add(Convolution2D(64, 3, 3, activation='relu'))

        model.add(Dropout(.5))

        #Flatten the layers. Input: (?, 1, 18, 64) Output: (?, 1152)
        model.add(Flatten())

        #Fully Connected #1. Input: (?, 1152) Output: (?, 100)
        model.add(Dense(100, activation='relu'))

        #Fully connected #2. Input: (?, 100) Output: (?, 50)
        model.add(Dense(50, activation='relu'))

        #Fully connected #3. Input: (?, 50) Output: (?, 10)
        model.add(Dense(10, activation='relu'))

        model.add(Dropout(.5))

        #Output layer: 1 output
        model.add(Dense(1))

        for i in range(len(model.layers)):
            model.layers[i].name += str(i)

        optimizer = Nadam(lr=args.learning_rate)
        model.compile(optimizer=optimizer, loss='mean_squared_error')

    #Save json file
    with open('model.json', 'w') as file:
        json_model = json.loads(model.to_json())
        json.dump(json_model, file)

    checkpoint_callback = ModelCheckpoint('model.h5',
                                          verbose=1,
                                          save_best_only=True)
    model.fit_generator(center_generator,
                        nb_epoch=args.epochs,
                        samples_per_epoch=len(center_train),
                        validation_data=center_val_generator,
                        nb_val_samples=len(y_val),
                        callbacks=[checkpoint_callback])

    model.save('model.md5')
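For completeness, a hedged sketch (not in the original) of reloading the artifacts this script writes; it assumes the ModelCheckpoint above has saved the best full model to model.h5, which is Keras' default behaviour for that callback:

from keras.models import load_model

# Restore the best checkpoint for inference or further transfer learning
# (as done above via --load_model).
best_model = load_model('model.h5')
steering = best_model.predict(images_batch)   # images_batch: (n, 66, 200, 3), hypothetical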
Example #15
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2  # Content and Output image not considered

# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)

if K.image_dim_ordering() == "th":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)

ip = Input(tensor=input_tensor, batch_shape=shape)

# build the VGG16 network with our 3 images as input
x = Convolution2D(64, (3, 3),
                  activation='relu',
                  name='conv1_1',
                  padding='same')(ip)
x = Convolution2D(64, (3, 3),
                  activation='relu',
                  name='conv1_2',
                  padding='same')(x)
x = pooling_func(x)

x = Convolution2D(128, (3, 3),
                  activation='relu',
                  name='conv2_1',
                  padding='same')(x)
x = Convolution2D(128, (3, 3),
                  activation='relu',
                  name='conv2_2',
                  padding='same')(x)
Example #16
nb_filters3 = 64
conv1_size = 3
conv2_size = 2
conv3_size = 5
pool_size = 2
# We have 2 classes, buy and sell
classes_num = 2
batch_size = 128
lr = 0.001
chanDim = 3

model = Sequential()
model.add(
    Convolution2D(nb_filters1,
                  conv1_size,
                  conv1_size,
                  padding='same',
                  input_shape=(img_height, img_width, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

model.add(Convolution2D(nb_filters2, conv2_size, conv2_size, padding="same"))
model.add(Activation('relu'))
model.add(
    MaxPooling2D(pool_size=(pool_size, pool_size),
                 data_format='channels_last'))

model.add(Convolution2D(nb_filters3, conv3_size, conv3_size, padding='same'))
model.add(Activation('relu'))
model.add(
    MaxPooling2D(pool_size=(pool_size, pool_size),
Example #17
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# normalize inputs from 0-255 to 0.0-1.0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Create the model
model = Sequential()
model.add(
    Convolution2D(32,
                  3,
                  3,
                  input_shape=(3, 32, 32),
                  activation='relu',
                  border_mode='same'))
model.add(Dropout(0.2))
model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same'))
model.add(Dropout(0.2))
model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same'))
model.add(Dropout(0.2))
model.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
Example #18
printing("\n")

printing(
    "---------------------------------------------------------------------------------"
)

model = Sequential()

model.add(Activation('linear',
                     input_shape=(channels, patchHeight, patchWidth)))  # 32
model.add(
    Convolution2D(48,
                  3,
                  3,
                  border_mode='valid',
                  trainable=True,
                  init=initialization,
                  W_regularizer=l2(regularizer),
                  subsample=(1, 1),
                  activation="relu"))  # 30
model.add(
    Convolution2D(48,
                  3,
                  3,
                  border_mode='valid',
                  trainable=True,
                  init=initialization,
                  W_regularizer=l2(regularizer),
                  subsample=(1, 1),
                  activation="relu"))  # 28
model.add(
Example #19
# Compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=128)
validation_generator = generator(validation_samples, batch_size=64)

#Model ARCHITECTURE OF 2 CONVOLUTIONAL LAYER AND 3 DENSE LAYER
model = Sequential()

#Cropping layer to crop image to see only the road section
model.add(Cropping2D(cropping=((60, 20), (0, 0)), input_shape=(160, 320, 3)))

#Lambda function for data preprocessing
model.add(Lambda(data_preprocess))

#1st convolutional layer using 5x5 filter and relu activation function
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())

#2nd convolutional layer using 5x5 filter and relu acrivation function
model.add(Convolution2D(12, 5, 5, activation='relu'))
model.add(MaxPooling2D())

#Flatten layer to get single output
model.add(Flatten())

#1st dense layer to get output as 100
model.add(Dense(100))
model.add(Activation('relu'))

#2nd dense layer to get output as 50
model.add(Dense(50))
Example #20
    def ConvBlock(self, layers, filters):
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
Example #21
X_train, y_train, X_test, y_test = getMNISTData()

# We need to rehape the data back into a 1x28x28 image
X_train = np.reshape(X_train, (X_train.shape[0], 1, 28, 28))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 28, 28))

## Categorize the labels
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)

#################################
# Design the model here
model = Sequential()
model.add(
    Convolution2D(nb_filter=32,
                  nb_row=3,
                  nb_col=3,
                  input_shape=(1, X_train.shape[2], X_train.shape[3])))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(
    Convolution2D(nb_filter=64,
                  nb_row=3,
                  nb_col=3,
                  input_shape=(1, X_train.shape[2], X_train.shape[3])))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(output_dim=128))
model.add(Dropout(0.5))
model.add(Dense(output_dim=10))
Example #22
    img_right = cv2.imread(line[2])

    images.append(img_left)
    images.append(img_center)
    images.append(img_right)

    measurements.append(steering_left)
    measurements.append(steering_center)
    measurements.append(steering_right)

X_train = np.array(images)
y_train = np.array(measurements)
print(X_train.shape)
print(y_train.shape)
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation="relu"))
model.add(Convolution2D(64, 3, 3, activation="relu"))
model.add(Convolution2D(64, 3, 3, activation="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=5)
model.save('model-test.h5')
Example #23
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #24
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    _result = tf.map_fn(fn=lambda e: tf.reduce_all(e), elems=correct_pred, dtype=tf.bool)
    return tf.reduce_mean(tf.cast(_result, tf.float32))

# Create the input tensor: shape is (height, width, 3 channels)
input_tensor = Input( shape=(height, width, 3))

x = input_tensor

# Build the convolutional network:
# two conv layers followed by one pooling layer, repeated 3 times. Since the
# generated captcha images are fairly small, 'same' padding is used.
x = Convolution2D(32, 3, padding='same', activation='relu')(x)
x = Convolution2D(32, 3, padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2))(x)


x = Convolution2D(64, 3, padding='same', activation='relu')(x)
x = Convolution2D(64, 3, padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2))(x)


x = Convolution2D(128, 3, padding='same', activation='relu')(x)
x = Convolution2D(128, 3, padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2))(x)

# The Flatten layer "flattens" the input, i.e. turns the multi-dimensional input
# into a 1-D vector; it is commonly used in the transition from convolutional
# layers to fully-connected layers.
x = Flatten()(x)
Example #25
from keras.optimizers import RMSprop, Adadelta
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Activation, Dropout, Reshape, Flatten
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping

import json
img_shape = (10, 64, 64, 3)
model = Sequential()

model.add(
    Lambda(lambda x: x * 1. / 127.5 - 1,
           input_shape=(img_shape),
           output_shape=(img_shape),
           name='Normalization'))
model.add(
    TimeDistributed(Convolution2D(8, 4, 4, border_mode='valid'),
                    input_shape=(10, 64, 64, 3)))
#model.add(Activation('relu'))
model.add(ELU())
model.add(TimeDistributed(Convolution2D(16, 3, 3, border_mode='valid')))
model.add(ELU())
#model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2),border_mode='valid')))
#model.add(Activation('relu'))
model.add(TimeDistributed(Convolution2D(8, 3, 3, border_mode='valid')))
model.add(ELU())
#model.add(Activation('relu'))
#model.add(Reshape((maxToAdd,np.prod(model.output_shape[-3:])))) #this line updated to work with keras 1.0.2
model.add(TimeDistributed(Flatten()))
model.add(Activation('relu'))
model.add(LSTM(output_dim=100, return_sequences=True))
model.add(LSTM(output_dim=50, return_sequences=False))
Example #26
def VGG_16_pretrain(weights_path=None,
                    img_width=128,
                    img_height=128,
                    channel=3):
    model = Sequential()
    model.add(
        ZeroPadding2D((1, 1), input_shape=(channel, img_width, img_height)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    # model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    weights_path = 'data/vgg16_weights.h5'

    f = h5py.File(weights_path)
    for k in range(len(model.layers)):
        if isinstance(model.layers[k], Convolution2D):
            attr = model.layers[k].name
            g = f[attr]
            weights = [g[attr + '_W'], g[attr + '_b']]
            model.layers[k].set_weights(weights)
    f.close()

    print('Model loaded.')

    return model
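A hedged sketch of how the resulting convolutional base might be reused as a frozen feature extractor, assuming the data/vgg16_weights.h5 file loaded inside the function is present; the two-class head below is a hypothetical addition, not part of the original example:

from keras.layers import Flatten, Dense, Dropout

base = VGG_16_pretrain(img_width=128, img_height=128, channel=3)
for layer in base.layers:
    layer.trainable = False          # keep the pretrained conv filters fixed

# Hypothetical binary-classification head on top of the Sequential base.
base.add(Flatten())
base.add(Dense(256, activation='relu'))
base.add(Dropout(0.5))
base.add(Dense(2, activation='softmax'))
base.compile(optimizer='rmsprop', loss='categorical_crossentropy',
             metrics=['accuracy'])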
Example #27
            self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
            self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))

    def on_train_end(self, logs={}):
        anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
        # anim.save('test_gif.gif', fps=15, writer='imagemagick')
        plt.show()

# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))

model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full')) 
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 32, 3, 3, border_mode='full')) 
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(256, 10, W_regularizer = l2(0.1)))
Example #28
img_rows, img_cols = 80, 80  # All images are downsampled to 80 x 80
img_channels = 4  # Stack 4 frames to infer movement

# In the newest version of Keras (February 2017), you need to pass a kwarg called "dim_ordering"
def my_init(shape, name=None, dim_ordering=None):
    return initializations.normal(shape, scale=0.01, name=name)

# Build the model using the same specifications as the DeepMind paper
print ("Building CNN Model")
model = Sequential()

# 1st Convolutional layer
model.add(Convolution2D(32, 8, 8,
                        subsample=(4, 4),
                        init=my_init,
                        border_mode='same',
                        input_shape=(img_rows, img_cols, img_channels)))
model.add(Activation('relu'))

# 2nd Convolutional layer
model.add(Convolution2D(64, 4, 4,
                        subsample=(2, 2),
                        init=my_init,
                        border_mode='same'))
model.add(Activation('relu'))

# 3rd Convolutional layer
model.add(Convolution2D(64, 3, 3,
                        subsample=(1, 1),
                        init=my_init,
Example #29
def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(
            128, 5, 5, activation="relu", name='conv_2_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(
            192, 3, 3, activation="relu", name='conv_4_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_4")

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(
            128, 3, 3, activation="relu", name='conv_5_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)

    if heatmap:
        dense_1 = Convolution2D(4096, 6, 6, activation="relu",
                                name="dense_1")(dense_1)
        dense_2 = Convolution2D(4096, 1, 1, activation="relu",
                                name="dense_2")(dense_1)
        dense_3 = Convolution2D(1000, 1, 1, name="dense_3")(dense_2)
        prediction = Softmax4D(axis=1, name="softmax")(dense_3)
    else:
        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
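A short usage sketch (not part of the original example), assuming crosschannelnormalization, splittensor and Softmax4D are available as in the original project. With heatmap=True the dense layers become 6x6/1x1 convolutions, so the network accepts inputs at or above the native 227x227 size and returns a spatial map of class scores:

import numpy as np

heatmap_model = AlexNet(weights_path=None, heatmap=True)
x = np.random.rand(1, 3, 451, 451).astype('float32')   # channels-first dummy image
heat = heatmap_model.predict(x)
print(heat.shape)    # (1, 1000, h, w): per-class score maps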
Example #30
from keras.utils.io_utils import HDF5Matrix
from keras.layers.customtest import GaussianModel, BallModel, ConvertToXY
import sys

sys.setrecursionlimit(32768)

BATCHSIZE = 16
NB_EPOCH = 100
HEIGHT = 96
WIDTH = 96
BALLNUM = 48
SIGMA = 30

model = Graph()
model.add_input(name='inputL', ndim=4)
model.add_node(Convolution2D(16, 1, 5, 5, activation='relu'),
               name='convL1',
               input='inputL')
model.add_node(MaxPooling2D(poolsize=(4, 4)), name='poolL1', input='convL1')
model.add_node(Convolution2D(32, 16, 2, 2, activation='relu'),
               name='convL2',
               input='poolL1')
model.add_node(MaxPooling2D(poolsize=(2, 2)), name='poolL2', input='convL2')
model.add_node(Flatten(), name='FL', input='poolL2')

model.add_node(MaxPooling2D(poolsize=(2, 2)), name='inputM', input='inputL')
model.add_node(Convolution2D(16, 1, 3, 3, activation='relu'),
               name='convM1',
               input='inputM')
model.add_node(MaxPooling2D(poolsize=(2, 2)), name='poolM1', input='convM1')
model.add_node(Convolution2D(32, 16, 2, 2, activation='relu'),