# Sanity-check the shapes of the processed dataset splits.
print(x_train_processed.shape)
print(x_test_processed.shape)
print(x_valid_processed.shape)

# Show one random training sample: processed (grayscale) vs. original.
# BUG FIX: random.randint(1, len(x_train)) includes len(x_train) itself
# (randint is inclusive on both ends), which can raise IndexError, and it
# never samples index 0. randrange(len(...)) yields a valid 0..len-1 index.
i = random.randrange(len(x_train))
plt.imshow(x_train_processed[i].squeeze(), cmap='gray')
plt.figure()
plt.imshow(x_train[i].squeeze())

# Deep CNN classifier for 32x32 single-channel images, 43 output classes.
# Same layer stack as before, expressed as a single Sequential literal.
model = Sequential([
    # conv block 1: 32 5x5 filters on the 32x32x1 input, then 2x2 pool
    Conv2D(32, (5, 5), activation='relu', input_shape=(32, 32, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2: 64 5x5 filters, then 2x2 pool
    Conv2D(64, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    # classifier head
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(43, activation='softmax'),
])
예제 #2
0
				offsetbox.OffsetImage(ims[i]),
				X[i],pad=0)
			ax.add_artist(imagebox)
	plt.xticks([]), plt.yticks([])
	if title is not None:
		plt.title(title)

def PIL2array(img):
	"""Convert a PIL image to an (H, W, 3) numpy array of its RGB pixels."""
	width, height = img.size
	pixels = np.array(img.getdata())
	# PIL reports size as (width, height); numpy wants (rows, cols, channels).
	return pixels.reshape(height, width, 3)

# Initialize the convolutional autoencoder encoder (Keras 1.x API).
cae = Sequential()

# Block 1 carries the channels-first input shape; later blocks infer it.
cae.add(Convolution2D(8, filterSize, filterSize,
                      input_shape=(3, sampSize, sampSize),
                      border_mode='same'))
cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
cae.add(Activation('relu'))

# Blocks 2-3: same conv + pool + relu pattern with doubling filter counts.
for n_filters in (16, 32):
    cae.add(Convolution2D(n_filters, filterSize, filterSize,
                          border_mode='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

# Block 4: conv + pool (no activation visible in this fragment).
cae.add(Convolution2D(64, filterSize, filterSize, border_mode='same'))
cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
예제 #3
0
	# NOTE(review): interior fragment of a larger experiment loop —
	# nb_epoch, y_train, nb_classes, e, X_train, img_rows, img_cols,
	# nb_filters, nb_conv and nb_pool all come from the enclosing scope,
	# which is not visible here.
	# Per-epoch accuracy traces for this experiment run.
	Pool_Valid_Acc = np.zeros(shape=(nb_epoch, 1)) 	
	Pool_Train_Acc = np.zeros(shape=(nb_epoch, 1)) 
	# Placeholder for pooled acquisition indices (presumably grown later —
	# not visible in this fragment).
	x_pool_All = np.zeros(shape=(1))

	# One-hot encode the integer labels (Keras 1.x np_utils API).
	Y_train = np_utils.to_categorical(y_train, nb_classes)

	print('Training Model Without Acquisitions in Experiment', e)



	# Conv block 1 (Keras 1.x Convolution2D API, channels-first input).
	model = Sequential()
	model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))
	
	# Conv block 2 doubles the filter count. The input_shape here is
	# redundant (only the first layer needs it) but harmless.
	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))

	# L2 weight decay scaled inversely with the training-set size.
	c = 10
	Weight_Decay = c / float(X_train.shape[0])
	model.add(Flatten())
	model.add(Dense(128, W_regularizer=l2(Weight_Decay)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
예제 #4
0
    def build(width=224,
              height=224,
              depth=3,
              classes=10,
              l2_reg=0.,
              weights=None):
        """Assemble the AlexNet-style Sequential network.

        Parameters mirror the original: image width/height/depth, number
        of output classes, L2 regularisation factor (applied to the first
        conv only), and an optional path to pre-trained weights to load.
        Returns the (uncompiled) Sequential model.
        """
        img_shape = (height, width, depth)
        net = Sequential()

        def conv_unit(filters, kernel, pad=False, pool=True, **conv_kw):
            # One Conv => BN => ReLU unit, optionally preceded by a
            # 1-pixel zero pad and followed by 2x2 max pooling.
            if pad:
                net.add(ZeroPadding2D((1, 1)))
            net.add(Conv2D(filters, kernel, padding='same', **conv_kw))
            net.add(BatchNormalization())
            net.add(Activation('relu'))
            if pool:
                net.add(MaxPooling2D(pool_size=(2, 2)))

        def dense_unit(units):
            # Dense => BN => ReLU => Dropout(0.5) classifier unit.
            net.add(Dense(units))
            net.add(BatchNormalization())
            net.add(Activation('relu'))
            net.add(Dropout(0.5))

        # Layers 1-5: convolutional trunk.
        conv_unit(96, (11, 11),
                  input_shape=img_shape,
                  kernel_regularizer=l2(l2_reg))
        conv_unit(256, (5, 5))
        conv_unit(512, (3, 3), pad=True)
        conv_unit(1024, (3, 3), pad=True, pool=False)
        conv_unit(1024, (3, 3), pad=True)

        # Layers 6-7: fully-connected head.
        net.add(Flatten())
        dense_unit(3072)
        dense_unit(4096)

        # Layer 8: class scores (BN before the softmax, as in the
        # original network definition).
        net.add(Dense(classes))
        net.add(BatchNormalization())
        net.add(Activation('softmax'))

        if weights is not None:
            net.load_weights(weights)

        return net
예제 #5
0
def vgg_16(weights_path=None, h=224, w=224):
    """Build the VGG-16 convolutional trunk (Keras 1.x API, channels-first).

    The fully-connected head is intentionally omitted — only the five
    conv blocks plus Flatten are built. When `weights_path` is given,
    weights are loaded while skipping the dense/dropout/softmax layers
    listed in `excluded`.
    """
    model = DeepFaceHashSequential()

    # Block 1: 64 filters.
    model.add(ZeroPadding2D((1, 1), input_shape=(3, h, w)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 2: 128 filters.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 3: 256 filters.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 4: 512 filters.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 5: 512 filters.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten(name="flatten"))

    # Dense head deliberately left out:
    # model.add(Dense(4096, activation='relu', name='dense_1'))
    # model.add(Dropout(0.5))
    # model.add(Dense(4096, activation='relu', name='dense_2'))
    # model.add(Dropout(0.5))
    # model.add(Dense(1000, name='dense_3'))
    # model.add(Activation("softmax",name="softmax"))

    if weights_path:
        print("Trying to load weights...")

        # BUG FIX: the original list was missing a comma after
        # 'dropout_6', so Python's implicit string concatenation fused
        # 'dropout_6' and 'softmax' into the single (nonexistent) name
        # 'dropout_6softmax' — the 'softmax' layer was never excluded.
        excluded = [
            'dense_1', 'dense_2', 'dense_3', 'dropout_5', 'dropout_6',
            'softmax'
        ]
        model.load_weights(weights_path, excluded)

        print("Weights loaded!!!")
    return model
예제 #6
0
# combine the 3 images into a single Keras tensor (batch axis 0:
# base, style reference, and the combination being optimised)
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)

# build the VGG16 network with our 3 images as input
# NOTE(review): assigning `.input` directly is a Keras 1.x-era hack to
# feed a concrete tensor through a Sequential model — fragile; confirm it
# still works with the Keras version in use.
first_layer = ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height))
first_layer.input = input_tensor

model = Sequential()
model.add(first_layer)
# Block 1: two 64-filter 3x3 convs + 2x2 max pool (only the first conv
# of each block is named, so its features can be looked up later).
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# Block 2: 128 filters.
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# Block 3: 256 filters (this fragment is cut off after block 3; the
# remaining VGG blocks are not visible here).
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
예제 #7
0
def googlenet_block(x):
    """Apply one Inception-style block to tensor `x`.

    Depends on module-level `gnet` (selects the naive vs. the
    dimension-reduced variant) and `conv_2D` (per-branch filter counts).
    Returns [output_tensor, layer_count, conv_count].
    Channel-axis concatenation assumes channels-last data.
    """
    def conv(filters, size, tensor):
        # Shared Conv2D configuration used by every branch.
        return Convolution2D(filters, size,
                             padding='same',
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=False)(tensor)

    if gnet:
        #                    _______
        #   (1, 1)-----------|      |
        #   (3, 3)-----------| con- |_____
        #   (5, 5)-----------| cate |
        #   (1, 1)-----------|______|
        conv1 = conv(conv_2D[0], (1, 1), x)
        conv3 = conv(conv_2D[1], (3, 3), x)
        conv5 = conv(conv_2D[2], (5, 5), x)
        # 3x3 max pool (stride 1, same padding) followed by a 1x1 conv.
        pool = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
        pool_conv = conv(conv_2D[0], (1, 1), pool)
        layer_out = concatenate([conv1, conv3, conv5, pool_conv])
        # 6 layers total, 4 of them convolutions.
        return [layer_out, 6, 4]

    #                    _______
    #   (1, 1)-----------|      |
    #   (1, 1)->(3, 3)---| con- |_____
    #   (1, 1)->(5, 5)---| cate |
    #   pool()->(1, 1)---|______|
    conv1 = conv(conv_2D[0], (1, 1), x)
    # 1x1 bottlenecks ahead of the larger kernels cut the channel count.
    conv3 = conv(conv_2D[1], (3, 3), conv(conv_2D[0], (1, 1), x))
    conv5 = conv(conv_2D[2], (5, 5), conv(conv_2D[0], (1, 1), x))
    pool = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    pool_conv = conv(conv_2D[0], (1, 1), pool)
    layer_out = concatenate([conv1, conv3, conv5, pool_conv])
    # 8 layers total, 6 of them convolutions.
    return [layer_out, 8, 6]
예제 #8
0
    def build(width, height, depth, classes):
        """Build the stacked CONV => ELU => BN Sequential classifier.

        Three conv blocks (32/64/128 filters, each two conv units plus
        pool and dropout), two dense blocks of 64 units, and a softmax
        over `classes`. Returns the uncompiled Sequential model.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        # Channels-first backends put depth in front and batch-normalise
        # along axis 1 instead of the last axis.
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        def conv_elu(filters, **kw):
            # One CONV(3x3, he_normal, same) => ELU => BN unit.
            model.add(Conv2D(filters, (3, 3), padding="same",
                             kernel_initializer="he_normal", **kw))
            model.add(ELU())
            model.add(BatchNormalization(axis=chanDim))

        # Blocks 1-3: two conv units each, then pool + dropout.
        for filters in (32, 64, 128):
            if filters == 32:
                # Only the very first layer declares the input shape.
                conv_elu(filters, input_shape=inputShape)
            else:
                conv_elu(filters)
            conv_elu(filters)
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.25))

        # Blocks 4-5: two identical fully-connected units.
        model.add(Flatten())
        for _ in range(2):
            model.add(Dense(64, kernel_initializer="he_normal"))
            model.add(ELU())
            model.add(BatchNormalization())
            model.add(Dropout(0.5))

        # Softmax classifier.
        model.add(Dense(classes, kernel_initializer="he_normal"))
        model.add(Activation("softmax"))

        return model
예제 #9
0
    def build(width,
              height,
              depth,
              classes,
              stages,
              filters,
              reg=0.0001,
              bnEps=2e-5,
              bnMom=0.9):
        """Construct the full ResNet as a functional Keras Model.

        `stages` gives the number of residual modules per stage and
        `filters` the filter counts (entry 0 feeds the stem conv, entry
        i+1 feeds stage i). `reg` is the L2 factor; `bnEps`/`bnMom` are
        shared BatchNorm settings. Returns the uncompiled Model.
        """
        # Channels-last by default; switch when the backend is
        # channels-first.
        inputShape = (height, width, depth)
        chanDim = -1
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        def bn(tensor):
            # Shared BatchNorm configuration.
            return BatchNormalization(axis=chanDim, epsilon=bnEps,
                                      momentum=bnMom)(tensor)

        # BN applied to the raw input acts as an extra normalisation
        # step, then CONV => BN => ReLU => pad => POOL shrinks the
        # spatial dimensions.
        inputs = Input(shape=inputShape)
        t = bn(inputs)
        t = Conv2D(filters[0], (5, 5),
                   use_bias=False,
                   padding="same",
                   kernel_regularizer=l2(reg))(t)
        t = bn(t)
        t = Activation("relu")(t)
        t = ZeroPadding2D((1, 1))(t)
        t = MaxPooling2D((3, 3), strides=(2, 2))(t)

        # Residual stages: the first module of each stage downsamples
        # via stride (2, 2) — except stage 0, which keeps (1, 1) — and
        # the remaining modules preserve the spatial size.
        for i, stage_depth in enumerate(stages):
            stride = (1, 1) if i == 0 else (2, 2)
            t = ResNet.residual_module(t,
                                       filters[i + 1],
                                       stride,
                                       chanDim,
                                       red=True,
                                       bnEps=bnEps,
                                       bnMom=bnMom)
            for _ in range(stage_depth - 1):
                t = ResNet.residual_module(t,
                                           filters[i + 1], (1, 1),
                                           chanDim,
                                           bnEps=bnEps,
                                           bnMom=bnMom)

        # BN => ReLU => average pooling instead of dense layers, then
        # flatten into the softmax classifier.
        t = bn(t)
        t = Activation("relu")(t)
        t = AveragePooling2D((8, 8))(t)
        t = Flatten()(t)
        t = Dense(classes, kernel_regularizer=l2(reg))(t)
        t = Activation("softmax")(t)

        return Model(inputs, t, name="resnet")
예제 #10
0
def train(run_name, start_epoch, stop_epoch, img_w):
    """Train the CNN => GRU => CTC OCR model (legacy Keras 1.x API).

    Downloads the word-list data, builds the network for images of width
    `img_w`, resumes from OUTPUT_DIR/run_name/weights%02d.h5 when
    start_epoch > 0, and trains with fit_generator up to stop_epoch.
    """
    # Input Parameters
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * (val_split))

    # Network parameters
    conv_num_filters = 16
    filter_size = 3
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512

    # Channels-first ('th') vs channels-last input layout (Keras 1.x check).
    if K.image_dim_ordering() == 'th':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    # Fetch the word lists used to render synthetic text images.
    fdir = os.path.dirname(
        get_file('wordlists.tgz',
                 origin='http://www.isosemi.com/datasets/wordlists.tgz',
                 untar=True))

    # Generator that renders words to images; downsample_factor matches
    # the two pool_size poolings below.
    img_gen = TextImageGenerator(
        monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
        bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
        minibatch_size=32,
        img_w=img_w,
        img_h=img_h,
        downsample_factor=(pool_size**2),
        val_split=words_per_epoch - val_words)
    act = 'relu'
    # Two conv + pool stages extract visual features.
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Convolution2D(conv_num_filters,
                          filter_size,
                          filter_size,
                          border_mode='same',
                          activation=act,
                          init='he_normal',
                          name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Convolution2D(conv_num_filters,
                          filter_size,
                          filter_size,
                          border_mode='same',
                          activation=act,
                          init='he_normal',
                          name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    # Collapse (height, filters) into the feature axis so the width axis
    # becomes the RNN's time dimension.
    conv_to_rnn_dims = (img_w // (pool_size**2),
                        (img_h // (pool_size**2)) * conv_num_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)

    # Two layers of bidirecitonal GRUs
    # GRU seems to work as well, if not better than LSTM:
    gru_1 = GRU(rnn_size, return_sequences=True, init='he_normal',
                name='gru1')(inner)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 init='he_normal',
                 name='gru1_b')(inner)
    # Keras 1.x merge(): sum the forward/backward passes of layer 1,
    # concatenate those of layer 2.
    gru1_merged = merge([gru_1, gru_1b], mode='sum')
    gru_2 = GRU(rnn_size, return_sequences=True, init='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 init='he_normal',
                 name='gru2_b')(gru1_merged)

    # transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(), init='he_normal',
                  name='dense2')(merge([gru_2, gru_2b], mode='concat'))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(input=[input_data], output=y_pred).summary()

    labels = Input(name='the_labels',
                   shape=[img_gen.absolute_max_string_len],
                   dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])

    # clipnorm seems to speeds up convergence
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

    # Training model takes labels/lengths as extra inputs because the CTC
    # loss is computed inside the graph.
    model = Model(input=[input_data, labels, input_length, label_length],
                  output=[loss_out])

    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
    if start_epoch > 0:
        # Resume from the checkpoint of the previous epoch.
        weight_file = os.path.join(
            OUTPUT_DIR,
            os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        model.load_weights(weight_file)
    # captures output of softmax so we can decode the output during visualization
    test_func = K.function([input_data], [y_pred])

    viz_cb = VizCallback(run_name, test_func, img_gen.next_val())

    model.fit_generator(generator=img_gen.next_train(),
                        samples_per_epoch=(words_per_epoch - val_words),
                        nb_epoch=stop_epoch,
                        validation_data=img_gen.next_val(),
                        nb_val_samples=val_words,
                        callbacks=[viz_cb, img_gen],
                        initial_epoch=start_epoch)
예제 #11
0
File: keras1.py  Project: Shadesfear/SIP
def keras_own():
    """Build the pretrained character-classification CNN.

    Constructs the two-conv-block network (Keras 1.x Convolution2D API,
    Theano 'th' dim ordering), compiles it with Adam, loads the saved
    weights from './Week_7_export/keras.h5' and returns the model.
    """
    ## Network configuration.
    batch_size = 32   # batch size used for training (kept for reference)
    nb_classes = 135  # number of output classes
    nb_epoch = 40     # number of training epochs (kept for reference)
    nb_filters = 20   # convolutional filters per layer
    nb_pool = 2       # max-pooling window size
    nb_conv = 5       # convolution kernel size

    model = Sequential()

    # Conv block 1 — declares the channels-first 1x29x29 input.
    model.add(
        Convolution2D(nb_filters,
                      nb_conv,
                      nb_conv,
                      dim_ordering='th',
                      border_mode='valid',
                      input_shape=(1, 29, 29)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool), dim_ordering="th"))

    # Conv block 2.
    model.add(
        Convolution2D(nb_filters,
                      nb_conv,
                      nb_conv,
                      dim_ordering='th',
                      border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool), dim_ordering="th"))

    # Fully-connected head.
    model.add(Flatten())
    model.add(Dense(4000, batch_input_shape=(None, 1, 29, 29)))
    model.add(Activation('relu'))
    model.add(Dense(135))
    model.add(Activation('softmax'))

    optimizer = adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                     decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    ## Load the pretrained network weights.
    model.load_weights('./Week_7_export/keras.h5')
    return model
예제 #12
0
def vgg16_model(img_rows, img_cols, color_type=1):
    """VGG-16 (Keras 1.x API, channels-first) fine-tuned to 10 classes.

    Builds the standard VGG-16, loads ImageNet weights from
    'vgg16_weights.h5', replaces the 1000-way classifier with a fresh
    10-way softmax and compiles the model with SGD.
    """
    model = Sequential()

    # Convolutional trunk: (filter count, conv layer names) per block.
    blocks = [
        (64, ('conv1_1', 'conv1_2')),
        (128, ('conv2_1', 'conv2_2')),
        (256, ('conv3_1', 'conv3_2', 'conv3_3')),
        (512, ('conv4_1', 'conv4_2', 'conv4_3')),
        (512, ('conv5_1', 'conv5_2', 'conv5_3')),
    ]
    first = True
    for nb_filter, names in blocks:
        for layer_name in names:
            if first:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D(
                    (1, 1), input_shape=(color_type, img_rows, img_cols)))
                first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(nb_filter, 3, 3, activation='relu',
                                    name=layer_name))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Original ImageNet head — required so the weight file lines up.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    model.load_weights('vgg16_weights.h5')

    # Drop the 1000-way classifier and graft on a fresh 10-way softmax
    # (Keras 1.x-era pop trick on Sequential).
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    model.add(Dense(10, activation='softmax'))

    # SGD with a small learning rate (5e-4) for fine-tuning.
    sgd = SGD(lr=5e-4, decay=4e-3, momentum=0.9, nesterov=True)
    # adam = Adam()
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
예제 #13
0
def vgg16_256_model(img_rows, img_cols, color_type=1):
    """VGG-16 variant (Keras 1.x, channels-first) with padded FC weights.

    Builds the VGG-16 trunk plus two 4096-unit dense layers, copies
    weights layer-by-layer from 'vgg16_weights.h5' via h5py (zero-padding
    the first dense layer's kernel to fit a larger flattened input),
    appends a 10-way softmax and compiles with SGD.
    """
    model = Sequential()
    # Block 1 (the first layer declares the channels-first input shape).
    model.add(
        ZeroPadding2D((1, 1), input_shape=(color_type, img_rows, img_cols)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 2.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 3.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 4.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 5.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Dense head (no final classifier yet — weights are copied first).
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    weight_path = 'vgg16_weights.h5'

    # Copy weights layer-by-layer from the old-format h5 file.
    # NOTE(review): file handle is only closed on the happy path — no
    # `with`/`finally`; layer index 32 is presumably the first Dense layer.
    f = h5py.File(weight_path)
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        if k == 32:
            # The flattened input here is larger than in stock VGG-16, so
            # the saved kernel is zero-padded with a (7680, 4096) block to
            # match this model's Dense weight shape.
            weights = []
            weight_small = g['param_0']
            zero = np.zeros((7680, 4096))
            weights.append(np.vstack((weight_small, zero)))
            weights.append(g['param_1'])
        else:
            weights = [
                g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])
            ]
        model.layers[k].set_weights(weights)
    f.close()

    # Fresh 10-way classifier on top of the copied weights.
    model.add(Dense(10, activation='softmax'))

    # SGD with a small fine-tuning learning rate (5e-4); the Adam
    # instance below is created but not used.
    sgd = SGD(lr=5e-4, decay=4e-3, momentum=0.9, nesterov=True)
    adam = Adam()
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
예제 #14
0
def getModel1():
    """Build a deep VGG-style CNN for channels-first 3x32x32 input.

    Four convolutional stages (128 / 256 / 512 / 1024 filters), each
    consisting of three zero-padded 3x3 convolutions with ReLU, followed
    by 2x2 max-pooling and dropout.  A two-layer fully connected head
    and a softmax classifier finish the network.  Every weight matrix is
    L2-regularized with the module-level ``weightDecay``; ``outNeurons``
    and ``nb_classes`` are module-level constants as well.

    Returns:
        The assembled (uncompiled) Sequential model.
    """
    model = Sequential()

    # Convolutional stages: three padded 3x3 convs per stage, then
    # pool + dropout.  Only the very first layer carries input_shape.
    for stage_idx, n_filters in enumerate([128, 256, 512, 1024]):
        for conv_idx in range(3):
            if stage_idx == 0 and conv_idx == 0:
                # Theano ("th") channel ordering: (channels, rows, cols).
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 32, 32)))
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3,
                                    W_regularizer=l2(weightDecay)))
            model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
        model.add(Dropout(0.5))

    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(outNeurons, W_regularizer=l2(weightDecay)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(int(outNeurons / 2), W_regularizer=l2(weightDecay)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, W_regularizer=l2(weightDecay)))
    model.add(Activation('softmax'))
    return model
예제 #15
0
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator

# Dataset locations (machine-specific absolute paths).
original_dataset = '/media/lac/DATA/virtualenv/deep/train/'
base_dir = '/media/lac/DATA/virtualenv/deep/cats_dogs'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

# Small ConvNet for 150x150 RGB images (cats-vs-dogs style binary task):
# four conv/ReLU/max-pool stages with doubling filter counts, then
# flatten + dropout.  NOTE(review): the snippet is truncated here -- the
# dense/output layers and compile step presumably follow; confirm
# against the original example.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150, 150, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D((2, 2)))

model.add(Flatten())
model.add(Dropout(0.5))
import json

img_w = 200
img_h = 200
n_labels = 2

kernel = 3

encoding_layers = [
    Conv2D(64, kernel, padding='same', input_shape=(img_h, img_w, 14)),
    BatchNormalization(),
    Activation('relu'),
    Conv2D(64, kernel, padding='same'),
    BatchNormalization(),
    Activation('relu'),
    MaxPooling2D(),
    Conv2D(128, kernel, padding='same'),
    BatchNormalization(),
    Activation('relu'),
    Conv2D(128, kernel, padding='same'),
    BatchNormalization(),
    Activation('relu'),
    MaxPooling2D(),
    Conv2D(256, kernel, padding='same'),
    BatchNormalization(),
    Activation('relu'),
    Conv2D(256, kernel, padding='same'),
    BatchNormalization(),
    Activation('relu'),
    Conv2D(256, kernel, padding='same'),
    BatchNormalization(),
예제 #17
0
파일: lenet.py 프로젝트: lucafiore/LeNet5
    def build(numChannels, imgRows, imgCols, numClasses,
              activation="relu", weightsPath=None):
        """Construct the LeNet-5 architecture as a Sequential model.

        Args:
            numChannels: number of image channels (e.g. 1 for grayscale).
            imgRows: input image height in pixels.
            imgCols: input image width in pixels.
            numClasses: size of the final softmax layer.
            activation: activation applied after each conv/dense layer.
            weightsPath: optional path to pre-trained weights to load.

        Returns:
            The assembled (and optionally pre-loaded) model.
        """
        # Channel-axis placement depends on the backend configuration
        # (see $HOME/.keras/keras.json).
        if K.image_data_format() == "channels_first":
            shape = (numChannels, imgRows, imgCols)
        else:
            shape = (imgRows, imgCols, numChannels)

        net = Sequential()

        # Zero-pad 28x28 MNIST digits up to the 32x32 input LeNet expects.
        net.add(ZeroPadding2D(padding=(2, 2), input_shape=shape))

        # C1 -> S2: first 5x5 convolution, activation, 2x2 max-pooling.
        net.add(Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1),
                       padding="valid"))
        net.add(Activation(activation))
        net.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # C3 -> S4: second 5x5 convolution, activation, 2x2 max-pooling.
        net.add(Conv2D(filters=16, kernel_size=(5, 5), strides=(1, 1),
                       padding="valid"))
        net.add(Activation(activation))
        net.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # C5: flatten the feature maps and apply the first FC layer
        # (output = activation(dot(input, kernel) + bias)).
        net.add(Flatten())
        net.add(Dense(units=120, activation=None, use_bias=True))
        net.add(Activation(activation))

        # FC6: second fully connected layer.
        net.add(Dense(units=84, activation=None, use_bias=True))
        net.add(Activation(activation))

        # FC7 + softmax output over the classes.
        net.add(Dense(units=numClasses, use_bias=True))
        net.add(Activation("softmax"))

        # Load pre-trained weights when a checkpoint path is supplied.
        if weightsPath is not None:
            net.load_weights(weightsPath)

        return net
예제 #18
0
def train(img_w, train_data, val_data):
    """Build and train a CRNN + CTC model for text recognition.

    Pipeline: two conv/max-pool stages -> reshape to a (timesteps,
    features) sequence -> small dense bottleneck -> two summed
    bidirectional LSTM layers -> per-timestep softmax, trained with a
    CTC loss computed inside a Lambda layer.

    :param img_w: input image width in pixels (height is fixed at 64)
    :param train_data: training source consumed by ImageGenerator
    :param val_data: validation source consumed by ImageGenerator
    :return: the compiled, fitted training model (with the CTC head)
    """
    # Input Parameters
    img_h = 64

    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512

    # Channel-axis placement depends on the backend configuration.
    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    batch_size = 32
    # Two pool_size x pool_size poolings shrink the width by
    # pool_size**2; the generator needs this to size the CTC inputs.
    downsample_factor = pool_size**2
    tiger_train = ImageGenerator(train_data, img_w, img_h, batch_size,
                                 downsample_factor)
    tiger_train.build_data()
    tiger_val = ImageGenerator(val_data, img_w, img_h, batch_size,
                               downsample_factor)
    tiger_val.build_data()

    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters,
                   kernel_size,
                   padding='same',
                   activation=act,
                   kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters,
                   kernel_size,
                   padding='same',
                   activation=act,
                   kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    # Collapse (width, height, filters) feature maps into a sequence of
    # width-wise timesteps for the RNN stack.
    conv_to_rnn_dims = (img_w // (pool_size**2),
                        (img_h // (pool_size**2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)

    # Two layers of bidirecitonal LSTMs
    # (forward and backward passes are merged by elementwise addition).
    lstm_1 = LSTM(rnn_size,
                  return_sequences=True,
                  kernel_initializer='he_normal',
                  name='lstm1')(inner)
    lstm_1b = LSTM(rnn_size,
                   return_sequences=True,
                   go_backwards=True,
                   kernel_initializer='he_normal',
                   name='lstm1_b')(inner)
    lstm1_merged = add([lstm_1, lstm_1b])
    lstm_2 = LSTM(rnn_size,
                  return_sequences=True,
                  kernel_initializer='he_normal',
                  name='lstm2')(lstm1_merged)
    lstm_2b = LSTM(rnn_size,
                   return_sequences=True,
                   go_backwards=True,
                   kernel_initializer='he_normal',
                   name='lstm2_b')(lstm1_merged)

    # transforms RNN output to character activations:
    inner = Dense(tiger_train.get_output_size(),
                  kernel_initializer='he_normal',
                  name='dense2')(concatenate([lstm_2, lstm_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    # Prediction-only submodel summary (for inspection; not returned).
    Model(inputs=input_data, outputs=y_pred).summary()

    # Extra inputs required by the CTC loss: the label sequences and the
    # true lengths of both the network output and the labels.
    labels = Input(name='the_labels',
                   shape=[tiger_train.max_text_len],
                   dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])

    # clipnorm seems to speeds up convergence
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

    model = Model(inputs=[input_data, labels, input_length, label_length],
                  outputs=loss_out)

    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)

    model.fit_generator(generator=tiger_train.next_batch(),
                        steps_per_epoch=tiger_train.n,
                        epochs=1,
                        validation_data=tiger_val.next_batch(),
                        validation_steps=tiger_val.n)
    return model
예제 #19
0
def SimpleNet(yoloNet):
    """Translate a parsed YOLO network description into a Keras model.

    The first convolution (yoloNet.layers[1]) is added explicitly with a
    stride-2 7x7 kernel; the remaining layers are converted in a loop
    dispatching on each layer's ``type`` string.  Pre-trained
    weights/biases are copied in when present, otherwise layers fall
    back to he_uniform initialization.

    NOTE(review): Python 2 code (print statement on the error path).

    :param yoloNet: parsed network with .layers / .layer_number, where
        each layer exposes type, size, n, weights, biases, output_size
        (presumably from a Darknet cfg/weights loader -- confirm).
    :return: the assembled Keras Sequential model
    """
    model = Sequential()

    #Convolution Layer 2 & Max Pooling Layer 3
    # model.add(ZeroPadding2D(padding=(1,1),input_shape=(3,448,448)))
    l = yoloNet.layers[1]
    if l.weights is None or l.biases is None:
        model.add(
            Convolution2D(64,
                          7,
                          7,
                          input_shape=(3, 448, 448),
                          init='he_uniform',
                          border_mode='same',
                          subsample=(2, 2)))
    else:
        model.add(
            Convolution2D(
                64,
                7,
                7,
                input_shape=(3, 448, 448),
                weights=[yoloNet.layers[1].weights, yoloNet.layers[1].biases],
                border_mode='same',
                subsample=(2, 2)))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer_cnt tracks how many Keras layers have been emitted so far
    # (each conv contributes pad + conv + activation = 3).
    layer_cnt = 3

    #Use a for loop to replace all manually defined layers
    for i in range(3, yoloNet.layer_number):
        l = yoloNet.layers[i]
        # print i, len(model.layers)
        if (l.type == "CONVOLUTIONAL"):
            # print l.size, l.n, l.c, l.h, l.w
            sub = (1, 1)
            if i == 26:  # modify convolution stride
                sub = (2, 2)
            # Pad by half the kernel size so the 'valid' conv below
            # preserves spatial dimensions (same as border_mode='same').
            model.add(ZeroPadding2D(padding=(
                l.size // 2,
                l.size // 2,
            )))
            if l.weights is None or l.biases is None:
                model.add(
                    Convolution2D(l.n,
                                  l.size,
                                  l.size,
                                  init='he_uniform',
                                  border_mode='valid',
                                  subsample=sub))
            else:
                model.add(
                    Convolution2D(l.n,
                                  l.size,
                                  l.size,
                                  weights=[l.weights, l.biases],
                                  border_mode='valid',
                                  subsample=sub))
            model.add(LeakyReLU(alpha=0.1))
            layer_cnt += 3
        elif (l.type == "MAXPOOL"):
            model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid'))
            layer_cnt += 1
        elif (l.type == "FLATTEN"):
            model.add(Flatten())
            layer_cnt += 1
        elif (l.type == "CONNECTED"):
            # print l.input_size, l.output_size, l.weights.shape, l.biases.shape
            if l.weights is None or l.biases is None:
                model.add(Dense(l.output_size, init='he_uniform'))
            else:
                model.add(Dense(l.output_size, weights=[l.weights, l.biases]))
            layer_cnt += 1
        elif (l.type == "LEAKY"):
            model.add(LeakyReLU(alpha=0.1))
            layer_cnt += 1
        elif (l.type == "DROPOUT"):
            model.add(Dropout(0.5))
            layer_cnt += 1
        else:
            print "Error: Unknown Layer Type", l.type
    return model
def VGG_16():
    """Build VGG-16 (channels-first, 3x224x224 input), load pre-trained
    convolutional/dense weights from 'weights/vgg16_weights.h5', then
    append a fresh 10-way softmax head and compile with SGD.

    Fix: ``MaxPooling2D``'s keyword argument is ``strides``; the
    original code passed ``stride=`` which raises a TypeError as soon as
    the layer is constructed.

    :return: the compiled Keras model
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))

    # Copy the pre-trained weights layer by layer; the savefile's
    # trailing fully-connected layers (beyond what is defined above)
    # are skipped.  Opened read-only to avoid accidental modification.
    f = h5py.File('weights/vgg16_weights.h5', 'r')
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
    f.close()
    print('Model loaded.')

    # Fresh 10-class softmax head appended after the pre-trained stack.
    model.add(Dense(10, activation='softmax'))

    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')

    return model
예제 #21
0
def pooling_func(x):
    """Apply the globally selected 2x2 (stride-2) pooling to tensor ``x``.

    The module-level ``pooltype`` flag chooses the operation:
    1 -> average pooling, anything else -> max pooling.
    """
    pool_cls = AveragePooling2D if pooltype == 1 else MaxPooling2D
    return pool_cls((2, 2), strides=(2, 2))(x)
예제 #22
0
    def build_cross(input_shape, num_outputs, block_fn, repetitions):
        """Builds a custom ResNet like architecture.

        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved

        Returns:
            The keras `Model`.
        """
        # Presumably initializes the global ROW_AXIS/COL_AXIS constants
        # used below -- defined elsewhere in this module; confirm.
        _handle_dim_ordering()
        if len(input_shape) != 3:
            raise Exception(
                "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)"
            )

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[0])

        # Load function from str if needed.
        block_fn = _get_block(block_fn)

        # Stem shared by all three branches: 7x7 stride-2 conv followed
        # by a 3x3 stride-2 max-pool.
        input = Input(shape=input_shape)
        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7),
                              strides=(2, 2))(input)
        pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                             padding="same")(conv1)

        init = pool1

        #first branch
        # Plain residual tower; each stage's output is remembered so the
        # later branches can tap into it via shortcuts.
        block = init
        filters = 64
        branch_list_1 = []
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2
            branch_list_1.append(block)
        block_branch_1 = block

        #second branch
        # Same tower, but each stage input is combined (via _shortcut)
        # with a stage output of branch 1.  NOTE(review): the
        # [3]/[0]/[1]/[2] indexing assumes len(repetitions) == 4 --
        # confirm with callers.
        block = init
        filters = 64
        branch_list_2 = []
        for i, r in enumerate(repetitions):
            #print(i)
            if (i == 0):
                block = _shortcut(branch_list_1[3], block)
                #block = block
            elif (i == 1):
                block = _shortcut(branch_list_1[0], block)
            elif (i == 2):
                block = _shortcut(branch_list_1[1], block)
            elif (i == 3):
                block = _shortcut(branch_list_1[2], block)
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2
            branch_list_2.append(block)
        block_branch_2 = block

        #third branch
        # Identical wiring to the second branch, fed from branch 2.
        block = init
        filters = 64
        branch_list_3 = []
        for i, r in enumerate(repetitions):
            if (i == 0):
                block = _shortcut(branch_list_2[3], block)
                #block =block
            elif (i == 1):
                block = _shortcut(branch_list_2[0], block)
            elif (i == 2):
                block = _shortcut(branch_list_2[1], block)
            elif (i == 3):
                block = _shortcut(branch_list_2[2], block)
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2
            branch_list_3.append(block)
        block_branch_3 = block
        print("init block size", block_branch_3)
        #merge
        # Learnable weighted combination of the three branch outputs.
        block = AddWeight(weight_size=3, name="add_weight")(
            [block_branch_1, block_branch_2, block_branch_3])
        #block = block_branch_3
        #print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!",block.shape)
        # Last activation
        block = _bn_relu(block)

        # Classifier block
        # Pool over the full remaining spatial extent, then softmax.
        block_shape = K.int_shape(block)
        pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS],
                                            block_shape[COL_AXIS]),
                                 strides=(1, 1))(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs,
                      kernel_initializer="he_normal",
                      activation="softmax")(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
예제 #23
0
    def build(width=224, height=224, depth=3, classes=1000, weightsPath=None):
        """Assemble a Darknet-19-style classifier (channels-last).

        Structure: repeated Conv -> BatchNorm -> LeakyReLU units with
        2x2 max-pooling between groups; later groups interleave 1x1
        bottleneck convolutions between the 3x3 ones.  The head is
        global average pooling into a Dense(classes) layer with
        batch-norm and a sigmoid activation.

        Args:
            width, height, depth: input image dimensions (H, W, C order
                in the actual input_shape).
            classes: number of output units.
            weightsPath: optional path to pre-trained weights to load.

        Returns:
            The assembled (and optionally pre-loaded) Sequential model.
        """
        shape = (height, width, depth)
        model = Sequential()

        def conv_unit(filters, size, **kwargs):
            # The network's basic unit: Conv -> BatchNorm -> LeakyReLU.
            model.add(Conv2D(filters, (size, size), padding="same", **kwargs))
            model.add(BatchNormalization())
            model.add(LeakyReLU(alpha=0.1))

        # Two single-conv groups (input_shape on both, mirroring the
        # original layer configuration; Keras only uses the first).
        conv_unit(32, 3, input_shape=shape)
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        conv_unit(64, 3, input_shape=shape)
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # Two 3-conv groups: 3x3 / 1x1 bottleneck / 3x3.
        for n_filters in (128, 256):
            conv_unit(n_filters, 3)
            conv_unit(n_filters // 2, 1)
            conv_unit(n_filters, 3)
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # Two 5-conv groups: alternating 3x3 and 1x1 bottlenecks.
        for n_filters in (512, 1024):
            conv_unit(n_filters, 3)
            conv_unit(n_filters // 2, 1)
            conv_unit(n_filters, 3)
            conv_unit(n_filters // 2, 1)
            conv_unit(n_filters, 3)
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # Classifier head: global average pooling, then a batch-normed
        # Dense layer with sigmoid output.
        model.add(GlobalAveragePooling2D(data_format="channels_last"))
        model.add(Dense(classes))
        model.add(BatchNormalization())
        model.add(Activation("sigmoid"))

        # Load pre-trained weights when a checkpoint path is supplied.
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model
예제 #24
0
def init_model(preload=None,
               declare=False,
               use_inception=True,
               use_resnet=False):
    """Create (or load) the classification model selected by the flags.

    Dispatch order: a preloaded full model (unless ``declare``), then
    the module-level multiscale/multicrop variants, then a frozen
    ResNet50 body with a small trainable head, then Inception-v4, and
    finally a hand-rolled VGG-style ConvNet as the fallback.

    NOTE(review): Python 2 code (print statement).  Relies on module
    globals: use_multiscale, use_multicrop, img_width, img_height,
    channels, ROOT.

    :param preload: optional path to a saved model / weights file
    :param declare: when True, rebuild the architecture instead of
        loading a full saved model
    :param use_inception: select the Inception-v4 architecture
    :param use_resnet: select the ResNet50-based architecture
    :return: a Keras model
    """
    print 'Compiling model...'
    # Multiscale excludes the other architecture choices.
    if use_multiscale and use_inception and use_resnet:
        raise ValueError('Incorrect params')
    if not declare and preload: return load_model(preload)
    if use_multiscale: return multiscale_model(preload)
    if use_multicrop: return multicrop_model(preload)

    if use_resnet:
        # Frozen ImageNet body + small trainable classification head.
        if not preload:
            weights_path = ROOT + '/resnet50_tf_notop.h5'
            body = ResNet50(input_shape=(img_width, img_width, channels),
                            weights_path=weights_path)
        for layer in body.layers:
            layer.trainable = False

        head = body.output
        batchnormed = BatchNormalization(axis=3)(head)
        avgpooled = GlobalAveragePooling2D()(batchnormed)
        # dropout = Dropout(0.3) (avgpooled)
        dense = Dense(128)(avgpooled)
        batchnormed = BatchNormalization()(dense)
        relu = PReLU()(batchnormed)
        dropout = Dropout(0.2)(relu)
        output = Dense(1, activation="sigmoid")(dropout)
        model = Model(body.input, output)

        if preload: model.load_weights(preload)
        return model

    if use_inception:
        if preload: return load_model(preload)
        return inception_v4()
    else:
        # Fallback: three conv groups (16 -> 32 -> 64 filters) with ELU
        # activations, then a small dense head for binary output.
        model = Sequential()
        model.add(
            ZeroPadding2D((1, 1),
                          input_shape=(img_width, img_height, channels)))
        model.add(Convolution2D(16, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(16, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(MaxPooling2D(pool_size=(5, 5)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(32, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(32, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(32, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(MaxPooling2D(pool_size=(3, 3)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation="linear"))
        model.add(ELU())
        model.add(MaxPooling2D(pool_size=(3, 3)))

        model.add(Flatten())
        model.add(Dense(64, activation='linear'))
        model.add(ELU())
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))

        if preload: model.load_weights(preload)
    return model
예제 #25
0
def train_model(X_train,
                y_train,
                X_validation,
                y_validation,
                batch_size=128):  #64
    '''
    Trains a 2D convolutional neural network on MFCC "images".

    Fix: steps_per_epoch previously divided by a hard-coded 32 even
    though the generator yields batches of `batch_size` (default 128);
    it now uses the actual batch size so one epoch covers the training
    set exactly once.

    :param X_train: Numpy array of mfccs, shape (n_samples, rows, cols)
    :param y_train: Binary (one-hot) matrix based on labels
    :param X_validation: validation mfccs, same layout as X_train
    :param y_validation: validation labels, same layout as y_train
    :param batch_size: mini-batch size fed to the data generator
    :return: Trained model
    '''

    # Get row, column, and class sizes
    rows = X_train[0].shape[0]
    cols = X_train[0].shape[1]
    val_rows = X_validation[0].shape[0]
    val_cols = X_validation[0].shape[1]
    num_classes = len(y_train[0])

    # input image dimensions to feed into 2D ConvNet Input layer:
    # append a trailing channel axis so data is (rows, cols, 1).
    input_shape = (rows, cols, 1)
    X_train = X_train.reshape(X_train.shape[0], rows, cols, 1)
    X_validation = X_validation.reshape(X_validation.shape[0], val_rows,
                                        val_cols, 1)

    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'training samples')

    # Two conv/pool stages, then a small dense head with dropout.
    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               data_format="channels_last",
               input_shape=input_shape))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    # Stops training if accuracy does not change at least 0.005 over 10 epochs
    es = EarlyStopping(monitor='acc',
                       min_delta=.005,
                       patience=10,
                       verbose=1,
                       mode='auto')

    # Creates log file for graphical interpretation using TensorBoard
    tb = TensorBoard(log_dir='../logs',
                     histogram_freq=0,
                     batch_size=32,
                     write_graph=True,
                     write_grads=True,
                     write_images=True,
                     embeddings_freq=0,
                     embeddings_layer_names=None,
                     embeddings_metadata=None)

    # Image shifting (mild horizontal-shift augmentation)
    datagen = ImageDataGenerator(width_shift_range=0.05)

    # Fit model using ImageDataGenerator; steps_per_epoch derived from
    # the real batch size (integer division avoids fractional steps).
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                        steps_per_epoch=len(X_train) // batch_size,
                        epochs=EPOCHS,
                        callbacks=[es, tb],
                        validation_data=(X_validation, y_validation))

    return model
예제 #26
0
def multiscale_model(preload=None):
    """Build a three-scale CNN sharing its first two conv layers.

    Three inputs at different resolutions pass through the same pair of
    shared 3x3 conv layers, are pooled down to a common spatial size,
    concatenated along the channel axis, pushed through two conv stacks,
    and reduced to a single sigmoid output.

    Args:
        preload: optional path to a weights file to load into the model.

    Returns:
        A Keras ``Model`` taking ``[input_1, input_2, input_3]``.

    NOTE(review): relies on module-level globals (num_filters, channels,
    w1..w3, h1..h3) and the Keras 1 functional API (``merge``,
    ``Model(input=..., output=...)``) — confirm the installed Keras version.
    """
    # Conv layers whose weights are shared by every scale branch.
    shared_conv_1 = Convolution2D(num_filters, 3, 3, activation="linear")
    shared_conv_2 = Convolution2D(num_filters, 3, 3, activation="linear")

    def scale_branch(tensor, pool):
        # pad -> shared conv -> ELU, twice, then pool to a common size
        x = ZeroPadding2D((1, 1))(tensor)
        x = ELU()(shared_conv_1(x))
        x = ZeroPadding2D((1, 1))(x)
        x = ELU()(shared_conv_2(x))
        return MaxPooling2D(pool_size=pool, strides=pool)(x)

    input_1 = Input(shape=(channels, w1, h1))
    input_2 = Input(shape=(channels, w2, h2))
    input_3 = Input(shape=(channels, w3, h3))

    # Per-scale pool sizes align the branch outputs for concatenation.
    pool_1 = scale_branch(input_1, (4, 4))
    pool_2 = scale_branch(input_2, (3, 3))
    pool_3 = scale_branch(input_3, (5, 5))

    multiscaleInputBlock = merge([pool_1, pool_2, pool_3],
                                 mode='concat',
                                 concat_axis=1)

    def conv_stack(tensor, n_filters, pool):
        # three padded conv + ELU layers, then dropout and max-pool
        x = tensor
        for _ in range(3):
            x = ZeroPadding2D((1, 1))(x)
            x = Convolution2D(n_filters, 3, 3, activation='linear')(x)
            x = ELU()(x)
        x = Dropout(0.5)(x)
        return MaxPooling2D(pool_size=pool)(x)

    features = conv_stack(multiscaleInputBlock, 64, (5, 5))
    features = conv_stack(features, 128, (3, 3))

    # Classifier head: 1024-unit ELU layer with dropout, sigmoid output.
    flat = Flatten()(features)
    hidden = ELU()(Dense(1024, activation="linear")(flat))
    output = Dense(1, activation="sigmoid")(Dropout(0.5)(hidden))

    model = Model(input=[input_1, input_2, input_3], output=output)

    if preload:
        model.load_weights(preload)

    return model
예제 #27
0
# Report the dataset locations (set earlier in the script).
print("   <Setting> root location: %s"%(root))
print("   <Setting> relative image location: %s"%(images_path))
print("   <Setting> relative pairs.txt location: %s\n"%(pair_txt_path))

# Load LFW images (plus horizontally flipped copies) and split them into
# train/test image pairs for the requested cross-validation fold.
raw_images, flipped_raw_images, raw_labels = read_dataset(path=root+images_path, size=32)
trainData1, trainData2, train_labels, testData1, testData2, test_labels = lfw_train_test(root, pair_txt_path, raw_images, flipped_raw_images, raw_labels, foldnum, includeflipped=True)

# Stack each image pair along the channel axis -> (32, 32, 2), scaled to [0, 1].
train_data = np.concatenate((trainData1, trainData2), axis=-1) / 255.0
test_data = np.concatenate((testData1, testData2), axis=-1) / 255.0
train_labels = np_utils.to_categorical(train_labels, 2)  # one-hot: same / different person
test_labels = np_utils.to_categorical(test_labels, 2)

# LeNet-style verification network over the 2-channel pair image.
model = Sequential()
model.add(Convolution2D(filters=20, kernel_size=(5, 5), padding="same", input_shape=(32, 32, 2)))
model.add(Activation(activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Convolution2D(filters=50, kernel_size=(5, 5), padding="same"))
model.add(Activation(activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(150))
model.add(Activation(activation="relu"))
model.add(Dense(50))
model.add(Activation(activation="relu"))
model.add(Dense(2))
model.add(Activation("softmax"))

model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.RMSprop(), metrics=["accuracy"])

# BUG FIX: this example uses the Keras 2 API throughout (filters=,
# kernel_size=, padding=), where Model.fit takes `epochs`; the Keras 1
# keyword `nb_epoch` was deprecated and then removed in Keras 2.
history = model.fit(train_data, train_labels, batch_size=128, epochs=100, verbose=1, validation_data=(test_data, test_labels), shuffle=True)
예제 #28
0
파일: vgg16.py 프로젝트: walobit/courses
 def ConvBlock(self, layers, filters):
     """Append a VGG-style conv block to ``self.model``.

     Adds ``layers`` repetitions of (1-pixel zero padding + 3x3 ReLU conv
     with ``filters`` channels), then closes the block with a 2x2,
     stride-2 max-pool.
     """
     net = self.model
     for _ in range(layers):
         net.add(ZeroPadding2D((1, 1)))
         net.add(Convolution2D(filters, 3, 3, activation='relu'))
     net.add(MaxPooling2D((2, 2), strides=(2, 2)))
 # Model layout: Input -> CONV(ReLU) -> CONV(ReLU) -> Pool(2) -> FC(relu) -> FC(softmax)
 model = Sequential()
 # Conv layer with 10 filters, a (3, 3) kernel, and stride 1.
 model.add(
     Convolution2D(input_shape=(28, 28, 1),
                   filters=10,
                   kernel_size=(3, 3),
                   strides=1,
                   padding='same'))
 # ReLU is used as the activation function.
 model.add(Activation('relu'))
 # Conv layer with 20 filters, a (3, 3) kernel, and stride 1.
 model.add(Convolution2D(filters=20, kernel_size=(3, 3), strides=1))
 model.add(Activation('relu'))
 # Pooling layer to reduce the spatial dimensions of the image.
 model.add(MaxPooling2D(pool_size=(2, 2)))
 # Apply dropout with probability 0.25.
 model.add(Dropout(0.25))
 # Flatten the 2-D feature maps so they can feed the fully connected layer.
 model.add(Flatten())
 # Fully connected layer with 128 output nodes.
 model.add(Dense(128))
 model.add(Activation('relu'))
 # Fully connected layer with 10 output nodes (one per class).
 model.add(Dense(10))
 # Softmax so the output vector sums to 1.
 model.add(Activation('softmax'))
 # NOTE(review): the original comment said the Adam optimizer was used
 # (per the Keras documentation), but the code actually compiles with
 # 'sgd' and mean_squared_error — confirm the intended optimizer/loss.
 model.compile(loss='mean_squared_error',
               optimizer='sgd',
               metrics=['accuracy'])
예제 #30
0
파일: network.py 프로젝트: dupf/speechemo
def maintrainnet():
    tupledata=processwav()    
    for i  in np.arange( len(tupledata)  ):      
        tupledata[i][0]=tupledata[i][0][0:40]
        
    sessionTrain, sessionTest = train_test_split(tupledata, test_size=0.1)
    start = time.time()
    window=2
    allUt = []
    allLabels = []
    utteranceByFeat = []
    labelsByFeat = []
    portionSelection=1.0
    for utterance in (sessionTrain):
        allUt.append([utterance[0]])        
        allLabels.append(encodeLabels(utterance[1]))
    print '----SHAPE--- ',np.array(allUt).shape

    testUtterance = []
    X_test = []
    Y_test = []
    #X_train=np.array()
    for utterance in (sessionTest):
        X_test.append([utterance[0]])
        Y_test.append(encodeLabels(utterance[1]))    
   
    end = time.time()   
    print 'segment level feature extraction total time: ' +str(end - start)
    print 'building middle layer'
    
    #nb_filters = 100
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 2
    voice_rows=240
    voice_cols=100
    nb_filters = 32
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3    
    model=Sequential()
   
    model.add(
              Convolution2D( 20, 3, 3, border_mode='same',input_shape=(1, 40, 40) )
              )
    model.add(Activation('relu'))
    
    #model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Convolution2D(10,5,5))
    model.add(Dropout(0.25))
    
    
    model.add(Convolution2D( 40, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    #model.add(Convolution2D(20, 5, 5))
    
    model.add(Convolution2D(60, 3, 3, border_mode='same'))
    
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))    
    
    model.add(Convolution2D(80, 2, 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(Flatten())
    
    model.add(Dense(80*2*2))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6))
    model.add(Activation('softmax')) 
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
    
    #print 'allUt shape is ',np.array(allUt).shape
    X_train=np.array(allUt)
    X_test=np.array(X_test)
    print 'shape is ',X_test.shape
    X_train = X_train.reshape(X_train.shape[0], 1, 40, 40)
    X_train = X_train.astype('float32')
    #X_test = X_test.reshape(X_test.shape[0], 1, 40, 40)
    X_test = X_test.astype('float32')
   
    print 'X_train.shape IS ',X_train.shape
    print 'X_test.shape IS ',X_test.shape

    model.fit(X_train, np.array(allLabels),
          validation_data=(X_test, np.array(Y_test)), nb_epoch=50, batch_size=40,show_accuracy=True, shuffle=True)

    print 'estimating emotion state prob. distribution'
    stateEmotions = []
    #score = model.evaluate(X_test, np.array(Y_test), show_accuracy=False, verbose=0)
    score = model.evaluate(X_test, np.array(Y_test), show_accuracy=True, verbose=0)
    print 'Test scores is ',score