def initial_num_char_phase1():
    """
    识别二值化图像的字符个数
    :param bw: 二值图像
    :return:
    """
    # Build the model architecture
    model = Sequential()
    model.add(Convolution2D(4, 5, 5, input_shape=(1, 30, 40), border_mode='valid'))
    model.add(Activation('tanh'))

    model.add(Convolution2D(8, 5, 5, input_shape=(1, 26, 36), border_mode='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.55))

    model.add(Convolution2D(16, 4, 4, input_shape=(1, 11, 16), border_mode='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.60))

    model.add(Flatten())
    model.add(Dense(input_dim=16*4*6, output_dim=256, init='glorot_uniform'))
    model.add(Activation('tanh'))

    model.add(Dense(input_dim=256, output_dim=2, init='glorot_uniform'))
    model.add(Activation('softmax'))

    # Load the pre-trained weights
    model.load_weights('model/train_len_size1.d5')

    sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode="categorical")

    return model
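A minimal usage sketch, not part of the original source: assuming the Theano channel-first layout implied by input_shape=(1, 30, 40), a binarized 30x40 crop is reshaped into a batch of one before prediction; the preprocessing of bw below is a placeholder.

import numpy as np

model = initial_num_char_phase1()
bw = np.zeros((30, 40), dtype='float32')       # stand-in for a binarized 30x40 character region
batch = bw.reshape(1, 1, 30, 40)               # (samples, channels, rows, cols)
probs = model.predict(batch, verbose=0)        # shape (1, 2): softmax over the two length classes
predicted_class = int(np.argmax(probs, axis=1)[0])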
Example #2
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #3
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #4
def getmodel():
	nb_classes = 2
	model = Sequential()
	model.add(Convolution2D(32, 3, 3, border_mode='same',
	                        input_shape=(1, RECEP_HEI, RECEP_WEI)))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Convolution2D(32, 3, 3))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Flatten())
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nb_classes))
	model.add(Activation('softmax'))

	sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
	model.load_weights('weights.hdf5')
	model.compile(loss='categorical_crossentropy',
	              optimizer=sgd,
	              metrics=['accuracy'])
	# model.fit(X_train, Y_train, batch_size=32, nb_epoch=1,
          # verbose=1, shuffle = True ,validation_split=0.25)
	return model
def neural_net(num_sensors, params, load=''):
    model = Sequential()

    # First layer.
    model.add(Dense(
        params[0], init='lecun_uniform', input_shape=(num_sensors,)
    ))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Second layer.
    model.add(Dense(params[1], init='lecun_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Output layer.
    model.add(Dense(3, init='lecun_uniform'))
    model.add(Activation('linear'))

    rms = RMSprop()
    model.compile(loss='mse', optimizer=rms)

    if load:
        model.load_weights(load)

    return model
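A usage sketch under assumptions not stated in the source: if the three linear outputs are Q-values for three discrete actions, action selection could look like this; num_sensors and the layer sizes in params are placeholders.

import numpy as np

model = neural_net(num_sensors=3, params=[164, 150])
state = np.array([[0.2, 0.5, 0.9]])            # one observation, shape (1, num_sensors)
q_values = model.predict(state)                # shape (1, 3): one value per action
action = int(np.argmax(q_values[0]))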
    def get_model(self):
        classes = 36
        # data = np.empty((57218, 1, 24, 24), dtype="float32")
        model = Sequential()
        model.add(Convolution2D(4, 5, 5, border_mode='valid', input_shape=(1, 24, 24)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

        model.add(Convolution2D(8, 3, 3, border_mode='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(16, 3, 3, border_mode='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(128, init='normal'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dense(classes, init='normal'))
        model.add(Activation('softmax'))
        sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode="categorical")
        model.load_weights("eduLogin/captcha/tmp/weights.11-0.05.h5")
        return model
def inference_dense(input_dim, class_num, optimizer='sgd', weights_file=''):
    model = Sequential()
    
    model.add(Dense(2048, input_dim=input_dim))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
#    model.add(Dense(256))
#    model.add(Activation('relu'))
#    model.add(Dropout(0.5))
    
    model.add(Dense(class_num))
    model.add(Activation('softmax'))
    
    if weights_file:
        model.load_weights(weights_file)
#    adadelta = Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
    if optimizer == 'sgd':
        opt = SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)
    elif optimizer == 'adam':
        opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    elif optimizer == 'adagrad':
        opt = Adagrad(lr=0.01, epsilon=1e-08)
    elif optimizer == 'adadelta':
        opt = Adadelta(lr=1.0, rho=0.95, epsilon=1e-08)
    elif optimizer == 'rmsprop':
        opt = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
    print('compiling model....')
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    
    return model
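A brief usage sketch with assumed dimensions: building the classifier for 4096-dimensional input features and 10 classes with the 'adam' branch, then predicting on a random batch; the numbers are illustrative only.

import numpy as np

model = inference_dense(input_dim=4096, class_num=10, optimizer='adam')
features = np.random.rand(8, 4096).astype('float32')   # batch of 8 feature vectors
probs = model.predict(features)                         # shape (8, 10); each row sums to 1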
Example #8
def Colorize(weights_path=None):
    model = Sequential()
    # input: 224x224 feature maps with 960 channels -> (960, 224, 224) tensors.
    # 1x1 convolutions progressively reduce the channel dimension (512 -> 256 -> 112).

    model.add(Convolution2D(512, 1, 1, border_mode='valid',input_shape=(960,224,224)))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())

    model.add(Convolution2D(256, 1, 1, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())

    model.add(Convolution2D(112, 1, 1, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())
   
    print "output shape: ",model.output_shape
    #softmax
    model.add(Reshape((112,224*224)))

    print "output_shape after reshaped: ",model.output_shape
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #9
def model(df, parent_id, go_id):

    # Training
    batch_size = 64
    nb_epoch = 64

    # Split pandas DataFrame
    n = len(df)
    split = 0.8
    m = int(n * split)
    train, test = df[:m], df[m:]


    # train, test = train_test_split(
    #     labels, data, batch_size=batch_size)

    train_label, train_data = train['labels'], train['data']

    if len(train_data) < 100:
        raise Exception("No training data for " + go_id)

    test_label, test_data = test['labels'], test['data']
    test_label_rep = test_label


    train_data = train_data.as_matrix()

    test_data = test_data.as_matrix()
    train_data = numpy.hstack(train_data).reshape(train_data.shape[0], 8000)
    test_data = numpy.hstack(test_data).reshape(test_data.shape[0], 8000)
    shape = numpy.shape(train_data)

    print('X_train shape: ', shape)
    print('X_test shape: ', test_data.shape)
    model = Sequential()
    model.add(Dense(8000, activation='relu', input_dim=8000))
    model.add(Highway())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

    model_path = DATA_ROOT + parent_id + '/' + go_id + '.hdf5'
    checkpointer = ModelCheckpoint(
        filepath=model_path, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=7, verbose=1)

    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_split=0.2,
        callbacks=[checkpointer, earlystopper])

    # Loading saved weights
    print('Loading weights')
    model.load_weights(model_path)
    pred_data = model.predict_classes(
        test_data, batch_size=batch_size)
    return classification_report(list(test_label_rep), pred_data)
def make_model_full(inshape, num_classes, weights_file=None):
    model = Sequential()
    model.add(KL.InputLayer(input_shape=inshape[1:]))
    # model.add(KL.Conv2D(32, (3, 3), padding='same', input_shape=inshape[1:]))
    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))

    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)

    return model
def temporalNet(weights=None):
    model = Sequential()
    #3D convolutional layer with 32x32 optical flow as input
    model.add(Convolution3D(30, 20, 17, 17, subsample=(4,2,2), input_shape=(1, 120,32,32)))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(13, 2, 2), strides=(13,2, 2)))
    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())


    model.add(Dense(400))
    model.add(Activation(LeakyReLU()))
    model.add(Dropout(0.2))
    model.add(Dense(50))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(Dense(4, activation='softmax'))

    if weights:
        model.load_weights(weights)

    return model
Example #12
def create_model(load_weights=False):
    nn = Sequential()
    nn.add(Convolution2D(32, 1, 3, 3, border_mode='same', activation='relu'))
    nn.add(Convolution2D(32, 32, 3, 3, border_mode='same', activation='relu'))
    nn.add(MaxPooling2D(poolsize=(2,2)))
    nn.add(Dropout(0.25))
    
    nn.add(Convolution2D(64, 32, 3, 3, border_mode='same', activation='relu')) 
    nn.add(Convolution2D(64, 64, 3, 3, border_mode='same', activation='relu'))
    nn.add(MaxPooling2D(poolsize=(2,2)))
    nn.add(Dropout(0.25))
    
    nn.add(Flatten())
    nn.add(Dense(64*7*7, 256, activation='relu'))
    nn.add(Dropout(0.5))
    
    nn.add(Dense(256,10, activation='softmax'))

    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    nn.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    if load_weights:
        nn.load_weights('cnn_weights.hdf5')
    
    return nn
Example #13
def vgg_basic(img_size, weights_path = None, lr = 0.001):
    '''
    INPUT: img_size = side length of the square images the model is trained on
           weights_path = path to the saved weights of a trained model (optional)
           lr = learning rate for the Adam optimizer
    OUTPUT: the compiled model, with weights loaded if a weights path was
            specified

    A basic convolutional neural net. I found this one to have the best results.
    '''
    model = Sequential()

    model.add(ZeroPadding2D((1,1),input_shape=(3, img_size, img_size)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(5, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    adam = Adam(lr = lr)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy')

    return model
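Usage sketch (assumed, not from the source): the channel-first input_shape=(3, img_size, img_size) means images must be arranged as (samples, channels, rows, cols) before calling fit or predict; img_size here is an arbitrary example value.

import numpy as np

img_size = 64                                           # assumed training resolution
model = vgg_basic(img_size)
x = np.random.rand(4, 3, img_size, img_size).astype('float32')
probs = model.predict(x)                                # shape (4, 5): softmax over the 5 classes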
Example #14
def logistic_regression(model_folder, layer, dimension, number_of_feature,
                        cost="binary_crossentropy", learning_rate=1e-6, dropout_rate=0.5, nepoch=10, activation="relu"):

    model = Sequential()
    model.add(Dense(dimension, input_dim=number_of_feature, init="uniform", activation=activation))
    model.add(Dropout(dropout_rate))

    for idx in range(0, layer-2, 1):
        model.add(Dense(dimension, input_dim=dimension, init="uniform", activation=activation))
        model.add(Dropout(dropout_rate))

    model.add(Dense(1, init="uniform", activation="sigmoid"))

    optimizer = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-06)
    model.compile(loss=cost, optimizer=optimizer, metrics=['accuracy'])

    filepath_model = get_newest_model(model_folder)
    if filepath_model:
        model.load_weights(filepath_model)

        log("Load weights from {}".format(filepath_model), INFO)
    else:
        log("A new one model, {}".format(model_folder), INFO)

    return model
Example #15
def predict_ranking(evalFile, outFile):

    X, qids, pids = load_data(evalFile)
    input_dim = X[0].shape[1]

    assert len(pids[0]) == len(X[0])

    model = Sequential()
    model.add(Dense(64, input_dim=input_dim, init='uniform', activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    weightsFile = '../model/weights.hdf5'
    model.load_weights(weightsFile)

    Y_p = []
    for x in X:
        Y_p.append(model.predict(x))

    with open(outFile, 'w') as f:
        for n, qid in enumerate(qids):
            tupes = zip(Y_p[n], pids[n])
            sortedTupes = sorted(tupes, key=lambda x: x[0], reverse=True)
            # use a separate rank counter so the query index n is not shadowed
            for rank, (y, pid) in enumerate(sortedTupes):
                f.write('{}\tITER\t{}\t{}\t{}\tSOMEID\n'.format(qid, pid, rank, 1001 - rank))
def M8(weights_path=None, input_shape=(1, 64, 64), n_output=None):
    model = Sequential()
    model.add(Convolution2D(
        32, 3, 3, border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(n_output))
    model.add(Activation('softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
Example #17
def get_nn_model(token_dict_size):
    _logger.info('Initializing NN model with the following params:')
    _logger.info('Input dimension: %s (token vector size)' % TOKEN_REPRESENTATION_SIZE)
    _logger.info('Hidden dimension: %s' % HIDDEN_LAYER_DIMENSION)
    _logger.info('Output dimension: %s (token dict size)' % token_dict_size)
    _logger.info('Input seq length: %s ' % INPUT_SEQUENCE_LENGTH)
    _logger.info('Output seq length: %s ' % ANSWER_MAX_TOKEN_LENGTH)
    _logger.info('Batch size: %s' % SAMPLES_BATCH_SIZE)

    model = Sequential()
    seq2seq = SimpleSeq2seq(
        input_dim=TOKEN_REPRESENTATION_SIZE,
        input_length=INPUT_SEQUENCE_LENGTH,
        hidden_dim=HIDDEN_LAYER_DIMENSION,
        output_dim=token_dict_size,
        output_length=ANSWER_MAX_TOKEN_LENGTH,
        depth=1
    )

    model.add(seq2seq)
    model.compile(loss='mse', optimizer='rmsprop')

    # use previously saved weights if they exist; otherwise persist the fresh initialization
    _logger.info('Looking for a model %s' % NN_MODEL_PATH)

    if os.path.isfile(NN_MODEL_PATH):
        _logger.info('Loading previously calculated weights...')
        model.load_weights(NN_MODEL_PATH)
    else:
        model.save_weights(NN_MODEL_PATH)

    _logger.info('Model is built')
    return model
Example #18
def C3D_Sports1M(weights_path=None):
    model = Sequential()
    # 1st layer group
    model.add(Convolution3D(64, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv1',
                            subsample=(1, 1, 1),
                            input_shape=(3, 16, 112, 112)))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           border_mode='valid', name='pool1'))

    # 2nd layer group
    model.add(Convolution3D(128, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv2',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool2'))

    # 3rd layer group
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool3'))

    # 4th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool4'))

    # 5th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5b',
                            subsample=(1, 1, 1)))
    model.add(ZeroPadding3D(padding=(0, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool5'))
    model.add(Flatten())

    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    if weights_path:
        model.load_weights(weights_path)

    return model
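Illustrative only: the Sports1M C3D network consumes 16-frame RGB clips in channel-first order, so one clip is a (1, 3, 16, 112, 112) tensor and the output is a distribution over the 487 Sports-1M classes. The random clip below is a stand-in for real preprocessed video.

import numpy as np

model = C3D_Sports1M()                                   # weights_path omitted in this sketch
clip = np.random.rand(1, 3, 16, 112, 112).astype('float32')
probs = model.predict(clip)                              # predict needs no compile; shape (1, 487)
top5 = np.argsort(probs[0])[::-1][:5]                    # indices of the five most likely classes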
def vgg_train(weights=None):
    print "Compiling VGG Model..." 
    model = Sequential()
    # input: 64x64 images with 3 channels -> (3, 64, 64) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(3,64,64)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.50))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(256,W_regularizer=WeightRegularizer(l1=1e-6,l2=1e-6)))
    model.add(Activation('relu'))
    model.add(Dropout(0.50))

    model.add(Dense(200,W_regularizer=WeightRegularizer(l1=1e-5,l2=1e-5)))
    model.add(Activation('softmax'))
    if weights!=None: 
        model.load_weights(weights) 
    return model 
Example #20
File: model.py Project: blazer82/ai
class Model:
	def __init__(self, init='normal', activation='relu', batch_size=32, lr=1e-2, load=None):
		self.batch_size = batch_size
		self.model = Sequential()

		self.model.add(Convolution1D(8, 1, input_shape=(2, 4), init=init))
		self.model.add(Activation(activation))

		self.model.add(Flatten())

		self.model.add(Dense(8, init=init))
		self.model.add(BatchNormalization())
		self.model.add(Activation(activation))

		self.model.add(Dense(8, init=init))
		self.model.add(BatchNormalization())
		self.model.add(Activation(activation))

		self.model.add(Dense(2, init=init))

		if load != None:
			self.model.load_weights(load)

		self.model.compile(SGD(lr=lr), loss='mse')

	def predict(self, X):
		return self.model.predict(X.reshape((1,) + X.shape))[0]

	def learn(self, X, y):
		return self.model.fit(X, y, nb_epoch=1, batch_size=self.batch_size, shuffle=True, verbose=0)

	def save(self, filename):
		return self.model.save_weights(filename, overwrite=True)
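A hedged usage sketch for the wrapper above, with shapes read off the code: the network maps a (2, 4) observation to two regression targets, so training batches are (batch, 2, 4) inputs against (batch, 2) labels; the data here is random.

import numpy as np

m = Model(batch_size=16, lr=1e-2)
X = np.random.rand(64, 2, 4).astype('float32')   # 64 observations of shape (2, 4)
y = np.random.rand(64, 2).astype('float32')      # two regression targets each
m.learn(X, y)                                    # one epoch of MSE training
single_pred = m.predict(X[0])                    # predict() adds the batch axis itself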
Example #21
def part4(weights):
    global X_train, Y_train, X_test, Y_test
    size = 3
    model = Sequential()

    model.add(Convolution2D(32, size, size, border_mode="same", input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model = add_top_layer(model)

    model.load_weights(weights)

    model = copy_freeze_model(model, 4)

    # add top dense layer
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    X_train, Y_train, X_test, Y_test = load_data(10)

    model = train(model, auto=False)
    print("Classification rate %02.5f" % (model.evaluate(X_test, Y_test, show_accuracy=True)[1]))
def temporalNet(weights=None):
    model = Sequential()

    model.add(Convolution3D(30, 20, 17, 17, activation='relu', trainable=False, subsample=(4,2,2), input_shape=(1, 120,32,32)))


    model.add(MaxPooling3D(pool_size=(13, 2, 2), strides=(13,2, 2), trainable=False))

    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3, activation='relu', trainable=False))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), trainable=False))
    model.add(Flatten())


    model.add(Dense(400, activation='relu', trainable=False))

    model.add(Dense(50, activation='relu'))
    #model.add(Dense(14, activation='relu'))
    model.add(Dense(1, activation='relu'))

    if weights:
        model.load_weights(weights)

    return model
def get_conv(input_shape=(48, 48, 1), filename=None):
    model = Sequential()
#    model.add(Lambda(lambda x: (x-np.mean(x))/np.std(x),input_shape=input_shape, output_shape=input_shape))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv1',
                     input_shape=input_shape, padding="same"))
    model.add(Conv2D(32, (3, 3), activation='relu',
                     name='conv2', padding="same"))
    model.add(MaxPooling2D(pool_size=(4, 4)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), name='conv3', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), name='conv4', padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(4, 4)))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, (3, 3), activation="relu",
                     name="dense1"))  # This was Dense(128)
    model.add(Dropout(0.5))
    # This was Dense(1)
    model.add(Conv2D(1, (1, 1), name="dense2", activation="tanh"))
    if filename:
        model.load_weights(filename)
    return model
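A side note, my reading of the code rather than a claim from the source: because the head uses only Conv2D layers (the 'dense1'/'dense2' names are 3x3 and 1x1 convolutions, not Dense layers), the same weights fit a model rebuilt with a larger input_shape, turning the 48x48 patch scorer into a detection heatmap. A sketch of that reuse:

patch_model = get_conv()                               # 48x48 input -> a 1x1 output map
heatmap_model = get_conv(input_shape=(480, 640, 1))    # identical layers, larger input
heatmap_model.set_weights(patch_model.get_weights())   # kernels are shape-compatible because every layer is convolutional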
def spatialNet(weights=None):
    model = Sequential()
    leakyrelu = ELU()

    model.add(Convolution3D(30, 10, 17, 17, subsample=(2,2,2), trainable=False, input_shape=(1, 60,32,32)))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(MaxPooling3D(pool_size=(13, 2, 2),trainable=False, strides=(13,2, 2)))

    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3, trainable=False))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), trainable=False))
    model.add(Flatten())


    model.add(Dense(400, trainable=False))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(Dense(50))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))

    model.add(Dense(1, activation='sigmoid'))
    model.add(BatchNormalization(mode=2))

    if weights:
        model.load_weights(weights)

    return model
    def __init__(self, restore=None, session=None, Dropout=Dropout, num_labels=10):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = num_labels

        model = Sequential()

        nb_filters = 64
        layers = [Conv2D(nb_filters, (5, 5), strides=(2, 2), padding="same",
                         input_shape=(28, 28, 1)),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(2, 2), padding="valid"),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(1, 1), padding="valid"),
                  Activation('relu'),
                  Flatten(),
                  Dense(32),
                  Activation('relu'),
                  Dropout(.5),
                  Dense(num_labels)]

        for layer in layers:
            model.add(layer)

        if restore != None:
            model.load_weights(restore)
        
        self.model = model
def home_made_convnet(Xshape, Yshape):
    print('initializing model...')

    model = Sequential()
    model.add(ZeroPadding2D((2, 2), input_shape=(Xshape)))  # 0
    model.add(Convolution2D(64, 5, 5, activation='relu'))  # 1
    model.add(Convolution2D(64, 5, 5, activation='relu'))  # 3
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 4

    model.add(Convolution2D(128, 3, 3, activation='relu'))  # 6
    model.add(Convolution2D(128, 3, 3, activation='relu'))  # 8
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 9

    model.add(Flatten())  # 31
    model.add(Dense(256, activation='relu'))  # 32
    model.add(Dropout(0.5))  # 33
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(Yshape, activation='softmax'))

    model.load_weights('weights_f.h5')

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #27
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
Example #28
  def build(width, height, depth, num_classes, weights_path=None, dropout=True):
    model = Sequential()

    # First set of CONV => RELU => POOL.
    model.add(Convolution2D(20, 5, 5, border_mode='same',
                            input_shape=(height, width, depth)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Second set of CONV => RELU => POOL.
    model.add(Convolution2D(50, 5, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Last set of FC => RELU layers
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation('relu'))
    if dropout:
        model.add(Dropout(.5))

    # softmax classifier
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # If a weights path is supplied (indicating that the model was
    # pre-trained), then load the weights
    if weights_path is not None:
      model.load_weights(weights_path)

    # return the constructed network architecture
    return model
def CIFAR_10(img_rows, img_cols, img_channels=3, nb_classes=5, weights_path=None):
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
def Model(weights_path=None):
    model = Sequential()
    model.add(GaussianNoise(0.01, input_shape=(1, img_rows, img_cols)))
    model.add(Convolution2D(nb_filters[0], nb_conv, nb_conv,
                            border_mode='valid'))
    model.add(Activation('relu'))

    model.add(Convolution2D(nb_filters[1], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters[2], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters[3], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model