Example #1
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
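Note: predict_classes and predict_proba are legacy Sequential-only methods; they were removed from tf.keras in TensorFlow 2.6. A minimal modern equivalent, assuming a compiled model as above:

import numpy as np

probs = model.predict(x_test, verbose=0)    # replaces predict_proba
classes = np.argmax(probs, axis=-1)         # replaces predict_classes for softmax outputs
# for a single sigmoid unit use: (model.predict(x_test) > 0.5).astype("int32")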
Example #2
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
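Both tests above rely on module-level fixtures (_get_test_data and the num_*/nb_* constants) that the listing does not show. A minimal sketch of what they might look like, built on random toy data; all concrete values here are assumptions, not from the source:

import numpy as np
from keras.utils import np_utils

input_dim = 16
num_hidden = nb_hidden = 8
num_class = nb_class = 4
batch_size = 32
epochs = nb_epoch = 1

def _get_test_data(num_train=100, num_test=50):
    # random toy data is enough for these smoke tests
    np.random.seed(1234)
    x = np.random.random((num_train + num_test, input_dim))
    y = np_utils.to_categorical(
        np.random.randint(0, num_class, num_train + num_test), num_class)
    return (x[:num_train], y[:num_train]), (x[num_train:], y[num_train:])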
Example #3
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #4
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #5
    def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict([X_test, X_test], verbose=0)
        model.predict_classes([X_test, X_test], verbose=0)
        model.predict_proba([X_test, X_test], verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_merge_concat_temp.h5'
        model.save_weights(fname, overwrite=True)
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        assert(loss == nloss)
Example #6
def test_siamese_1():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
Example #7
def evaluate_keras_classification_model(X_train, X_test, y_train, y_test):
    X_train = X_train.astype(theano.config.floatX)
    X_test = X_test.astype(theano.config.floatX)

    print("First 3 labels: %s" % y_train[:3])

    y_train_ohe = np_utils.to_categorical(y_train)
    print('\nFirst 3 labels (one-hot):\n', y_train_ohe[:3])

    model = Sequential()
    model.add(Dense(
        input_dim=X_train.shape[1],
        output_dim=50,
        init='uniform',
        activation='tanh',
    ))
    model.add(Dense(
        input_dim=50,
        output_dim=50,
        init='uniform',
        activation='tanh',
    ))
    model.add(Dense(
        input_dim=50,
        output_dim=y_train_ohe.shape[1],
        init='uniform',
        activation='softmax',
    ))

    sgd = SGD(lr=0.001, decay=1e-7, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    model.fit(
        X_train,
        y_train_ohe,
        nb_epoch=5,
        batch_size=300,
        verbose=1,
        validation_split=0.1,
        show_accuracy=True,
    )

    y_train_pred = model.predict_classes(X_train, verbose=0)
    print('First 3 predictions: ', y_train_pred[:3])

    train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
    print("Training accuracy: %.2f%%" % (train_acc * 100))

    y_test_pred = model.predict_classes(X_test, verbose=0)
    test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
    print("Test accuracy: %.2f%%" % (test_acc * 100))
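A hypothetical driver for the function above; the digits dataset is an assumption, and any numeric feature matrix with integer labels would do:

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

digits = load_digits()
X_tr, X_te, y_tr, y_te = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=0)
evaluate_keras_classification_model(X_tr, X_te, y_tr, y_te)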
Example #8
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #9
class LSTMSentiment:

    def __init__(self):
        self.in_dim = 500
        self.n_prev = 25
        self.future = 50
        out_dim = 1           # unused below
        hidden_neurons = 500  # unused below
        self.max_length = 100
        max_features = 20000

        # Initializing a sequential Model
        self.model = Sequential()
        self.model.add(Embedding(max_features, 128, input_length=self.max_length))
        self.model.add(Dropout(0.2))
        # self.model.add(LSTM(output_dim=128, input_dim=500, activation='relu'))
        self.model.add(LSTM(128))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(1))
        self.model.add(Activation('linear'))

    def configureLSTMModel(self, TrainX, TrainY):
        print('Configuring the LSTM Model')
        self.model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
        self.model.fit(TrainX, TrainY, nb_epoch=10, batch_size=32, show_accuracy=True, validation_split=0.3)
        # , validation_data=(ValidX, ValidY))

    def evaluateLSTMModel(self, TestX, TestY):
        obj_sc, acc = self.model.evaluate(TestX, TestY, batch_size=32, show_accuracy=True)
        print('Objective Score : ', obj_sc)
        print('Accuracy : ', acc)

    def predictSentiment(self, testX):
        sentiment = self.model.predict_classes(testX, batch_size=32)
        return sentiment

    def printSummary(self):
        print(self.model.summary())

    def getTrainTestData(self):
        print('Loading Training and Test data')
        # context managers close the pickle files after loading
        with open('trainingdata.pkl', 'rb') as f:
            (trainX, trainY) = cPickle.load(f)
        with open('testingdata.pkl', 'rb') as f:
            (testX, testY) = cPickle.load(f)
        return ((trainX, trainY), (testX, testY))
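A hypothetical driver for the class above, assuming the two pickle files exist and already contain sequences padded to max_length:

lstm = LSTMSentiment()
(trainX, trainY), (testX, testY) = lstm.getTrainTestData()
lstm.configureLSTMModel(trainX, trainY)
lstm.evaluateLSTMModel(testX, testY)
print(lstm.predictSentiment(testX[:5]))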
Example #10
def evaluate_mlp_model(dataset,num_classes,extra_layers=0,num_hidden=512,dropout=0.5,graph_to=None,verbose=True):
    (X_train, Y_train), (X_test, Y_test) = dataset
    batch_size = 32
    nb_epoch = 5
    max_features = 20000
    maxlen = 125
    
    if verbose:
        print(len(X_train), 'train sequences')
        print(len(X_test), 'test sequences')
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        print('Y_train shape:', Y_train.shape)
        print('Y_test shape:', Y_test.shape)
        print('Building model...')
    model = Sequential()
    model.add(Dense(num_hidden))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    for i in range(extra_layers):
        model.add(Dense(num_hidden))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    plotter = Plotter(save_to_filepath=graph_to, show_plot_window=True)
    callbacks = [plotter] if graph_to else []
    history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size, verbose=1 if verbose else 0, show_accuracy=True, validation_split=0.1,callbacks=callbacks)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1 if verbose else 0, show_accuracy=True)
    if verbose:
        print('Test score:',score[0])
        print('Test accuracy:', score[1])
    predictions = model.predict_classes(X_test,verbose=1 if verbose else 0)
    return predictions,score[1]
Example #11
def evaluate_conv_model(dataset, num_classes, maxlen=125,embedding_dims=250,max_features=5000,nb_filter=300,filter_length=3,num_hidden=250,dropout=0.25,verbose=True,pool_length=2,with_lstm=False):
    (X_train, Y_train), (X_test, Y_test) = dataset
    
    batch_size = 32
    nb_epoch = 5

    if verbose:
        print('Loading data...')
        print(len(X_train), 'train sequences')
        print(len(X_test), 'test sequences')
        print('Pad sequences (samples x time)')
    
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

    if verbose:
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        print('Build model...')

    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(dropout))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    if pool_length:
        # we use standard max pooling (halving the output of the previous layer):
        model.add(MaxPooling1D(pool_length=2))
    if with_lstm:
        model.add(LSTM(125))
    else:
        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        #We add a vanilla hidden layer:
        model.add(Dense(num_hidden))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',optimizer='adam')
    model.fit(X_train, Y_train, batch_size=batch_size,nb_epoch=nb_epoch, show_accuracy=True,validation_split=0.1)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1 if verbose else 0, show_accuracy=True)
    if verbose:
        print('Test score:',score[0])
        print('Test accuracy:', score[1])
    predictions = model.predict_classes(X_test,verbose=1 if verbose else 0)
    return predictions,score[1]
Example #12
def model(labels, data, parent_id, go_id):

    # Training
    batch_size = 64
    nb_epoch = 64

    train, test = train_test_split(
        labels, data, batch_size=batch_size)
    train_label, train_data = train

    if len(train_data) < 100:
        raise Exception("No training data for " + go_id)

    test_label, test_data = test
    test_label_rep = test_label

    model = Sequential()
    model.add(Convolution1D(input_dim=20,
                            input_length=MAXLEN,
                            nb_filter=320,
                            filter_length=20,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=10, stride=10))
    model.add(Dropout(0.25))
    model.add(Convolution1D(nb_filter=32,
                            filter_length=32,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=8))
    model.add(LSTM(128))
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

    model_path = DATA_ROOT + parent_id + '/' + go_id + '.hdf5'
    checkpointer = ModelCheckpoint(
        filepath=model_path, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=7, verbose=1)

    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_split=0.2,
        callbacks=[checkpointer, earlystopper])

    # Loading saved weights
    print('Loading weights')
    model.load_weights(model_path)
    pred_data = model.predict_classes(
        test_data, batch_size=batch_size)
    return classification_report(list(test_label_rep), pred_data)
Example #13
def ConvJS(X_train, X_test, y_train, y_test):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(1, 36, 36)))
    model.add(Convolution2D(nb_filter = 64, nb_row = 3, nb_col = 3, border_mode='valid', activation = 'relu', init='glorot_normal'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(1,1)))
    model.add(Dropout(0.25))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(nb_filter = 64, nb_row = 3, nb_col = 3, border_mode='valid', activation = 'relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(1,1)))
    model.add(Dropout(0.25))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(nb_filter = 32, nb_row = 3, nb_col = 3, border_mode='valid', activation = 'relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(1,1)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(2,init='glorot_normal'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)  # pass the configured SGD instance, not the string 'sgd'
    model.fit(X_train, y_train, batch_size=2, nb_epoch=15, verbose=1, validation_split=0.2)
    y_pred = model.predict_classes(X_test)
    print("Multilayer 2d-conv net Result")
    print(classification_report(y_test[:,1], y_pred))
    return model
Example #14
def lstm_model(X_train, y_train, X_test, y_test):
    X_train = sequence.pad_sequences(X_train, maxlen=max_len, padding='post')
    X_test = sequence.pad_sequences(X_test, maxlen=max_len, padding='post')
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=max_len))
    model.add(LSTM(128))  # try using a GRU instead, for fun
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # print X_train.shape, y_train.shape
    # print X_test.shape, y_test.shape

    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")

    print("Train...")
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=4, validation_data=(X_test, y_test), show_accuracy=True)
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)  # evaluate returns (loss, accuracy) in that order
    print('Test score:', score)
    print('Test accuracy:', acc)
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print(precision, recall, f1, supp)

    return accuracy, precision, recall, f1
Example #15
class OcrModel(object):
    def __init__(self, shape_pixels, num_classes):
        # flattend input shape
        self.num_pixels = shape_pixels[0] * shape_pixels[1]

        self.model = Sequential()
        self.model.add(Dense(output_dim=self.num_pixels * 2, input_dim=self.num_pixels))
        self.model.add(Activation('sigmoid'))
        self.model.add(Dense(output_dim=num_classes))
        self.model.add(Activation('softmax'))

        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=SGD(lr=1, momentum=0.9, nesterov=True))

    def flatten_pixels(self, inputs):
        return inputs.reshape((-1, self.num_pixels))

    def train(self, inputs, labels, epochs=1):
        history = self.model.fit(self.flatten_pixels(inputs),
                                 labels,
                                 batch_size=inputs.shape[0],
                                 nb_epoch=epochs,
                                 verbose=0)
        # return loss of last epoch
        return history.history['loss'][-1]

    def predict(self, inputs):
        return self.model.predict_classes(self.flatten_pixels(inputs), verbose=0)
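A hypothetical usage of OcrModel with random 8x8 "glyphs"; the shapes and class count are assumptions:

import numpy as np
from keras.utils import np_utils

ocr = OcrModel(shape_pixels=(8, 8), num_classes=10)
inputs = np.random.random((32, 8, 8))
labels = np_utils.to_categorical(np.random.randint(0, 10, 32), 10)
print(ocr.train(inputs, labels, epochs=5))  # loss of the last epoch
print(ocr.predict(inputs[:3]))              # predicted class indices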
Example #16
def tipdm_chapter5_nn_test():
	# parameter initialization
	filename = '../../../MyFile/chapter5/data/sales_data.xls'
	data = pd.read_excel(filename, index_col = u'序号')	# load the data

	# the data are categorical labels; convert them to numeric form:
	# the attributes u'高' (high), u'好' (good) and u'是' (yes) map to 1, everything else to -1
	data[data == u'高'] = 1
	data[data == u'是'] = 1
	data[data == u'好'] = 1
	data[data != 1] = -1
	x = data.iloc[:,:3].as_matrix().astype(int)
	y = data.iloc[:,3].as_matrix().astype(int)

	# model and training
	# three input nodes, ten hidden nodes, one output node
	model = Sequential()
	model.add(Dense(10, input_dim = 3))
	model.add(Activation('relu'))	# relu as the activation function markedly improves accuracy here
	model.add(Dense(1, input_dim = 10))
	model.add(Activation('sigmoid'))	# sigmoid activation, since the output is 0-1

	# compilation before training : configure the learning process
	# this is binary classification, so we use the binary_crossentropy loss and binary class mode;
	# other common losses include mean_squared_error, categorical_crossentropy, etc.
	# we choose the adam optimizer; sgd, rmsprop and others are also available
	model.compile(loss = 'binary_crossentropy', optimizer = 'adam', class_mode = 'binary')

	# training and predict
	model.fit(x, y, nb_epoch = 500, batch_size = 10)	# train the model for 500 epochs
	yp = model.predict_classes(x).reshape(len(y))	# class predictions

	cm_plot(y, yp).show()
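cm_plot is an external helper that the snippet does not define; a minimal stand-in, assuming it draws a confusion matrix and returns matplotlib.pyplot, could look like this:

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def cm_plot(y, yp):
    cm = confusion_matrix(y, yp)  # rows are true labels, columns are predictions
    plt.matshow(cm, cmap=plt.cm.Blues)
    plt.colorbar()
    for i in range(len(cm)):
        for j in range(len(cm)):
            plt.annotate(cm[i, j], xy=(j, i), ha='center', va='center')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return plt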
Example #17
def bidirectional_lstm(X_train, y_train, X_test, y_test):
    X_train = sequence.pad_sequences(X_train, maxlen=max_len)
    X_test = sequence.pad_sequences(X_test, maxlen=max_len)
    lstm = LSTM(output_dim=64)
    gru = GRU(output_dim=64)  # original examples was 128, we divide by 2 because results will be concatenated
    brnn = Bidirectional(forward=lstm, backward=gru)
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=max_len))
    model.add(brnn)  # try using another Bidirectional RNN inside the Bidirectional RNN. Inception meets callback hell.
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
    # model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    print("Train...")
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=4, validation_data=(X_test, y_test), show_accuracy=True)
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print(precision, recall, f1, supp)

    return accuracy, precision, recall, f1
Example #18
def tune_model(X_train, y_train, X_test, y_test, settings):
	(optimizer, loss_func, activation, nb_epoch, LSTM_in, LSTM_out) = settings
	print("Loading data...")
	print(len(X_train), 'train sequences')
	print('X_train shape:', X_train.shape)

	# train LSTM so that we can extract representation
	print('Build model...')
	model = Sequential()
	model.add(Embedding(max_features, LSTM_in))
	model.add(LSTM(LSTM_in, LSTM_out))
	model.add(Dropout(0.5))
	model.add(Dense(LSTM_out, 1))
	model.add(Activation(activation))
	model.compile(loss=loss_func, optimizer=optimizer, class_mode="binary")

	print("Train...")

	model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
	    validation_split=0.1, show_accuracy=True, verbose=2)
	classes = model.predict_classes(X_test, batch_size=batch_size)
	acc = np_utils.accuracy(classes, y_test)
	print('LSTM accuracy:', acc)

	print('Building partial model...')
	# early fusion for testing, average over each application
	early_fusion_model = Sequential()
	early_fusion_model.add(Embedding(max_features, LSTM_in, 
		weights=model.layers[0].get_weights()))
	early_fusion_model.add(LSTM(LSTM_in, LSTM_out, 
		weights=model.layers[1].get_weights()))
	early_fusion_model.compile(loss=loss_func, 
		optimizer=optimizer, class_mode="binary")
	return early_fusion_model
Example #19
def feed_forward():
    # Loading data
    f = gzip.open('data/mnist.pkl.gz', 'rb')
    train_Xy, valid_Xy, test_Xy = pickle.load(f)
    f.close()
    
    X, y = train_Xy
    classes, y = np.unique(y, return_inverse=True)
    Y = np.zeros((y.shape[0], len(classes)))
    for i, yi in np.ndenumerate(y):
        Y[i, yi] = 1
    
    # Model definition
    nn = Sequential()
    nn.add(Dense(X.shape[1], 300, init='he_uniform', activation='relu'))
    nn.add(Dense(300, 300, init='he_uniform', activation='relu'))
    nn.add(Dense(300, 10, init='he_uniform', activation='softmax'))
    
    sgd = SGD(lr=0.1, decay=1e-9, momentum=0.5, nesterov=True)
    nn.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    # Training
    nn.fit(X, Y, nb_epoch=20, batch_size=16, verbose=2)
    
    # Evaluating
    Xval, yval = valid_Xy
    classes = {v: i for (i, v) in enumerate(classes)}
    yval = np.array([classes[yi] for yi in yval], dtype=int)
    ypred = nn.predict_classes(Xval, verbose=0)
    
    score = (ypred == yval).sum() / float(ypred.shape[0])

    print('Validation set accuracy: {0:.3f}.'.format(score))
Example #20
def model(df, parent_id, go_id):

    # Training
    batch_size = 64
    nb_epoch = 64

    # Split pandas DataFrame
    n = len(df)
    split = 0.8
    m = int(n * split)
    train, test = df[:m], df[m:]


    # train, test = train_test_split(
    #     labels, data, batch_size=batch_size)

    train_label, train_data = train['labels'], train['data']

    if len(train_data) < 100:
        raise Exception("No training data for " + go_id)

    test_label, test_data = test['labels'], test['data']
    test_label_rep = test_label


    train_data = train_data.as_matrix()

    test_data = test_data.as_matrix()
    train_data = numpy.hstack(train_data).reshape(train_data.shape[0], 8000)
    test_data = numpy.hstack(test_data).reshape(test_data.shape[0], 8000)
    shape = numpy.shape(train_data)

    print('X_train shape: ', shape)
    print('X_test shape: ', test_data.shape)
    model = Sequential()
    model.add(Dense(8000, activation='relu', input_dim=8000))
    model.add(Highway())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

    model_path = DATA_ROOT + parent_id + '/' + go_id + '.hdf5'
    checkpointer = ModelCheckpoint(
        filepath=model_path, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=7, verbose=1)

    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_split=0.2,
        callbacks=[checkpointer, earlystopper])

    # Loading saved weights
    print('Loading weights')
    model.load_weights(model_path)
    pred_data = model.predict_classes(
        test_data, batch_size=batch_size)
    return classification_report(list(test_label_rep), pred_data)
Example #21
def train(in_dim, out_dim, X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(100000, input_dim = in_dim, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(100000, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(out_dim, init='uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',\
            metrics=['accuracy'])

    hist = model.fit(X_train, Y_train, nb_epoch=5, batch_size=32,\
            validation_split=0.1, shuffle=True)
    print(hist.history)

    loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)

    classes = model.predict_classes(X_test, batch_size=32)

    proba = model.predict_proba(X_test, batch_size=32)
Example #22
    def RNN(self):
        featureNum = len(self.X[0]) // 6  # integer division so the reshape below gets an int
        X = np.empty((len(self.X), 6, featureNum))
        X_test = np.empty((len(self.X_test), 6, featureNum))
        self.X = self.X.reshape(len(self.X), featureNum, 6)
        self.X_test = self.X_test.reshape(len(self.X_test), featureNum, 6)
        for i in range(len(self.X)):
            X[i] = self.X[i].transpose()
        for i in range(len(self.X_test)):
            X_test[i] = self.X_test[i].transpose()

        np.random.seed(0)
        model = Sequential()
        model.add(SimpleRNN(20, batch_input_shape=(None, 6, 28)))
        model.add(Dropout(0.1))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        model.fit(X, self.y, verbose=2)
        predicted = model.predict_classes(X_test, verbose=0)
        # Final evaluation of the model
        scores = model.evaluate(X_test, self.expected, verbose=0)
        print("Accuracy: %.2f%%" % (scores[1]*100))
        self.ptLocal(self.fout, "Classification report for classifier:\n%s", \
            (metrics.classification_report(self.expected, predicted)))
        self.ptLocal(self.fout, "Confusion matrix:\n%s", \
            metrics.confusion_matrix(self.expected, predicted))
        self.ptLocal(self.fout, "Random pick successful rate: %.3f\n",\
            round(float(sum(self.expected)) / len(self.expected), 3))
Example #23
def mlp_model(X_train, y_train, X_test, y_test):
    tokenizer = Tokenizer(nb_words=1000)
    nb_classes = np.max(y_train) + 1

    X_train = tokenizer.sequences_to_matrix(X_train, mode="freq")
    X_test = tokenizer.sequences_to_matrix(X_test, mode="freq")

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    print("Building model...")
    model = Sequential()
    model.add(Dense(512, input_shape=(X_train.shape[1],)))  # matrix width comes from the tokenizer's nb_words, not max_len
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')

    history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size, verbose=1, show_accuracy=True, validation_split=0.1)
    model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1, show_accuracy=True)
    # print('Test score:', score[0])
    # print('Test accuracy:', score[1])
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    # print y_test
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print(precision, recall, f1, supp)

    return accuracy, precision, recall, f1
Example #24
def dnn_class(X_train,y_train,X_test,y_test):
    
    model=Sequential()
    model.add(Dense(1000,input_dim=1583,kernel_initializer="glorot_uniform",activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(1000,kernel_initializer="glorot_uniform",activation="relu"))
    model.add(BatchNormalization())
    # model.add(Dropout(0.6))
    model.add(Dense(1,activation="sigmoid",kernel_initializer="glorot_uniform"))
    adam=Adam(lr=0.01)
    sgd=SGD(lr=0.01, momentum=0.01, decay=0.0, nesterov=True)
    rms=RMSprop(lr=0.005)
    model.compile(optimizer=adam,loss="binary_crossentropy",metrics=["accuracy"])
    callbacks=[EarlyStopping(monitor='val_loss', patience=2),
             ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
    print(model.summary())
    model.fit(X_train,y_train,batch_size=4,epochs=50,verbose=1,callbacks=callbacks,validation_data=(X_test,y_test))
    pkl_filename = "keras_model.joblib"
    with open(pkl_filename, 'wb') as file:
        joblib.dump(model, file)
    y_pred=model.predict_classes(X_test)
    print(y_pred)
    evaluation2(y_test,y_pred,model,X_test,X_train,y_train)
Example #25
def brain(x_train, y_train, x_test, y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation
    from keras.optimizers import SGD

    number_of_classes = y_train.shape[1]
    model = Sequential()

    model.add(Dense(output_dim=64, input_dim=x_train.shape[1]))
    model.add(Activation("relu"))
    model.add(Dense(output_dim=number_of_classes))
    model.add(Activation("sigmoid"))

    #model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',#SGD(lr=0.01, momentum=0.9, nesterov=True),
                  metrics=['accuracy']
                 )
    model.fit(x_train, y_train, nb_epoch=5, batch_size=32)

    loss_and_metrics = model.evaluate(x_test, y_test, batch_size=32)
    print("Metrics:")
    print(loss_and_metrics)

    classes = model.predict_classes(x_test, batch_size=32)
    proba = model.predict_proba(x_test, batch_size=32)
Example #26
 def generateModel(self,docSeries):
     topics = list(docSeries.topicSeries.keys())  # list() so topics[j] works on Python 3
     seriesLength = 50
     sequenceTuples = []
     for j in range(len(topics)):
         topic = topics[j]
         topicLength = len(docSeries.topicSeries[topic])
         for i in range(0,topicLength):
             if i+seriesLength < topicLength:
                 sequenceTuples.append((docSeries.topicSeries[topic][i:i+seriesLength],j))
     random.shuffle(sequenceTuples)
     X = []
     y = []
     for s,l in sequenceTuples:
         X.append(s)
         y.append(l)
     X = np.array(X).astype(np.uint8)
     y = np_utils.to_categorical(np.array(y)).astype(np.bool)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
     print(len(X_train), len(y_train))
     print(X.shape, y.shape)
     model = Sequential()
     model.add(Embedding(50, 64, input_length = seriesLength, mask_zero = True))
     model.add(LSTM(64,init='glorot_uniform',inner_init='orthogonal',activation='tanh',inner_activation='hard_sigmoid',return_sequences=False))
     model.add(Dropout(0.5))
     model.add(Dense(len(topics)))
     model.add(Activation('softmax'))
     model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')
     early_stopping = EarlyStopping(patience=5, verbose=1)
     model.fit(X_train, y_train,nb_epoch=20,show_accuracy=True,verbose=1,shuffle=True)
     preds = model.predict_classes(X_test, batch_size=64, verbose=0)
     print('\n')
     print(classification_report(np.argmax(y_test, axis=1), preds, target_names=topics))
Example #27
def test_merge_recursivity():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    righter = Sequential()
    righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
    righter.add(Activation('relu'))

    intermediate = Sequential()
    intermediate.add(Merge([left, right], mode='sum'))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([intermediate, righter], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
    assert(loss < 0.7)

    model.predict([X_test, X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_recursivity_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
    assert(loss == nloss)
Example #28
def model(
        train_data, train_label, val_data, val_label, test_data, test_label):
    # set parameters:
    max_features = 50000
    batch_size = 64
    embedding_dims = 100
    nb_filters = 250
    hidden_dims = 250
    nb_epoch = 12

    # pool lengths
    pool_length = 2
    # level of convolution to perform
    filter_length = 3

    # length of APAAC
    maxlen = 20 + 2 * LAMBDA

    test_label_rep = test_label

    # Convert labels to categorical
    nb_classes = max(train_label) + 1
    train_label = np_utils.to_categorical(train_label, nb_classes)
    val_label = np_utils.to_categorical(val_label, nb_classes)
    test_label = np_utils.to_categorical(test_label, nb_classes)

    model = Sequential()
    model.add(Embedding(max_features, embedding_dims))
    model.add(Dropout(0.25))
    model.add(Convolution1D(
        input_dim=embedding_dims,
        nb_filter=nb_filters,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(Flatten())
    output_size = nb_filters * (((maxlen - filter_length) // 1) + 1) // 2  # integer division: the old Dense(input, output) API needs an int
    model.add(Dense(output_size, hidden_dims))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    model.add(Dense(hidden_dims, nb_classes))
    model.add(Activation('sigmoid'))
    model.compile(
        loss='categorical_crossentropy', optimizer='adam')
    # weights_train = [1.0 if y == 1 else 1.0 for y in train_label]
    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_data=(val_data, val_label))
    score = model.evaluate(
        test_data, test_label,
        batch_size=batch_size, verbose=1, show_accuracy=True)
    print("Loss:", score[0], "Accuracy:", score[1])
    pred_data = model.predict_classes(test_data, batch_size=batch_size)
    print(classification_report(list(test_label_rep), pred_data))
Example #29
class CNNclassifier:
    def __init__(self):
        pass

    def load_model(self):
        self.model = Sequential()
        self.__add_convolutional_layers()
        self.__do_flattening()
        self.__add_fully_connected_layers()
        self.__add_optimizer()


    def __add_convolutional_layers(self):
        # first convolutional layer
        self.model.add(ZeroPadding2D((1,1),input_shape=(1,28,28)))
        self.model.add(Convolution2D(32,3,3, activation='relu'))

        # second convolutional layer
        self.model.add(ZeroPadding2D((1,1)))
        self.model.add(Convolution2D(48,3,3, activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))

        # third convolutional layer
        self.model.add(ZeroPadding2D((1,1)))
        self.model.add(Convolution2D(32,3,3, activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))

    def __do_flattening(self):
        # flatten the convolutional feature maps so they can be fed to
        # fully connected layers
        self.model.add(Flatten())

    def __add_fully_connected_layers(self):
        # first fully connected layer
        self.model.add(Dense(128, init='lecun_uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.25))

        # second fully connected layer
        self.model.add(Dense(128, init='lecun_uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.25))

        # last fully connected layer which output classes
        self.model.add(Dense(10, init='lecun_uniform'))
        self.model.add(Activation('softmax'))

    def __add_optimizer(self):
        sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd)

    def fit(self, train_x, train_y):
        self.model.fit(train_x, train_y, \
                        nb_epoch=10, batch_size=1000, \
                            validation_split=0.2, show_accuracy=True)

    def predict(self, test_x):
        return self.model.predict_classes(test_x, verbose=0)
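A hypothetical driver for CNNclassifier on MNIST; the Theano-ordered (1, 28, 28) input follows the input_shape used above, everything else is an assumption:

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils

(train_x, train_y), (test_x, _) = mnist.load_data()
train_x = train_x.reshape(-1, 1, 28, 28).astype('float32') / 255.0
test_x = test_x.reshape(-1, 1, 28, 28).astype('float32') / 255.0
train_y = np_utils.to_categorical(train_y, 10)

clf = CNNclassifier()
clf.load_model()
clf.fit(train_x, train_y)
print(clf.predict(test_x[:5]))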
Example #30
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #31
from keras.preprocessing import image
gen = image.ImageDataGenerator()

#split
from sklearn.model_selection import train_test_split

X=X_train
y=y_train

X_train,X_val,y_train,y_val=train_test_split(X_train,y_train,test_size=0.2)

batches=gen.flow(X_train,y_train,batch_size=64)
val_batches=gen.flow(X_val,y_val,batch_size=64)



# steps_per_epoch/validation_steps count batches, not samples, so use len(batches) rather than batches.n
history = model.fit_generator(generator=batches,steps_per_epoch=len(batches),
                              epochs=2,validation_data=val_batches,
                              validation_steps=len(val_batches))

from keras import backend as K
K.set_value(model.optimizer.lr, 0.01)  # plain attribute assignment does not reliably update the compiled optimizer
batches=gen.flow(X,y,batch_size=64)
history=model.fit_generator(generator=batches,steps_per_epoch=len(batches),epochs=2)

predictions=model.predict_classes(X_test,verbose=0)
submissions=pd.DataFrame({"ImageId":list(range(1,len(predictions)+1)),"Label":predictions})

submissions.to_csv("fullynormalnn1.csv",index=False,header=True)

Example #32
model = Sequential()
model.add(Dense(units=128, activation="relu", input_shape=(784, )))
model.add(Dense(units=128, activation="relu"))
model.add(Dense(units=128, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(optimizer=SGD(0.001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
#model.load_weights("mnistmodel.h5")

model.fit(train_x, train_y, batch_size=32, epochs=10, verbose=1)
model.save("mnistmodel.h5")

img = image.load_img(path="testimage1.png",
                     grayscale=True,
                     target_size=(28, 28, 1))
img = image.img_to_array(img)
test_img = img.reshape((1, 784))

img_class = model.predict_classes(test_img)
classname = img_class[0]
print("Class: ", classname)

img = img.reshape((28, 28))
plt.imshow(img)
plt.title(classname)
plt.show()
Example #33
try:
  combine_model.fit( X , Y , batch_size = 32 , nb_epoch=10, shuffle=True , validation_split=0.1 , show_accuracy=True)
except KeyboardInterrupt:
  print('Stop')

del prob_cv_1
del prob_cv_2
del X
del Y



X_te = np.concatenate( (prob_te_1,prob_te_2) , axis=1 )

ans2 = combine_model.predict_classes(X_te)

f_pred = open( 'LSTM_modified_'+pred_filename , 'w')
w = csv.writer(f_pred)
w.writerow(['Id','Prediction'])
predictions = []
for n,a in itr.izip(name,ans2):
  predictions.append( [n , dict_48_39[ label_set[int(a)] ]] )
w.writerows(predictions)


f_pred_48 = open('48_LSTM__modified_'+pred_filename , 'w')
w1 = csv.writer(f_pred_48)
w1.writerow(['Id','Prediction'])
predictions = []
for n,a in itr.izip(name,ans2):
  predictions.append( [n , label_set[int(a)] ] )  # assumed completion: mirrors the loop above, minus the 48-to-39 mapping
w1.writerows(predictions)
Example #34
classifier.fit_generator(
    training_set,
    steps_per_epoch=11,  ## train images
    epochs=10,  ## approx 30min/epoch on gpu tf backend
    validation_data=test_set,
    validation_steps=11)  ## test images

classifier.save('cat_dog_classifier.h5')

## Acc - 89% Loss - 25%

############################################################## Single Prediction with CNN ##########################################

from skimage.io import imread
from skimage.transform import resize
import numpy as np

class_labels = {v: k for k, v in training_set.class_indices.items()}

img = imread('FILEPATH')
img = resize(img, (128, 128))
img = np.expand_dims(img, axis=0)

if np.max(img) > 1:
    img = img / 255.0

prediction = classifier.predict_classes(img)

print(class_labels[prediction[0][0]])
Example #35
class SceneNetwork:
    def __init__(self):
        self.model = None

    def build_model(self, input_shape):
        self.model = Sequential()
        self.conv_block(32, input_shape)
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.conv_block(64)
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.conv_block(128)
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.conv_block(256)
        self.model.add(GlobalAveragePooling2D())
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dense(15, activation='softmax'))

        optimizer = adam()

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
        self.model.summary()
        # We do not concatenate. Is it necessary?
        # self.model.add(Dense(1024, activation='relu'))
        # self.model.add(Dense(15, activation='softmax'))

    def conv_block(self, n_filters, input_shape=None):
        if input_shape is not None:
            self.model.add(BatchNormalization(input_shape=input_shape))
        else:
            self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(n_filters, (3, 3), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(n_filters, (3, 3), padding='same'))

    def train_model(self,
                    x_train,
                    y_train,
                    x_val,
                    y_val,
                    batch_size=32,
                    epochs=50):
        history = self.model.fit(x_train,
                                 y_train,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 shuffle=True,
                                 validation_data=(x_val, y_val))
        # list all data in history
        print(history.history.keys())
        # summarize history for accuracy
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
        # summarize history for loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

    def evaluate_model(self, x_eval, y_eval):
        scores = self.model.evaluate(x_eval, y_eval, verbose=1)
        print('Test loss:', scores[0])
        print('Test accuracy:', scores[1])
        y_pred = self.model.predict_classes(x_eval, batch_size=32, verbose=1)
        y_real = np.argmax(y_eval, axis=1)
        matrix = metrics.confusion_matrix(y_real, y_pred)
        print(matrix)

    def train_model_generator(self,
                              train_generator,
                              validation_generator=None,
                              epochs=50):
        self.model.fit_generator(generator=train_generator,
                                 validation_data=validation_generator,
                                 epochs=epochs)

    def evaluate_model_generator(self, evaluate_generator):
        scores = self.model.evaluate_generator(generator=evaluate_generator,
                                               verbose=1)
        print('Test loss:', scores[0])
        print('Test accuracy:', scores[1])

    def save_model(self, model_path):
        self.model.save(model_path)
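# A minimal usage sketch (the input shape and variable names below are
# illustrative assumptions, not from the original):
# net = SceneNetwork()
# net.build_model(input_shape=(224, 224, 3))
# net.train_model(x_train, y_train, x_val, y_val, batch_size=32, epochs=50)
# net.evaluate_model(x_test, y_test)
# net.save_model('scene_network.h5')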
Example #36
0
print(model.summary())

# Visualize the model
plot_model(model, to_file='model.png')

# Train the model
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1)

# Save the model
model.save_weights("weights")
with open("model.json", "w") as f:
    f.write(model.to_json())

# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
y_pred = model.predict_classes(x_test)
y_true = np.argmax(y_test, axis=1)
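# Note: class 0 is treated as the positive class below, so a "true positive"
# is a sample with y_true == 0 that is predicted as 0, and N_score is the
# specificity TN / (TN + FP).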

TP = np.sum(np.logical_and(np.equal(y_true, 0), np.equal(y_pred, 0)))
FP = np.sum(np.logical_and(np.equal(y_true, 1), np.equal(y_pred, 0)))
TN = np.sum(np.logical_and(np.equal(y_true, 1), np.equal(y_pred, 1)))
FN = np.sum(np.logical_and(np.equal(y_true, 0), np.equal(y_pred, 1)))

precision = TP / (TP + FP)
recall = TP / (TP + FN)
F_score = (2 * precision * recall) / (precision + recall)
N_score = TN / (TN + FP)

print('\n')
print('loss: ', loss)
print('Accuracy: ', accuracy)
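# Also report the derived metrics computed above:
print('Precision: ', precision)
print('Recall: ', recall)
print('F-score: ', F_score)
print('Specificity (N_score): ', N_score)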
Example #37
0
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])  # assemble the network
# The CNN is now complete!
model.summary()
print("model built")
#%%
model.fit(X_train, Y_train, batch_size=50, epochs=12)
print("model訓練完成")
evalu = model.evaluate(X_test, Y_test)  #結果測試-分數
print('loss=', evalu[0])
print('acc=', evalu[1])
#%%
model.save('my_model_10000.h5')
print("model儲存完成")
#%%讀取原有model
model = tf.contrib.keras.models.load_model('my_model_10000.h5')
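# Note: tf.contrib was removed in TF 2.x; the current equivalent is
# tf.keras.models.load_model('my_model_10000.h5').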
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

#%%
predict = model.predict_classes(X_test)
#%% check prediction correctness
rad = np.random.randint(0, 3000)
print("預測為:", predict[rad])
print("實際為:", Y_test[rad])
image = X_test[rad].reshape(400, 400)
plt.figure(num='haha', figsize=(8, 8))
plt.imshow(image, cmap='gray')
Example #38
0
import matplotlib.pyplot as plt

accuracy = history.history['acc']
val_accuracy = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

#get the predictions for the test data
predicted_classes = model.predict_classes(X_test)

#get the indices to be plotted
y_true = data_test.iloc[:, 0]
correct = np.nonzero(predicted_classes == y_true)[0]
incorrect = np.nonzero(predicted_classes != y_true)[0]

from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(num_classes)]
print(
    classification_report(y_true, predicted_classes,
                          target_names=target_names))
Example #39
0
X_new = np.reshape(X, (len(X), seq_length, 1))
X_new = X_new / float(len(characters))
y_new = np_utils.to_categorical(y)
del (y, text)

#create model
model = Sequential()
model.add(
    LSTM(400,
         input_shape=(X_new.shape[1], X_new.shape[2]),
         return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(400))
model.add(Dropout(0.2))
model.add(Dense(y_new.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

model.fit(X_new, y_new, epochs=1, batch_size=100)

#text gen
string_mapped = X[555555]
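# Slide a fixed-length window over the text: each step appends the predicted
# character index and drops the oldest one, so the model always sees the last
# seq_length characters.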
for i in range(seq_length):
    x = np.reshape(string_mapped, (1, len(string_mapped), 1))
    x = x / float(len(characters))
    pred_index = model.predict_classes(x, verbose=0)
    seq = [n_to_char[value] for value in string_mapped]
    string_mapped.append(pred_index[0])
    string_mapped = string_mapped[1:len(string_mapped)]

print(''.join(seq))
Example #40
0
class Model:
    def __init__(self):
        self.model = None

    def evaluate(self, dataset):
        score = self.model.evaluate(dataset.test_images,
                                    dataset.test_labels,
                                    verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))

    MODEL_PATH = './train.model.h5'

    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)

    # Build the model
    def build_model(self, dataset, nb_classes=2):
        # Create an empty Sequential model: a linear stack to which the
        # network layers will be added in order
        self.model = Sequential()

        # The calls below add the layers the CNN needs; each add() appends one layer
        self.model.add(
            Convolution2D(32,
                          3,
                          3,
                          border_mode='same',
                          input_shape=dataset.input_shape))  # 1: 2D conv layer
        self.model.add(Activation('relu'))  # 2: activation layer
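        # Note: Convolution2D(32, 3, 3, border_mode='same') is Keras 1 syntax;
        # the Keras 2 equivalent is Conv2D(32, (3, 3), padding='same').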

        self.model.add(Convolution2D(32, 3, 3))  # 3: 2D conv layer
        self.model.add(Activation('relu'))  # 4: activation layer

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5: pooling layer
        self.model.add(Dropout(0.25))  # 6: dropout layer

        self.model.add(Convolution2D(64, 3, 3, border_mode='same'))  # 7: 2D conv layer
        self.model.add(Activation('relu'))  # 8: activation layer

        self.model.add(Convolution2D(64, 3, 3))  # 9: 2D conv layer
        self.model.add(Activation('relu'))  # 10: activation layer

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11: pooling layer
        self.model.add(Dropout(0.25))  # 12: dropout layer

        self.model.add(Flatten())  # 13: flatten layer
        self.model.add(Dense(512))  # 14: dense (fully connected) layer
        self.model.add(Activation('relu'))  # 15: activation layer
        self.model.add(Dropout(0.5))  # 16: dropout layer
        self.model.add(Dense(nb_classes))  # 17: dense layer
        self.model.add(Activation('softmax'))  # 18: classification layer, final output

        # Print a model summary
        self.model.summary()

    # Train the model
    def train(self,
              dataset,
              batch_size=20,
              nb_epoch=8,
              data_augmentation=True):
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9,
                  nesterov=True)  # train with an SGD+momentum optimizer
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # the actual model configuration

        # Without data augmentation: "augmentation" creates new training samples
        # from the provided data via rotations, flips, added noise, etc.,
        # deliberately enlarging the training set
        if not data_augmentation:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           nb_epoch=nb_epoch,
                           validation_data=(dataset.valid_images,
                                            dataset.valid_labels),
                           shuffle=True)
        # With real-time data augmentation
        # else:
        #     # Define a data generator for augmentation; it returns a generator
        #     # object, datagen, that yields one batch per call (generated in
        #     # order), saving memory -- essentially a Python generator
        #     datagen = ImageDataGenerator(
        #         featurewise_center=False,  # whether to center the input data (zero mean)
        #         samplewise_center=False,  # whether to zero-center each sample
        #         featurewise_std_normalization=False,  # whether to divide inputs by the dataset std
        #         samplewise_std_normalization=False,  # whether to divide each sample by its own std
        #         zca_whitening=False,  # whether to apply ZCA whitening
        #         rotation_range=20,  # random rotation angle during augmentation (0-180)
        #         width_shift_range=0.2,  # random horizontal shift (fraction of image width, 0-1)
        #         height_shift_range=0.2,  # same as above, but vertical
        #         horizontal_flip=True,  # whether to apply random horizontal flips
        #         vertical_flip=False)  # whether to apply random vertical flips

        #     # Fit on the full training set, needed for feature normalization,
        #     # ZCA whitening, etc.
        #     datagen.fit(dataset.train_images)

        #     # Train the model from the generator
        #     self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
        #                                           batch_size=batch_size),
        #                              samples_per_epoch=dataset.train_images.shape[0],
        #                              nb_epoch=nb_epoch,
        #                              validation_data=(dataset.valid_images, dataset.valid_labels))

    # Predict
    def face_predict(self, image):
        # The dimension ordering again depends on the backend
        if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE,
                                                              IMAGE_SIZE):
            image = resize_image(
                image)  # size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape(
                (1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, predict on a single image
        elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE,
                                                                IMAGE_SIZE, 3):
            image = resize_image(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
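        # Note: K.image_dim_ordering() and the 'th'/'tf' values are legacy
        # Keras APIs; current Keras uses K.image_data_format() with
        # 'channels_first'/'channels_last'.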

        # Convert to float and normalize
        image = image.astype('float32')
        image /= 255

        # Probabilities of the input belonging to each class; for this binary
        # task, the probabilities of class 0 and class 1
        result = self.model.predict_proba(image)
        print('result:', result)

        # Class prediction: 0 or 1
        result = self.model.predict_classes(image)

        # Return the predicted class
        return result[0]
Example #41
0
                  optimizer=Adam(),
                  metrics=['accuracy'])
    #)
    X_test_fea = vectorizer.transform(X_test)
    history = model.fit(X_train_fea,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(X_test_fea, y_test),
                        callbacks=callbacks_list)

    score = model.evaluate(X_test_fea, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    predicte = model.predict_classes(X_test_fea)
    print(predicte)
    print(f1_score(y_f1_test, predicte, average=None))
    with open(''.join(('tfidf', str(idx), 'uk.pkl')), 'wb') as f:  # binary mode for pickle on Python 3
        pkl.dump(vectorizer, f)
    '''
    clf.fit(X_train_fea, y_train) 
    X_test_fea = vectorizer.transform(X_test)
    predicted = clf.predict(X_test_fea)
    print predicted
    print(np.mean(y_test==predicted))
    #trace_fea = vectorizer.transform(trace_data)
    #predicted = clf.predict(trace_fea)
    #print(np.mean(trace_label==predicted))
    with open(''.join(('tfidf', str(idx), 'uk.pkl')), 'w') as f:
        pkl.dump(vectorizer, f)
    '''
Example #42
0
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
plt.style.use(['classic'])
#%%

score = model.evaluate(X_test, y_test,verbose=0)
print('Test Loss:', score[0])
print('Test accuracy:', score[1])
#%%
test_image = X_test[0:1]
print (test_image.shape)

print(model.predict(test_image))
print(model.predict_classes(test_image))
print(y_test[0:1])
#%%

test_image = cv2.imread('testSet/img_1006.jpg')
test_image=cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
test_image=cv2.resize(test_image,(128,128))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image /= 255
print (test_image.shape)
   
if num_channel == 1:
    if K.common.image_dim_ordering() == 'th':
        test_image = np.expand_dims(test_image, axis=0)
        test_image = np.expand_dims(test_image, axis=0)
Example #43
0
    def test_merge_overlap(self):
        print('Test merge overlap')
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim, )))
        left.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, left], mode='sum'))
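        # Note: Merge and the show_accuracy flag used here are legacy
        # (pre-Keras-2) APIs; Keras 2 uses the functional API (e.g.
        # keras.layers.add) and metrics=['accuracy'] instead.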

        model.add(Dense(nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  show_accuracy=True,
                  verbose=1,
                  validation_data=(X_test, y_test))
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  show_accuracy=False,
                  verbose=2,
                  validation_data=(X_test, y_test))
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  show_accuracy=True,
                  verbose=2,
                  validation_split=0.1)
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  show_accuracy=False,
                  verbose=1,
                  validation_split=0.1)
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  verbose=0)
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  verbose=1,
                  shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict(X_test, verbose=0)
        classes = model.predict_classes(X_test, verbose=0)
        probas = model.predict_proba(X_test, verbose=0)
        print(model.get_config(verbose=1))

        model.save_weights('temp.h5', overwrite=True)
        model.load_weights('temp.h5')

        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
        assert (loss == nloss)
Example #44
0
              metrics=[crf.accuracy])
print(model.summary())

# fit model
history = model.fit(Xtrain,
                    ytrain,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(Xdev, ydev),
                    verbose=1)

# model evaluate
score, acc = model.evaluate(Xtest, ytest, verbose=1)

#################################################################################################
yhat_test = model.predict_classes(Xtest, verbose=1)
np.save(filepath + '/predict_test.npy', arr=yhat_test)

yhat_train = model.predict_classes(Xtrain, verbose=1)
np.save(filepath + '/predict_train.npy', arr=yhat_train)

yhat_dev = model.predict_classes(Xdev, verbose=1)
np.save(filepath + '/predict_dev.npy', arr=yhat_dev)

##################################################################################################
with open(filepath + '/model_loss_accuracy', 'w+') as file:
    file.write('bidirectional lstm base model with adadelta optimizer\n')
    file.write('epochs:' + str(epochs) + '\n')
    file.write('test score : ' + str(score) + '\n')
    file.write('test accuracy: ' + str(acc) + '\n')
    file.write('\n')
Example #45
0
model.add(MaxPool2D(pool_size=(2, 2)))

#Second set of layers
#Convolutional Layer 1
#model.add(Conv2D(filters=64,kernel_size=(4,4),input_shape=(28,28,1),activation='relu'))
#Convolutional Layer 2
#model.add(Conv2D(filters=64,kernel_size=(4,4),input_shape=(28,28,1),activation='relu'))
#Pooling Layer
#model.add(MaxPool2D(pool_size=(2,2)))

#Flattening from 28 by 28 to 784
model.add(Flatten())
#Hidden Layer
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.summary()

model.fit(x_train, y_cat_train, verbose=1, epochs=10)

model.metrics_names

model.evaluate(x_test, y_cat_test)

from sklearn.metrics import classification_report
predictions = model.predict_classes(x_test)
print(classification_report(y_test, predictions))
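# On TF >= 2.6, predict_classes is gone; the multi-class equivalent of the
# call above is:
# predictions = np.argmax(model.predict(x_test), axis=-1)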
Example #46
0
# from tensorflow.keras.layers import Dense, Activation

# Read the data and build the training/test features and labels
data_train = pd.read_excel('../../data2/C10_train_neural_network_data.xls')
data_test = pd.read_excel('../../data2/C10_test_neural_network_data.xls')

x_train = data_train.iloc[:, 5:17].values
y_train = data_train.iloc[:, 4].values
x_test = data_test.iloc[:, 5:17].values
y_test = data_test.iloc[:, 4].values

# Build a multi-layer neural network
neural_model = Sequential()
neural_model.add(Dense(17, input_dim=12, activation='relu'))  # input layer -> hidden layer 1 (iloc[:, 5:17] gives 12 features, not 11)
# model.add(Activation('relu'))
neural_model.add(Dense(10, input_dim=17, activation='relu'))  # hidden layer 1 -> hidden layer 2
# model.add(Activation('relu'))
neural_model.add(Dense(1, input_dim=10, activation='sigmoid'))  # hidden layer 2 -> output layer
# model.add(Activation('sigmoid'))

neural_model.compile(loss='binary_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])
neural_model.fit(x_train, y_train, epochs=200, batch_size=1)
neural_model.save_weights('../outputfiles/二层神经网络权重.model')  # path: "two-layer NN weights"

r = pd.DataFrame(neural_model.predict_classes(x_test), columns=['预测结果'])  # column name: "prediction result"
pd.concat([data_test.iloc[:, :5], r],
          axis=1).to_excel('../outputfiles/测试集测试结果.xls')  # path: "test-set results"
print(neural_model.predict(x_test))  # show the predicted probabilities (predict_classes above gives the 0/1 labels)
Example #47
0
    # train validation compare
    X_train_tmp, X_test_tmp, y_train_tmp, y_test_tmp = train_test_split(
        X_train_sequences,
        cuis_preprocess.y_train,
        test_size=0.2,
        random_state=1)
    train_pred = model.predict_proba(x=X_train_tmp, batch_size=1)
    fpr, tpr, _ = roc_curve(y_train_tmp, train_pred)
    print("max model training roc auc: " + str(auc(fpr, tpr)))
    val_pred = model.predict_proba(x=X_test_tmp, batch_size=1)
    fpr, tpr, _ = roc_curve(y_test_tmp, val_pred)
    print("max model validation roc auc: " + str(auc(fpr, tpr)))

    # graph
    class_pred = model.predict_classes(X_test_sequences, batch_size=16)
    acc = accuracy_score(cuis_preprocess.y_test, class_pred)
    print("deep max model test set accuracy " + str(acc))

    prob_pred = model.predict_proba(X_test_sequences, batch_size=16)
    roc_auc = roc_auc_score(cuis_preprocess.y_test, prob_pred) * 100
    print('{:0.2}'.format(roc_auc))

    loss = x.history['loss']
    val_loss = x.history['val_loss']
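    # 'x' here is assumed to be the History object returned by model.fit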
    epochs = range(1, len(loss) + 1)
    plt.plot(epochs, loss, color='b', label='Training loss')
    plt.plot(epochs, val_loss, color='g', label='Validation loss')
    plt.title('Training and Validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')