Example #1
def TrainAndValidation1(X_train, y_train, X_test, y_test, bEarlyStopByTestData=True):

    print("#\tTraining shape:", X_train.shape)
    print("#\tTraining label:", y_train.shape)
    #============================================
    # Model preparation
    #============================================
    model = Sequential()
    model.add(Dense(output_dim=64, input_dim=X_train.shape[1], init='uniform'))
    model.add(Activation(DEEP_AVF))

    model.add(Dense(64, init='uniform'))
    model.add(Activation(DEEP_AVF))

    model.add(Dense(sparkcore.WORKING_KLABEL, init='uniform'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=DEEP_SGDLR, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=DEEP_LOSSFUNC, optimizer=sgd)

    model.fit(X_train, y_train, nb_epoch=5)
    score0 = model.evaluate(X_train, y_train, batch_size=DEEP_BSIZE)

    if X_test is not None:
        score1 = model.evaluate(X_test, y_test, batch_size=DEEP_BSIZE)
        predicted = model.predict(X_test)
        v_precision, v_recall, TP, FP, TN, FN, thelogloss = sparkcore.MyEvaluation(y_test, predicted)
        return score0, score1, v_precision, v_recall, TP, FP, TN, FN, thelogloss, model
    else:
        return score0, 0, 0, 0, 0, 0, 0, 0, 0, model
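Example #1 leans on module-level constants and a sparkcore helper that are defined elsewhere in its project. Purely for illustration, a hypothetical set of values consistent with how they are used above:

# Hypothetical stand-ins; the real values live in the surrounding project.
DEEP_AVF = 'relu'                           # hidden-layer activation
DEEP_SGDLR = 0.01                           # SGD learning rate
DEEP_LOSSFUNC = 'categorical_crossentropy'  # loss passed to compile()
DEEP_BSIZE = 32                             # batch size used by evaluate()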
Example #2
    def test_conv2d(self):
        # Generate dummy data
        x_train = np.random.random((100, 100, 100, 3))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100, 3))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        model = Sequential()
        # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
        # this applies 32 convolution filters of size 3x3 each.
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        # This throws if libcudnn is not properly installed on a GPU machine
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        model.evaluate(x_test, y_test, batch_size=32)
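Example #2 assumes its imports; a minimal preamble that makes the test self-contained under Keras 2.x (the API generation this snippet uses, given epochs and Conv2D):

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.optimizers import SGD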
Example #3
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
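Several of these tests share a _get_test_data() fixture and module constants (input_dim, nb_hidden, nb_class, batch_size, nb_epoch) that the excerpt omits. A plausible sketch, with hypothetical sizes, that satisfies the calls above:

import numpy as np
from keras.utils.np_utils import to_categorical

input_dim, nb_hidden, nb_class = 16, 8, 4   # hypothetical fixture sizes
batch_size, nb_epoch = 32, 1

def _get_test_data(num_train=200, num_test=100):
    # Random features plus one-hot integer labels for the softmax output.
    X_train = np.random.random((num_train, input_dim))
    X_test = np.random.random((num_test, input_dim))
    y_train = to_categorical(np.random.randint(nb_class, size=(num_train,)), nb_class)
    y_test = to_categorical(np.random.randint(nb_class, size=(num_test,)), nb_class)
    return (X_train, y_train), (X_test, y_test)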
Example #4
    def test_recursive(self):
        print('test layer-like API')

        graph = containers.Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')
        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

        seq = Sequential()
        seq.add(Dense(32, 32, name='first_seq_dense'))
        seq.add(graph)
        seq.add(Dense(4, 4, name='last_seq_dense'))

        seq.compile('rmsprop', 'mse')

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert(loss < 1.4)

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Example #5
def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while True:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
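The positional 5 in the fit_generator calls above is steps_per_epoch in Keras 2; spelled out with keyword arguments, the first call reads:

model.fit_generator(data_generator(True), steps_per_epoch=5, epochs=epochs)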
Example #6
def test_recursive():
    # test layer-like API

    graph = containers.Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)
    assert(loss < 2.5)

    loss = seq.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    seq.predict(X_test_graph)
    seq.get_config(verbose=1)
Example #7
    def test_recursive(self):
        print("test layer-like API")

        graph = containers.Graph()
        graph.add_input(name="input1", ndim=2)
        graph.add_node(Dense(32, 16), name="dense1", input="input1")
        graph.add_node(Dense(32, 4), name="dense2", input="input1")
        graph.add_node(Dense(16, 4), name="dense3", input="dense1")
        graph.add_output(name="output1", inputs=["dense2", "dense3"], merge_mode="sum")

        seq = Sequential()
        seq.add(Dense(32, 32, name="first_seq_dense"))
        seq.add(graph)
        seq.add(Dense(4, 4, name="last_seq_dense"))

        seq.compile("rmsprop", "mse")

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert loss < 2.5

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Example #8
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #9
def mlp_model(X_train, y_train, X_test, y_test):
    tokenizer = Tokenizer(nb_words=1000)
    nb_classes = np.max(y_train) + 1

    X_train = tokenizer.sequences_to_matrix(X_train, mode="freq")
    X_test = tokenizer.sequences_to_matrix(X_test, mode="freq")

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    print("Building model...")
    model = Sequential()
    model.add(Dense(512, input_shape=(max_len,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')

    history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size, verbose=1, show_accuracy=True, validation_split=0.1)
    model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1, show_accuracy=True)
    # print('Test score:', score[0])
    # print('Test accuracy:', score[1])
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    # print y_test
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print(precision, recall, f1, supp)

    return accuracy, precision, recall, f1
Example #10
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #11
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #12
    def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Example #13
def test_siamese_1():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
Example #14
def train_given_optimiser(optimiser):
    model = Sequential()
    model.add(Dense(1, input_dim=500))
    model.add(Activation(activation='sigmoid'))
    model.compile(optimizer=optimiser, loss='binary_crossentropy', metrics=['accuracy'])
    data = np.random.random((1000, 500))
    labels = np.random.randint(2, size=(1000, 1))
    score = model.evaluate(data, labels, verbose=0)
    print("Optimiser:", optimiser)
    print("Before Training:", list(zip(model.metrics_names, score)))
    model.fit(data, labels, nb_epoch=10, batch_size=32, verbose=0)
    score = model.evaluate(data, labels, verbose=0)
    print("After Training:", list(zip(model.metrics_names, score)))
Example #15
    def test_merge_recursivity(self):
        print('Test merge recursivity')

        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        righter = Sequential()
        righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
        righter.add(Activation('relu'))

        intermediate = Sequential()
        intermediate.add(Merge([left, right], mode='sum'))
        intermediate.add(Dense(nb_hidden))
        intermediate.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([intermediate, righter], mode='sum'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict([X_test, X_test, X_test], verbose=0)
        model.predict_classes([X_test, X_test, X_test], verbose=0)
        model.predict_proba([X_test, X_test, X_test], verbose=0)
        model.get_config(verbose=0)

        fname = 'test_merge_recursivity_temp.h5'
        model.save_weights(fname, overwrite=True)
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Example #16
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #17
def run_problem_1_with_keras(epochs=9, batch_size=128, hidden_nodes=1024):
    log_folder = os.path.join(data_root, 'assignment2', 'keras')
    model = Sequential()
    model.add(Dense(units=hidden_nodes, activation='relu', input_dim=image_size * image_size))
    model.add(Dense(units=num_labels, activation='softmax'))
    model.compile(loss=categorical_crossentropy, metrics=['accuracy'],
                  optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
    model.fit(train_dataset, train_labels, epochs=epochs, batch_size=batch_size, verbose=False,
              callbacks=[TensorBoard(log_folder)])
    print("Validate accuracy: %.1f%%" % (model.evaluate(valid_dataset, valid_labels, verbose=False)[1] * 100))
    acc = 100 * model.evaluate(test_dataset, test_labels, verbose=False)[1]
    print("Test accuracy: %.1f%%" % acc)
    predictions = model.predict(test_dataset, verbose=False)
    display_sample_prediction(predictions, test_labels, acc)
Example #18
class MLP:
    '''
    structure: [(output_dim, input_dim, init, activation, dropout)]
    '''
    def __init__(self, structure,
                 sgd_params_init=sgd_params(0.1, 1e-6, 0.9, True),
                 loss_name='mean_squared_error'):

        self.model = Sequential()
        for layer in structure:
            self.model.add(Dense(output_dim=layer.output_dim,
                                 input_dim=layer.input_dim,
                                 init=layer.init,
                                 activation=layer.activation))
            if layer.dropout is not None:
                self.model.add(Dropout(layer.dropout))

        # Build the optimizer once, outside the loop, so it exists even
        # when no layer specifies dropout.
        sgd = SGD(lr=sgd_params_init.lr,
                  decay=sgd_params_init.decay,
                  momentum=sgd_params_init.momentum,
                  nesterov=sgd_params_init.nesterov)
        self.model.compile(loss=loss_name, optimizer=sgd)

    def train(self, X_train, y_train, nb_epoch=20, batch_size=16):
        self.model.fit(X_train, y_train, nb_epoch=nb_epoch, batch_size=batch_size)

    def test(self, X_test, y_test, batch_size=16):
        return self.model.evaluate(X_test, y_test, batch_size=batch_size)
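sgd_params and the layer-spec entries are attribute-style records the excerpt does not define; since the class's default argument is evaluated at definition time, they must exist before the class. A hypothetical reconstruction with namedtuples, plus a usage sketch:

from collections import namedtuple

sgd_params = namedtuple('sgd_params', ['lr', 'decay', 'momentum', 'nesterov'])
layer_spec = namedtuple('layer_spec',
                        ['output_dim', 'input_dim', 'init', 'activation', 'dropout'])

# Hypothetical two-layer regression MLP: 100 -> 64 -> 1, dropout after the hidden layer.
mlp = MLP([layer_spec(64, 100, 'uniform', 'relu', 0.5),
           layer_spec(1, 64, 'uniform', 'linear', None)])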
Example #19
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
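The double-brace markers ({{uniform(0, 1)}}, {{choice([...])}}) are hyperas search-space templates, not valid Python on their own. A typical driver, assuming a companion data() function that returns X_train, Y_train, X_test, Y_test:

from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=5,
                                      trials=Trials())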
Example #20
def train():

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
Example #21
def evaluate(lr, pos):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype("float32")).reshape((60000, 784))
    X_test = (X_test.astype("float32")).reshape((10000, 784))
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(output_dim=layer1, input_dim=784))
    if pos == 0:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=layer2, input_dim=layer1))
    if pos == 1:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10, input_dim=layer2))
    if pos == 2:
        model.add(BatchNormalization())
    model.add(Activation("softmax"))

    model.compile(
        loss="categorical_crossentropy", optimizer=SGD(lr=lr, momentum=0.9, nesterov=True), metrics=["accuracy"]
    )

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    return score[1]
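layer1, layer2, batch_size, and nb_epoch are module globals in this snippet. A hypothetical sweep over learning rates and BatchNormalization positions might look like:

layer1, layer2 = 256, 128   # hypothetical layer widths
batch_size, nb_epoch = 128, 1

for lr in (0.01, 0.1):
    for pos in range(3):    # BatchNorm after the first, second, or output Dense
        print(lr, pos, evaluate(lr, pos))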
Example #22
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #23
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #24
def part4(weights):
    global X_train, Y_train, X_test, Y_test
    size = 3
    model = Sequential()

    model.add(Convolution2D(32, size, size, border_mode="same", input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model = add_top_layer(model)

    model.load_weights(weights)

    model = copy_freeze_model(model, 4)

    # add top dense layer
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    X_train, Y_train, X_test, Y_test = load_data(10)

    model = train(model, auto=False)
    print("Classification rate %02.5f" % (model.evaluate(X_test, Y_test, show_accuracy=True)[1]))
Example #25
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
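The serialized bytes stored in out['model_serial'] can be turned back into a model by writing them to an HDF5 file and calling keras.models.load_model; a minimal sketch:

import tempfile
from keras.models import load_model

def restore_model(model_bytes):
    # Write the raw HDF5 bytes to a temp file, then reload the full model.
    with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as f:
        f.write(model_bytes)
        path = f.name
    return load_model(path)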
Example #26
def imdb_lstm():
    max_features = 20000
    maxlen = 80  # cut texts after this number of words (among top max_features most common words)
    batch_size = 32
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    print('Pad sequences (samples x time)')
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
                        validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
Example #27
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
    model = Sequential()
    model.add(Embedding(character_corpus.char_num(), 256))
    model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributedDense(5120, character_corpus.char_num()))
    model.add(Activation('time_distributed_softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    seq_X, seq_Y = character_corpus.make_sequences(seq_len)

    print "Sequences are made"

    train_seq_num = train_test_split_ratio*seq_X.shape[0]
    X_train = seq_X[:train_seq_num]
    Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())

    X_test = seq_X[train_seq_num:]
    Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())

    print "Begin train model"
    checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
    model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

    print "Model is trained"

    score = model.evaluate(X_test, Y_test, batch_size=512)

    print "valid score = ", score

    return model
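TimeDistributedDense and the 'time_distributed_softmax' activation are Keras 0.x API. Under Keras 2 the two output layers would roughly collapse to a single wrapped Dense (a sketch, not a drop-in for the rest of this 0.x snippet):

from keras.layers import Dense, TimeDistributed

model.add(TimeDistributed(Dense(character_corpus.char_num(), activation='softmax')))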
Example #28
def CNN_3_layer(activation):
    Xtrain, ytrain, XCV, yCV, Xtest, ytest = load_data("mnist.pkl.gz")
    Xtrain = Xtrain.reshape(Xtrain.shape[0], 1, 28, 28)
    Xtest = Xtest.reshape(Xtest.shape[0], 1, 28, 28)
    XCV = XCV.reshape(XCV.shape[0], 1, 28, 28)
    # 0~9 ten classes
    ytrain = np_utils.to_categorical(ytrain, 10)
    ytest = np_utils.to_categorical(ytest, 10)
    yCV = np_utils.to_categorical(yCV, 10)
    # Build the model
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(1, 28, 28)))
    model.add(Activation(activation))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(16, 3, 3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    # Compile and fit the model
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    model.fit(Xtrain, ytrain, batch_size=100, nb_epoch=20, verbose=1, validation_data=(XCV, yCV))
    score = model.evaluate(Xtest, ytest, verbose=0)
    print(score[0])
    print(score[1])
Example #29
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #30
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}