Example #1
def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
Example #2
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #3
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #4
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #5
    def test_recursive(self):
        print('test layer-like API')

        graph = containers.Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')
        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

        seq = Sequential()
        seq.add(Dense(32, 32, name='first_seq_dense'))
        seq.add(graph)
        seq.add(Dense(4, 4, name='last_seq_dense'))

        seq.compile('rmsprop', 'mse')

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert(loss < 1.4)

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Example #6
def TrainAndValidation1(X_train,y_train,X_test,y_test,bEarlyStopByTestData=True):    
    
    print "#\tTraining shape:" , X_train.shape
    print "#\tTraining label:" , y_train.shape   
    #============================================
    # Model preparation
    #============================================
    model = Sequential()
    model.add(Dense(output_dim=64,input_dim=X_train.shape[1], init='uniform'))
    model.add(Activation(DEEP_AVF))

    model.add(Dense(64, init='uniform'))
    model.add(Activation(DEEP_AVF))

    model.add(Dense(sparkcore.WORKING_KLABEL, init='uniform'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=DEEP_SGDLR, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=DEEP_LOSSFUNC, optimizer=sgd)
    
    a = model.fit(X_train, y_train,nb_epoch=5)
    score0 = model.evaluate(X_train, y_train, batch_size=DEEP_BSIZE)

    if X_test is not None:
        score1  = model.evaluate(X_test, y_test, batch_size=DEEP_BSIZE)
        predicted = model.predict(X_test)
        v_precision,v_recall,TP, FP, TN, FN,thelogloss = sparkcore.MyEvaluation(y_test,predicted)
        return score0,score1,v_precision,v_recall,TP, FP, TN, FN, thelogloss,model
    else:
        return score0, 0,0,0,0,0,0,0,0,model        
Example #7
    def test_conv2d(self):
        # Generate dummy data
        x_train = np.random.random((100, 100, 100, 3))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100, 3))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        model = Sequential()
        # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
        # this applies 32 convolution filters of size 3x3 each.
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        # This throws if libcudnn is not properly installed when running on a GPU
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        model.evaluate(x_test, y_test, batch_size=32)
Example #8
def test_recursive():
    # test layer-like API

    graph = containers.Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)
    assert(loss < 2.5)

    loss = seq.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    seq.predict(X_test_graph)
    seq.get_config(verbose=1)
Example #9
def mlp_model(X_train, y_train, X_test, y_test):
    tokenizer = Tokenizer(nb_words=1000)
    nb_classes = np.max(y_train) + 1

    X_train = tokenizer.sequences_to_matrix(X_train, mode="freq")
    X_test = tokenizer.sequences_to_matrix(X_test, mode="freq")

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    print("Building model...")
    model = Sequential()
    model.add(Dense(512, input_shape=(max_len,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')

    history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size, verbose=1, show_accuracy=True, validation_split=0.1)
    model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1, show_accuracy=True)
    # print('Test score:', score[0])
    # print('Test accuracy:', score[1])
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    # print y_test
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print(precision, recall, f1, supp)

    return accuracy, precision, recall, f1
Example #10
    def test_recursive(self):
        print("test layer-like API")

        graph = containers.Graph()
        graph.add_input(name="input1", ndim=2)
        graph.add_node(Dense(32, 16), name="dense1", input="input1")
        graph.add_node(Dense(32, 4), name="dense2", input="input1")
        graph.add_node(Dense(16, 4), name="dense3", input="dense1")
        graph.add_output(name="output1", inputs=["dense2", "dense3"], merge_mode="sum")

        seq = Sequential()
        seq.add(Dense(32, 32, name="first_seq_dense"))
        seq.add(graph)
        seq.add(Dense(4, 4, name="last_seq_dense"))

        seq.compile("rmsprop", "mse")

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert loss < 2.5

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Example #11
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #12
    def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Example #13
def test_siamese_1():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
Example #14
def train_given_optimiser(optimiser):
    model = Sequential()
    model.add(Dense(1, input_dim=500))
    model.add(Activation(activation='sigmoid'))
    model.compile(optimizer=optimiser, loss='binary_crossentropy', metrics=['accuracy'])
    data = np.random.random((1000, 500))
    labels = np.random.randint(2, size=(1000, 1))
    score = model.evaluate(data, labels, verbose=0)
    print("Optimiser:", optimiser)
    print("Before Training:", list(zip(model.metrics_names, score)))
    model.fit(data, labels, nb_epoch=10, batch_size=32, verbose=0)
    score = model.evaluate(data, labels, verbose=0)
    print("After Training:", list(zip(model.metrics_names, score)))
Example #15
    def test_merge_recursivity(self):
        print('Test merge recursivity')

        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        righter = Sequential()
        righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
        righter.add(Activation('relu'))

        intermediate = Sequential()
        intermediate.add(Merge([left, right], mode='sum'))
        intermediate.add(Dense(nb_hidden))
        intermediate.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([intermediate, righter], mode='sum'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict([X_test, X_test, X_test], verbose=0)
        model.predict_classes([X_test, X_test, X_test], verbose=0)
        model.predict_proba([X_test, X_test, X_test], verbose=0)
        model.get_config(verbose=0)

        fname = 'test_merge_recursivity_temp.h5'
        model.save_weights(fname, overwrite=True)
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Example #16
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #17
def run_problem_1_with_keras(epochs=9, batch_size=128, hidden_nodes=1024):
    log_folder = os.path.join(data_root, 'assignment2', 'keras')
    model = Sequential()
    model.add(Dense(units=hidden_nodes, activation='relu', input_dim=image_size * image_size))
    model.add(Dense(units=num_labels, activation='softmax'))
    model.compile(loss=categorical_crossentropy, metrics=['accuracy'],
                  optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
    model.fit(train_dataset, train_labels, epochs=epochs, batch_size=batch_size, verbose=False,
              callbacks=[TensorBoard(log_folder)])
    print("Validate accuracy: %.1f%%" % (model.evaluate(valid_dataset, valid_labels, verbose=False)[1] * 100))
    acc = 100 * model.evaluate(test_dataset, test_labels, verbose=False)[1]
    print("Test accuracy: %.1f%%" % acc)
    predictions = model.predict(test_dataset, verbose=False)
    display_sample_prediction(predictions, test_labels, acc)
Example #18
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
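The comment above prefers named parameters; here is the same Convolution1D layer written with keywords, a minimal sketch assuming the Keras 1.x argument names nb_filter and filter_length:

    # Same layer as above, with named parameters (Keras 1.x names assumed).
    model.add(Convolution1D(nb_filter={{choice([64, 128])}},
                            filter_length={{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))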
Example #19
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
Example #20
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
    model = Sequential()
    model.add(Embedding(character_corpus.char_num(), 256))
    model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributedDense(5120, character_corpus.char_num()))
    model.add(Activation('time_distributed_softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    seq_X, seq_Y = character_corpus.make_sequences(seq_len)

    print "Sequences are made"

    train_seq_num = int(train_test_split_ratio * seq_X.shape[0])
    X_train = seq_X[:train_seq_num]
    Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())

    X_test = seq_X[train_seq_num:]
    Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())

    print "Begin train model"
    checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
    model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

    print "Model is trained"

    score = model.evaluate(X_test, Y_test, batch_size=512)

    print "valid score = ", score

    return model
Example #21
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
Example #22
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
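A usage sketch of how hyperas consumes such a model-providing function (assuming a companion data() function that returns X_train, Y_train, X_test, Y_test):

# Illustrative hyperas driver; data() is an assumed companion function.
from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=10,
                                      trials=Trials())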
Example #23
def evaluate(lr, pos):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype("float32")).reshape((60000, 784))
    X_test = (X_test.astype("float32")).reshape((10000, 784))
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(output_dim=layer1, input_dim=784))
    if pos == 0:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=layer2, input_dim=layer1))
    if pos == 1:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10, input_dim=layer2))
    if pos == 2:
        model.add(BatchNormalization())
    model.add(Activation("softmax"))

    model.compile(
        loss="categorical_crossentropy", optimizer=SGD(lr=lr, momentum=0.9, nesterov=True), metrics=["accuracy"]
    )

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    return score[1]
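An illustrative sweep over the three BatchNormalization positions the function exposes; the learning rate below is an arbitrary choice:

# Illustrative sweep: test accuracy for each BatchNormalization position.
for pos in range(3):
    print("position", pos, "accuracy", evaluate(0.1, pos))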
Example #24
def train():

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
Example #25
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #26
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #27
def part4(weights):
    global X_train, Y_train, X_test, Y_test
    size = 3
    model = Sequential()

    model.add(Convolution2D(32, size, size, border_mode="same", input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model = add_top_layer(model)

    model.load_weights(weights)

    model = copy_freeze_model(model, 4)

    # add top dense layer
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    X_train, Y_train, X_test, Y_test = load_data(10)

    model = train(model, auto=False)
    print("Classification rate %02.5f" % (model.evaluate(X_test, Y_test, show_accuracy=True)[1]))
Example #28
def imdb_lstm():
    max_features = 20000
    maxlen = 80  # cut texts after this number of words (among top max_features most common words)
    batch_size = 32
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
    print(type(X_train))
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    print('Pad sequences (samples x time)')
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
                        validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
Example #29
class MLP:
    '''
    [(output_dim, input_dim, init, activation, dropout)]
    '''
    def __init__(self, structure,
                 sgd_params_init=sgd_params(0.1, 1e-6, 0.9, True),
                 loss_name='mean_squared_error'):

        self.model = Sequential()
        for layer in structure:
            self.model.add(Dense(output_dim=layer.output_dim,
                                 input_dim=layer.input_dim,
                                 init=layer.init,
                                 activation=layer.activation))
            if layer.dropout is not None:
                self.model.add(Dropout(layer.dropout))

        # Build the optimizer once, outside the loop; the original nested this
        # inside the dropout branch, leaving `sgd` undefined whenever no layer
        # used dropout.
        sgd = SGD(lr=sgd_params_init.lr,
                  decay=sgd_params_init.decay,
                  momentum=sgd_params_init.momentum,
                  nesterov=sgd_params_init.nesterov)
        self.model.compile(loss=loss_name, optimizer=sgd)

    def train(self, X_train, y_train, nb_epoch=20, batch_size=16):
        self.model.fit(X_train, y_train, nb_epoch=nb_epoch, batch_size=batch_size)

    def test(self, X_test, y_test, batch_size=16):
        return self.model.evaluate(X_test, y_test, batch_size=batch_size)
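A minimal usage sketch, assuming the layer specs (like sgd_params) are namedtuples with the fields listed in the class docstring; the names below are illustrative:

# Illustrative construction; namedtuple shape assumed from the docstring.
from collections import namedtuple
LayerSpec = namedtuple('LayerSpec', 'output_dim input_dim init activation dropout')

mlp = MLP([LayerSpec(64, 100, 'uniform', 'relu', 0.5),
           LayerSpec(1, 64, 'uniform', 'sigmoid', None)])
mlp.train(X_train, y_train, nb_epoch=20, batch_size=16)
print(mlp.test(X_test, y_test))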
Example #30
def CNN_3_layer(activation):
    Xtrain, ytrain, XCV, yCV, Xtest, ytest = load_data("mnist.pkl.gz")
    Xtrain = Xtrain.reshape(Xtrain.shape[0], 1, 28, 28)
    Xtest = Xtest.reshape(Xtest.shape[0], 1, 28, 28)
    XCV = XCV.reshape(XCV.shape[0], 1, 28, 28)
    # 0~9 ten classes
    ytrain = np_utils.to_categorical(ytrain, 10)
    ytest = np_utils.to_categorical(ytest, 10)
    yCV = np_utils.to_categorical(yCV, 10)
    # Build the model
    model = Sequential()
    model.add(Convolution2D(32,3,3,border_mode='valid',input_shape=(1,28,28)))
    model.add(Activation(activation))
    model.add(Convolution2D(32,3,3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(16,3,3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    # fit the model
    print "fit the model"
    model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
    model.fit(Xtrain,ytrain,batch_size=100,nb_epoch=20,verbose=1,validation_data=(XCV,yCV))
    score = model.evaluate(Xtest,ytest, verbose=0)
    print score[0]
    print score[1]
Example #31
print(x_train_std)

print(imputed_data.shape)

# neural network model
model1 = Sequential()

model1.add(layers.Dense(50, input_dim=9, activation='relu'))
model1.add(layers.Dense(40, activation='relu'))
model1.add(layers.Dense(30, activation='relu'))
model1.add(layers.Dense(25, activation='relu'))
model1.add(layers.Dense(2, activation='sigmoid'))

print(model1.summary())

model1.compile(loss='binary_crossentropy',
               optimizer='adam',
               metrics=['accuracy'])

history = model1.fit(imputed_data,
                     y_train1,
                     batch_size=10,
                     epochs=50,
                     verbose=1,
                     validation_split=0.3)

score1 = model1.evaluate(imputed_data, y_train1)

print(score1)
Example #32
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

print('compiled')
history = model.fit(X_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(X_val, y_val))
score = model.evaluate(X_test, y_test, verbose=0)
model.save('mnist_fashion.h5')

print('Test loss:', score[0])
print('Test accuracy:', score[1])
'''
0 	T-shirt/top
1 	Trouser
2 	Pullover
3 	Dress
4 	Coat
5 	Sandal
6 	Shirt
7 	Sneaker
8 	Bag
9 	Ankle boot
'''
Example #33
(etreino,streino), (eteste,steste) = mnist.load_data()
entradas = etreino.reshape(etreino.shape[0], 28, 28, 1)
entradas = entradas.astype('float32')
entradas /= 255
saidas = np_utils.to_categorical(streino, 10)

kfold = StratifiedKFold(n_splits = 5, shuffle = True, random_state = seed)
resultados = []

a = np.zeros(5)
b = np.zeros(shape = (saidas.shape[0], 1))

for evalcruzada,svalcruzada in kfold.split(entradas, 
                                            np.zeros(shape=(saidas.shape[0],1))):
    classificador = Sequential()
    classificador.add(Conv2D(32, (3,3), input_shape=(28,28,1), activation='relu'))
    classificador.add(MaxPooling2D(pool_size = (2,2)))
    classificador.add(Flatten())
    classificador.add(Dense(units = 128, activation = 'relu'))
    classificador.add(Dense(units = 10, activation = 'softmax'))
    classificador.compile(loss = 'categorical_crossentropy', optimizer='adam',
                          metrics = ['accuracy'])
    classificador.fit(entradas[evalcruzada], saidas[evalcruzada],
                      batch_size = 128, epochs = 5)
    precisao = classificador.evaluate(entradas[svalcruzada], saidas[svalcruzada])
    resultados.append(precisao[1])

media = sum(resultados) / len(resultados)

Example #34
from keras.layers import Dense
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
'''np.random.seed(0)

data = input_data.read_data_sets('/',one_hot=True)

print(data.shape)'''

df = np.loadtxt("pima-indians-diabetes.csv", delimiter=',')

x = df[:, 0:8]

y = df[:, 8]

print(x)
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x, y, epochs=1000, batch_size=40)

scores = model.evaluate(x, y)

print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example #35
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
print(X_train[0])

embedding_vector_length = 32

model = Sequential()
model.add(Embedding(400, embedding_vector_length, input_length=max_review_length))
model.add(LSTM(64, input_dim=64, return_sequences=True))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(16, return_sequences=False))
model.add(Dense(3, activation='softmax'))  # softmax to match sparse_categorical_crossentropy

adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=10)

scores = model.evaluate(X_test, y_test, verbose=1)

print("Accuracy: %.2f%%" % (scores[1]*100))

model_json = model.to_json()
with open("model.json", "w") as json_file :
    json_file.write(model_json)

model.save_weights("model.h5")
print("Saved model to disk")

Example #36
# Split the data into training and test datasets
X = df.loc[:, df.columns != 'Outcome']
y = df.loc[:, 'Outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Build the Keras neural network
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=8))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=200, verbose=False)

# Results - accuracy
scores = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: %.2f%%\n" % (scores[1]*100))
scores = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: %.2f%%\n" % (scores[1]*100))

# Results - confusion matrix
y_test_pred = model.predict_classes(X_test)
c_matrix = confusion_matrix(y_test, y_test_pred)
ax = sns.heatmap(c_matrix, annot=True, xticklabels=['No Diabetes', 'Diabetes'], yticklabels=['No Diabetes', 'Diabetes'], cbar=False, cmap='Blues')
ax.set_xlabel("Prediction")
ax.set_ylabel("Actual")
plt.show()
plt.clf()

# Results - ROC curve
y_test_pred_probs = model.predict(X_test)
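The snippet stops at the predicted probabilities; a minimal sketch of the ROC plot announced by the final comment, assuming sklearn's roc_curve and the matplotlib import already used above:

# Hypothetical continuation: plot the ROC curve from the probabilities.
from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(y_test, y_test_pred_probs)
plt.plot(fpr, tpr, label='model')
plt.plot([0, 1], [0, 1], '--', label='chance')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()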
Example #37
wavelons = 100
f_layer = WNN(wavelons, 1, input_dim=slice)

model = Sequential()
model.add(f_layer)

model.compile(loss='mean_squared_error', optimizer='adagrad', metrics=['mae'])

model.fit(np.array(x_train),
          np.array(y_train),
          epochs=200,
          verbose=1,
          batch_size=100)

score = model.evaluate(np.array(x_test), np.array(y_test), verbose=True)
print(score)

pred = model.predict(np.array(x_test))

plt.ion()
plt.show()
plt.clf()
plt.title('tensor sig 1')
plt.ylabel('n')
plt.xlabel('y')

plt.plot(y_test, label='test', c=(0, 0, 0))
plt.plot(pred, label='pred', c=(1, 0, 0))
plt.show()
plt.pause(120)
Example #38
print(model.summary())

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())

# Fit the model
history = model.fit(X_train,
                    Y_train,
                    validation_data=(X_validation, Y_validation),
                    epochs=3,
                    batch_size=5,
                    verbose=1)
# Final evaluation of the model
scores = model.evaluate(X_validation, Y_validation, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))

print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

test = ['Check out this video on YouTube:']
test1 = test
Example #39
model.add(Dropout(0.1))
model.add(Dense(1))
model.add(Activation('sigmoid'))

'''
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="logs/5/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='val_acc',mode='max')
csv_logger = CSVLogger('logs/5/training_set_iranalysis.csv',separator=',', append=False)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, validation_data=(X_test, y_test),callbacks=[checkpointer,csv_logger])
model.save("logs/5/lstm1layer_model.hdf5")
'''

model.load_weights("logs/5/checkpoint-499.hdf5")
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
loss, accuracy = model.evaluate(X_train, y_train)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))


from sklearn.metrics import (precision_score, recall_score, f1_score,
                             accuracy_score, mean_squared_error,
                             mean_absolute_error)

expected = y_train
predicted = model.predict_classes(X_train)

accuracy = accuracy_score(expected, predicted)
precision = precision_score(expected, predicted)
recall = recall_score(expected, predicted, average="binary")
f1 = f1_score(expected, predicted, average="binary")

print("Accuracy")
print("%.3f" %accuracy)
Example #40
print "Running Test Data"
for ii in tqdm(range(size_test)):
    X_batch = get_next_batch(test_X['songID'].iloc[ii])

    if X_batch is None:
        continue

    Y_batch = int(test_Y.iloc[ii][0])
    # encode target value
    encodedOutput = np.zeros((1, 4))
    np.put(encodedOutput, [Y_batch], [1])

    # pad training and test sets
    X_batch = np.vstack((X_batch, np.zeros((maxlen - X_batch.shape[0], 300))))
    X_batch = np.expand_dims(X_batch, axis=0)

    # predict class labels
    scores = model.evaluate(X_batch, encodedOutput, verbose=0)
    sumLoss = scores[0] + sumLoss
    sum_score = scores[1] + sum_score
    num_scores += 1

print "Size of Training Set Size: %f" % size_training
print "Size of Test Set Size: %f" % size_test

print sum_score
print num_scores
print("\n Avg Accu: %.2f%%" % (sum_score * 100 / num_scores))

print("\n Avg Loss: %.2f" % (sumLoss / num_scores))
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

model = Sequential()

# ... paste text from model.txt here

score = model.evaluate(x_test, y_test, verbose=1)
print('\n')
print('Test score: ', score[0])
print('Test accuracy: ', score[1])
Example #42
    fin_array = np.true_divide(fin_array, count)  # average the accumulated word vectors
    array_list.append(fin_array)

for lines in file2:
    list_review.append(0)
    for words in lines.split():

        count = count + 1
        if words in model:
            vector = model.wv[words]
            fin_array = np.add(fin_array, vector)
        else:
            continue
    fin_array = np.true_divide(fin_array, count)  # average the accumulated word vectors
    array_list.append(fin_array)

array_list = np.array(array_list)
X = array_list
y = list_review
model = Sequential()
model.add(Dense(50, input_dim=300, activation='sigmoid'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, epochs=50, batch_size=8)
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy * 100))
Example #43
# Fully connected layer 1 input shape (64 * 7 * 7) = (3136), output shape (1024)
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))

# Fully connected layer 2 to shape (2) for 2 classes
model.add(Dense(2))
model.add(Activation('softmax'))

# Another way to define your optimizer
adam = Adam(lr=1e-4)

# We add metrics to get more results you want to see
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print('Training ------------')
# Another way to train the model
model.fit(X_train, y_train, batch_size=50, nb_epoch=11)

print('\nTesting ------------')
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)

print('\ntest loss: ', loss)
print('\ntest accuracy: ', accuracy)

Example #44
def train_detector(X_train, X_test, Y_train, Y_test, nb_filters = 32, batch_size=128, nb_epoch=5, nb_classes=2, do_augment=False, save_file='models/detector_model.hdf5'):
    """ vgg-like deep convolutional network """
    
    np.random.seed(1337)  # for reproducibility
      
    # input image dimensions
    img_rows, img_cols = X_train.shape[1], X_train.shape[2]
    
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3) 
    input_shape = (img_rows, img_cols, 1)


    model = Sequential()
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                            border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (16, 8, 32)
     
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (8, 4, 64) = (2048)
        
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
        
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if do_augment:
        datagen = ImageDataGenerator(
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2)
        datagen.fit(X_train)
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            samples_per_epoch=len(X_train), nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test))
    else:
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    model.save(save_file)  
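# A hypothetical smoke-test for train_detector (all shapes and sizes here are
# assumptions for illustration): grayscale 32x16 crops with one-hot labels for
# the 2 detector classes, run for a single epoch.
import numpy as np
from keras.utils import np_utils

X_tr = np.random.rand(100, 32, 16, 1)   # fake training crops
X_te = np.random.rand(20, 32, 16, 1)    # fake test crops
Y_tr = np_utils.to_categorical(np.random.randint(2, size=100), 2)
Y_te = np_utils.to_categorical(np.random.randint(2, size=20), 2)
train_detector(X_tr, X_te, Y_tr, Y_te, nb_epoch=1,
               save_file='detector_smoke_test.hdf5')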
hist = full_model.fit(X_train,
                      y_train,
                      validation_data=(X_test, y_test),
                      epochs=20,
                      batch_size=16)

#model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print(full_model.summary())

np.random.seed(seed)
#hist = model.fit(X_train, y_train, validation_data=(X_test, y_test),
#         epochs=epochs, batch_size=10, shuffle=True, callbacks=[earlyStopping])
#hist = model.load_weights('./64.15/model.h5');
# Final evaluation of the model
scores = full_model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.legend(['train', 'test'])
plt.title('loss')
plt.savefig("loss7.png", dpi=300, format="png")
plt.figure()
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.legend(['train', 'test'])
plt.title('accuracy')
plt.savefig("accuracy7.png", dpi=300, format="png")
model_json = full_model.to_json()
with open("model7.json", "w") as json_file:
    json_file.write(model_json)
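# A complementary sketch (not in the original): the saved architecture can be
# restored with model_from_json. The weights need their own save first, e.g.
# full_model.save_weights("model7.h5"), which this snippet assumes was done.
from keras.models import model_from_json

with open("model7.json", "r") as json_file:
    restored_model = model_from_json(json_file.read())
restored_model.load_weights("model7.h5")   # hypothetical weights file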
Ejemplo n.º 46
0
class DeepCNN:
    """
    Implements a predefined
    CNN architecture
    """
    def __init__(self, aDataManager):
        """
        aDataManager: the data feeder
        """
        self.myDataManager = aDataManager
        self.fitLabelData()
        self.fitImages()
        self.nbClass = aDataManager.Ytest.shape[1]  # number of classes
        self.myModel = Sequential()  # CNN
        # fix random seed for reproducibility
        self.seed = 7
        np.random.seed(self.seed)

        ###############################################
        #  (OTHER) HYPERPARAMETERS                    #
        ###############################################
        # for model compiling
        self.epochs = 25
        self.lrate = 0.01
        self.decay = (self.lrate / self.epochs)
        self.sgd = SGD(lr=self.lrate,
                       momentum=0.9,
                       decay=self.decay,
                       nesterov=False)
        #                                             #
        ###############################################

        # self.modelBuildingDeeper()
        self.modelBuilding()
        self.modelCompiling()
        self.modelTraining()
        self.modelEvaluation()
        self.modelSaving()

    def fitLabelData(self):
        """
        One-hot encodes the label data
        for the CNN outputs
        """

        self.myDataManager.Ytraining = np_utils.to_categorical(
            self.myDataManager.Ytraining)
        self.myDataManager.Ytest = np_utils.to_categorical(
            self.myDataManager.Ytest)
        self.myDataManager.Yvalidation = np_utils.to_categorical(
            self.myDataManager.Yvalidation)

    def fitImages(self):
        """
        Reshapes the image data for the CNN:
        from 32x32x3 to 3x32x32, or
        from 32x32x1 to 1x32x32 (channels-first)
        """

        self.myDataManager.Xtraining = np.rollaxis(
            self.myDataManager.Xtraining, 3, 1)
        self.myDataManager.Xtest = np.rollaxis(self.myDataManager.Xtest, 3, 1)
        self.myDataManager.Xvalidation = np.rollaxis(
            self.myDataManager.Xvalidation, 3, 1)

    def modelBuilding(self):
        """
        Adding layers
        """
        self.myModel.add(
            Conv2D(32, (3, 3),
                   input_shape=(1, 32, 32),
                   padding='same',
                   activation='relu',
                   kernel_constraint=maxnorm(3)))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(
            Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   kernel_constraint=maxnorm(3)))
        self.myModel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
        self.myModel.add(Flatten())
        self.myModel.add(
            Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
        self.myModel.add(Dropout(0.5))
        self.myModel.add(Dense(self.nbClass, activation='softmax'))

    def modelBuildingDeeper(self):
        """
        Adding layers
        """
        self.myModel.add(
            Conv2D(32, (3, 3),
                   input_shape=(1, 32, 32),
                   activation='relu',
                   padding='same'))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
        self.myModel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
        self.myModel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
        self.myModel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
        self.myModel.add(Conv2D(128, (3, 3), activation='relu',
                                padding='same'))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(Conv2D(128, (3, 3), activation='relu',
                                padding='same'))
        self.myModel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
        self.myModel.add(Flatten())
        self.myModel.add(Dropout(0.2))
        self.myModel.add(
            Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(
            Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
        self.myModel.add(Dropout(0.2))
        self.myModel.add(Dense(self.nbClass, activation='softmax'))

    def modelCompiling(self):
        self.myModel.compile(loss='categorical_crossentropy',
                             optimizer=self.sgd,
                             metrics=['accuracy'])
        print(self.myModel.summary())

    def modelTraining(self):
        np.random.seed(self.seed)
        self.myModel.fit(self.myDataManager.Xtraining,
                         self.myDataManager.Ytraining,
                         validation_data=(self.myDataManager.Xvalidation,
                                          self.myDataManager.Yvalidation),
                         epochs=self.epochs,
                         batch_size=64)

    def modelEvaluation(self):
        # Final evaluation of the model
        scores = self.myModel.evaluate(self.myDataManager.Xtest,
                                       self.myDataManager.Ytest,
                                       verbose=0)
        print("Accuracy: %.2f%%" % (scores[1] * 100))

    def modelSaving(self):
        # saving the model
        saveDir = "resultsTAI/"
        if not os.path.exists(saveDir):
            os.mkdir(saveDir)
        modelFileName = "trafficTAI.h5"
        modelPath = os.path.join(saveDir, modelFileName)
        self.myModel.save(modelPath)
        print('Saved trained model at %s ' % modelPath)
Ejemplo n.º 47
0
# 3. Compile (prepare for training) and run (train)

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])

modelpath = './model/{epoch:02d}--{val_loss:.4f}.hdf5'
chpoint = ModelCheckpoint(filepath=modelpath,
                          monitor='val_loss',
                          save_best_only=True,
                          save_weights_only=False,
                          mode='auto',
                          verbose=1)

hist = model.fit(x_train,
                 y_train,
                 epochs=10,
                 batch_size=150,
                 callbacks=[chpoint],
                 validation_split=0.1)
# the training history from the callbacks is returned in hist

# 4. Evaluate and predict
loss_accuracy = model.evaluate(x_test, y_test, batch_size=150)

print(f"loss : {loss_accuracy[0]}")
print(f"accuracy : {loss_accuracy[1]}")
''' With the saved weights:
loss : 0.3134315144922584
accuracy : 0.9146000146865845'''
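# A minimal reload sketch (the checkpoint filename below is hypothetical; it
# depends on the epoch and val_loss actually reached): ModelCheckpoint saved full
# models, so load_model can restore the best one for further evaluation.
from keras.models import load_model

best_model = load_model('./model/03--0.3134.hdf5')   # hypothetical checkpoint name
print(best_model.evaluate(x_test, y_test, batch_size=150))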
Ejemplo n.º 48
0
# Compile the neural network model
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=["acc"])

X_trainMatrix = np.array(X)
results = model.fit(X_trainMatrix, y_categorical, epochs=10000, batch_size=128)

data2 = pd.read_csv('data/trainPartTest.csv')
x_test = data2.loc[:, 'tempo':]
y_test = data2['genre']

x_test = normalize(x_test)
y_categorical_test = np_utils.to_categorical(y_test)
X_trainMatrix_test = np.array(x_test)
scores = model.evaluate(X_trainMatrix_test, y_categorical_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

test_data = pd.read_csv('data/testComplete.csv')
X_test = test_data.loc[:, 'tempo':]

X_test = normalize(X_test)
pred_X = model.predict(np.asarray(X_test))  # normalize() already returns an ndarray

test_pred = []
for row in pred_X:
    max_val, max_id = 0, 0
    for i in range(len(row)):
        if row[i] > max_val:
            max_id = i
            max_val = row[i]
    test_pred.append(max_id)  # keep the index of the most probable genre
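# Equivalent vectorized form (a sketch): np.argmax returns, for each row of
# pred_X, the index of the largest probability in a single call.
test_pred = list(np.argmax(pred_X, axis=1))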
Ejemplo n.º 49
0
# convert class vectors to binary class matrices
# y_train holds the digit labels, e.g. 5, 8, 9, 1, ...
# Y_train is 60000 x 10: one column per digit, e.g. Y_train[0, 5] = 1
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# the final layer is a softmax, a generalization of the sigmoid
model = Sequential()
model.add(Dense(NB_CLASSES, input_shape=(RESHAPED,)))

model.add(Activation('softmax'))
model.summary()
# compile the model
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])
# if the compilation is successful we can train it with the fit() function
history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("Test score:", score[0])
print("Test accuracy:", score[1])

# Add hidden layers (layers that face neither the network input nor its output directly)
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))  # first hidden layer, right after the input layer
model.add(Activation('relu'))
model.add(Dense(N_HIDDEN))  # second hidden layer
model.add(Activation('relu'))
model.add(Dense(NB_CLASSES))  # output layer of 10 neurons
model.add(Activation('softmax'))

# regularizer
model.add(Dense(64, input_dim=64, kernel_regularizer=regularizers.l2(0.01)))
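# A standalone sketch of the same idea (imports shown explicitly; model and sizes
# are illustrative assumptions): an L2 penalty of 0.01 adds 0.01 * sum(w ** 2) of
# the kernel weights to the loss, discouraging large weights.
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense

reg_model = Sequential()
reg_model.add(Dense(64, input_dim=64,
                    kernel_regularizer=regularizers.l2(0.01)))
reg_model.add(Dense(10, activation='softmax'))
reg_model.compile(loss='categorical_crossentropy', optimizer='adam')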
# calculate predictions
Ejemplo n.º 50
0
                    input_length=max_review_length))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation=None))

optim = optimizers.Adam(lr=0.001,
                        decay=0.001)
model.compile(loss='mse',
              optimizer=optim,  # use the configured Adam instance rather than the default 'adam'
              metrics=['mse'])
tensorboard = TensorBoard(log_dir='./logs', write_graph=True)
earlystopping = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=2,
                              verbose=0,
                              mode='auto')


model.fit(X_train, y_train,
          batch_size=64,
          epochs=20,
          callbacks=[tensorboard, earlystopping],
          validation_split=val_ratio,
          shuffle=True,
          verbose=1)

results = model.evaluate(X_test, y_test, verbose=0)
print('Test RMSE: {}'.format(results[0]**0.5))


Ejemplo n.º 51
0
    batch_size=2,
    epochs=100,
    verbose=1,
    # validation_split=0.2,
    validation_data=[x_val, y_val])

model.save("mean.net")

x_test = np.array([[2, 5, 4.5, 1], [9, 16, 11, 10.5], [100, 95, 99, 102]])

y_test = np.array([
    [3.125],
    [11.625],
    [99.0],
])

output = model.evaluate(x_test, y_test)

print("")
print("=== Evaluation ===")
print(model.metrics_names)
print(output)

x_predict = np.array([[1.5, 2, 3.5, 4], [1000, 2000, 3000, 4000]])

prediction = model.predict(x_predict)

print("")
print("Expected: [[2.75], [2500]]")
print("Actual:", prediction)
model = Sequential()
model.add(Dense(input_dim=13, units=50, activation='relu'))
model.add(Dense(units=16, activation='relu'))
model.add(Dense(units=2, activation='sigmoid'))

model.compile(optimizer=Adam(lr=0.01),
              loss='binary_crossentropy',
              metrics=['accuracy'])

History = model.fit(x_train,
                    y_train,
                    validation_data=(x_test, y_test),
                    epochs=10,
                    verbose=1)
predicted = model.predict_classes(x_test)
scores = model.evaluate(X, Y, verbose=0)

print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

#print (predicted)
#model.save('final_model.h5')

##### SAVING MODEL ######

#model_json = model.to_json()
#with open("model_final.json", "w") as json_file:
#    json_file.write(model_json)
## serialize weights to HDF5
#model.save_weights("model_final.h5")
#print("Saved model to disk")
Ejemplo n.º 53
0
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.summary()

# Train the model
optimizer_history = model.fit(X_train,
                              y_train,
                              epochs=300,
                              batch_size=1500,
                              verbose=1,
                              validation_data=(X_test, y_test))

# Evaluate the model
results = model.evaluate(X_test, y_test)

print('loss: ', results[0])
print('accuracy: ', results[1])

# model.fit(X_train,y_train, epochs=200, batch_size=16, verbose=2)
# trainPredict = model.predict(X_train)
# testPredict= model.predict(X_test)
# predicted=np.concatenate((trainPredict,testPredict),axis=0)

# trainScore = model.evaluate(X_test, y_test, verbose=0)
# print(trainScore)

# model = tf.keras.Sequential()
# # model.add(Input(shape=(2,)))
# # model.add(Embedding(10000, 32))
Ejemplo n.º 54
0
model.add(Dropout(0.25))
# Layer that flattens the 2D representation into a vector
model.add(Flatten())
# Fully connected layer for classification
model.add(Dense(512, activation='relu'))
# Dropout regularization layer
model.add(Dropout(0.5))
# Output fully connected layer
model.add(Dense(nb_classes, activation='softmax'))

# Set the optimization parameters
# lr = 0.01
lr = 0.02
sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)

model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# Train the model
model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_split=0.1,
          shuffle=True)

# Evaluate the quality of the trained model on the test data
scores = model.evaluate(X_test, Y_test, verbose=0)

print("Точность работы на тестовых данных:")
print(round(scores[1] * 100, 3))
Ejemplo n.º 55
0
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''

        ## start the Keras/ Tensorflow session
        self.session = K.get_session()
        self.graph = tf.get_default_graph()

        self.SetStartDate(2018, 8, 1)  #Set Start Date
        self.SetEndDate(2018, 11, 21)  #Set End Date
        self.SetCash(100000)  #Set Strategy Cash

        ## set the currency pair that we are trading, and the correlated currency pair
        self.currency = "AUDUSD"
        self.AddForex(self.currency, Resolution.Daily)

        self.correl_currency = "USDCHF"
        self.AddForex(self.correl_currency, Resolution.Daily)

        ## define a long list, short list and portfolio
        self.long_list, self.short_list = [], []

        # Initialise indicators
        self.rsi = RelativeStrengthIndex(9)
        self.bb = BollingerBands(14, 2, 2)
        self.macd = MovingAverageConvergenceDivergence(12, 26, 9)
        self.stochastic = Stochastic(14, 3, 3)
        self.ema = ExponentialMovingAverage(9)

        ## Arrays to store the past indicators
        prev_rsi, prev_bb, prev_macd, lower_bb, upper_bb, sd_bb, prev_stochastic, prev_ema = [],[],[],[],[],[],[],[]

        ## Make history calls for both currency pairs
        self.currency_data = self.History(
            [self.currency], 150,
            Resolution.Daily)  # Drop the first 20 for indicators to warm up
        self.correl_data = self.History([self.correl_currency], 150,
                                        Resolution.Daily)

        ## save the most recent open and close
        ytd_open = self.currency_data["open"][-1]
        ytd_close = self.currency_data["close"][-1]

        ## remove yesterday's data. We will query this in OnData
        self.currency_data = self.currency_data[:-1]
        self.correl_data = self.correl_data[:-1]

        ## iterate over past data to update the indicators
        for tup in self.currency_data.loc[self.currency].itertuples():
            # making Ibasedatabar for stochastic
            bar = QuoteBar(
                tup.Index, self.currency,
                Bar(tup.bidclose, tup.bidhigh, tup.bidlow, tup.bidopen), 0,
                Bar(tup.askclose, tup.askhigh, tup.asklow, tup.askopen), 0,
                timedelta(days=1))

            self.stochastic.Update(bar)
            prev_stochastic.append(float(self.stochastic.ToString()))

            self.rsi.Update(tup.Index, tup.close)
            prev_rsi.append(float(self.rsi.ToString()))

            self.bb.Update(tup.Index, tup.close)
            prev_bb.append(float(self.bb.ToString()))
            lower_bb.append(float(self.bb.LowerBand.ToString()))
            upper_bb.append(float(self.bb.UpperBand.ToString()))
            sd_bb.append(float(self.bb.StandardDeviation.ToString()))

            self.macd.Update(tup.Index, tup.close)
            prev_macd.append(float(self.macd.ToString()))

            self.ema.Update(tup.Index, tup.close)
            prev_ema.append(float(self.ema.ToString()))

        ## Forming the Indicators df
        ## This is common to the Price Prediction
        rsi_df = pd.DataFrame(prev_rsi, columns=["rsi"])
        macd_df = pd.DataFrame(prev_macd, columns=["macd"])
        upper_bb_df = pd.DataFrame(upper_bb, columns=["upper_bb"])
        lower_bb_df = pd.DataFrame(lower_bb, columns=["lower_bb"])
        sd_bb_df = pd.DataFrame(sd_bb, columns=["sd_bb"])
        stochastic_df = pd.DataFrame(prev_stochastic, columns=["stochastic"])
        ema_df = pd.DataFrame(prev_ema, columns=["ema"])

        self.indicators_df = pd.concat([
            rsi_df, macd_df, upper_bb_df, lower_bb_df, sd_bb_df, stochastic_df,
            ema_df
        ],
                                       axis=1)
        self.indicators_df = self.indicators_df.iloc[20:]
        self.indicators_df.reset_index(inplace=True, drop=True)

        ## Currency Data Price
        self._currency_data = deepcopy(self.currency_data)
        self._currency_data = self._currency_data.reset_index(level=[0, 1],
                                                              drop=True)

        self._currency_data.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose"
        ],
                                 inplace=True)
        self._currency_data = self._currency_data.iloc[20:]
        self._currency_data.reset_index(inplace=True, drop=True)

        ## saving the previous 6 days OHLC for the price prediction model
        _close_prev_prices = self._previous_prices(
            "close", self._currency_data["close"], 6)
        _open_prev_prices = self._previous_prices("open",
                                                  self._currency_data["open"],
                                                  6)
        _high_prev_prices = self._previous_prices("high",
                                                  self._currency_data["high"],
                                                  6)
        _low_prev_prices = self._previous_prices("low",
                                                 self._currency_data["low"], 6)

        _all_prev_prices = pd.concat([
            _close_prev_prices, _open_prev_prices, _high_prev_prices,
            _low_prev_prices
        ],
                                     axis=1)

        _final_table = self._currency_data.join(_all_prev_prices, how="outer")
        _final_table = _final_table.join(self.indicators_df, how="outer")

        # Drop NaN from feature table
        self._features = _final_table.dropna()

        self._features.reset_index(inplace=True, drop=True)

        # Make labels for LSTM model
        self._labels = self._features["close"]
        self._labels = pd.DataFrame(self._labels)
        self._labels.index -= 1
        self._labels = self._labels[1:]
        _new_row = pd.DataFrame({"close": [ytd_close]})
        self._labels = self._labels.append(_new_row)
        self._labels.reset_index(inplace=True, drop=True)

        # Currency Data Direction
        self.currency_data_direction = self.currency_data.reset_index(
            level=[0, 1], drop=True)

        self.currency_data_direction.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose", "open", "high", "low"
        ],
                                          inplace=True)
        self.currency_data_direction = self.currency_data_direction.iloc[20:]
        self.currency_data_direction.reset_index(inplace=True, drop=True)

        # Correlation Currency Data
        self.correl_data = self.correl_data.reset_index(level=[0, 1],
                                                        drop=True)
        self.correl_data.drop(columns=[
            "askopen", "askhigh", "asklow", "askclose", "bidopen", "bidhigh",
            "bidlow", "bidclose", "open", "high", "low"
        ],
                              inplace=True)
        self.correl_data = self.correl_data.iloc[20:]
        self.correl_data.reset_index(inplace=True, drop=True)
        self.correl_data.rename(index=str,
                                columns={"close": "correl_close"},
                                inplace=True)

        # Close Price Direction Change
        self.close_dir_change = self.direction_change(
            "close", self.currency_data_direction["close"], 11)

        # Correlation Currency Direction Change
        self.correl_dir_change = self.direction_change(
            "correl_close", self.correl_data["correl_close"], 11)

        # Join the tables
        joined_table_direction = self.currency_data_direction.join(
            self.close_dir_change, how="outer")
        joined_table_direction = joined_table_direction.join(
            self.correl_dir_change, how="outer")
        joined_table_direction = joined_table_direction.join(
            self.indicators_df, how="outer")

        # Features Direction
        self.features_direction = joined_table_direction.dropna()
        self.features_direction.reset_index(inplace=True, drop=True)

        ## replace lowerBB and upperBB with their difference from the close price
        self.features_direction["lower_bb_diff"] = self.features_direction[
            "close"] - self.features_direction["lower_bb"]
        self.features_direction["upper_bb_diff"] = self.features_direction[
            "upper_bb"] - self.features_direction["close"]
        self.features_direction["ema_diff"] = self.features_direction[
            "ema"] - self.features_direction["close"]

        self.features_direction.drop(columns=["upper_bb", "lower_bb", "ema"],
                                     inplace=True)

        # Make raw df for labels

        self.labels = self.features_direction["close"]
        self.labels = pd.DataFrame(self.labels)
        self.labels.index -= 1

        self.labels = self.labels[1:]

        new_row = pd.DataFrame({"close": [ytd_close]})
        self.labels = self.labels.append(new_row)

        self.labels.reset_index(inplace=True, drop=True)

        ## Form the binary labels: 1 for up and 0 for down
        self.labels_direction_new = pd.DataFrame(columns=["direction"])
        for row in self.labels.iterrows():

            new_close, old_close = row[1], self.features_direction["close"][
                row[0]]
            change = (new_close - old_close)[0]
            percent_change = 100 * change / old_close

            if percent_change >= 0:
                this_df = pd.DataFrame({"direction": [1]})

            elif percent_change < 0:
                this_df = pd.DataFrame({"direction": [0]})

            self.labels_direction_new = self.labels_direction_new.append(
                this_df)

        self.labels_direction_new.reset_index(inplace=True, drop=True)

        ## Test out different features
        self.features_direction.drop(
            columns=["rsi", "stochastic", "close", "sd_bb"], inplace=True)

        self.scaler_X = MinMaxScaler()
        self.scaler_X.fit(self.features_direction)
        scaled_features_direction = self.scaler_X.transform(
            self.features_direction)

        # Hyperparameter fine-tuning
        max_depth = [10, 15, 20, 30]
        n_estimators = [100, 200, 300, 500]
        criterion = ["gini", "entropy"]

        tscv = TimeSeriesSplit(n_splits=4)

        params_df = pd.DataFrame(
            columns=["depth", "n_est", "criterion", "acc_score"])

        for depth in max_depth:
            for n_est in n_estimators:
                for crn in criterion:
                    acc_scores = []
                    for train_index, test_index in tscv.split(
                            scaled_features_direction):
                        X_train, X_test = scaled_features_direction[
                            train_index], scaled_features_direction[test_index]
                        #Y_train, Y_test = labels_direction.loc[train_index], labels_direction.loc[test_index]

                        Y_train, Y_test = self.labels_direction_new[
                            "direction"][
                                train_index], self.labels_direction_new[
                                    "direction"][test_index]

                        Y_train, Y_test = Y_train.astype('int'), Y_test.astype(
                            'int')

                        RF = RandomForestClassifier(criterion=crn,
                                                    n_estimators=n_est,
                                                    max_depth=depth,
                                                    random_state=12345)
                        RF_model = RF.fit(X_train, Y_train)

                        y_pred = RF_model.predict(X_test)

                        acc_score = accuracy_score(Y_test, y_pred)
                        acc_scores.append(acc_score)

                    average_acc = np.mean(acc_scores)
                    # self.Debug("ACC")
                    # self.Debug(average_acc)
                    ## build a row of depth, n_est, criterion and accuracy and append it to params_df
                    this_df = pd.DataFrame({
                        "depth": [depth],
                        "n_est": [n_est],
                        "criterion": [crn],
                        "acc_score": [average_acc]
                    })
                    params_df = params_df.append(this_df)

        opt_values = params_df[params_df['acc_score'] ==
                               params_df['acc_score'].max()]
        opt_depth, opt_n_est, opt_crn = opt_values["depth"][0], opt_values[
            "n_est"][0], opt_values["criterion"][0]

        self.RF = RandomForestClassifier(criterion="gini",
                                         n_estimators=300,
                                         max_depth=10,
                                         random_state=123)
        self.RF_model = self.RF.fit(
            scaled_features_direction,
            self.labels_direction_new["direction"].astype('int'))

        ## Define scaler for this class
        self._scaler_X = MinMaxScaler()
        self._scaler_X.fit(self._features)
        self._scaled_features = self._scaler_X.transform(self._features)

        self._scaler_Y = MinMaxScaler()
        self._scaler_Y.fit(self._labels)
        self._scaled_labels = self._scaler_Y.transform(self._labels)

        ## fine tune the model to determine hyperparameters
        ## only done once (upon initialize)

        _tscv = TimeSeriesSplit(n_splits=2)
        _cells = [100, 200]
        _epochs = [50, 100]

        ## create dataframe to store optimal hyperparams
        _params_df = pd.DataFrame(columns=["cells", "epoch", "mse"])

        ## loop through all combinations of cells and epochs
        for i in _cells:
            for j in _epochs:

                print("CELL", i, "EPOCH", j)

                # list to store the mean square errors
                cvscores = []

                for train_index, test_index in _tscv.split(
                        self._scaled_features):
                    #print(train_index, test_index)
                    X_train, X_test = self._scaled_features[
                        train_index], self._scaled_features[test_index]
                    Y_train, Y_test = self._scaled_labels[
                        train_index], self._scaled_labels[test_index]

                    X_train = np.reshape(
                        X_train, (X_train.shape[0], 1, X_train.shape[1]))
                    X_test = np.reshape(X_test,
                                        (X_test.shape[0], 1, X_test.shape[1]))

                    model = Sequential()
                    model.add(
                        LSTM(i,
                             input_shape=(1, X_train.shape[2]),
                             return_sequences=True))
                    model.add(Dropout(0.10))
                    model.add(LSTM(i, return_sequences=True))
                    model.add(LSTM(i))
                    model.add(Dropout(0.10))
                    model.add(Dense(1))
                    model.compile(loss='mean_squared_error',
                                  optimizer='rmsprop',
                                  metrics=['mean_squared_error'])
                    model.fit(X_train, Y_train, epochs=j, verbose=0)

                    scores = model.evaluate(X_test, Y_test)
                    cvscores.append(scores[1])

                ## get average value of mean sq error
                MSE = np.mean(cvscores)

                ## make this df for cells, epoch and mse and append to params_df
                this_df = pd.DataFrame({
                    "cells": [i],
                    "epoch": [j],
                    "mse": [MSE]
                })
                # self.Debug(this_df)
                # params_df = params_df.append(this_df)

                _params_df = _params_df.append(this_df)
                self.Debug(_params_df)

        # # Check the optimised values (O_values) obtained from cross validation
        # # This code takes the row with the minimum mse and stores its values in O_values
        # _O_values = _params_df[_params_df['mse'] == _params_df['mse'].min()]

        # # Extract the optimised values of cells and epochs from the above row (having min mse)
        self._opt_cells = 200
        self._opt_epochs = 100
        # self._opt_cells = _O_values["cells"][0]
        # self._opt_epochs = _O_values["epoch"][0]

        _X_train = np.reshape(self._scaled_features,
                              (self._scaled_features.shape[0], 1,
                               self._scaled_features.shape[1]))
        _y_train = self._scaled_labels

        self._session = K.get_session()
        self._graph = tf.get_default_graph()

        # Initialise the model with optimised parameters
        self._model = Sequential()
        self._model.add(
            LSTM(self._opt_cells,
                 input_shape=(1, _X_train.shape[2]),
                 return_sequences=True))
        self._model.add(Dropout(0.20))
        self._model.add(LSTM(self._opt_cells, return_sequences=True))
        self._model.add(Dropout(0.20))
        self._model.add(LSTM(self._opt_cells, return_sequences=True))
        self._model.add(LSTM(self._opt_cells))
        self._model.add(Dropout(0.20))
        self._model.add(Dense(1))

        # self.model.add(Activation("softmax"))
        self._model.compile(loss='mean_squared_error',
                            optimizer='adam',
                            metrics=['mean_squared_error'])
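# A standalone shape sketch (illustrative numbers, not from the original): Keras
# LSTM layers expect 3D input of (samples, timesteps, features), which is why the
# 2D feature matrix above is reshaped to a single timestep per sample before fitting.
import numpy as np
features_2d = np.random.rand(5, 12)                       # 5 samples, 12 features
features_3d = np.reshape(features_2d,
                         (features_2d.shape[0], 1, features_2d.shape[1]))
print(features_3d.shape)                                  # (5, 1, 12)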
Ejemplo n.º 56
0
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

model = Sequential()
model.add(Dense(
    25,
    input_dim=784,
))
model.add(Dense(10))
model.add(Dense(7))
model.add(Dense(5))
model.add(Dense(10))
model.add(Dense(10))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='loss', patience=20)

model.fit(x_train,
          y_train,
          validation_split=0.2,
          epochs=100,
          batch_size=1,
          verbose=1,
          callbacks=[early_stopping])

acc = model.evaluate(x_test, y_test)

print(acc)
Ejemplo n.º 57
0
model.add(Activation('softmax'))
"""
DROPOUT = 0.3
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))

print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])



history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)


score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("Test score:", score[0])
print('Test accuracy:', score[1])




Ejemplo n.º 58
0
class Model:
    def __init__(self):
        self.model = None

    # Build the model
    def build_model(self, dataset, nb_classes=2):
        # Create an empty network: a linear stack to which layers are added in
        # order, formally known as a Sequential (linearly stacked) model
        self.model = Sequential()

        # The following calls add the layers the CNN needs, in order; each add() is one layer
        self.model.add(
            Convolution2D(32,
                          3,
                          3,
                          border_mode='same',
                          input_shape=dataset.input_shape))  #1 2D convolution layer
        self.model.add(Activation('relu'))  #2 activation layer

        self.model.add(Convolution2D(32, 3, 3))  #3 2D convolution layer
        self.model.add(Activation('relu'))  #4 activation layer

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  #5 max-pooling layer
        self.model.add(Dropout(0.25))  #6 Dropout layer

        self.model.add(Convolution2D(64, 3, 3, border_mode='same'))  #7 2D convolution layer
        self.model.add(Activation('relu'))  #8 activation layer

        self.model.add(Convolution2D(64, 3, 3))  #9 2D convolution layer
        self.model.add(Activation('relu'))  #10 activation layer

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  #11 max-pooling layer
        self.model.add(Dropout(0.25))  #12 Dropout layer

        self.model.add(Flatten())  #13 Flatten layer
        self.model.add(Dense(512))  #14 Dense layer, also called a fully connected layer
        self.model.add(Activation('relu'))  #15 activation layer
        self.model.add(Dropout(0.5))  #16 Dropout layer
        self.model.add(Dense(nb_classes))  #17 Dense layer
        self.model.add(Activation('softmax'))  #18 classification layer, outputs the final result

        # Print a summary of the model
        self.model.summary()

    # Train the model
    def train(self,
              dataset,
              batch_size=20,
              nb_epoch=10,
              data_augmentation=True):
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9,
                  nesterov=True)  # train with an SGD+momentum optimizer; first create the optimizer object
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # complete the actual model configuration

        # Without data augmentation: "augmentation" creates new training data from
        # the provided samples via rotation, flipping, added noise, etc., deliberately
        # enlarging the training set and the amount of training the model receives
        if not data_augmentation:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           nb_epoch=nb_epoch,
                           validation_data=(dataset.valid_images,
                                            dataset.valid_labels),
                           shuffle=True)
        # With real-time data augmentation
        else:
            # Define a data generator for augmentation. It returns a generator object,
            # datagen, which yields one batch of data per call (generated in order),
            # saving memory; it is essentially a Python generator
            datagen = ImageDataGenerator(
                featurewise_center=False,  # whether to center the input data (dataset mean set to 0)
                samplewise_center=False,  # whether to set each sample's mean to 0
                featurewise_std_normalization=False,  # whether to divide inputs by the dataset std
                samplewise_std_normalization=False,  # whether to divide each sample by its own std
                zca_whitening=False,  # whether to apply ZCA whitening to the input
                rotation_range=20,  # random rotation angle during augmentation (range 0~180)
                width_shift_range=0.2,  # horizontal shift as a fraction of image width (float in 0~1)
                height_shift_range=0.2,  # same as above, but vertical
                horizontal_flip=True,  # whether to apply random horizontal flips
                vertical_flip=False)  # whether to apply random vertical flips

            # Fit the generator on the whole training set, as needed for feature
            # normalization, ZCA whitening, and similar preprocessing
            datagen.fit(dataset.train_images)

            # Start training the model with the generator
            self.model.fit_generator(
                datagen.flow(dataset.train_images,
                             dataset.train_labels,
                             batch_size=batch_size),
                samples_per_epoch=dataset.train_images.shape[0],
                nb_epoch=nb_epoch,
                validation_data=(dataset.valid_images, dataset.valid_labels))

    MODEL_PATH = './model/me.face.model.h5'

    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)

    def evaluate(self, dataset):
        score = self.model.evaluate(dataset.test_images,
                                    dataset.test_labels,
                                    verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))


    # Face recognition
    def face_predict(self, image):
        # Again, determine the dimension ordering from the backend
        if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE,
                                                              IMAGE_SIZE):
            image = resize_image(image)  # size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape(
                (1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, predict on a single image
        elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE,
                                                                IMAGE_SIZE, 3):
            image = resize_image(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))

        # Convert to float and normalize
        image = image.astype('float32')
        image /= 255

        # Output the probability of the input belonging to each class; for our
        # binary task this gives the probabilities of class 0 and class 1
        result = self.model.predict_proba(image)
        print('result:', result)

        # Output the class prediction: 0 or 1
        result = self.model.predict_classes(image)

        # Return the class prediction
        return result[0]
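# A hypothetical usage sketch (FakeDataset and all sizes below are assumptions
# made for illustration, assuming a TensorFlow channels-last backend; the real
# dataset object must expose the same attributes the class reads):
import numpy as np
from keras.utils import np_utils

class FakeDataset(object):
    input_shape = (64, 64, 3)
    train_images = np.random.rand(40, 64, 64, 3)
    train_labels = np_utils.to_categorical(np.random.randint(2, size=40), 2)
    valid_images = np.random.rand(10, 64, 64, 3)
    valid_labels = np_utils.to_categorical(np.random.randint(2, size=10), 2)
    test_images = np.random.rand(10, 64, 64, 3)
    test_labels = np_utils.to_categorical(np.random.randint(2, size=10), 2)

face_model = Model()
face_model.build_model(FakeDataset, nb_classes=2)
face_model.train(FakeDataset, batch_size=10, nb_epoch=1, data_augmentation=False)
face_model.evaluate(FakeDataset)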
Ejemplo n.º 59
0
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=559))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(6, activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=20,
          batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)



y_pred = model.predict(x_test)
y_pred = (y_pred > 0.5)


from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test.values.argmax(axis=1), y_pred.argmax(axis=1))  # y_pred is an ndarray, so no .values
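# A follow-up sketch: per-class recall can be read directly off the confusion
# matrix by normalizing each row (rows are true classes, columns are predictions).
import numpy as np
recall_per_class = cm.diagonal() / cm.sum(axis=1)
print(recall_per_class)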
Ejemplo n.º 60
0
        Dense(16,
              activation="relu",
              kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
              bias_regularizer=regularizers.l2(1e-4),
              activity_regularizer=regularizers.l2(1e-5)))

    model.add(Dense(out_dim))

    #opt = optimizers.Adam(learning_rate=0.001)
    #model.compile(loss='mse', optimizer=opt)

    model.compile(loss='mse', optimizer='Adam')
    model.summary()

    model.fit(trainx, trainy, batch_size=32, epochs=250, verbose=1)
    trainScore = model.evaluate(trainx, trainy, verbose=0)
    print(trainScore)

    predtest = model.predict(testx)

    rmse_y1 = sqrt(mean_squared_error(testy[:, 0], predtest[:, 0]))
    rmse_y2 = sqrt(mean_squared_error(testy[:, 1], predtest[:, 1]))
    print("RMSE y1: %.2f y2: %.2f" % (rmse_y1, rmse_y2))

#%%
plt.figure()

plt.plot(Time[:-step], testy[:, 0], label="y1-test", color="c")
plt.plot(Time[:-step], predtest[:, 0], label="y1-pred")
plt.plot(Time[:-step], testy[:, 1], label="y2-test", color="m")
plt.plot(Time[:-step], predtest[:, 1], label="y2-pred")