Example #1
0
def SA_sst():
    """Train a deep averaging network (DAN) with dropout on the SST
    polarity split and print test loss/accuracy.

    Depends on module-level helpers: ``build_keras_input()`` for the data
    tuple and embedding matrix W, ``dan_dropout_position()`` for the model,
    and the (legacy) Keras ``sequence`` utilities for padding.
    """
    # Only the polarity splits are used below; the valence/label splits
    # returned by build_keras_input() are discarded.
    data, W = build_keras_input()
    (_, _, _,
     _, _, _,
     _, _, _,
     train_idx, train_pol,
     test_idx, test_pol,
     valid_idx, valid_pol) = data

    maxlen = 200  # truncate/pad each text to this many word indices
    batch_size = 32
    X_train, y_train = train_idx, train_pol
    X_test, y_test = test_idx, test_pol
    X_valid, y_valide = valid_idx, valid_pol

    print(len(X_train), "train sequences")
    print(len(X_test), "test sequences")

    max_features = W.shape[0]  # vocabulary size; W is (13631, 300)

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print("X_train shape:", X_train.shape)
    print("X_test shape:", X_test.shape)

    model = dan_dropout_position(W)

    # NOTE(review): class_mode / show_accuracy / nb_epoch are Keras 0.x
    # arguments — this code targets that legacy API.
    model.compile(loss="binary_crossentropy", optimizer="adagrad", class_mode="binary")

    print("Train...")
    model.fit(
        X_train,
        y_train,
        batch_size=batch_size,
        nb_epoch=30,
        validation_data=(X_test, y_test),
        show_accuracy=True,
    )
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print("Test score:", score)
    print("Test accuracy:", acc)
Example #2
0
def my_function(param1, param2, param3):
    """Train a pre-trained DAN on the SST polarity split with one-hot
    targets and early stopping, and return the test accuracy.

    Parameters
    ----------
    param1, param2, param3
        Forwarded verbatim to ``dan_pre_trained`` (model hyper-parameters;
        semantics defined by that helper — not visible here).

    Returns
    -------
    The test-set accuracy reported by ``model.evaluate``.
    """
    # Only the polarity splits are used; valence/label splits are dropped.
    data, W = build_keras_input()
    (_, _, _,
     _, _, _,
     _, _, _,
     train_idx, train_pol,
     test_idx, test_pol,
     valid_idx, valid_pol) = data

    maxlen = 200  # truncate/pad each text to this many word indices
    batch_size = 32
    X_train, y_train = train_idx, train_pol
    X_test, y_test = test_idx, test_pol
    X_valid, y_valide = valid_idx, valid_pol

    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')

    max_features = W.shape[0]  # vocab size: (13631, 300), 14027 with min_df=3

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    X_valid = sequence.pad_sequences(X_valid, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    # Binary polarity encoded as two-class one-hot for the softmax head.
    nb_classes = 2
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    y_valide = np_utils.to_categorical(y_valide, nb_classes)

    model = dan_pre_trained(W, param1, param2, param3)
    plot(model, to_file='./images/model.png')  # save architecture diagram

    model.compile(loss='categorical_crossentropy', optimizer='adagrad')

    print("Train...")
    # Stop when validation loss has not improved for 10 epochs.
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    model.fit(
        X_train,
        y_train,
        batch_size=batch_size,
        nb_epoch=100,  # legacy Keras 0.x spelling of `epochs`
        validation_data=(X_test, y_test),
        show_accuracy=True,
        callbacks=[early_stopping],
    )
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)
    return acc
Example #3
0
def SA_sst():
    """Fit a dropout DAN on the SST polarity data and print test metrics.

    Uses module-level helpers ``build_keras_input`` (data + embedding
    matrix W), ``dan_dropout_position`` (model), and the legacy Keras
    ``sequence`` padding utilities.
    """
    packed, W = build_keras_input()
    # Discard the valence/label splits; keep only the polarity splits.
    X_train, y_train = packed[9], packed[10]
    X_test, y_test = packed[11], packed[12]
    X_valid, y_valide = packed[13], packed[14]

    maxlen = 200  # truncate/pad each text to this many word indices
    batch_size = 32

    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')

    max_features = W.shape[0]  # vocabulary size; W is (13631, 300)

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    model = dan_dropout_position(W)

    # NOTE(review): class_mode / show_accuracy / nb_epoch are Keras 0.x
    # arguments — this code targets that legacy API.
    model.compile(loss='binary_crossentropy', optimizer='adagrad', class_mode="binary")

    print("Train...")
    model.fit(
        X_train,
        y_train,
        batch_size=batch_size,
        nb_epoch=30,
        validation_data=(X_test, y_test),
        show_accuracy=True,
    )
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)