Example No. 1
def generate_seq(
        model : Model,
        seed,
        numchars,
        size):

    ls = seed.shape[0]

    # Due to the way Keras RNNs work, we feed the model the
    # whole sequence each time, constantly sampling the next character.
    # It's a little bit inefficient, but that doesn't matter much during generation.

    tokens = np.concatenate([seed, np.zeros(size - ls)])

    for i in range(ls, size-1):

        toh = util.to_categorical(tokens[None,:], numchars)
        probs = model.predict(toh)

        # Extract the i-th probability vector and sample an index from it
        next_token = sample(probs[0, i, :])

        tokens[i+1] = next_token

    return [int(t) for t in tokens]
Example No. 2
def cal_auc(pred, true, is_torch=True, is_softmax=False, isbin=True):
    """Compute ROC AUC; optionally detach torch tensors to numpy, argmax softmax
    outputs to class indices, and one-hot encode the binary ground-truth labels first."""
    if is_torch:
        true = true.data.cpu().numpy()
        pred = pred.data.cpu().numpy()
    if is_softmax:
        pred = np.argmax(pred, axis=1)
    if isbin:
        true = to_categorical(true, 2)
    # trues = to_categorical(true,2)
    auc = metrics.roc_auc_score(true, pred)
    return auc
Example No. 3
def generate_seq(
        model : Model,
        seed,
        numchars,
        size,
        temperature=1.0):
    """
    :param model: The complete RNN language model
    :param seed: The first few words of the sequence to start generating from
    :param numchars: The number of distinct characters (the size of the one-hot vectors)
    :param size: The total size of the sequence to generate
    :param temperature: This controls how much we follow the probabilities provided by the network. For t=1.0 we just
        sample directly according to the probabilities. Lower temperatures make the high-probability words more likely
        (providing more likely, but slightly boring sentences) and higher temperatures make the lower probabilities more
        likely (resulting in weirder sentences). For temperature=0.0, the generation is _greedy_, i.e. the word with the
        highest probability is always chosen.
    :return: A list of integers representing a sampled sentence
    """
    ls = seed.shape[0]

    # Due to the way Keras RNNs work, we feed the model a complete sequence each time. At first it's just the seed,
    # zero-padded to the right length. With each iteration we sample and set the next character.

    tokens = np.concatenate([seed, np.zeros(size - ls)])

    # convert the integer sequence to a categorical one
    toh = util.to_categorical(tokens[None, :], numchars)

    for i in range(ls, size-1):

        # predict next characters (for the whole sequence)
        probs = model.predict(toh)

        # Extract the i-th probability vector and sample an index from it
        next_token = util.sample(probs[0, i-1, :], temperature)

        tokens[i] = next_token

        # update the one-hot encoding
        toh[0, i, 0] = 0
        toh[0, i, next_token] = 1

    return [int(t) for t in tokens]
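
The util.sample helper called above is not shown on this page. Below is a minimal sketch of what a temperature-based sampler could look like, assuming it re-weights the probability vector with a softmax at the given temperature and falls back to greedy argmax decoding for temperature=0.0 (an assumption based on the docstring, not the repository's actual implementation):

import numpy as np

def sample(probs, temperature=1.0):
    # Greedy decoding: for temperature 0.0, always return the most likely index.
    if temperature == 0.0:
        return int(np.argmax(probs))

    # Re-weight the distribution: take logs, scale by 1/temperature, renormalize with a softmax.
    logits = np.log(probs + 1e-10) / temperature
    p = np.exp(logits - np.max(logits))
    p /= p.sum()

    # Draw a single index according to the re-weighted probabilities.
    return int(np.random.choice(len(p), p=p))

With temperature=1.0 this reduces (up to the small epsilon) to sampling directly from the network's predicted distribution, as the docstring describes.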
Example No. 4
# Brief: CNN network structure

import csv
import os

import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn

import config
import data_helpers
from util import to_categorical

if config.eval_all_train_data:
    x_raw, y = data_helpers.load_data_labels(config.data_dir)
    y = to_categorical(y)
    y = np.argmax(y, axis=1)
elif config.infer_data_path:
    infer_datas = list(
        open(config.infer_data_path, "r", encoding="utf-8").readlines())
    infer_datas = [s.strip() for s in infer_datas]
    x_raw = data_helpers.load_infer_data(infer_datas)
    y = []
else:
    x_raw = data_helpers.load_infer_data([
        "do you think it is right.", "everything is off.", "i hate you .",
        "it is a bad film.", "good man and bad person.",
        "价格不是最便宜的,招商还是浦发银行是238*12=2856.00人家还可以分期的。",
        u"驱动还有系统要自装,还有显卡太鸡巴低了.还有装系统太麻烦了"
    ])
    y = [1, 0, 0, 0, 1, 0, 1]
Example No. 5
def go(options):
    lstm_hidden = options.lstm_capacity

    print('devices', device_lib.list_local_devices())

    if options.task == 'europarl':

        dir = options.data_dir
        x, numchars, char_to_ix, ix_to_char = \
            util.load_char_data(dir+os.sep+'europarl-v8.fi-en.en', limit=options.limit, length=options.sequence_length)

        x_max_len = max([len(sentence) for sentence in x])

        print('max sequence length ', x_max_len)
        print(len(ix_to_char), ' distinct characters')

        # x = util.batch_pad(x, options.batch)
        x = sequence.pad_sequences(x, x_max_len, padding='post', truncating='post')

        def decode(seq):
            return ''.join(ix_to_char[id] for id in seq)

    else:
        raise Exception('Dataset name not recognized.')

    print('Data Loaded.')

    # print(sum([b.shape[0] for b in x]), ' sentences loaded')
    #
    # for i in range(3):
    #     batch = random.choice(x)
    #     print(batch[0, :])
    #     print(decode(batch[0, :]))

    ## Define model
    input = Input(shape=(None, numchars))

    h = LSTM(lstm_hidden, return_sequences=True)(input)

    if options.extra is not None:
        for _ in range(options.extra):
            h = LSTM(lstm_hidden, return_sequences=True)(h)

    out = TimeDistributed(Dense(numchars, activation='softmax'))(h)

    model = Model(input, out)

    opt = keras.optimizers.Adam(lr=options.lr)

    model.compile(opt, 'categorical_crossentropy')
    model.summary()

    epochs = 0

    n = x.shape[0]

    x_shifted = np.concatenate([np.ones((n, 1)), x], axis=1)  # prepend start symbol
    x_shifted = util.to_categorical(x_shifted, numchars)

    x_out = np.concatenate([x, np.zeros((n, 1))], axis=1)  # append pad symbol
    x_out = util.to_categorical(x_out, numchars)  # output to one-hots

    def generate():
        for i in range(CHECK):
            b = random.randint(0, n - 1)

            seed = x[b, :20]
            seed = np.insert(seed, 0, 1)
            gen = generate_seq(model, seed, numchars, 120)

            print('seed   ', decode(seed))
            print('out    ', decode(gen))

            print()

    # Train the model
    generate_stuff = keras.callbacks.LambdaCallback(
        on_epoch_end=lambda epoch, logs: generate())

    model.fit(x_shifted, x_out, epochs=options.epochs, batch_size=64, callbacks=[generate_stuff])
Example No. 6
import layer
import model  # needed below for model.Sequence()
import optimizers
import pickle
import util
import numpy
import matplotlib.pyplot as plt

train_set, val_set, test_set = pickle.load(open("mnist.pkl", "rb"),
                                           encoding='latin1')

model = model.Sequence()
model.add(layer.Dense(300, input_dim=28 * 28, activation="Relu"))
#model.add(layer.Dense(300, activation="Relu"))
model.add(layer.Dense(10))

train_y = util.to_categorical(train_set[1])
idx = numpy.random.choice(train_set[0].shape[0], 50000)
train_set = train_set[0][idx]
train_y = train_y[idx]

model.init()
model.fit(input_data=train_set, output_data=train_y, epoch=500, batch_num=10)
model.compile(optimizer=optimizers.SGD(model, 0.1), loss="Mean_squared_error")
model.train()

id = 0
rightnum = 0
for now in val_set[0]:
    # plt.imshow(numpy.reshape(now,(28,28)))
    # plt.show()
    ans = val_set[1][id]
Example No. 7
def process_y(y):
    return util.to_categorical(y)
Example No. 8
def go(options):

    if options.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
        np.random.seed(seed)
    else:
        np.random.seed(options.seed)


    ## Load the data
    if options.task == 'alice':

        dir = options.data_dir
        x, char_to_ix, ix_to_char = \
            util.load_characters(util.DIR + '/datasets/alice.txt', limit=options.limit, length=options.sequence_length)

        x_max_len = max([len(sentence) for sentence in x])
        numchars = len(ix_to_char)
        print(numchars, ' distinct characters found')

        x = sequence.pad_sequences(x, x_max_len, padding='post', truncating='post')

    elif options.task == 'shakespeare':

        dir = options.data_dir
        x, char_to_ix, ix_to_char = \
            util.load_characters(util.DIR + '/datasets/shakespeare.txt', limit=options.limit, length=options.sequence_length)

        x_max_len = max([len(sentence) for sentence in x])
        numchars = len(ix_to_char)
        print(numchars, ' distinct characters found')

        x = sequence.pad_sequences(x, x_max_len, padding='post', truncating='post')

    elif options.task == 'file':

        dir = options.data_dir
        x, char_to_ix, ix_to_char = \
            util.load_characters(options.data_dir, limit=options.limit, length=options.sequence_length)

        x_max_len = max([len(sentence) for sentence in x])
        numchars = len(ix_to_char)
        print(numchars, ' distinct characters found')

        x = sequence.pad_sequences(x, x_max_len, padding='post', truncating='post')

    else:
        raise Exception('Dataset name ({}) not recognized.'.format(options.task))

    def decode(seq):
        return ''.join(ix_to_char[id] for id in seq)

    print('Data Loaded.')

    ## Shape the data. The inputs get a start symbol (1) prepended. We shorten the sequences by one so that the lengths
    #  match
    n = x.shape[0]

    x_in  = np.concatenate([np.ones((n, 1)), x[:, :-1]], axis=1)  # prepend start symbol
    x_out = x
    assert x_in.shape == x_out.shape
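
    #  Illustration with a toy (made-up) sequence of length 5:
    #      x     = [12,  5,  9,  0,  0]
    #      x_in  = [ 1, 12,  5,  9,  0]   (start symbol prepended, last element dropped)
    #      x_out = [12,  5,  9,  0,  0]   (at timestep t the model reads x_in[t] and should predict x_out[t])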

    #  convert from integer sequences to sequences of one-hot vectors
    x_in = util.to_categorical(x_in, numchars)
    x_out = util.to_categorical(x_out, numchars)  # output to one-hots

    ## Define the model

    input = Input(shape=(None, numchars))
    #- We define the model as variable-length (even though all training data has fixed length). This allows us to generate
    #  longer sequences during inference.

    h = LSTM(options.lstm_capacity, return_sequences=True)(input)

    if options.extra is not None:
        for _ in range(options.extra):
            h = LSTM(options.lstm_capacity, return_sequences=True)(h)

    #  Apply a single dense layer to all timesteps of the resulting sequence to convert back to characters
    out = TimeDistributed(Dense(numchars, activation='softmax'))(h)

    model = Model(input, out)

    opt = keras.optimizers.Adam(lr=options.lr)

    model.compile(opt, 'categorical_crossentropy')
    #- For each timestep the model outputs a probability distribution over all characters. Categorical crossentropy means
    #  that we try to maximize the log-probability of the correct character (averaged over all
    #  characters in all sequences).
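    #  Written out (an illustrative restatement): for a sequence c_1..c_T the per-sequence loss is
    #  -(1/T) * sum_t log p(c_t | c_1, ..., c_{t-1}), averaged over all sequences in the batch.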

    model.summary()

    ## Create callback to generate some samples after each epoch

    def generate(epoch):
        if epoch % options.out_every == 0 and epoch > 0:
            for i in range(CHECK):
                b = random.randint(0, n - 1)

                seed = x[b, :20]
                seed = np.insert(seed, 0, 1)
                gen = generate_seq(model, seed, numchars, options.gen_length)

                print('*** [', decode(seed), '] ', decode(gen[len(seed):]))
                print()

    # Train the model
    generate_stuff = keras.callbacks.LambdaCallback(
        on_epoch_end=lambda epoch, logs: generate(epoch))

    model.fit(x_in, x_out,
              validation_split=1/10,
              epochs=options.epochs, batch_size=options.batch,
              callbacks=[generate_stuff])
Example No. 9
from layers import Conv2D, Dense, Dropout, Activation, Flatten
import matplotlib.pyplot as plt
from sklearn import datasets                          # needed below for datasets.load_digits()
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split  # needed below for train_test_split()

# Data Processing
# mnist = fetch_mldata('MNIST original')
# X = mnist.data
# y = mnist.target

# X = (X.astype(np.float32) - 127.5) / 127.5

dataset = datasets.load_digits()
X = dataset.data
y = dataset.target

y = to_categorical(y.astype("int"))

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# print(X_train.shape)
X_train = X_train[:256]
y_train = y_train[:256]
X_train = X_train.reshape((-1, 1, 8, 8))
X_test = X_test.reshape((-1, 1, 8, 8))
X_test = X_train[:256]   # note: this overwrites the test split with the first 256 training samples
y_test = y_train[:256]

# Model
model = NeuralNetwork(SquareLoss(), (X_test, y_test))
model.add(
    Conv2D(16,
           filter_shape=(3, 3),
Example No. 10
print('id: ', eid)
print('num of epochs:', n_epochs)

#### Load imdb data ####
imdb = np.load('../data/imdb_word_emb.npz')
X_train = imdb['X_train']  #(25000, 80, 128)
Y_train = imdb['y_train']  #(25000, ) => 0 or 1
X_test = imdb['X_test']
Y_test = imdb['y_test']
n_samples = X_train.shape[0]
n_time_steps = X_train.shape[1]
n_input = X_train.shape[2]
n_classes = 2
n_iters = int(n_epochs * n_samples / batch_size)
# transform to one-hot
Y_train = to_categorical(Y_train, 2)
Y_test = to_categorical(Y_test, 2)
# Convert to Dataset instance
train_dataset = Dataset(X_train, Y_train, batch_size)


#### Define RNN Models ####
def RNN(x_sequence, n_hidden):
    state = tf.Variable(tf.zeros([batch_size, n_hidden]))
    U = tf.Variable(tf.random_normal([n_input, n_hidden], stddev=1))
    W = tf.Variable(tf.random_normal([n_hidden, n_hidden], stddev=1))
    hidden_bias = tf.Variable(
        tf.random_normal([batch_size, n_hidden], stddev=1))
    # Unroll timesteps
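    # (vanilla RNN recurrence computed below for each step: state_t = tanh(x_t·U + state_{t-1}·W + bias))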
    for x in x_sequence:
        state = tf.tanh(tf.matmul(x, U) + tf.matmul(state, W) + hidden_bias)