Code Example #1
def main(_):
    np.random.seed(3)  # fix the seed so every run produces the same random numbers
    TIME_STEPS = FLAGS.N
    INPUT_SIZE = 625
    BATCH_SIZE = 30
    BATCH_INDEX = 0
    OUTPUT_SIZE = 2
    CELL_SIZE = 175
    LR = 0.001

    totalData = []
    totalDataLabel = []
    counter = 0
    totalDoc = 0
    totalpost = 0
    tdlist1 = 0
    Pos = 0
    Neg = 0
    maxpost = 0
    minpost = 62827

    thulac_pip = thulac.thulac(seg_only=True)  # word segmentation only, no part-of-speech tagging
    EventList = GetEventList()

    print("Processing data with N = ", TIME_STEPS, " ...")
    for event in EventList:
        totalDoc += 1
        Eid = event["eid"]
        Label = event["label"]
        # print("Eid : ", Eid, "Label: ", Label)
        WeiboPostIdList = event["posts"]
        if len(WeiboPostIdList) == 1:
            tdlist1 += 1
            continue
        if len(WeiboPostIdList) >= maxpost:
            maxpost = len(WeiboPostIdList)
        if len(WeiboPostIdList) <= minpost:
            minpost = len(WeiboPostIdList)

        event_file_path = os.path.join(Weibo_Json_Dir, Eid + ".json")
        event_file = open(event_file_path, "r")
        event_json = json.load(event_file)

        WeiboPostList = []
        index = 0
        for WeiboPostId in WeiboPostIdList:
            totalpost += 1
            WeiboJson = event_json[index]
            index += 1
            WeiboText = WeiboJson["text"]
            Time = WeiboJson["t"]
            WeiboPost = {"text": WeiboText, "time": Time}
            WeiboPostList.append(WeiboPost)
        if Label == "0":
            Pos += 1
        else:
            Neg += 1
        #Sort by time
        WeiboPostList = sorted(WeiboPostList, key=lambda k: k['time'])

        # find the time interval of the weibo posts
        TotalTimeLine = WeiboPostList[-1]['time'] - WeiboPostList[0]['time']
        IntervalTime = TotalTimeLine / TIME_STEPS
        k = 0
        PreConInt = []
        while True:
            k += 1
            WeiboIndex = 0
            output = []
            if TotalTimeLine == 0:
                for weibo in WeiboPostList:
                    weibo_text = thulac_pip.cut(weibo["text"], text=True)
                    output.append(weibo_text)
                break
            Start = WeiboPostList[0]['time']
            Interval = int(TotalTimeLine / IntervalTime)
            Intset = []
            for inter in range(0, Interval):
                empty = 0
                interval = []
                for q in range(WeiboIndex, len(WeiboPostList)):
                    if WeiboPostList[q]['time'] >= Start and WeiboPostList[q][
                            'time'] < Start + IntervalTime:
                        empty += 1
                        weibo_text = thulac_pip.cut(WeiboPostList[q]["text"],
                                                    text=True)
                        interval.append(weibo_text)
                    # record the position of the first weibo beyond this interval so the next pass can start from it
                    elif WeiboPostList[q]['time'] >= Start + IntervalTime:
                        WeiboIndex = q - 1
                        break
                # empty interval
                if empty == 0:
                    output.append([])
                else:
                    #add the last weibo
                    if WeiboPostList[-1]['time'] == Start + IntervalTime:
                        weibo_text = thulac_pip.cut(WeiboPostList[-1]["text"],
                                                    text=True)
                        interval.append(weibo_text)
                    Intset.append(inter)
                    output.append(interval)
                Start = Start + IntervalTime
            ConInt = ContinuousInterval(Intset)
            if len(ConInt) < TIME_STEPS and len(ConInt) > len(PreConInt):
                IntervalTime = int(IntervalTime * 0.5)
                PreConInt = ConInt
                if IntervalTime == 0:
                    output = output[ConInt[0]:ConInt[-1] + 1]
                    break
            else:
                # print(len(ConInt))
                output = output[ConInt[0]:ConInt[-1] + 1]
                break
        counter += 1
        event_file.close()
        # print (counter)
        # concatenate all the text within each interval
        for q in range(0, len(output)):
            output[q] = ''.join(s for s in output[q])

        try:
            # calculate TF-IDF
            vectorizer = CountVectorizer()
            transformer = TfidfTransformer()
            #print(output)
            tf = vectorizer.fit_transform(output)
            tfidf = transformer.fit_transform(tf)
            # Debug
            # print(tfidf.toarray())
            Allvocabulary = vectorizer.get_feature_names()
        except ValueError:
            BlackList.append(Eid)
            continue

        # print(vectorizer.get_feature_names())
        Input = []

        for interval in tfidf.toarray():
            interval = sorted(interval, reverse=True)
            while len(interval) < INPUT_SIZE:
                interval.append(0.0)
            Input.append(interval[:INPUT_SIZE])
        if len(Input) < TIME_STEPS:
            for q in range(0, TIME_STEPS - len(Input)):
                Input.insert(0, [0.0] * INPUT_SIZE)
        totalData.append(Input[:TIME_STEPS])
        totalDataLabel.append(Label)

    print("Processing data with N = ", TIME_STEPS, " done")
    print("\t totalDoc : " + str(totalDoc))
    print("\t tdlist1 : " + str(tdlist1))
    print("\t Pos : " + str(Pos))
    print("\t Neg : " + str(Neg))
    print("\t totalpost : " + str(totalpost))
    print("\t maxpost : " + str(maxpost))
    print("\t minpost : " + str(minpost))

    X_train = np.array(totalData[:int(counter / 5 * 4)])
    y_train = np.array(totalDataLabel[:int(counter / 5 * 4)])

    # for q in X_train:
    # 	print(q)
    # for q in y_train:
    # 	print(q)

    print(X_train.shape)
    X_test = np.array(totalData[int(counter / 5 * 4):])
    y_test = np.array(totalDataLabel[int(counter / 5 * 4):])
    print(X_test.shape)

    ### add array(len)
    ### find the nodes actually used for classification; needed for TensorFlow

    #RNN
    X_train = X_train.reshape(-1, TIME_STEPS, INPUT_SIZE)  # reshape to (samples, time steps, features)
    X_test = X_test.reshape(-1, TIME_STEPS, INPUT_SIZE)
    y_train = np_utils.to_categorical(y_train, num_classes=2)
    y_test = np_utils.to_categorical(y_test, num_classes=2)
    print(y_train.shape)
    model = Sequential()

    #RNN cell
    model.add(SimpleRNN(CELL_SIZE, input_shape=(TIME_STEPS, INPUT_SIZE)))

    #output layer
    model.add(Dense(OUTPUT_SIZE))
    model.add(Activation('softmax'))

    # optimizer
    Adagrad = keras.optimizers.Adagrad(LR)

    model.compile(optimizer=Adagrad,
                  loss='mean_squared_error',
                  metrics=['accuracy'])

    #train
    print("Training---------")

    model.fit(X_train, y_train, epochs=8000, batch_size=BATCH_SIZE)

    print("\nTesting---------")
    loss_and_metrics = model.evaluate(X_test,
                                      y_test,
                                      batch_size=y_test.shape[0],
                                      verbose=False)
    print('test loss: ', loss_and_metrics[0])
    print('test accuracy: ', loss_and_metrics[1])
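The CountVectorizer + TfidfTransformer pair used above can also be expressed with scikit-learn's TfidfVectorizer. A minimal sketch on toy data (the two strings below are hypothetical stand-ins for the per-interval texts; get_feature_names_out needs scikit-learn >= 1.0):

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["rumor spreads fast", "official account denies rumor"]  # hypothetical interval texts
vec = TfidfVectorizer()               # equivalent to CountVectorizer followed by TfidfTransformer
tfidf = vec.fit_transform(docs)
print(vec.get_feature_names_out())    # vocabulary terms
print(tfidf.toarray())                # one TF-IDF row per interval/document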
Code Example #2
max_len = 7
X_data = pad_sequences(X_data, maxlen=max_len)  # pad so every sequence in X_data has length 7
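# For example (hypothetical input; pad_sequences pads on the left by default):
#   pad_sequences([[5, 2], [1, 2, 3, 4]], maxlen=7)
#   -> [[0 0 0 0 0 5 2]
#       [0 0 0 1 2 3 4]]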

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data,
                                                    y_data,
                                                    test_size=0.2,
                                                    random_state=42)

from keras.layers import SimpleRNN, Dense, Embedding
from keras.models import Sequential

# build the model
model = Sequential()
model.add(Embedding(voca_size, 10))  # embedding vector dimension = 10
model.add(SimpleRNN(128))  # SimpleRNN layer with 128 units
model.add(Dense(32))
model.add(Dense(y_data.shape[1], activation='softmax'))

from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='loss', patience=10, mode='auto')

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])
hist = model.fit(X_train,
                 y_train,
                 epochs=500,
                 batch_size=2,
                 validation_split=0.1,
                 callbacks=[early_stop])
Code Example #3
File: rnn_functional.py  Project: firebitsbr/sciann
    def __init__(self,
                 fields=None,
                 variables=None,
                 hidden_layers=None,
                 rnn_type="SimpleRNN",
                 activation="tanh",
                 recurrent_activation=None,
                 enrichment="linear",
                 kernel_initializer=default_kernel_initializer(),
                 bias_initializer=default_bias_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 dtype=None,
                 trainable=True,
                 **kwargs):
        # check data-type.
        if dtype is None:
            dtype = K.floatx()
        elif not K.floatx() == dtype:
            K.set_floatx(dtype)
        # check for copy constructor.
        if all([x in kwargs for x in ('inputs', 'outputs', 'layers')]):
            self._inputs = kwargs['inputs'].copy()
            self._outputs = kwargs['outputs'].copy()
            self._layers = kwargs['layers'].copy()
            return
        # prepare regularizers.
        kernel_regularizer = default_regularizer(kernel_regularizer)
        bias_regularizer = default_regularizer(bias_regularizer)
        # prepares fields.
        fields = to_list(fields)
        if all([isinstance(fld, str) for fld in fields]):
            outputs = [
                RNNField(
                    name=fld,
                    dtype=dtype,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                )
                for fld in fields
            ]
        elif all([validations.is_field(fld) for fld in fields]):
            outputs = fields
        else:
            raise TypeError(
                'Please provide a "list" of field names of'
                + ' type "String" or "Field" objects.'
            )
        # prepare inputs/outputs/layers.
        inputs = []
        layers = []
        variables = to_list(variables)
        if all([isinstance(var, RNNFunctional) for var in variables]):
            for var in variables:
                inputs += var.outputs
            for var in variables:
                for lay in var.layers:
                    layers.append(lay)
        else:
            raise TypeError(
                "Input error: Please provide a `list` of `Functional`s. \n"
                "Provided - {}".format(variables)
            )
        # prepare hidden layers.
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # Check and convert activation functions to proper format.
        assert not isinstance(activation, list), \
            'Expected an activation function name not a "list". '
        afunc = get_activation(activation)

        # check enrichment functions.
        enrichment = to_list(enrichment)
        efuncs = get_activation(enrichment)

        # Input layers.
        if len(inputs) == 1:
            net_input = inputs[0]
        else:
            layer = Concatenate()
            layer.name = "conct_" + layer.name.split("_")[-1]
            net_input = layer(inputs)

        # Define the output network.
        net = []
        for enrich in efuncs:
            net.append(net_input)
            for nLay, nNeuron in enumerate(hidden_layers):
                # Add the layer.
                if rnn_type=='LSTM':
                    layer = LSTM(
                        nNeuron,
                        return_sequences=True,
                        recurrent_activation=recurrent_activation,
                        kernel_initializer=kernel_initializer,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer,
                        trainable=trainable,
                        dtype=dtype,
                        unroll=True,
                    )
                elif rnn_type=='SimpleRNN':
                    layer = SimpleRNN(
                        nNeuron,
                        return_sequences=True,
                        kernel_initializer=kernel_initializer,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer,
                        trainable=trainable,
                        dtype=dtype,
                        unroll=True,
                    )
                else:
                    raise ValueError(
                        'Invalid entry for `rnn_type` -- '
                        'accepts from (`SimpleRNN`, `LSTM`).'
                    )
                layer.name = "D{:d}b_".format(nNeuron) + layer.name.split("_")[-1]
                layers.append(layer)
                net[-1] = layer(net[-1])
                # Apply the activation.
                if nLay<len(hidden_layers)-1 and afunc.__name__ != 'linear':
                    layer = Activation(afunc)
                    layer.name = "{}_".format(afunc.__name__) + layer.name.split("_")[-1]
                    layers.append(layer)
                    net[-1] = layer(net[-1])

            # add the activations.
            if enrich.__name__ != 'linear':
                layer = Activation(enrich)
                layer.name = "{}_".format(enrich.__name__) + layer.name.split("_")[-1]
                layers.append(layer)
                net[-1] = layer(net[-1])

        # store output layers.
        for out in outputs:
            layers.append(out)

        # Assign to the output variable
        if len(net) == 1:
            net_output = net[0]
        else:
            layer = Concatenate()
            layer.name = "conct_" + layer.name.split("_")[-1]
            net_output = layer(net)

        # Define the final outputs of each network
        outputs = [out(net_output) for out in outputs]

        self._inputs = inputs
        self._outputs = outputs
        self._layers = layers
Code Example #4
word2vec_model = gensim.models.Word2Vec.load('../word2vec/word2vec.model')
embedding_weights = numpy.zeros((nb_words, embedding_dim))

for word, index in dictionary.items():
    if word in word2vec_model:
        embedding_weights[index, :] = word2vec_model[word]
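# Note: with gensim >= 4.0 the vector lookup lives on the KeyedVectors object, e.g.
#   if word in word2vec_model.wv:
#       embedding_weights[index, :] = word2vec_model.wv[word]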

model = Sequential()
model.add(
    Embedding(nb_words,
              embedding_dim,
              input_length=max_length,
              mask_zero=True,
              weights=[embedding_weights]))
model.add(
    SimpleRNN(128, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))
model.add(TimeDistributed(Dense(encoded_Y.shape[2], activation='softmax')))

optimiser = Nadam(lr=0.002,
                  beta_1=0.9,
                  beta_2=0.999,
                  epsilon=1e-08,
                  schedule_decay=0.004)
model.compile(loss='categorical_crossentropy', optimizer=optimiser)

# plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)

print(model.summary())
print(model.get_config())

# early_stopping_monitor = EarlyStopping(monitor='loss', patience=5)
Code Example #5
File: mnist_rnn.py  Project: Anushka1610/applied-ai
x_test = x_test.reshape(-1, 28, 28)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Converts class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, classes)
y_test = keras.utils.to_categorical(y_test, classes)
model=Sequential()

#RNN Model
model.add(SimpleRNN(128,input_shape=(28,28)))
model.add(Dense(classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
            optimizer='rmsprop',
            metrics=['accuracy'])

# Training.
model.fit(x_train, y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(x_test, y_test))

# Evaluation.
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
Code Example #6
OUTPUT_SIZE = 10
CELL_SIZE = 50
LR = 0.001

(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(-1, 28, 28) / 255
X_test = X_test.reshape(-1, 28, 28) / 255
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

model = Sequential()

# RNN cell
model.add(
    SimpleRNN(batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),
              units=CELL_SIZE))
# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
for step in range(4001):
    X_batch = X_train[BATCH_INDEX:BATCH_SIZE + BATCH_INDEX, :, :]
    Y_batch = y_train[BATCH_INDEX:BATCH_SIZE + BATCH_INDEX, :]
    cost = model.train_on_batch(X_batch, Y_batch)
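    # (The example is cut off here; a sketch of the usual continuation of this
    #  training pattern: advance the batch index and evaluate periodically.)
    BATCH_INDEX += BATCH_SIZE
    BATCH_INDEX = 0 if BATCH_INDEX >= X_train.shape[0] else BATCH_INDEX
    if step % 500 == 0:
        cost, accuracy = model.evaluate(X_test, y_test,
                                        batch_size=y_test.shape[0], verbose=False)
        print('test cost: ', cost, 'test accuracy: ', accuracy)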
Code Example #7
from keras.layers import Dense, Dropout, Embedding, SimpleRNN

seed = 10
np.random.seed(seed)  # set the random seed
# load the IMDb dataset
top_words = 1000
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=top_words)
# data preprocessing
max_words = 100
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
# define the model
model = Sequential()
model.add(Embedding(top_words, 32, input_length=max_words))
model.add(Dropout(0.25))
model.add(SimpleRNN(32))
model.add(Dropout(0.25))
model.add(Dense(1, activation="sigmoid"))
model.summary()  # print the model summary
# compile the model
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])
# train the model
history = model.fit(X_train,
                    Y_train,
                    validation_split=0.2,
                    epochs=5,
                    batch_size=128,
                    verbose=2)
# evaluate the model
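The snippet ends at the evaluation comment; a minimal sketch of that final step:

loss, accuracy = model.evaluate(X_test, Y_test, verbose=0)
print("Test accuracy: {:.4f}".format(accuracy))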
Code Example #8
# X shape (60,000 28x28), y shape (10,000, )
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255.  # normalize
X_test = X_test.reshape(-1, 28, 28) / 255.  # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# build RNN model
model = Sequential()

#RNN cell
model.add(
    SimpleRNN(
        batch_input_shape=(BATCH_SIZE, TIME_STEPS, INPUT_SIZE),
        output_dim=CELL_SIZE,
    ))

#output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
for step in range(4001):
    # data shape = (batch_num, steps, inputs/outputs)
Code Example #9
print "x shape:", trainX.shape
print "y.shape:", trainY.shape

# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# create and fit the LSTM network
num_units = 2
layer_name = 'rnn'
model = Sequential()
# model.add(LSTM(num_units, return_sequences=True, input_shape=(1, look_back)))
# model.add(LSTM(num_units))
# model.add(LSTM(num_units, input_shape=(1, look_back)))
# model.add(GRU(num_units, input_shape=(1, look_back)))
model.add(SimpleRNN(num_units, input_shape=(1, look_back)))

model.add(Dense(1))
adam = keras.optimizers.Adam(lr=0.001)
model.compile(loss='mean_squared_error', optimizer=adam)

loss_hist = np.zeros((30, 2))

t1 = datetime.now()

for i in range(30):
    model.fit(trainX, trainY, epochs=1, batch_size=1, verbose=2)

    # make predictions
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
Code Example #10
print('# Test Data = ', len(x_test))

# Data Preprocessing
print('Preprocessing Data..')
x_train = sequence.pad_sequences(x_train, maxlen=max_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_length)

# Design Neural Network Architecture with SimpleRNN
print('Building Simple RNN Model..')

RNN_model = Sequential()
# Add Embedding layer
RNN_model.add(Embedding(max_features, embedding_size, input_length=max_length))
RNN_model.add(Dropout(dropout_rate))
# Add Simple RNN layer
RNN_model.add(SimpleRNN(25))  # 25 units; the input shape is inferred from the Embedding layer above
# Add Dense Hidden Layer
RNN_model.add(Dense(hidden_layer_size, activation='relu'))
RNN_model.add(Dropout(dropout_rate))
# Output Layer
RNN_model.add(Dense(no_classes, activation='sigmoid'))

# Configure model
RNN_model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

# TensorBoard
tensorboard = TensorBoard('./logs/SimpleRNN')

# Train!
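The example is cut off at the training comment; a minimal sketch of the fit call, assuming batch_size, epochs and the train/test arrays are defined earlier in the script:

RNN_model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[tensorboard])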
Code Example #11
def main(rnn_model):
    def message_to_array(msg):
        msg = msg.lower().split(' ')
        test_seq = np.array([word_index[word] for word in msg])

        test_seq = np.pad(test_seq, (500-len(test_seq), 0), 'constant', constant_values=(0))
        test_seq = test_seq.reshape(1, 500)
        
        return test_seq

    data = pd.read_csv("./spam_text_message_data.csv")
    print(data.head())
    print(data.tail())

    messages = []
    labels = []
    for index, row in data.iterrows():
        messages.append(row['Message'])
        if row['Category'] == 'ham':
            labels.append(0)
        else:
            labels.append(1)

    messages = np.asarray(messages)
    labels = np.asarray(labels)

   # print("Number of messages: ", len(messages))
    #print("Number of labels: ", len(labels))

    max_vocab = 10000
    max_len = 500

    # Ignore all words except the 10000 most common words
    tokenizer = Tokenizer(num_words=max_vocab)
    # Calculate the frequency of words
    tokenizer.fit_on_texts(messages)
    # Convert array of messages to list of sequences of integers
    sequences = tokenizer.texts_to_sequences(messages)

    # Dict keeping track of words to integer index
    word_index = tokenizer.word_index

    # Convert the array of sequences(of integers) to 2D array with padding
    # maxlen specifies the maximum length of sequence (truncated if longer, padded if shorter)
    data = pad_sequences(sequences, maxlen=max_len)

    print("data shape: ", data.shape)

    # We will use 80% of data for training & validation(80% train, 20% validation) and 20% for testing
    train_samples = int(len(messages)*0.8)

    messages_train = data[:train_samples]
    labels_train = labels[:train_samples]

    messages_test = data[train_samples:len(messages)-2]
    labels_test = labels[train_samples:len(messages)-2]

    embedding_mat_columns=40
    # Construct the SimpleRNN model
    model = Sequential()
    ## Add embedding layer to convert integer encoding to word embeddings(the model learns the
    ## embedding matrix during training), embedding matrix has max_vocab as no. of rows and chosen
    ## no. of columns 
    model.add(Embedding(input_dim=max_vocab, output_dim=embedding_mat_columns, input_length=max_len))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 5, activation='sigmoid'))
    model.add(MaxPooling1D(pool_size=4))
    
    if rnn_model == 'SimpleRNN':
        model.add(SimpleRNN(units=embedding_mat_columns))
    elif rnn_model == 'LSTM':
        model.add(LSTM(units=embedding_mat_columns))
    else:
        model.add(GRU(units=embedding_mat_columns))
    model.add(Dense(1, activation='sigmoid'))
    
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
    model.summary()

    #plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
    # Training the model
    model.fit(messages_train, labels_train, epochs=1, batch_size=60, validation_split=0.2)

    # Testing the model
    pred = model.predict_classes(messages_test)
    acc = model.evaluate(messages_test, labels_test)
      
    print("Test loss is {0:.2f} accuracy is {1:.2f}  ".format(acc[0],acc[1]))
    
# =============================================================================
#     print(pred)
#     print(labels_test)
# =============================================================================
    
    print("precision is about ", metrics.precision_score(labels_test, pred,average="macro"))
    print("recall is about ", metrics.recall_score(labels_test, pred, average="macro")) 
    print("f-score is about ", metrics.f1_score(labels_test, pred, average="macro"))
    print(metrics.confusion_matrix(labels_test, pred))
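A hypothetical entry point for running the function above:

if __name__ == '__main__':
    main('SimpleRNN')  # or 'LSTM' / 'GRU'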
Code Example #12
#set parameters
max_features = 7000
embedding_size = 50
maxlen = 400

batch_size = 64
epochs = 4

print('Build the RNN model...')
model2 = Sequential()

# Embedding layer
model2.add(Embedding(max_features, embedding_size, input_length=maxlen))
model2.add(Dropout(0.35))

model2.add(SimpleRNN(units=16))

model2.add(Dense(units=256, activation='relu'))
model2.add(Dropout(0.35))
model2.add(Dense(units=1, activation='sigmoid'))

model2.compile(loss='binary_crossentropy',
               optimizer='adam',
               metrics=['accuracy'])

model2.summary()

# In[21]:

hist2 = model2.fit(x_train,
                   y_train,
Code Example #13
    #y_train_enc = np_utils.to_categorical(sentiment_train,)
    #le = preprocessing.LabelEncoder()
    #le.fit(sentiment_train)
    #v = le.transform(sentiment_train)
    #print(v)
    #y_train_enc = np_utils.to_categorical(v)

    #le1 = preprocessing.LabelEncoder()
    #le1.fit(sentiment_test)
    #v1 = le1.transform(sentiment_test)

    # SimpleRNN
    print("fitting SimpleRNN ...")
    model = Sequential()
    model.add(Embedding(dictionary_size, 256, dropout=0.2))
    model.add(SimpleRNN(256, dropout_W=0.2, dropout_U=0.2))
    model.add(Dense(num_labels))
    model.add(Activation('softmax'))
    model.load_weights("logs1/rnn_model.hdf5")
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    checkpointer = callbacks.ModelCheckpoint(
        filepath="logs2/checkpoint-{epoch:02d}.hdf5",
        verbose=1,
        save_best_only=True,
        monitor='loss')
    csv_logger = CSVLogger('logs1/training_set_iranalysis1.csv',
                           separator=',',
                           append=False)
Code Example #14
input_shape = (time_steps, feature )
input_length = timesteps
input_dim = feature
                 x      | y
            ---------------- 
batch_size   1   2   3  | 4     : feature = how many of the values in one row of x are taken at a time
             2   3   4  | 5       ex) feature 1 : [1], [2], [3]
             3   4   5  | 6       ex) feature 3 : [1, 2, 3]
             4   5   6  | 7 
              time_step
'''

#2. build the model
model = Sequential()
# model.add(LSTM(10, activation='relu', input_shape = (3, 1)))
model.add(SimpleRNN(800, input_length=3,
                    input_dim=1))  # input_length : time_step (number of columns)
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(1))

model.summary()
'''
Calculating LSTM parameters
num_params = 4 * ( num_units   +   input_dim   +   1 )  *  num_units
                 (output units)    (input dim)    (bias)   (output units)
           = 4 * (    5      +       1       +   1 )  *     5          = 140
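For the SimpleRNN(800, input_dim=1) layer built above, the analogous count (a sketch; a SimpleRNN has a single gate, so there is no factor of 4) would be:

num_params = ( num_units + input_dim + 1 ) * num_units
           = (    800    +     1     + 1 ) *    800     = 641,600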
Code Example #15
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255.  # normalize
X_test = X_test.reshape(-1, 28, 28) / 255.  # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# build RNN model
model = Sequential()

# RNN cell
model.add(SimpleRNN(
    # for batch_input_shape, if using tensorflow as the backend, we have to put None for the batch_size.
    # Otherwise, model.evaluate() will get error.
    batch_input_shape=(None, TIME_STEPS, INPUT_SIZE),  # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
    output_dim=CELL_SIZE,
    unroll=True,
))

# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
Code Example #16
File: models.py  Project: maxblu/Linda-Speech
def brnn(units, input_dim=26, output_dim=29, dropout=0.2, numb_of_dense=3, n_layers=1):
    """
    :param units: Hidden units per layer
    :param input_dim: Size of input dimension (number of features), default=26
    :param output_dim: Output dim of final layer of model (input to CTC layer), default=29
    :param dropout: Dropout rate, default=0.2
    :param numb_of_dense: Number of fully connected layers before recurrent, default=3
    :param n_layers: Number of bidirectional recurrent layers, default=1
    :return: network_model: brnn

    Default model contains:
     1 layer of masking
     3 layers of fully connected clipped ReLu (DNN) with dropout 20 % between each layer
     1 layer of BRNN
     1 layers of fully connected clipped ReLu (DNN) with dropout 20 % between each layer
     1 layer of softmax
    """

    # Input data type
    dtype = 'float32'
    # Kernel and bias initializers for fully connected dense layers
    kernel_init_dense = 'random_normal'
    bias_init_dense = 'random_normal'

    # Kernel and bias initializers for recurrent layer
    kernel_init_rnn = 'glorot_uniform'
    bias_init_rnn = 'zeros'

    # ---- Network model ----
    # x_input layer, dim: (batch_size * x_seq_size * features)
    input_data = Input(name='the_input',shape=(None, input_dim), dtype=dtype)

    # Masking layer
    x = Masking(mask_value=0., name='masking')(input_data)

    # Default 3 fully connected layers DNN ReLu
    # Default dropout rate 20 % at each FC layer
    for i in range(0, numb_of_dense):
        x = TimeDistributed(Dense(units=units, kernel_initializer=kernel_init_dense, bias_initializer=bias_init_dense,
                                  activation=clipped_relu), name='fc_'+str(i+1))(x)
        x = TimeDistributed(Dropout(dropout), name='dropout_'+str(i+1))(x)

    # Bidirectional RNN (with ReLu)
    for i in range(0, n_layers):
        x = Bidirectional(SimpleRNN(units, activation='relu', kernel_initializer=kernel_init_rnn, dropout=0.2,
                                    bias_initializer=bias_init_rnn, return_sequences=True),
                          merge_mode='concat', name='bi_rnn'+str(i+1))(x)

    # 1 fully connected layer DNN ReLu with default 20% dropout
    x = TimeDistributed(Dense(units=units, kernel_initializer=kernel_init_dense, bias_initializer=bias_init_dense,
                              activation='relu'), name='fc_4')(x)
    x = TimeDistributed(Dropout(dropout), name='dropout_4')(x)

    # Output layer with softmax
    y_pred = TimeDistributed(Dense(units=output_dim, kernel_initializer=kernel_init_dense,
                                   bias_initializer=bias_init_dense, activation='softmax'), name='softmax')(x)

    # ---- CTC ----
    # y_input layers (transcription data) for CTC loss
    labels = Input(name='the_labels', shape=[None], dtype=dtype)        # transcription data (batch_size * y_seq_size)
    input_length = Input(name='input_length', shape=[1], dtype=dtype)   # unpadded len of all x_sequences in batch
    label_length = Input(name='label_length', shape=[1], dtype=dtype)   # unpadded len of all y_sequences in batch

    # Lambda layer with ctc_loss function due to Keras not supporting CTC layers
    loss_out = Lambda(function=ctc_lambda_func, name='ctc', output_shape=(1,))(
                      [y_pred, labels, input_length, label_length])

    network_model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

    return network_model
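A minimal usage sketch for the model above (clipped_relu and ctc_lambda_func are assumed to be defined elsewhere in the project; because the Lambda layer already computes the CTC loss, the compile step simply passes that output through as the loss):

model = brnn(units=512, input_dim=26, output_dim=29)
model.compile(optimizer='adam', loss={'ctc': lambda y_true, y_pred: y_pred})
model.summary()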
def model_fit(x, y, x_test, y_test, n_epochs, n_batch_size, n_neurons,
              model_arch, reg):
    es = EarlyStopping(monitor='val_loss',
                       min_delta=0.005,
                       patience=5,
                       verbose=0,
                       mode='auto')

    if model_arch == "RNN":
        model = Sequential()
        model.add(
            SimpleRNN(n_neurons,
                      activation='relu',
                      input_shape=(x.shape[1], x.shape[2]),
                      kernel_regularizer=reg))
        model.add(Dense(12, activation="softmax",
                        kernel_initializer='uniform'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[metrics.categorical_accuracy])

        #Fit the model
        history = model.fit(x,
                            y,
                            epochs=n_epochs,
                            validation_data=(x_test, y_test),
                            batch_size=n_batch_size,
                            callbacks=[es])

    elif model_arch == "LSTM":
        model = Sequential()
        model.add(
            LSTM(n_neurons,
                 input_shape=(x.shape[1], x.shape[2]),
                 kernel_regularizer=reg))
        model.add(Dense(12, activation="softmax",
                        kernel_initializer="uniform"))
        model.compile(
            loss='binary_crossentropy',
            optimizer='adam',
            metrics=[metrics.categorical_accuracy, metrics.binary_accuracy])

        #Fit the model
        history = model.fit(x,
                            y,
                            epochs=n_epochs,
                            validation_data=(x_test, y_test),
                            batch_size=n_batch_size,
                            callbacks=[es])
    elif model_arch == "GRU":
        model = Sequential()
        model.add(
            GRU(n_neurons,
                input_shape=(x.shape[1], x.shape[2]),
                kernel_regularizer=reg))
        model.add(Dense(12, activation="softmax",
                        kernel_initializer="uniform"))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[metrics.categorical_accuracy])

        #Fit the model
        history = model.fit(x,
                            y,
                            epochs=n_epochs,
                            validation_data=(x_test, y_test),
                            batch_size=n_batch_size,
                            callbacks=[es])
    else:
        print("Error Model Input")
        exit()

    model.save(
        r'D:\Tugas Akhir\Final Dev-4\Model h5\%s_%s_REGULARIZER_%s_E%s_B%s_N%s.h5'
        % (date, model_arch, name, n_epochs, n_batch_size, n_neurons))

    data = pd.DataFrame()
    data['loss'] = history.history['loss']
    data['categorical_accuracy'] = history.history['categorical_accuracy']
    data['val_loss'] = history.history['val_loss']
    data['val_categorical_accuracy'] = history.history[
        'val_categorical_accuracy']
    data.to_csv(
        r'D:\Tugas Akhir\Final Dev-4\Results Data\%s_DATA_FINAL_REGULARIZER_%s_%s_E%s_B%s_N%s.csv'
        % (date, model_arch, name, n_epochs, n_batch_size, n_neurons))

    # Summarize history for accuracy
    plt.figure()
    plt.plot(history.history['categorical_accuracy'])
    plt.plot(history.history['val_categorical_accuracy'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(
        r'D:\Tugas Akhir\Final Dev-4\Results Data\%s_ACC_FINAL_REGULARIZER_%s_%s_E%s_B%s_N%s.png'
        % (date, model_arch, name, n_epochs, n_batch_size, n_neurons))

    #Summarize history for loss
    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(
        r'D:\Tugas Akhir\Final Dev-4\Results Data\%s_LOSS_FINAL_REGULARIZER_%s_%s_E%s_B%s_N%s.png'
        % (date, model_arch, name, n_epochs, n_batch_size, n_neurons))

    #Predict Value
    y_test_pred = model.predict(x_test)

    #Plot Confusion Matrix
    cnf_matrix = confusion_matrix(y_test.argmax(axis=1),
                                  y_test_pred.argmax(axis=1))
    np.set_printoptions(precision=2)

    class_names = np.array([
        'Jari Abduksi', 'Jari Fleksi', 'Jari Hiperekstensi', 'Jempol Abduksi',
        'Jempol Fleksi', 'Jempol Hiperekstensi', 'Lengan Pronasi',
        'Lengan Supinasi', 'Pergelangan Abduksi', 'Pergelangan Adduksi',
        'Pergelangan Fleksi', 'Pergelangan Hiperekstensi'
    ])
    plot_confusion_matrix(model_arch,
                          n_epochs,
                          n_batch_size,
                          n_neurons,
                          cnf_matrix,
                          classes=class_names,
                          normalize=False,
                          title='Confusion Matrix Without normalization')
Code Example #18
y_test = to_categorical(y_test1)

# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
X_test = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))

batch_size = 64
learning_rate = 0.1

# 1. define the network
model = Sequential()
model.add(
    SimpleRNN(
        64,
        init=lambda shape, name: normal(shape, scale=0.001, name=name),
        inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
        input_dim=22,
        activation='relu',
        return_sequences=True))  # try using a GRU instead, for fun
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=False))
model.add(Dense(11))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)

# try using different optimizers and different optimizer configs
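The snippet stops at the optimizer comment; a minimal sketch of the remaining compile/fit step (X_train and y_train are assumed to be prepared the same way as X_test and y_test above; the epoch count here is arbitrary):

model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=10,
          validation_data=(X_test, y_test))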
Code Example #19
# rnn in keras
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN

model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32))
model.summary()
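For reference, model.summary() here reports 320,000 parameters for the Embedding layer (10,000 * 32) and 2,080 for each SimpleRNN layer: (32 input weights + 32 recurrent weights + 1 bias) * 32 units.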
Code Example #20
def what_d(runtimes=1, renew=True, maxlen=100, file_id=3):

    [glove,[char_X_100, char_X_010, char_X_001, char_Y_10, char_Y_01],[word_lens]] = readfile(file_id=file_id)

    char_X_010 = min(char_X_010, maxlen)

    vocab = []
    X = np.zeros((char_X_100, char_X_010, char_X_001), dtype=np.bool)
    y = np.zeros((char_Y_10, char_Y_01 ), dtype=np.float64)

    ii = 0
    for i in range(0, word_lens):
        ttt = glove[i].split()
        ttt_lens = len(ttt)
        lists = ["".join(ttt[0:ttt_lens - char_Y_01])] + ttt[ttt_lens - char_Y_01:]
        lists[0] = re.sub("[^0-9a-zA-Z]", "", lists[0].lower())
        if 0 < len(lists[0]) <= maxlen:
            #print(ii, i)
            vocab.append(lists[0])
            text = lists[0].ljust(char_X_010)
            for j in range(0, char_X_010):
                X[ii, j, char_indices[text[j]]] = 1
            for k in range(1, char_Y_01 + 1):
                y[ii, k - 1] = lists[k]
            ii = ii + 1
            if i % 40000 == 0:
                print(i)

    # Find par.
    lens = []
    for word in vocab:
        lens.append(len(word))
    print(max(lens))   # min(maxlen, char_X_010)
    print(len(vocab))  # 399488
    char_X_100 = len(vocab)
    char_Y_10 = len(vocab)
    X = X[0:len(vocab)]
    y = y[0:len(vocab)]


    # First time: build the model: a bidirectional SimpleRNN
    if renew == True:
        print('Build model...')
        model = Sequential()
        model.add(SimpleRNN(char_Y_01, input_shape=(char_X_010, char_X_001), activation='tanh',
                           # inner_activation='sigmoid', dropout_W=0.5, dropout_U=0.5))
                           dropout_W=0.5, dropout_U=0.5))
        model.compile('Adadelta', 'MSE', metrics=['accuracy'])
        model.fit(X, y, batch_size=512, nb_epoch=1)
        model.save(path + "layer_1/RNN" + str(file_id) + ".pk")


    # Not first time: build the model: a bidirectional LSTM

    print('Load model...')
    model = load_model(path+"layer_1/RNN" + str(file_id) + ".pk")
    for j in range(0,runtimes-1):
        print('Build model...')
        model.fit(X, y,
                  batch_size=512,
                  nb_epoch=1)
        model.save(path + "layer_1/RNN" + str(file_id) + ".pk")


    # Test cosine similarity, train set

    print('Test cosine similarity, train set')
    cos = []
    for i in range(0, len(vocab)):
        text = vocab[i].ljust(char_X_010)
        x = np.zeros((1, char_X_010, char_X_001), dtype=np.bool)
        for j in range(0, len(text)):
            x[0, j, char_indices[text[j]]] = 1
        map_LSTM = model.predict(x, verbose=0)

        map_GloVe = y[i]

        cos.append(1 - spatial.distance.cosine(map_LSTM, map_GloVe))
    f = open(path+"layer_1/cosine.txt", 'a')
    f.write("20 times RNN" + str(file_id) + " cosine similarity: "+str(sum(cos)/len(cos))+"\n")
    f.close()


    # Test cosine similarity, misspelling

    print('Test cosine similarity, misspelling')
    cos = []
    change_engs = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                   'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    for i in range(0, len(vocab)):
        misspelling = vocab[i]
        if len(misspelling)>4:
            loc = int(np.random.uniform(0,1,1)*len(misspelling))
            cha = int(np.random.uniform(0,1,1)*26)

            tem = list(misspelling)
            tem[loc] = change_engs[cha]

            misspelling = "".join(tem)
            text = misspelling.ljust(char_X_010)
            x = np.zeros((1, char_X_010, char_X_001), dtype=np.bool)
            for j in range(0, len(text)):
                x[0, j, char_indices[text[j]]] = 1
            map_LSTM = model.predict(x, verbose=0)
            map_GloVe = y[i]

            cos.append(1 - spatial.distance.cosine(map_LSTM, map_GloVe))
    f = open(path+"layer_1/cosine.txt", 'a')
    f.write("20 times RNN" + str(file_id) + " misspelling cosine similarity : "+str(sum(cos)/len(cos))+", len: "+str(len(cos))+"\n")
    f.close()
Code Example #21
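    # (The snippet starts mid-function: the lines below appear to be the body of a
    #  custom recurrent-weight regularizer, presumably `def eigen_reg(weight_matrix):`,
    #  with l1_lambda taken from the enclosing scope. It penalizes eigenvalues of the
    #  recurrent matrix that drift away from 1.)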
    # print('Weight Matrix: {}'.format(weight_matrix))
    # print('Weights: {}'.format(weight_matrix))
    eigenvalue, _ = K.tf.linalg.eigh(weight_matrix)
    eigenvalue = K.variable(eigenvalue)
    # print('Eigenvalue: {}'.format(eigenvalue))
    return K.sum(
        K.cast_to_floatx(l1_lambda) *
        K.abs(K.ones_like(eigenvalue) - eigenvalue))


print('Evaluate Model {}...'.format(model_name))
model = Sequential()
model.add(
    SimpleRNN(hidden_units,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              recurrent_initializer=initializers.Identity(gain=1.0),
              recurrent_regularizer=eigen_reg,
              activation='relu',
              input_shape=x_train.shape[1:]))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

# for e in zip(model.layers[0].trainable_weights, model.layers[0].get_weights()):
#         print('Param %s:\n%s' % (e[0],e[1]))
# print('Params: {} | {}'.format(model.layers[0].trainable_weights[1], model.layers[0].get_weights()[1]))

train_log = keras.callbacks.CSVLogger(
    os.path.join('expr', model_name, 'training.log'))
Code Example #22
def lstm(n_features=4,
         n_timesteps=12,
         n_train=10,
         n_window=5,
         n_units=100,
         n_epochs=50,
         with_att=False,
         methods='lstm',
         lr=0.001):
    """

    :param n_features: 4 or 10, using 4 features or 10 features
    :param n_train: training timesteps
    :param n_window: width of training window, for example, [0 1 2 3 4]->[5], n_window = 5
    :param n_units: LSTM units
    :param n_epochs: training epochs
    :return:
    """
    data = []

    for i in range(len(name_node_pairs)):
        f = open('./data/features_{}/{}_temp_link_ft.csv'.format(
            n_features, name_node_pairs[i]))
        df = pd.read_csv(f)
        data.append(df.values)

    data = np.array(data)
    print('data: {}, \ndata.shape(): {}'.format(data, data.shape))

    # define train, test
    # scaler = MinMaxScaler(feature_range=(0, 1))
    n_samples, n_timesteps, n_features = data.shape
    scaled_data = data.reshape((n_samples, n_timesteps * n_features))
    # scaled_data = scaler.fit_transform(scaled_data)
    scaled_data = scaled_data.reshape((n_samples, n_timesteps, n_features))

    # define problem properties
    n_test = 12 - n_train

    # define LSTM
    # sequential
    # model = Sequential()
    # model.add(Bidirectional(LSTM(n_units, input_shape=(n_window, n_features))))
    # model.add(Dense(1))
    #
    # model.compile(loss='mse', optimizer='adam')

    # Model
    inputs = Input(shape=(n_window, n_features))
    return_sequences = False
    if with_att == True:
        return_sequences = True
    if methods == 'lstm':
        att_in = Bidirectional(
            LSTM(n_units,
                 input_shape=(n_window, n_features),
                 return_sequences=return_sequences))(inputs)
    elif methods == 'gru':
        att_in = Bidirectional(
            GRU(n_units,
                input_shape=(n_window, n_features),
                return_sequences=return_sequences))(inputs)
    elif methods == 'rnn':
        att_in = Bidirectional(
            SimpleRNN(n_units,
                      input_shape=(n_window, n_features),
                      return_sequences=return_sequences))(inputs)
    if with_att == True:
        att_out = attention()(att_in)
        outputs = Dense(1)(att_out)
    else:
        outputs = Dense(1)(att_in)

    model = Model(inputs, outputs)
    opt = optimizers.Adam(lr=lr)
    model.compile(loss='mse', optimizer=opt)

    # fit network
    for i in range(n_train - n_window):
        history = model.fit(scaled_data[:, i:i + n_window, :],
                            scaled_data[:, i + n_window, 1],
                            epochs=n_epochs)
        # plot history
        # plt.plot(history.history['loss'])
        # plt.show()
    # make prediction
    inv_yhat = []
    for i in range(n_test):
        yhat = model.predict(scaled_data[:, n_train - n_window + i:n_train +
                                         i, :])
        inv_yhat.append(yhat)

    inv_yhat = np.array(inv_yhat)
    print('inv_yhat.shape:{}'.format(
        inv_yhat.shape))  #inv_yhat.shape:(3, 736, 1)
    inv_yhat = inv_yhat.reshape((inv_yhat.shape[0], inv_yhat.shape[1]))
    print('inv_yhat.shape:{}'.format(inv_yhat.shape))  #inv_yhat.shape:(3, 736)
    inv_yhat = inv_yhat.T
    print('inv_yhat.shape:{}'.format(inv_yhat.shape))  #inv_yhat.shape:(736, 3)

    inv_yhat = np.concatenate((scaled_data[:, n_train:, 0], inv_yhat), axis=1)
    inv_yhat = inv_yhat.reshape((n_samples, n_test, 2))
    inv_yhat = np.concatenate((inv_yhat, scaled_data[:, n_train:, 2:]), axis=2)
    print('inv_yhat.shape:{}'.format(inv_yhat.shape))
    inv_yhat = np.concatenate((scaled_data[:, :n_train, :], inv_yhat), axis=1)
    print('inv_yhat.shape:{}'.format(inv_yhat.shape))
    inv_yhat = inv_yhat.reshape(n_samples, n_timesteps * n_features)
    print('inv_yhat.shape:{}'.format(inv_yhat.shape))
    # inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat.reshape(n_samples, n_timesteps, n_features)
    inv_yhat[inv_yhat < 0] = 0  # transform negative values to zero
    prediction = inv_yhat[:, -3:, 1]
    prediction = prediction.reshape(prediction.shape[0], prediction.shape[1],
                                    1)
    original = data[:, -3:, 1]
    original = original.reshape(original.shape[0], original.shape[1], 1)
    concat = np.concatenate((original, prediction), axis=2)
    print('concat.shape:{}'.format(concat.shape))
    np.set_printoptions(threshold=1e6)
    print('concat\n{}'.format(concat))
    concat = concat.reshape(concat.shape[0] * concat.shape[1], concat.shape[2])
    df = pd.DataFrame(concat)
    df.columns = ['original', 'prediction']
    df.to_csv('./data/LSTM/prediction_LSTM_{}.csv'.format(n_features),
              index=False)
    rmse = sqrt(mean_squared_error(inv_yhat[:, -3:, 1], data[:, -3:, 1]))
    print('rmse: {}'.format(rmse))
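A hypothetical call matching the documented defaults (the ./data/features_*/ CSV layout and name_node_pairs are assumed to exist in the surrounding module):

lstm(n_features=4, n_train=10, n_window=5, n_units=100, n_epochs=50,
     with_att=False, methods='rnn')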
Code Example #23
    kernel_size = 3
    hidden_dims = 250
    epochs = 5
    num_neurons = 25

    prep = Preprocessor('corpus_marked', 'vk_comment_model')
    x_train, y_train, x_test, y_test = prep.train_pipeline(
        maxlen, embedding_dims)

    # Initialize an empty Keras network
    model = Sequential()

    # Add a recurrent layer
    model.add(
        SimpleRNN(num_neurons,
                  return_sequences=True,
                  input_shape=(maxlen, embedding_dims)))

    # Add a dropout layer
    model.add(Dropout(.2))

    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    # Compile our recurrent neural network
    model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
    model.summary()

    # Train and save the model
    model.fit(x_train,
              y_train,
Code Example #24
    def _test_rnn_layer(self, allow_slow):
        i = 0
        layer_name = str(SimpleRNN).split('.')[3].split("'>")[0]
        numerical_err_models = []
        shape_err_models = []
        for base_params in self.base_layer_params:
            base_params = dict(zip(self.params_dict.keys(), base_params))
            input_data = generate_input(base_params['input_dims'][0],
                                        base_params['input_dims'][1],
                                        base_params['input_dims'][2])
            for rnn_params in self.rnn_layer_params:
                rnn_params = dict(
                    zip(self.simple_rnn_params_dict.keys(), rnn_params))
                model = Sequential()
                model.add(
                    SimpleRNN(
                        base_params['output_dim'],
                        input_length=base_params['input_dims'][1],
                        input_dim=base_params['input_dims'][2],
                        # init=base_params['init'],
                        # inner_init=base_params['inner_init'],
                        activation=base_params['activation'],
                        return_sequences=base_params['return_sequences'],
                        go_backwards=base_params['go_backwards'],
                        unroll=base_params['unroll'],
                        dropout_U=rnn_params['dropout']['dropout_U'],
                        dropout_W=rnn_params['dropout']['dropout_W'],
                        W_regularizer=rnn_params['regularizer']
                        ['W_regularizer'],
                        U_regularizer=rnn_params['regularizer']
                        ['U_regularizer'],
                        b_regularizer=rnn_params['regularizer']
                        ['b_regularizer'],
                    ))
                model_dir = tempfile.mkdtemp()
                keras_model_path = os.path.join(model_dir, 'keras.h5')
                coreml_model_path = os.path.join(model_dir, 'keras.mlmodel')
                model.save_weights(keras_model_path)
                mlkitmodel = _get_mlkit_model_from_path(
                    model, coreml_model_path)
                keras_preds = model.predict(input_data).flatten()
                input_data = np.transpose(input_data, [1, 0, 2])
                coreml_preds = mlkitmodel.predict({'data': input_data
                                                   })['output'].flatten()
                try:
                    self.assertEquals(coreml_preds.shape, keras_preds.shape)
                except AssertionError:
                    print(
                        "Shape error:\nbase_params: {}\nkeras_preds.shape: {}\ncoreml_preds.shape: {}"
                        .format(base_params, keras_preds.shape,
                                coreml_preds.shape))
                    shape_err_models.append(base_params)
                    shutil.rmtree(model_dir)
                    i += 1
                    continue
                try:
                    for idx in range(0, len(coreml_preds)):
                        relative_error = (coreml_preds[idx] -
                                          keras_preds[idx]) / coreml_preds[idx]
                        self.assertAlmostEqual(relative_error, 0, places=2)
                except AssertionError:
                    print(
                        "Assertion error:\nbase_params: {}\nkeras_preds: {}\ncoreml_preds: {}"
                        .format(base_params, keras_preds, coreml_preds))
                    numerical_err_models.append(base_params)
                shutil.rmtree(model_dir)
                i += 1

                if not allow_slow:
                    break

            if not allow_slow:
                break

        self.assertEquals(shape_err_models, [],
                          msg='Shape error models {}'.format(shape_err_models))
        self.assertEquals(
            numerical_err_models, [],
            msg='Numerical error models {}'.format(numerical_err_models))
Code Example #25
encoder.add(Embedding(input_dim=vocab_size, output_dim=query_length))
c = encoder(story)
# Embed the question sequence into u
question = Input((query_length, ))
encoder = Sequential()
encoder.add(
    Embedding(input_dim=vocab_size, output_dim=64, input_length=query_length))
u = encoder(question)
# Compute  p = softmax(m * u)
p = dot([m, u], axes=(2, 2))
p = Activation('softmax')(p)
# Compute o = p * c
o = multiply([p, c])
# Pass (o,u) through RNN for answer
answer = concatenate([Permute((2, 1))(o), u])
answer = SimpleRNN(64)(answer)
answer = Dense(vocab_size, kernel_initializer='random_normal')(answer)
answer = Activation('softmax')(answer)
# Model everything together
model = Model([story, question], answer)
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.count_params())
# Train
model.fit_generator(data_generator(
    len(train_data) / steps, train_data, word_idx, story_length, query_length),
                    steps_per_epoch=steps,
                    nb_epoch=10,
                    validation_data=data_generator(
                        len(test_data) / steps, test_data, word_idx,
Code Example #26
def define_rcnn_model(VOCA_SIZE, cnt):

    org_word_seq = Input(shape=(None, ), dtype="int32")
    left_context_seq = Input(shape=(None, ), dtype="int32")
    right_context_seq = Input(shape=(None, ), dtype="int32")

    if WORD_EMBEDDING == 'rand_300d':
        embedder = Embedding(
            VOCA_SIZE,  # vocab_size
            EMBEDDING_DIM,
            embeddings_initializer='uniform',
            trainable=IS_TRAINABLE)  # is trainable?

    else:  # use pretrained word embedding
        embedder = Embedding(
            VOCA_SIZE,  # vocab_size
            EMBEDDING_DIM,
            weights=[embedding_matrix],
            trainable=IS_TRAINABLE)

    word_embed_seq = embedder(org_word_seq)
    left_context_emb_seq = embedder(left_context_seq)
    rght_context_emb_seq = embedder(right_context_seq)

    if MODEL_VER == 'original':
        if RNN_MOD == 'LSTM':
            left_context_vector = LSTM(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = LSTM(
                C_DIM, return_sequences=True,
                go_backwards=True)(rght_context_emb_seq)
        elif RNN_MOD == 'GRU':
            left_context_vector = GRU(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = GRU(C_DIM,
                                       return_sequences=True,
                                       go_backwards=True)(rght_context_emb_seq)
        elif RNN_MOD == 'SimpleRNN':
            left_context_vector = SimpleRNN(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = SimpleRNN(
                C_DIM, return_sequences=True,
                go_backwards=True)(rght_context_emb_seq)
        concat_all = concatenate(
            [left_context_vector, word_embed_seq, right_context_vector],
            axis=2)

        latent_semantic_vector = TimeDistributed(
            Dense(Y_DIM, activation="tanh"))(concat_all)
        max_pool_output_vector = Lambda(
            lambda x: K.max(x, axis=1),
            output_shape=(Y_DIM, ))(latent_semantic_vector)
        output = Dense(NUM_CLASSES, input_dim=(Y_DIM),
                       activation="softmax")(max_pool_output_vector)

    elif MODEL_VER == 'proposed':
        if RNN_MOD == 'LSTM':
            left_context_vector = LSTM(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = LSTM(
                C_DIM, return_sequences=True,
                go_backwards=True)(rght_context_emb_seq)
        elif RNN_MOD == 'GRU':
            left_context_vector = GRU(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = GRU(C_DIM,
                                       return_sequences=True,
                                       go_backwards=True)(rght_context_emb_seq)
        elif RNN_MOD == 'SimpleRNN':
            left_context_vector = SimpleRNN(
                C_DIM, return_sequences=True)(left_context_emb_seq)
            right_context_vector = SimpleRNN(
                C_DIM, return_sequences=True,
                go_backwards=True)(rght_context_emb_seq)
        conv_a = Conv1D(filters=F_DIM,
                        kernel_size=3,
                        padding='same',
                        activation='relu')(word_embed_seq)
        conv_b = Conv1D(filters=F_DIM,
                        kernel_size=4,
                        padding='same',
                        activation='relu')(word_embed_seq)
        conv_c = Conv1D(filters=F_DIM,
                        kernel_size=5,
                        padding='same',
                        activation='relu')(word_embed_seq)
        concat_all = concatenate([
            left_context_vector, word_embed_seq, right_context_vector, conv_a,
            conv_b, conv_c
        ],
                                 axis=2)

        latent_semantic_vector = TimeDistributed(
            Dense(Y_DIM, activation="tanh"))(concat_all)
        max_pool_output_vector = Lambda(
            lambda x: K.max(x, axis=1),
            output_shape=(Y_DIM, ))(latent_semantic_vector)
        #att_output = AttentionWithContext()(latent_semantic_vector)
        #concat_output = concatenate([max_pool_output_vector, att_output], axis = -1)
        # with the attention branch above disabled, the classifier only sees the
        # Y_DIM-sized max-pooled vector (Dense ignores input_dim when called on a tensor)
        output = Dense(NUM_CLASSES,
                       activation="softmax")(max_pool_output_vector)

    model = Model(inputs=[org_word_seq, left_context_seq, right_context_seq],
                  outputs=output)
    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    if cnt == 1:
        print('\n')
        print('[ MODEL SUMMARY ]')
        model.summary()  # summary() already prints; wrapping it in print() would also print None

    return model
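A minimal call sketch, not part of the original file; the vocabulary size and the integer-encoded arrays (word_ids, left_ids, right_ids, labels) are illustrative placeholders.

# Hypothetical usage of define_rcnn_model: build once, then fit on index sequences.
model = define_rcnn_model(VOCA_SIZE=20000, cnt=1)
model.fit([word_ids, left_ids, right_ids], labels, epochs=5, batch_size=64)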
Code example #27
0
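# NOTE: this fragment is cut off at the top; it assumes the original example built the
# dataset from a 1-D `series` with window length T and feature dimension D (e.g. T = 10,
# D = 1), filling the lists X (windows of length T) and Y (next values) in a loop, and
# that numpy and the Keras pieces (Input, SimpleRNN, Dense, Model, Adam) were imported.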
  y = series[t+T]
  # print("y:", y)
  Y.append(y)

X = np.array(X)
Y = np.array(Y)
N = len(X)



### many-to-one RNN
inputs = np.expand_dims(X, -1)

# make the RNN
i = Input(shape=(T, D))
x = SimpleRNN(5)(i)
x = Dense(1)(x)
model = Model(i, x)
model.compile(
  loss='mse',
  optimizer=Adam(lr=0.1),
)

# train the RNN
r = model.fit(
  inputs[:-N//2], Y[:-N//2],
  batch_size=32,
  epochs=80,
  validation_data=(inputs[-N//2:], Y[-N//2:]),
)
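A short follow-up sketch, not part of the original example and assuming matplotlib is available: it plots the loss curves stored in the History object `r` returned by model.fit above.

import matplotlib.pyplot as plt

# plot training vs. validation loss recorded during fit
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()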
Code example #28
0
File: model.py  Project: ryanraba/stocksml
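# NOTE: assumes `import numpy as np` earlier in model.py; the Keras imports are done
# inside the function body below.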
def BuildModel(fdf,
               choices,
               layers=[('rnn', 32), ('dnn', 64), ('dnn', 32)],
               depth=5,
               count=2):
    """
    Build a model with the given structure

    Parameters
    ----------
    fdf : pandas.DataFrame
        feature dataframe
    choices : int
        number of ticker symbols model can choose between
    layers : list of tuples
        list of tuples defining structure of model. Each tuple is (layer, size) where layer can
        be 'dnn', 'cnn', 'lstm', 'rnn', or 'drop'. Default is a 3-layer model with [('rnn',32),('dnn',64),('dnn',32)]
    depth : int
        depth of time dimension for recurrent and convolutional networks (rnn, cnn, lstm). Ignored if using dnn only.
        Default is 5.
    count : int
        number of models to build. Default is 2
        
    Returns
    -------
    list of keras.Model, numpy.ndarray
        list of keras Models built, compiled and ready for training along with the appropriate data array for training
    """
    from keras.models import Model, clone_model
    from keras.layers import Input, Dense, SimpleRNN, LSTM, Conv1D, Flatten, Dropout

    # check to see if depth is needed
    layer_types = np.unique([nn[0] for nn in layers])
    if ('cnn' in layer_types) or ('rnn' in layer_types) or ('lstm'
                                                            in layer_types):
        dx = np.repeat(fdf.values[:, None, :], depth, axis=1)
        for ii in range(1, depth):
            dx[:, ii, :] = np.vstack((np.zeros(
                (ii, fdf.values.shape[1])), dx[:-ii, ii, :]))
    else:
        dx = fdf.values

    # fixed input to model
    ins = Input(shape=dx.shape[1:], name='input')
    hh = ins

    # build middle layers according to specified structure
    for ll, layer in enumerate(layers):
        name = layer[0] + '_' + str(ll)
        flatten = (ll >= len(layers) - 1) or (layers[ll + 1][0] == 'dnn') or (
            (ll < len(layers) - 2) and (layers[ll + 1][0] == 'drop') and
            (layers[ll + 2][0] == 'dnn'))
        if layer[0] == 'dnn':
            hh = Dense(layer[1], activation='tanh', name=name)(hh)
        elif layer[0] == 'rnn':
            hh = SimpleRNN(layer[1],
                           activation='tanh',
                           return_sequences=not flatten,
                           name=name)(hh)
        elif layer[0] == 'lstm':
            hh = LSTM(layer[1],
                      activation='tanh',
                      return_sequences=not flatten,
                      name=name)(hh)
        elif layer[0] == 'cnn':
            hh = Conv1D(layer[1],
                        3,
                        padding='valid',
                        activation='relu',
                        name=name)(hh)
            if flatten:
                hh = Flatten(name='flatten')(hh)
        elif layer[0] == 'drop':
            hh = Dropout(layer[1], name=name)(hh)

    # fixed outputs from model
    action = Dense(5, activation='softmax', name='action')(hh)
    symbol = Dense(choices, activation='softmax', name='symbol')(hh)
    limit = Dense(1, activation='tanh', name='limit')(hh)

    model = Model(inputs=ins, outputs=[action, symbol, limit], name='model')
    model.compile(
        loss=['categorical_crossentropy', 'categorical_crossentropy', 'mse'],
        optimizer='adam')

    models = [model]
    for mm in range(count - 1):
        models += [clone_model(model)]
        models[-1].compile(loss=[
            'categorical_crossentropy', 'categorical_crossentropy', 'mse'
        ],
                           optimizer='adam')

    return models, dx
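A minimal usage sketch following the docstring above; the feature DataFrame `fdf` and the choice of three symbols are illustrative placeholders, not part of the original file.

# Hypothetical usage: build two copies of the default rnn/dnn/dnn model and inspect the first.
models, dx = BuildModel(fdf, choices=3,
                        layers=[('rnn', 32), ('dnn', 64), ('dnn', 32)],
                        depth=5, count=2)
models[0].summary()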
Code example #29
0
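# NOTE: this example assumes PATH (data directory), DW (embedding size), DH (RNN size)
# and a text_encode() helper are defined elsewhere, along with pandas (pd) and the Keras
# imports (Sequential, Embedding, SimpleRNN, Dense).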
MAX_LEN = 20

if __name__ == '__main__':
    train = pd.read_csv(PATH + 'train.txt', sep='\t')
    val = pd.read_csv(PATH + 'valid.txt', sep='\t')
    test = pd.read_csv(PATH + 'test.txt', sep='\t')

    train_onehot, train_label, val_onehot, val_label, test_onehot, test_label, vocab, word_index = text_encode(
        train, val, test, type='onehot')

    print(train_onehot[:4])

    model = Sequential()

    model.add(Embedding(len(vocab) + 1, DW))
    model.add(SimpleRNN(DH, dropout=0.2, recurrent_dropout=0.1))
    model.add(Dense(4, activation='softmax'))

    model.summary()

    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=["accuracy"],
    )

    # also track the validation split loaded above, which otherwise goes unused
    model.fit(train_onehot, train_label, validation_data=(val_onehot, val_label))

    score = model.evaluate(test_onehot, test_label)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])
Code example #30
0
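# NOTE: this fragment assumes `data` (a 1-D numpy array), a create_dataset() helper that
# builds (X, y) windows of length config.look_back, a `config` object from wandb, and the
# imports used below (Sequential, SimpleRNN, WandbCallback) plus a custom PlotCallback.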
# normalize data to between 0 and 1
max_val = max(data)
min_val = min(data)
data = (data - min_val) / (max_val - min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)

trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
model.add(SimpleRNN(1, input_shape=(config.look_back, 1)))
model.compile(loss='mae', optimizer='adam')
model.fit(trainX,
          trainY,
          epochs=1000,
          batch_size=1,
          validation_data=(testX, testY),
          callbacks=[
              WandbCallback(),
              PlotCallback(trainX, trainY, testX, testY, config.look_back)
          ])
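A short follow-up sketch, not part of the original example: predictions can be mapped back to the original units by inverting the min-max scaling applied above.

# undo the min-max normalization so predictions are on the original scale
preds = model.predict(testX)
preds_original = preds * (max_val - min_val) + min_val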