コード例 #1
0
ファイル: train.py プロジェクト: Rene90/dl4nlp
def main():
    args = getArguments()
    HIDDEN = args.hidden_size
    EPOCHS  = args.epoch_size
    BATCH_SIZE = args.batch_size

    #
    # prepare DATA
    #    
    print "Load Data"
    X_tr = np.load(args.input)
    y_tr = np.load(args.output) if args.input != args.output else X_tr
    
    sx, sy, sz = X_tr.shape
    split = int(sx * 0.8)
    
    X_val = X_tr[split:,:,:]
    X_tr  = X_tr[:split,:,:]
    
    y_val = y_tr[split:,:,:]
    y_tr  = y_tr[:split,:,:]

    
    if args.mode == "train":
        print "Define RNN"
        inputs  = Input(shape=(sy,sz))
        encoded = LSTM(HIDDEN,
                       activation="relu",
                       init="normal")(inputs)
        
        decoded = RepeatVector(sy)(encoded)
        decoded = LSTM(sz,
                       return_sequences=True,
                       activation="softmax",
                       init="normal")(decoded)

        autoencoder = Model(inputs,decoded)
    elif args.mode == "retrain":
        print "load existing model: ", args.model
        autoencoder = load_model(args.model)
    print "Compile"
    autoencoder.compile(optimizer="rmsprop", loss="categorical_crossentropy")
    print "Train"
    autoencoder.fit(X_tr, y_tr,
                    shuffle=True,
                    nb_epoch=EPOCHS,
                    batch_size=BATCH_SIZE,
                    validation_data=(X_val, y_val),
                    callbacks=[
                        ModelCheckpoint(args.model,save_best_only=True),
                        #EarlyStopping(patience=20)
                    ]
    )
コード例 #2
0
def main():
    args = getArguments()
    SPLIT = 100

    #
    # prepare DATA
    #
    print "Load Data"
    X, char_dict = loadData()
    X = X[:100]

    print "load existing model: ", args.model
    autoencoder = load_model(args.model)

    y_hat = autoencoder.predict(X)

    for idx in range(X.shape[0]):
        x1 = decode(np.argmax(X[idx], axis=1), char_dict)
        x2 = decode(np.argmax(y_hat[idx], axis=1), char_dict)
        print "".join(x1), "".join(x2)
        raw_input()
コード例 #3
0
ファイル: test.py プロジェクト: Rene90/dl4nlp
def main():
    args = getArguments()
    SPLIT = 100

    #
    # prepare DATA
    #    
    print "Load Data"
    X, char_dict = loadData()
    X = X[:100]

    print "load existing model: ", args.model
    autoencoder = load_model(args.model)

    y_hat = autoencoder.predict(X)

    for idx in range(X.shape[0]):
        x1 = decode(np.argmax(X[idx],    axis=1),char_dict)
        x2 = decode(np.argmax(y_hat[idx],axis=1),char_dict)
        print "".join(x1), "".join(x2)
        raw_input()
コード例 #4
0
ファイル: rnn_predict.py プロジェクト: rknaebel/dl4nlp
    # Tail of a character-sampling routine (the function header lies
    # outside this excerpt).  Grab the shared/intermediary graph variables
    # needed to step the RNN one character at a time.
    v_init = get_var_from("initial_state", model.shared_variables)
    v_states = get_var_from("H_apply_states", model.intermediary_variables)
    #v_states  = get_var_from("H_apply_states",model.intermediary_variables)

    # One-step sampler: feeds a character, returns the softmax over the
    # next character; the `updates` pair writes the new hidden state back
    # into the initial-state shared variable so the recurrence carries
    # across successive calls to f.
    f = theano.function([v_inchar],
                        v_softmax,
                        updates=[(v_init, v_states[0][0])])
    #f = theano.function([v_inchar], v_softmax)

    seq = [init_char]
    for _ in xrange(num_chars):
        # Distribution over the vocabulary for the next character, given
        # the last sampled one (state lives in v_init between calls).
        dist = f(np.atleast_2d(seq[-1]).astype(np.int32))[0]
        # Draw the next character index from that distribution.
        sample = np.random.choice(vocab_size, 1, p=dist)[0]
        seq.append(sample)
    #print seq
    return seq


def sample_text(model, num_chars, corpus):
    """Sample num_chars characters from the model and decode them to text."""
    vocab = corpus.vocab_size()
    sampled = sample_chars(model, num_chars, vocab)
    return "".join(corpus.decode(sampled))


# Script entry: build the corpus from disk, restore the trained Blocks
# main loop from the model path given on the command line, and expose
# its model for sampling.
corpus = Corpus(open("corpus.txt").read())

args = getArguments()
main_loop = load(args.model)
model = main_loop.model

# NOTE(review): sampling output is disabled; uncomment to print text.
#print sample_text(model, args.sample_size, corpus)
コード例 #5
0
ファイル: rnn_predict.py プロジェクト: Rene90/dl4nlp
    # Tail of a character-sampling routine (the function header lies
    # outside this excerpt).  Look up the graph variables by name:
    # input placeholder, softmax output, and the recurrent state.
    v_inchar = get_var_from("inchar", model.variables)
    v_softmax = get_var_from("softmax_apply_output", model.variables)
    v_init = get_var_from("initial_state", model.shared_variables)
    v_states = get_var_from("H_apply_states", model.intermediary_variables)
    # v_states  = get_var_from("H_apply_states",model.intermediary_variables)

    # One-step sampler: the `updates` pair copies the newly produced
    # hidden state into the initial-state shared variable, so each call
    # to f continues the recurrence from the previous call.
    f = theano.function([v_inchar], v_softmax, updates=[(v_init, v_states[0][0])])
    # f = theano.function([v_inchar], v_softmax)

    seq = [init_char]
    for _ in xrange(num_chars):
        # Next-character distribution conditioned on the last sample.
        dist = f(np.atleast_2d(seq[-1]).astype(np.int32))[0]
        # Draw one character index according to that distribution.
        sample = np.random.choice(vocab_size, 1, p=dist)[0]
        seq.append(sample)
    # print seq
    return seq


def sample_text(model, num_chars, corpus):
    """Draw num_chars characters from the model and join them into a string."""
    indices = sample_chars(model, num_chars, corpus.vocab_size())
    decoded = corpus.decode(indices)
    return "".join(decoded)


# Script entry: read the training corpus, reload the saved main loop
# from the path supplied on the command line, and pull out the model.
corpus = Corpus(open("corpus.txt").read())

args = getArguments()
main_loop = load(args.model)
model = main_loop.model

# NOTE(review): the actual sampling call is commented out; re-enable to
# print generated text of length args.sample_size.
# print sample_text(model, args.sample_size, corpus)