Example #1
def condensed_nn(self, trainset, testset, k, isClassification):
    # get the reduced dataset using condensed nearest neighbour
    reduced_dataset = cnn.Cnn().getReducedDataset(trainset)
    # run knn with the reduced training set
    predicted = Knn.Knn().fit(reduced_dataset.values, testset, k,
                              isClassification)
    # return predicted and actual labels
    return predicted, testset.iloc[:, -1]
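For context, getReducedDataset presumably applies Hart's condensed nearest neighbour rule: start from a single stored point and add any training point that the current store misclassifies with 1-NN, repeating until a full pass adds nothing. A minimal, self-contained sketch of that reduction (illustrative names only, not the project's actual implementation):

import numpy as np

def condense(X, y):
    """Keep only the points needed for 1-NN to classify the rest correctly."""
    keep = [0]                       # seed the store with the first point
    changed = True
    while changed:
        changed = False
        for i in range(len(X)):
            if i in keep:
                continue
            # 1-NN prediction using only the stored points
            d = np.linalg.norm(X[keep] - X[i], axis=1)
            if y[keep][np.argmin(d)] != y[i]:
                keep.append(i)       # misclassified -> add to the store
                changed = True
    return X[keep], y[keep]

X = np.array([[1.0, 1.0], [1.2, 0.9], [8.0, 8.0], [8.1, 7.9]])
y = np.array([0, 0, 1, 1])
X_reduced, y_reduced = condense(X, y)
print(X_reduced, y_reduced)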
Example #2
    # read the maximum input length that was stored with the features
    with open(featdir + '/maxlength', 'r') as fid:
        max_input_length = int(fid.read())

    # create a feature reader
    featreader = feature_reader.FeatureReader(
        featdir + '/feats_shuffled.scp', featdir + '/cmvn.scp',
        featdir + '/utt2spk', int(cnn_conf['context_width']), max_input_length)
    
    # create a target coder
    coder = target_coder.AlignmentCoder(lambda x, y: x, num_labels)
    
    # where should LDA be applied?
    dispenser = batchdispenser.AlignmentBatchDispenser(featreader, coder, 
        int(cnn_conf['batch_size']), input_dim, alifile)

    #train the neural net
    print('------- training neural net ----------')
    #create the neural net
    cnn = cnn.Cnn(input_dim, num_labels, total_frames, cnn_conf)
    cnn.train(dispenser)


# if TEST_NNET:

    # #use the neural net to calculate posteriors for the testing set
    # print '------- computing state pseudo-likelihoods ----------'
    # savedir = config.get('directories', 'expdir') + '/' + config.get('nnet', 'name')
    # decodedir = savedir + '/decode'
    # if not os.path.isdir(decodedir):
        # os.mkdir(decodedir)

    # featdir = config.get('directories', 'test_features') + '/' +  config.get('dnn-features', 'name')

    # #create a feature reader
Example #3
import torch
import torch.nn.functional as F
import torch.autograd as autograd
import cnn
import json
import random
import os

baselrate = 500
lrate = baselrate / 100000.0
print("Learning Rate:", lrate)
epochnum = 200
idx = list(range(2342))

model = cnn.Cnn()
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=lrate, weight_decay=1e-5)

with open('traindata.json', 'r') as file:
    data = json.load(file)

with open('trainlabel.json', 'r') as file:
    label = json.load(file)

print("Training Set Loaded")

with open('validdata.json', 'r') as file:
    validdata = json.load(file)
Example #4
if len(sys.argv) > 1:
    env = sys.argv[1]
else:
    env = "local"

# print(labels)
print("Total labels: ", len(config.labels))
print(config.vocabulary_size)

path = ""
if env == "local":
    path = "data/reuters/"
elif env == "server":
    path = "data/reuters/"

cnn = cn.Cnn()
# Construct model
pred = cnn.network(cnn.x, cnn.weights, cnn.biases, cnn.dropout)

# Define loss and optimizer
#cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=cnn.y))
#cost = tf.reduce_mean(bpmll_out_module.bp_mll(pred, cnn.y))
cost = -tf.reduce_sum(
    ((cnn.y * tf.log(pred + 1e-9)) + ((1 - cnn.y) * tf.log(1 - pred + 1e-9))),
    name='xentropy') + 0.01 * (tf.nn.l2_loss(cnn.weights['wd1']) +
                               tf.nn.l2_loss(cnn.weights['out']))
#cost = -tf.reduce_sum(((mlp.y * tf.log(pred + 1e-9)) + ((1-mlp.y) * tf.log(1 - pred + 1e-9)) )  , name='entropy' ) + 0.01 * (tf.nn.l2_loss(mlp.weights['wd1']) + tf.nn.l2_loss(mlp.weights['out']))
optimizer = tf.train.AdamOptimizer(
    learning_rate=cnn.learning_rate).minimize(cost)

# Evaluate model
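For reference, the hand-written cost above is a binary cross-entropy summed over every (sample, label) entry (multi-label targets) plus L2 weight decay on the 'wd1' and 'out' weight matrices; tf.nn.l2_loss(t) computes sum(t**2)/2. An equivalent numpy restatement, added here only to clarify what the expression computes (not part of the original script):

import numpy as np

def multilabel_bce_l2(y, p, wd1, w_out, eps=1e-9, l2=0.01):
    # summed binary cross-entropy over all label entries
    bce = -np.sum(y * np.log(p + eps) + (1 - y) * np.log(1 - p + eps))
    # tf.nn.l2_loss(t) == np.sum(t ** 2) / 2
    return bce + l2 * (0.5 * np.sum(wd1 ** 2) + 0.5 * np.sum(w_out ** 2))

y = np.array([[1.0, 0.0, 1.0]])
p = np.array([[0.9, 0.2, 0.7]])
print(multilabel_bce_l2(y, p, np.ones((4, 4)), np.ones((4, 3))))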
Example #5
import sys
import pickle

import tensorflow as tf
import matplotlib.pyplot as plt

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.pipeline import FeatureUnion

import config
import utils

import class_Dataset as ds
import cnn as ml
from stop_words import get_stop_words
env = sys.argv[1]
mlp = ml.Cnn()
# Construct model
pred = mlp.network(mlp.x, mlp.weights, mlp.biases, mlp.dropout)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=mlp.y))
#cross_entropy_cnn = -1 * mlp.y * tf.nn.log_softmax(pred)  #method (2)
#cost = tf.reduce_sum(cross_entropy_cnn)
optimizer = tf.train.AdamOptimizer(
    learning_rate=mlp.learning_rate).minimize(cost)
#optimizer = tf.train.MomentumOptimizer(learning_rate=mlp.learning_rate, momentum=0.9).minimize(cost)
#optimizer = tf.train.AdagradOptimizer(learning_rate=mlp.learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(mlp.y, 1))
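# Assumed continuation (not shown in the excerpt): the boolean correct_pred is
# usually cast to float and averaged to obtain an accuracy tensor.
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))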
Example #6
        # Get action from Q-network (exploitation)
        # Estimate the Qs values state
        Qs = sess.run(nn.output,
                      feed_dict={nn.inputs: state.reshape((1, *state.shape))})

        # Take the biggest Q value (= the best action)
        choice = np.argmax(Qs)
        action = possible_actions[int(choice)]

    return action, explore_probability


tf.reset_default_graph()

# Instantiate the nn
nn = cnn.Cnn(state_size, action_size, learning_rate)


class Memory:
    def __init__(self, max_size):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        buffer_size = len(self.buffer)
        index = np.random.choice(np.arange(buffer_size),
                                 size=batch_size,
                                 replace=False)
Example #7
    else:
        # Append frame to deque, automatically removes the oldest frame
        stacked_frames.append(frame)

        # Build the stacked state (first dimension specifies different frames)
        stacked_state = np.stack(stacked_frames, axis=2)

    return stacked_state, stacked_frames


# Reset the graph
tf.reset_default_graph()

# Instantiate the DQNetwork
DQNetwork = cnn.Cnn(state_size, action_size, learning_rate)


class Memory:
    def __init__(self, max_size):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        buffer_size = len(self.buffer)
        index = np.random.choice(np.arange(buffer_size),
                                 size=batch_size,
                                 replace=False)
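        # Assumed completion of Memory.sample (the excerpt stops here, as does
        # the identical class in Example #6): return the experiences stored at
        # the randomly chosen indices.
        return [self.buffer[i] for i in index]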