Example #1
def get_audio_feature_extractor(model_path, gpu=-1):
    if gpu < 0:
        device = torch.device("cpu")
        model_dict = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
    else:
        device = torch.device("cuda:" + str(gpu))
        model_dict = torch.load(
            model_path, map_location=lambda storage, loc: storage.cuda(gpu))

    audio_rate = model_dict["audio_rate"]
    audio_feat_len = model_dict['audio_feat_len']
    rnn_gen_dim = model_dict['rnn_gen_dim']
    aud_enc_dim = model_dict['aud_enc_dim']
    video_rate = model_dict["video_rate"]

    encoder = RNN(audio_feat_len,
                  aud_enc_dim,
                  rnn_gen_dim,
                  audio_rate,
                  init_kernel=0.005,
                  init_stride=0.001)
    encoder.to(device)
    encoder.load_state_dict(model_dict['encoder'])

    overlap = audio_feat_len - 1.0 / video_rate
    return encoder, {
        "rate": audio_rate,
        "feature length": audio_feat_len,
        "overlap": overlap
    }
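A minimal usage sketch (the checkpoint path is hypothetical and assumed to contain the keys read above):

encoder, meta = get_audio_feature_extractor("checkpoints/audio_encoder.pt", gpu=-1)
print(meta["rate"], meta["feature length"], meta["overlap"])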
Example #2
class Scorer(object):
    def __init__(self, char_list, model_path, rnn_type, ninp, nhid, nlayers,
                 device):
        char_list = list(char_list) + ['sil_start', 'sil_end']
        self.inv_vocab_map = dict([(i, c) for (i, c) in enumerate(char_list)])
        self.vocab_map = dict([(c, i) for (i, c) in enumerate(char_list)])
        self.criterion = nn.CrossEntropyLoss()
        self.device = device
        self.rnn = RNN(rnn_type, len(char_list), ninp, nhid,
                       nlayers).to(self.device)
        self.rnn.load_state_dict(torch.load(model_path))
        self.rnn.eval()
        self.history = defaultdict(tuple)

    def get_score(self, string):
        if len(string) < 2:
            return 0, self.rnn.init_hidden(1)
        # map() returns an iterator in Python 3, so build a list for slicing
        string_idx = [self.vocab_map[x] for x in string]
        input = string_idx[:-1]
        grt = string_idx[1:]
        input = torch.LongTensor(input).to(self.device)
        grt = torch.LongTensor(grt).to(self.device)
        input = input.view(1, input.size(0))
        init_hidden = self.rnn.init_hidden(1)
        pred, hidden = self.rnn(input, init_hidden)
        pred = pred.view(-1, pred.size(-1))
        loss = self.criterion(pred, grt)
        return -(len(string_idx) - 1) * loss.item(), hidden

    def get_score_fast(self, strings):
        strings = [''.join(x) for x in strings]
        history_to_update = defaultdict(tuple)
        scores = []
        for string in strings:
            if len(string) <= 2:
                score, hidden_state = self.get_score(string)
                scores.append(score)
                history_to_update[string] = (score, hidden_state)
            elif string in self.history:
                history_to_update[string] = self.history[string]
                scores.append(self.history[string][0])
            elif string[:-1] in self.history:
                score, hidden = self.history[string[:-1]]
                input, grt = torch.LongTensor([
                    self.vocab_map[string[-2]]
                ]).view(1, 1).to(self.device), torch.LongTensor(
                    [self.vocab_map[string[-1]]]).to(self.device)
                pred, hidden = self.rnn(input, hidden)
                loss = self.criterion(pred.view(-1, pred.size(-1)), grt).item()
                history_to_update[string] = (score - loss, hidden)
                scores.append(score - loss)
            else:
                raise ValueError("%s not stored" % (string[:-1]))
        self.history = history_to_update
        return scores
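get_score_fast memoizes each prefix's score and hidden state, so extending a string by one character costs a single RNN step. A minimal usage sketch (the character list, model path, and dimensions are hypothetical):

scorer = Scorer("ab", "char_lm.pt", "LSTM", ninp=64, nhid=128, nlayers=2,
                device=torch.device("cpu"))
print(scorer.get_score_fast([["a"], ["a", "b"]]))   # seeds the history
print(scorer.get_score_fast([["a", "b", "a"]]))     # reuses the stored "ab" state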
Example #3
def load_model(input_size):

    model = RNN(input_size, hidden_size, num_layers)

    # load on CPU only
    checkpoint = torch.load('checkpoint.pt', map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    print(model)
    print('model training loss', checkpoint['loss'])
    print('model training epoch', checkpoint['epoch'])

    return model
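A minimal usage sketch (assumes hidden_size and num_layers are module-level globals, as the function body implies, and that checkpoint.pt was saved with the keys read above):

model = load_model(input_size=28)
with torch.no_grad():
    dummy = torch.zeros(1, 28, 28)  # assumed (batch, seq_len, input_size) layout
    output = model(dummy)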
Example #4
def creat_trunk_ply_by_nn(AE_model_dir1, AE_model_dir2, RNN_model_dir,
                          rawPc_ply_dir_list, device, threshold, save_dir):

    rnn_model = RNN().to(device)
    rnn_model.load_state_dict(torch.load(RNN_model_dir))

    AE_model1 = autoencoder.AE_3d_conv().to(device)
    AE_model1.load_state_dict(torch.load(AE_model_dir1))

    rnn_in_feature = get_rnn_in_featur(AE_model1, device, rawPc_ply_dir_list)
    rnn_out = get_rnn_out(rnn_model, rnn_in_feature)

    #     AE_model2 = autoencoder.AE_3d_conv().to(device)
    #     AE_model2.load_state_dict(torch.load(AE_model_dir2))

    AE_decoder_out = get_AE_decoder_out(AE_model1, rnn_out)
    #point_counts = len(AE_decoder_out[AE_decoder_out>0.2])
    AE_decoder_out = torch.squeeze(AE_decoder_out)

    print('trying to save predicted point cloud (.ply) from NN')
    tensor_to_ply(AE_decoder_out, threshold, save_dir)
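A minimal invocation sketch (all paths are hypothetical; the 0.2 threshold mirrors the commented-out point count check above):

creat_trunk_ply_by_nn("ae1.pt", "ae2.pt", "rnn.pt",
                      ["frame_000.ply", "frame_001.ply"],
                      torch.device("cpu"), threshold=0.2,
                      save_dir="predicted_trunk.ply")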
Example #5
def predicted_labels(sentence, hypothesis, classifier, network='best-GRU'):
    if network == 'best-GRU':
        # load model
        vocab = ['Europeans', 'Germans', 'Italians', 'Romans', 'all', 'children', 'fear', 'hate', 'like', 'love', 'not',
                 'some']
        rels = ['#', '<', '=', '>', '^', 'v', '|']

        word_dim = 25
        n_hidden = 128
        cpr_dim = 75

        model_path = '/Users/mathijs/Documents/Studie/MoL/thesis/mol_thesis/final_experiments/binary_fol_rnn/nobrackets/models/GRUbinary_2dets_4negs_train_0bracket_pairs1.pt'
        net = RNN('GRU', vocab, rels, word_dim, n_hidden, cpr_dim)
        net.load_state_dict(torch.load(model_path))
    else:
        # without this, net would be undefined for any other network type
        raise ValueError("unsupported network: %s" % network)

    s = [sentence.split()]

    _, hidden_vectors = net.rnn_forward(s, 1, hypothesis=hypothesis)
    test_hiddens = np.array(hidden_vectors[0])
    y_pred = classifier.predict(test_hiddens)

    labels = np.array([y_pred])

    return labels
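A minimal call sketch (trained_classifier is hypothetical: a fitted scikit-learn-style estimator over the GRU's hidden vectors, matching the predict() call above):

labels = predicted_labels("all Europeans love some Germans",
                          "some Italians fear all Romans",
                          trained_classifier)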
Example #6
n_epochs = 200
print_every = 500
frac_train = 0.90

n_hidden = 512

learning_rate = 0.001

model = RNN(20, n_hidden, 2)

criterion = nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate,
                            momentum=0.8, nesterov=True)

if os.path.isfile('all_pos_148.ckpt'):
    checkpoint = torch.load('all_pos_148.ckpt')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    print("=> loaded checkpoint ")
    with open(logfile_name, 'a') as outfile:
        outfile.write("=> loaded checkpoint\n")

#optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
#optimizer = torch.optim.ASGD(model.parameters(),lr=learning_rate)

def train(category_tensor, line_tensor):
    model.zero_grad()
    hidden = model.init_hidden()

    for i in range(line_tensor.size()[0]):
        output, hidden = model(line_tensor[i], hidden)
Example #7
    # input after only good parts of vae taken
    input_size = 50
    lr = 1e-4
    rnn = RNN(input_size, hidden_size)
    # renamed so the assignment does not shadow the torch.optim module
    optimizer = optim.Adam(rnn.parameters(), lr=lr, weight_decay=1e-6)
    if use_cuda:
        rnn.cuda()
    rnn_epoch = 0
    total_passes = 0

    train_loss = []
    test_loss = []
    if args.rnn_model_loadpath is not None:
        if os.path.exists(args.rnn_model_loadpath):
            rnn_model_dict = torch.load(args.rnn_model_loadpath)
            rnn.load_state_dict(rnn_model_dict['state_dict'])
            optimizer.load_state_dict(rnn_model_dict['optimizer'])
            rnn_epoch = rnn_model_dict['epoch']
            try:
                total_passes = rnn_model_dict['total_passes']
                train_loss = rnn_model_dict['train_loss']
                test_loss = rnn_model_dict['test_loss']
            except KeyError:
                print("could not load total passes")
            print("loaded rnn from %s at epoch %s" %
                  (args.rnn_model_loadpath, rnn_epoch))
        else:
            print("could not find model at %s" % args.rnn_model_loadpath)
            sys.exit()
    else:
        print("creating new model")
Example #8
from data_loader import fetch_data

# Our stuff we imported
from gensim.models import Word2Vec
from collections import Counter

from rnn import RNN
# from ffnn import FFNN, convert_to_vector_representation, make_indices, make_vocab

directory = 'models_b/'
model_paths = [
    'rnn_sgd_base.pt', 'rnn_rmsprop_base.pt', 'lstm_sgd_base.pt',
    'lstm_rmsprop_base.pt'
]
model1 = RNN(32, 1, 64, True)
model1.load_state_dict(torch.load(os.path.join(directory, model_paths[0])))
model2 = RNN(32, 1, 64, True)
model2.load_state_dict(torch.load(os.path.join(directory, model_paths[1])))
model3 = RNN(32, 1, 64, False)
model3.load_state_dict(torch.load(os.path.join(directory, model_paths[2])))
model4 = RNN(32, 1, 64, False)
model4.load_state_dict(torch.load(os.path.join(directory, model_paths[3])))
models = [model1, model2, model3, model4]

print('models successfully loaded')
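A hedged follow-up (an assumption, since the snippet is truncated before any inference code): models loaded for scoring are normally switched to eval mode first.

for m in models:
    m.eval()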

# Load trained word embeddings
train_data, valid_data = fetch_data()
wv_model = Word2Vec.load("word2vec.model")

validation_samples = []
Example #9
# load data
data_file = '/Users/mathijs/Documents/Studie/MoL/thesis/mol_thesis/data/binary/2dets_4negs/partial_bracketing/binary_2dets_4negs_train_0bracket_all.txt'
data = dat.SentencePairsDataset(data_file)
data.load_data(sequential=True)

vocab = [
    'Europeans', 'Germans', 'Italians', 'Romans', 'all', 'children', 'fear',
    'hate', 'like', 'love', 'not', 'some'
]
rels = ['#', '<', '=', '>', '^', 'v', '|']

word_dim = 25
n_hidden = 128
cpr_dim = 75

model_path = '/Users/mathijs/Documents/Studie/MoL/thesis/mol_thesis/final_experiments/binary_fol_rnn/nobrackets/models/GRUbinary_2dets_4negs_train_0bracket_pairs1.pt'
net = RNN('GRU', vocab, rels, word_dim, n_hidden, cpr_dim)
net.load_state_dict(torch.load(model_path))

net.eval()

shuffle_samples = False
batch_size = 50
batches = dat.BatchData(data, batch_size, shuffle_samples)
batches.create_batches()

for batch_idx in range(batches.num_batches):
    print('Batch %i / %i' % (batch_idx, batches.num_batches))
    inputs = batches.batched_data[batch_idx]
    net(inputs)
Example #10
class SDFA():
    def __init__(self, model_path, gpu=-1):
        self.model_path = model_path
        if gpu < 0:
            self.device = torch.device("cpu")
            model_dict = torch.load(self.model_path,
                                    map_location=lambda storage, loc: storage)
            self.fa = face_alignment.FaceAlignment(
                face_alignment.LandmarksType._2D,
                device="cpu",
                flip_input=False)
        else:
            self.device = torch.device("cuda:" + str(gpu))
            model_dict = torch.load(
                self.model_path,
                map_location=lambda storage, loc: storage.cuda(gpu))
            self.fa = face_alignment.FaceAlignment(
                face_alignment.LandmarksType._2D,
                device="cuda:" + str(gpu),
                flip_input=False)

        self.stablePntsIDs = [33, 36, 39, 42, 45]
        self.mean_face = model_dict["mean_face"]
        self.img_size = model_dict["img_size"]
        self.audio_rate = model_dict["audio_rate"]
        self.video_rate = model_dict["video_rate"]
        self.audio_feat_len = model_dict['audio_feat_len']
        self.audio_feat_samples = model_dict['audio_feat_samples']
        self.id_enc_dim = model_dict['id_enc_dim']
        self.rnn_gen_dim = model_dict['rnn_gen_dim']
        self.aud_enc_dim = model_dict['aud_enc_dim']
        self.aux_latent = model_dict['aux_latent']
        self.sequential_noise = model_dict['sequential_noise']
        self.conversion_dict = {'s16': np.int16, 's32': np.int32}

        self.img_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((self.img_size[0], self.img_size[1])),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        self.encoder = RNN(self.audio_feat_len,
                           self.aud_enc_dim,
                           self.rnn_gen_dim,
                           self.audio_rate,
                           init_kernel=0.005,
                           init_stride=0.001)
        self.encoder.to(self.device)
        self.encoder.load_state_dict(model_dict['encoder'])

        self.encoder_id = ImageEncoder(code_size=self.id_enc_dim,
                                       img_size=self.img_size)
        self.encoder_id.to(self.device)
        self.encoder_id.load_state_dict(model_dict['encoder_id'])

        skip_channels = list(self.encoder_id.channels)
        skip_channels.reverse()

        self.generator = Generator(
            self.img_size,
            self.rnn_gen_dim,
            condition_size=self.id_enc_dim,
            num_gen_channels=self.encoder_id.channels[-1],
            skip_channels=skip_channels,
            aux_size=self.aux_latent,
            sequential_noise=self.sequential_noise)

        self.generator.to(self.device)
        self.generator.load_state_dict(model_dict['generator'])

        self.encoder.eval()
        self.encoder_id.eval()
        self.generator.eval()

    def save_video(self,
                   video,
                   audio,
                   path,
                   overwrite=True,
                   experimental_ffmpeg=False,
                   scale=None):
        if not os.path.isabs(path):
            path = os.getcwd() + "/" + path

        with tempdir() as dirpath:

            writer = sio.FFmpegWriter(dirpath + "/tmp.avi",
                                      inputdict={
                                          '-r': str(self.video_rate) + "/1",
                                      },
                                      outputdict={
                                          '-r': str(self.video_rate) + "/1",
                                      })
            for i in range(video.shape[0]):
                frame = np.rollaxis(video[i, :, :, :], 0, 3)

                if scale is not None:
                    frame = tf.rescale(frame,
                                       scale,
                                       anti_aliasing=True,
                                       multichannel=True,
                                       mode='reflect')

                writer.writeFrame(frame)
            writer.close()

            wav.write(dirpath + "/tmp.wav", self.audio_rate, audio)

            in1 = ffmpeg.input(dirpath + "/tmp.avi")
            in2 = ffmpeg.input(dirpath + "/tmp.wav")
            if experimental_ffmpeg:
                out = ffmpeg.output(in1['v'],
                                    in2['a'],
                                    path,
                                    strict='-2',
                                    loglevel="panic")
            else:
                out = ffmpeg.output(in1['v'], in2['a'], path, loglevel="panic")

            if overwrite:
                out = out.overwrite_output()
            out.run()

    def preprocess_img(self, img):
        src = self.fa.get_landmarks(img)[0][self.stablePntsIDs, :]
        dst = self.mean_face[self.stablePntsIDs, :]
        tform = tf.estimate_transform('similarity', src, dst)
        warped = tf.warp(img,
                         inverse_map=tform.inverse,
                         output_shape=self.img_size)
        warped = warped * 255
        warped = warped.astype('uint8')

        return warped

    def _cut_sequence_(self, seq, cutting_stride, pad_samples):
        pad_left = torch.zeros(pad_samples // 2, 1)
        pad_right = torch.zeros(pad_samples - pad_samples // 2, 1)

        seq = torch.cat((pad_left, seq), 0)
        seq = torch.cat((seq, pad_right), 0)

        stacked = seq.narrow(0, 0, self.audio_feat_samples).unsqueeze(0)
        iterations = (seq.size()[0] -
                      self.audio_feat_samples) // cutting_stride + 1
        for i in range(1, iterations):
            stacked = torch.cat(
                (stacked,
                 seq.narrow(0, i * cutting_stride,
                            self.audio_feat_samples).unsqueeze(0)))
        return stacked.to(self.device)

    def _broadcast_elements_(self, batch, repeat_no):
        total_tensors = []
        for i in range(0, batch.size()[0]):
            total_tensors += [torch.stack(repeat_no * [batch[i]])]

        return torch.stack(total_tensors)

    def __call__(self, img, audio, fs=None, aligned=False):
        if isinstance(img, str):
            frm = Image.open(img)
            frm.thumbnail((400, 400))
            frame = np.array(frm)
        else:
            frame = img

        if not aligned:
            frame = self.preprocess_img(frame)

        if isinstance(audio, str):
            info = mediainfo(audio)
            fs = int(info['sample_rate'])
            audio = np.array(
                AudioSegment.from_file(audio,
                                       info['format_name']).set_channels(
                                           1).get_array_of_samples())

            if info['sample_fmt'] in self.conversion_dict:
                audio = audio.astype(self.conversion_dict[info['sample_fmt']])
            else:
                if max(audio) > np.iinfo(np.int16).max:
                    audio = audio.astype(np.int32)
                else:
                    audio = audio.astype(np.int16)

        if audio.ndim > 1 and audio.shape[1] > 1:
            audio = audio[:, 0]

        max_value = np.iinfo(audio.dtype).max
        if fs != self.audio_rate:
            seq_length = audio.shape[0]
            speech = torch.from_numpy(
                signal.resample(
                    audio, int(seq_length * self.audio_rate / float(fs))) /
                float(max_value)).float()
            speech = speech.view(-1, 1)
        else:
            audio = torch.from_numpy(audio / float(max_value)).float()
            speech = audio.view(-1, 1)

        frame = self.img_transform(frame).to(self.device)

        cutting_stride = int(self.audio_rate / float(self.video_rate))
        audio_seq_padding = self.audio_feat_samples - cutting_stride

        audio_feat_seq = self._cut_sequence_(speech, cutting_stride,
                                             audio_seq_padding)
        frame = frame.unsqueeze(0)
        audio_feat_seq = audio_feat_seq.unsqueeze(0)
        audio_feat_seq_length = audio_feat_seq.size()[1]

        z = self.encoder(audio_feat_seq, [audio_feat_seq_length])
        noise = torch.FloatTensor(1, audio_feat_seq_length,
                                  self.aux_latent).normal_(0, 0.33).to(
                                      self.device)
        z_id, skips = self.encoder_id(frame, retain_intermediate=True)
        skip_connections = []
        for skip_variable in skips:
            skip_connections.append(
                self._broadcast_elements_(skip_variable,
                                          z.size()[1]))
        skip_connections.reverse()

        z_id = self._broadcast_elements_(z_id, z.size()[1])
        gen_video = self.generator(z, c=z_id, aux=noise, skip=skip_connections)

        returned_audio = ((2**15) * speech.detach().cpu().numpy()).astype(
            np.int16)
        gen_video = 125 * gen_video.squeeze().detach().cpu().numpy() + 125
        return gen_video, returned_audio
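End to end, the class turns one face image plus an audio track into a generated talking-head video. A minimal usage sketch (file names are hypothetical):

sdfa = SDFA("checkpoints/sdfa_model.dat", gpu=-1)
video, audio = sdfa("face.png", "speech.wav")
sdfa.save_video(video, audio, "generated.mp4")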
Example #11
from cuda_check import device
from parse import *
from rnn import RNN
from util import *

usefulness, problemlemmas = get_usefulness_problemslemmas()
all_letters = string.printable
n_letters = len(all_letters)

# Output is just a float, the proof length ratios
output_size = 1
n_hidden = 128

model = RNN(n_letters, n_hidden, output_size)
state_dict = torch.load('./test2models/training.pt')
model.load_state_dict(state_dict)
model.to(device)

criterion = nn.MSELoss()


def eval_model(usefulness_tensor, line_tensor):
    hidden = model.initHidden()
    output = None

    model.zero_grad()

    for i in range(line_tensor.size()[0]):
        output, hidden = model(line_tensor[i], hidden)

    loss = criterion(output, usefulness_tensor)
Example #12
    cnn.cuda()
    rnn.cuda()
    loss_fn.cuda()
else:
    raise ValueError('Please specify a valid device from ["cpu", "gpu"].')
print('Loaded the models to the %s.' % (params['device'].upper()))

if params['is_training']:
    if params['resume_training']:
        print("Loading the model - %s" %
              (params['resume_model_train'] + '.ckpt'))
        state_dict = torch.load(
            os.path.join(params['output_dir'],
                         params['resume_model_train'] + '.ckpt'))
        cnn.load_state_dict(state_dict['encoder_state_dict'])
        rnn.load_state_dict(state_dict['decoder_state_dict'])
        optimizer.load_state_dict(state_dict['optimizer_state_dict'])
        print("Models loaded.")

    cnn.train()
    rnn.train()

    start_time = time.time()

    print('Training started.')
    for epoch in range(params['num_epochs']):

        print("Epoch %d started." % (epoch + 1))

        train_loss = []
        for idx, (_, image, caption,
Example #13
from rnn import RNN
from ffnn import FFNN, convert_to_vector_representation, make_indices, make_vocab

# Load models
base = ('rnn_sgd_base.pt', 'ffnn_sgd_base.pt')
hx2 = ('rnn_sgd_hx2.pt', 'ffnn_sgd_hx2.pt')
lx2 = ('rnn_sgd_lx2.pt', 'ffnn_sgd_lx2.pt')

files = (base, hx2, lx2)
directory = 'models_part_a/'

base_models = []
#RNN SGD BASE
path = directory + base[0]
model = RNN(32, 1, 64, True)
model.load_state_dict(torch.load(path))
base_models.append(model)
#FFNN SGD BASE
path = directory + base[1]
model = FFNN(97305, 32, 1)
model.load_state_dict(torch.load(path))
base_models.append(model)

hx2_models = []
#RNN SGD hx2
path = directory + hx2[0]
model = RNN(64, 1, 64, True)
model.load_state_dict(torch.load(path))
hx2_models.append(model)
#FFNN SGD hx2
path = directory + hx2[1]
Example #14
HL = 1
HN1 = 5
EPOCHS = 30
BATCH_SIZE = 8
LR = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
MODELS = {}

rnn = RNN(3, 5, 12, HN1, HL)
init_state = copy.deepcopy(rnn.state_dict())
for lr in LR:
    MSEs = []
    for index, subset in enumerate(subset_train_list):
        subset.value = np.array(subset.value)
        subset_test_list[index].value = np.array(subset_test_list[index].value)

        rnn.load_state_dict(init_state)
        training_inputs = subset.value[:, 0:5]
        training_labels = subset.value[:, 5:]
        test_inputs = subset_test_list[index].value[:, 0:5]
        test_labels = subset_test_list[index].value[:, 5:]

        training_inputs = np.split(training_inputs, 505)
        training_labels = np.split(training_labels, 505)

        test_inputs = np.array([test_inputs])
        test_labels = np.array([test_labels])
        
        train(rnn, training_inputs, training_labels, EPOCHS, lr, BATCH_SIZE)
        avg_mse = test(test_inputs, test_labels, rnn)
        MSEs.append(avg_mse)
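    # Hypothetical continuation: MODELS is defined above but never used in the
    # snippet shown; presumably each learning rate's mean MSE is recorded here.
    MODELS[lr] = sum(MSEs) / len(MSEs)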
Example #15
except FileNotFoundError as e:
    print(e)
    print("Please download the input data from 'https://cs.stanford.edu/people/karpathy/char-rnn/'")
    print("Place it in the 'input' directory")
    exit(1)

input_length = len(input_text)


net = RNN(input_size=ALPHABET_SIZE,
          hidden_size=HIDDEN_SIZE,
          output_size=ALPHABET_SIZE)

if MODEL_SAVE_PATH.exists():
    print("Loading trained model from file")
    net.load_state_dict(torch.load(MODEL_SAVE_PATH))

net.train()

optimizer = optim.RMSprop(net.parameters())

hidden_state = torch.zeros(BATCH_SIZE, HIDDEN_SIZE)

total_loss = 0.0

print("Starting to train char RNN")
i = 0
last_print = 0
while i < input_length:
    if i + BATCH_SIZE >= input_length:
        # TODO: pad last batch to `BATCH_SIZE`
Example #16
def main(args):

    config_file = args.config
    test = args.test

    cfg = Config(config_file)

    tr = None
    if test is None:
        tr = DataSet(cfg.tr_data, cfg)
        te = DataSet(cfg.te_data, cfg, sub_sample=1)
        tr0 = DataSet([cfg.tr_data[0]], cfg, sub_sample=1)
        cfg.att = te.sz[1]
    else:
        if test == 'te':
            te = DataSet([cfg.te_data[0]], cfg)
        else:
            te = DataSet([cfg.tr_data[0]], cfg)
        cfg.att = te.sz[1]

    iterations = 10000
    loop = cfg.loop
    print "input attribute", cfg.att, "LR", cfg.lr, 'feature', cfg.feature_len

    n_att = cfg.att
    n_length = cfg.feature_len
    n_hidden = 256
    n_output = cfg.num_output

    mrnn = RNN(n_att, n_length, n_hidden, n_output, cfg.lr)
    # print("Model's state_dict:")
    # for param_tensor in mrnn.state_dict():
    #     print(param_tensor, "\t", mrnn.state_dict()[param_tensor].size())
    hidden = ToTensor(np.ones(n_hidden).astype(np.float32))

    if test:
        mrnn.load_state_dict(torch.load(cfg.netTest[:-3]))
        run_test(mrnn, te, cfg, hidden)
        tr_loss, tr_median = run_test(mrnn, te, cfg, hidden)
        for a in range(len(tr_loss)):
            print(a, tr_loss[a], tr_median[a])

        exit(0)

    if cfg.renetFile:
        mrnn.load_state_dict(torch.load(cfg.renetFile[:-3]))

    t00 = datetime.datetime.now()

    T = 0
    T_err = 0
    for a in range(iterations):

        tr_pre_data = tr.prepare(multi=1)
        while tr_pre_data:
            for b in tr_pre_data:
                length = len(b[0])
                x = ToTensor(b[0].reshape(length, cfg.feature_len, cfg.att).astype(np.float32))
                y = ToTensor(b[1].astype(np.float32))
                err = mrnn.train(y, x, hidden)
                if a % loop == 0 and a > 0:
                    t1 = datetime.datetime.now()
                    print(a, (t1 - t00).total_seconds() / 3600.0, T_err / T)
                    T_err = 0
                    T = 0
                    torch.save(mrnn.state_dict(), cfg.netFile[:-3])
                T_err += err
                T += 1

            tr_pre_data = tr.get_next()
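A hypothetical entry point, assuming the args object is parsed with argparse (the original snippet does not show it):

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config')
    parser.add_argument('--test', default=None)
    main(parser.parse_args())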
Example #17
def main():
    cuda = int(torch.cuda.is_available()) - 1

    # define fields
    TEXT = data.Field(lower=True, init_token="<start>", eos_token="<end>")
    LABEL = data.Field(sequential=False, unk_token=None)

    # data paths
    data_path = './data/'
    train_path = 'train.tsv'
    val_path = 'val.tsv'
    test_path = 'test.tsv'

    # hyperparams
    hidden_size = 64
    num_classes = 2
    num_layers = 2
    num_dir = 2
    batch_size = 8
    emb_dim = 300
    dropout = .2
    net_type = 'lstm'
    embfix = False

    # build dataset splits
    train, val, test = data.TabularDataset.splits(path=data_path,
                                                  train=train_path,
                                                  validation=val_path,
                                                  test=test_path,
                                                  format='tsv',
                                                  fields=[('text', TEXT),
                                                          ('label', LABEL)])

    # build vocabs
    TEXT.build_vocab(train, vectors=GloVe(name='6B', dim=emb_dim), min_freq=2)
    prevecs = TEXT.vocab.vectors
    #TEXT.build_vocab(train, min_freq=3)
    LABEL.build_vocab(train)

    num_classes = len(LABEL.vocab)
    input_size = len(TEXT.vocab)

    # build iterators
    train_iter = data.BucketIterator(train,
                                     batch_size=batch_size,
                                     sort_key=lambda x: len(x.text),
                                     train=True)
    val_iter = data.Iterator(val,
                             batch_size=batch_size,
                             repeat=False,
                             train=False,
                             sort=False,
                             shuffle=False)
    test_iter = data.Iterator(test,
                              batch_size=batch_size,
                              repeat=False,
                              train=False,
                              sort=False,
                              shuffle=False)  #batch_size = len(test)

    model = RNN(input_size=input_size,
                hidden_size=hidden_size,
                num_classes=num_classes,
                prevecs=prevecs,
                num_layers=num_layers,
                num_dir=num_dir,
                batch_size=batch_size,
                emb_dim=emb_dim,
                embfix=embfix,
                dropout=dropout,
                net_type=net_type)

    model.load_state_dict(torch.load('./models/e16_100.0.pt'))

    print('type(model):', type(model))
    print('model:', model)

    val_acc, val_preds, _ = evaluate(val_iter, model, TEXT, LABEL)
    print('val_acc:', val_acc)
    test_acc, test_preds, _ = evaluate(test_iter, model, TEXT, LABEL)
    print('test_acc:', test_acc)