Example #1
File: generate.py Project: chrwang/sonnets
def generate_sonnet_rhyme(num_states):
    hmm = train.load(num_states, is_reversed=True)
    with open('data/shakespeare.txt') as f:
        text = f.read()

    obs, vocab, inv_vocab = preprocess.get_observations(text)
    rhyme_dict = preprocess.build_rhyme_dict(text, vocab)
    lengths = preprocess.get_lengths(obs)
    punctuation = preprocess.get_punctuation(text)

    # 14 lines: three ABAB quatrains followed by a closing GG couplet
    samples = [None] * 14
    for quatrain in range(3):
        for line in range(2):
            # draw two rhyming seed words from a random rhyme group
            couplet = random.sample(random.choice(rhyme_dict), 2)
            length = np.random.choice(list(lengths.keys()),
                                      p=list(lengths.values()))

            samples[quatrain * 4 + line] = generate_emission_seeded(
                hmm, length, couplet[0])
            samples[quatrain * 4 + line + 2] = generate_emission_seeded(
                hmm, length, couplet[1])

    # final GG couplet; note it reuses the last sampled line length
    couplet = random.sample(random.choice(rhyme_dict), 2)
    samples[12] = generate_emission_seeded(hmm, length, couplet[0])
    samples[13] = generate_emission_seeded(hmm, length, couplet[1])

    # the model generates lines back-to-front, so un-reverse each one
    for sample in samples:
        sample.reverse()

    return format_sonnet(samples, inv_vocab, punctuation)
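Note the indexing: within each quatrain, lines 0 and 2 share one rhyme group and lines 1 and 3 share another, which yields the ABAB scheme; the last two lines form the GG couplet. Because the HMM was trained on reversed lines (is_reversed=True), each line is generated backwards from its rhyme word. A hypothetical invocation, assuming train.py has already saved a reversed model (the state count 10 is illustrative):

    print(generate_sonnet_rhyme(10))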
Example #2
File: generate.py Project: chrwang/sonnets
def generate_sonnet(num_states):
    hmm = train.load(num_states)
    with open('data/shakespeare.txt') as f:
        text = f.read()

    obs, vocab, inv_vocab = preprocess.get_observations(text)
    lengths = preprocess.get_lengths(obs)
    punctuation = preprocess.get_punctuation(text)

    samples = []
    for _ in range(14):
        # draw each line's length from the empirical length distribution
        length = np.random.choice(list(lengths.keys()),
                                  p=list(lengths.values()))
        samples.append(generate_emission(hmm, length))

    return format_sonnet(samples, inv_vocab, punctuation)
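The length sampling above implies that preprocess.get_lengths returns a mapping from line length to probability. A minimal sketch of that contract with made-up numbers (an assumption, not the project's actual output):

    import numpy as np

    lengths = {6: 0.2, 7: 0.5, 8: 0.3}   # hypothetical {length: probability}
    length = np.random.choice(list(lengths.keys()), p=list(lengths.values()))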
Example #3
def get_calibrated_model():
    trainloader, valid_loader, testloader, log_freq = data_load()
    # load saved model
    net = Net_MCDO()
    net = nn.DataParallel(net)
    net = load('Net_MCDO', net)

    # Wrap the model in a module that applies temperature scaling to its logits
    cali_model = ModelWithTemperature(net)

    # Tune the temperature on the validation set, then save the result
    cali_model.set_temperature(valid_loader)
    save('model_with_temperature', cali_model)
    print('Temperature-scaled model saved')
    print('Done!')

    # Compare calibration of the raw and temperature-scaled models
    test(net, True, testloader)
    test(cali_model, True, testloader)
    return cali_model
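ModelWithTemperature here appears to implement standard temperature scaling: a single scalar T is fit on the validation set and divides the logits before the softmax. A minimal sketch of the idea, not the wrapper's actual code:

    import torch.nn.functional as F

    def calibrated_probs(logits, temperature):
        # Dividing logits by T > 1 softens overconfident predictions
        # without changing the argmax, so accuracy is unaffected.
        return F.softmax(logits / temperature, dim=-1)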
Example #4
def get_tfidf(stem=0):
    X, y, _ = train.load()
    vec_pipe = train.get_vec_pipe(num_comp=0, reducer='svd', stem=stem)
    Xtr = vec_pipe.fit_transform(X)
    vec = vec_pipe.named_steps['vec']
    return vec, Xtr, y
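A hypothetical way to use the result, assuming the pipeline's 'vec' step is a scikit-learn TfidfVectorizer (recent versions expose get_feature_names_out):

    vec, Xtr, y = get_tfidf(stem=0)
    print(Xtr.shape)                         # (n_documents, n_features)
    print(vec.get_feature_names_out()[:10])  # a peek at the vocabulary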
Example #5
                              num_workers=NUM_WORKERS,
                              drop_last=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=True,
                             num_workers=NUM_WORKERS,
                             drop_last=False)

    # model, optimizer, scheduler
    facenet = FaceNet(model_name=model_name,
                      pool=pool,
                      embedding_size=embedding_size,
                      dropout=dropout,
                      device=device,
                      pretrain=pretrain)
    #facenet = torch.nn.DataParallel(facenet, device_ids=[0, 1, 2, 3])  # multi-GPU training across four CUDA devices
    optimizer = get_Optimizer(facenet, optimizer_type, lr,
                              weight_decay)  # optimizer
    scheduler = get_Scheduler(optimizer, lr, scheduler_name)  # scheduler
    # load a previously trained model (disabled here; flip the flag to resume)
    if False:
        facenet, optimizer, scheduler = load('./models/' + name)

    # train
    train(facenet.to(device), train_loader, eval_loader1, eval_loader2,
          optimizer, scheduler, num_epochs, eval_every, margin, device, name)
    dist_threshold = evalulate(facenet, eval_loader1, eval_loader2, device,
                               loss_fn)
    print('Distance threshold:', dist_threshold)
    test(facenet, test_loader, dist_threshold, device, loss_fn)
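The margin argument passed to train suggests a triplet-style objective over the embeddings. One plausible reading, sketched with the standard triplet loss (not necessarily this project's loss_fn):

    import torch.nn.functional as F

    def triplet_loss(anchor, positive, negative, margin):
        # pull anchor toward positive and push it from negative
        # until the gap is at least `margin`
        d_pos = F.pairwise_distance(anchor, positive)
        d_neg = F.pairwise_distance(anchor, negative)
        return F.relu(d_pos - d_neg + margin).mean()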
Example #6
File: test.py Project: mosssimo/glass
__author__ = 'simon'
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from matplotlib import pyplot

import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
import cPickle as pickle
from train import load

if __name__ == '__main__':
    X, y, cmap = load()

    print len(cmap), type(cmap)
    for k, v in cmap.iteritems():
        print k, v

    net1 = pickle.load(file('net1.pickle', 'rb'))
    print net1.get_all_params()
    pvs = net1.get_all_params_values()
    for k, v in pvs.iteritems():
        print k, v

    net1.save_params_to("net1_params.pkl")

    y_pred = net1.predict(X)

    print y[-10:]
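This snippet is Python 2 (print statements, cPickle, dict.iteritems, file()). A sketch of the Python 3 equivalents for the incompatible lines:

    import pickle                            # cPickle merged into pickle

    print(len(cmap), type(cmap))
    for k, v in cmap.items():                # iteritems() was removed
        print(k, v)

    with open('net1.pickle', 'rb') as f:     # file() was removed; use open()
        net1 = pickle.load(f)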
Example #7
    print(device)

    train_dataset = customized_dataset(df_train, mode='train')
    eval_dataset1 = customized_dataset(df_eval1, mode='eval')
    eval_dataset2 = customized_dataset(df_eval2, mode='eval')
    test_dataset = customized_dataset(df_test, mode='test')

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    eval_loader1 = DataLoader(eval_dataset1, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    eval_loader2 = DataLoader(eval_dataset2, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)

    # class_weights for arcface loss
    val_counts = df_train.target.value_counts().sort_index().values
    class_weights = 1/np.log1p(val_counts)
    class_weights = (class_weights / class_weights.sum()) * num_classes
    class_weights = torch.tensor(class_weights, dtype=torch.float32)
    # arcface
    metric_crit = ArcFaceLoss(arcface_s, arcface_m, crit, weight=class_weights, class_weights_norm=class_weights_norm)
    facenet = FaceNet2(num_classes=num_classes, model_name=model_name, pool=pool, embedding_size=embedding_size, dropout=dropout, device=device, pretrain=pretrain)
    optimizer = get_Optimizer2(facenet, metric_crit, optimizer_type, lr, weight_decay) # optimizer
    scheduler = get_Scheduler(optimizer, lr, scheduler_name) # scheduler
    # load a previously trained model (disabled here; flip the flag to resume)
    if False:
        facenet, optimizer, scheduler = load(name)
        facenet.to(device)
    # train
    train2(facenet.to(device), train_loader, eval_loader1, eval_loader2,
           metric_crit, optimizer, scheduler, num_epochs, eval_every,
           num_classes, device, name)
    dist_threshold = evalulate(facenet, eval_loader1, eval_loader2, device,
                               loss_fn)
    print('Distance threshold:', dist_threshold)
    test(facenet, test_loader, dist_threshold, device, loss_fn)
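The class weights above use inverse log frequency: rare classes get larger weights, and the vector is rescaled to sum to num_classes. A small numeric sketch with made-up counts:

    import numpy as np

    val_counts = np.array([1000, 100, 10])   # hypothetical per-class counts
    w = 1 / np.log1p(val_counts)             # ~[0.14, 0.22, 0.42]
    w = (w / w.sum()) * 3                    # rescaled to sum to num_classes = 3
    # w is now ~[0.56, 0.84, 1.61]: the rarest class weighs the most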
Example #8
def main():
    x_train, y_train = XOR_dataset()
    np.save("table.npy", x_train)
    np.save("out.npy", y_train)
    #train.train(x_train, y_train)
    train.load(x_train)
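XOR_dataset is project-specific; a hypothetical implementation consistent with the call above:

    import numpy as np

    def XOR_dataset():
        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
        y = np.array([[0], [1], [1], [0]], dtype=np.float32)  # XOR truth table
        return x, y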