Example #1
import math

import numpy as np

import Train


def aim(Phen):
    """Objective function: train one model per individual in the population Phen."""
    f = np.zeros([1, 1])
    CV = np.zeros([1, 1])
    for i in Phen:
        train_model = Train.train(i)
        train_model.training()
        # Number of selected features (genes set to 1); clamp to 1 to avoid log(0)
        c = sum(i == 1)
        if c == 0:
            c = 1
        f = np.append(f, np.array([[train_model.acc]]), axis=0)
        CV = np.append(CV, np.array([[math.log(c, 64)]]), axis=0)

    return f[1:], CV[1:]  # return the objective-value and constraint matrices
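A note on the pattern above: np.append copies the entire array on every call, so f and CV are rebuilt from scratch on each iteration. A minimal sketch of the same loop that collects rows in plain lists and converts once at the end (evaluate is a hypothetical callable standing in for the Train.train(i).training() step, assumed to return the trained model's accuracy):

import math

import numpy as np


def aim_listed(Phen, evaluate):
    f_rows, cv_rows = [], []
    for i in Phen:
        acc = evaluate(i)                  # hypothetical stand-in for Train.train(i)
        c = max(int(np.sum(i == 1)), 1)    # clamp to 1 to avoid log(0)
        f_rows.append([acc])
        cv_rows.append([math.log(c, 64)])
    return np.array(f_rows), np.array(cv_rows)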
Example #2
    # data loaders
    sen_data_loader = ReadSenData(config)
    tra_sen_data, dev_sen_data = sen_data_loader.read_data()
    word_data_loader = ReadData(config)
    tra_data, dev_data = word_data_loader.read_data()

    # vocab: load the cached vocabularies if present, otherwise build and pickle them
    if os.path.isfile(config.fact_word_vocab):
        tra_fact_word_vocab = read_pkl(config.fact_word_vocab)
        tra_fact_char_vocab = read_pkl(config.fact_char_vocab)
        if config.read_sen:
            tra_fact_sen_vocab = read_pkl(config.fact_sen_vocab)
    else:
        if not os.path.isdir(config.save_vocab_path):
            os.makedirs(config.save_vocab_path)
        tra_fact_word_vocab = FactWordVocab([tra_data, dev_data], config)
        with open(config.fact_word_vocab, 'wb') as f:
            pickle.dump(tra_fact_word_vocab, f)
        tra_fact_char_vocab = FactCharVocab([tra_data, dev_data], config)
        with open(config.fact_char_vocab, 'wb') as f:
            pickle.dump(tra_fact_char_vocab, f)
        if config.read_sen:
            tra_fact_sen_vocab = FactSenVocab([tra_sen_data, dev_sen_data], config)
            with open(config.fact_sen_vocab, 'wb') as f:
                pickle.dump(tra_fact_sen_vocab, f)

    model = MyBertModel(config)

    if config.use_cuda:
        model.cuda()

    # train
    train(model, tra_data, dev_data, tra_fact_word_vocab, config)
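The load-or-build vocab block above recurs almost verbatim in Examples #3 and #6 below. A small helper capturing that caching pattern (load_or_build and build_fn are illustrative names, not part of the examples; a plain pickle.load stands in for the examples' read_pkl helper):

import os
import pickle


def load_or_build(path, build_fn):
    # Return the pickled object at `path`; on a cache miss, build it
    # with `build_fn`, pickle it for next time, and return it.
    if os.path.isfile(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    cache_dir = os.path.dirname(path)
    if cache_dir and not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    obj = build_fn()
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    return obj

With it, the word-vocab branch above would reduce to something like tra_fact_word_vocab = load_or_build(config.fact_word_vocab, lambda: FactWordVocab([tra_data, dev_data], config)).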
Example #3
    if gpu:
        config.add_args('Train', 'use_cuda', 'True')

    word_data_loader = ReadData(config)

    usual_tra_data_set, virus_tra_data_set, usual_dev_data_set, virus_dev_data_set, usual_vat_data, virus_vat_data \
        = word_data_loader.read_data(tokenizer)
    usual_eval_data, virus_eval_data = word_data_loader.read_eval_data(tokenizer)

    # vocab: load the cached tag vocab if present, otherwise build and pickle it
    if os.path.isfile(config.fact_word_tag_vocab):
        tag_vocab = read_pkl(config.fact_word_tag_vocab)
    else:
        if not os.path.isdir(config.save_vocab_path):
            os.makedirs(config.save_vocab_path)
        tag_vocab = FactWordTagVocab([usual_tra_data_set, virus_tra_data_set])
        with open(config.fact_word_tag_vocab, 'wb') as f:
            pickle.dump(tag_vocab, f)

    distinguish_vocab = DistinguishVocab()

    bert_model = MyBertModel(config)
    distinguish_model = DistinguishModel(config)

    if config.use_cuda:
        bert_model = bert_model.cuda()
        distinguish_model = distinguish_model.cuda()

    # train
    train(bert_model, distinguish_model, usual_tra_data_set, virus_tra_data_set, usual_dev_data_set, virus_dev_data_set, tag_vocab, config,
          distinguish_vocab, tokenizer, usual_eval_data, virus_eval_data, usual_vat_data, virus_vat_data)
Example #4
import torch

from Model import Model
from Train import DataLoader, BiOptimizer, train
from Evaluation import Visualizer

# Select a real device instead of passing None around
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = Model(width=8, height=8).to(device)
data_loader = DataLoader(data_dir='../Data/npimage8.npy',
                         batch_size=128,
                         device=device)
optim = BiOptimizer(model)
visualizer = Visualizer(model, device)

optim.step()

# for batch in data_loader:
#     _ = model(batch[0])

# train.CTR = True
train.train(model, data_loader, optim, 1)
visualizer.visualize()
visualizer.show()
Example #5
        # Fragment of a training step: unpack a batch and move it to the device
        imgs, labels, fn = data
        imgs = imgs.to(device)
        labels = labels.to(device)

        out_hm, out_wh, out_off = self.model(imgs)

        # out = self.model(imgs)
        # out_hm = torch.cat([o['hm'].squeeze() for o in out], dim=0)
        # out_off = torch.cat([o['off'].squeeze() for o in out], dim=0)
        # out_wh = torch.cat([o['wh'].squeeze() for o in out], dim=0)

        # CenterNet-style objectives: focal loss on the centre heatmap,
        # regression losses on the centre offsets and box sizes
        loss_hm = self.focal_loss(out_hm, labels[:, 0])
        loss_off = self.regloss(out_off, labels[:, [1, 2]])
        loss_wh = self.regloss(out_wh, labels[:, [3, 4]])

        losses = {
            'loss_hm': loss_hm,
            'loss_wh': loss_wh,
            'loss_offset': loss_off,
        }

        # Weighted sum; the size term is down-weighted by 0.1
        loss = loss_hm + 0.1 * loss_wh + loss_off

        return losses, loss



if __name__ == '__main__':

    context = Context(cfg)
    train(context)
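Example #5's self.focal_loss and self.regloss classes are not shown. For orientation, here is a minimal sketch of the penalty-reduced focal loss commonly applied to centre heatmaps in CenterNet-style detectors; it is a generic reconstruction under that assumption, not the example's actual implementation:

import torch


def heatmap_focal_loss(pred, gt, alpha=2, beta=4):
    # pred: predicted heatmap with values in (0, 1); gt: Gaussian-splatted
    # target heatmap, where gt == 1 marks the positive centre pixels.
    pred = pred.clamp(1e-6, 1 - 1e-6)  # keep the logs finite
    pos = gt.eq(1).float()
    neg = 1.0 - pos
    pos_loss = -((1 - pred) ** alpha) * torch.log(pred) * pos
    neg_loss = -((1 - gt) ** beta) * (pred ** alpha) * torch.log(1 - pred) * neg
    num_pos = pos.sum().clamp(min=1.0)  # normalise by the number of centres
    return (pos_loss.sum() + neg_loss.sum()) / num_pos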
Example #6
    if gpu:
        config.add_args('Train', 'use_cuda', 'True')

    word_data_loader = ReadData(config)

    kd_tra_data_set, ca_tra_data_set, kd_dev_data_set, ca_dev_data_set = word_data_loader.read_data(
        tokenizer)
    kd_tra_data, kd_tra_data_gold = kd_tra_data_set[0], kd_tra_data_set[1]
    ca_tra_data, ca_tra_data_gold = ca_tra_data_set[0], ca_tra_data_set[1]
    kd_dev_data, kd_dev_data_gold = kd_dev_data_set[0], kd_dev_data_set[1]
    ca_dev_data, ca_dev_data_gold = ca_dev_data_set[0], ca_dev_data_set[1]

    # vocab: load the cached tag vocab if present, otherwise build and pickle it
    if os.path.isfile(config.fact_word_tag_vocab):
        tag_vocab = read_pkl(config.fact_word_tag_vocab)
    else:
        if not os.path.isdir(config.save_vocab_path):
            os.makedirs(config.save_vocab_path)
        tag_vocab = FactWordTagVocab([kd_tra_data, ca_tra_data])
        with open(config.fact_word_tag_vocab, 'wb') as f:
            pickle.dump(tag_vocab, f)

    bert_model = MyBertModel(config)

    if config.use_cuda:
        bert_model = bert_model.cuda()

    # train
    train(bert_model, kd_tra_data, ca_tra_data, kd_dev_data, ca_dev_data,
          tag_vocab, config, kd_tra_data_gold, ca_tra_data_gold,
          kd_dev_data_gold, ca_dev_data_gold, tokenizer)