Example #1
import pickle

import torch

# UMWE and Evaluator come from this project's own modules; adjust the import
# to the actual module layout, e.g.:
# from umwe import UMWE, Evaluator


def main():
    USE_GPU = True
    if USE_GPU and torch.cuda.is_available():
        torch.cuda.empty_cache()
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
        
    print('using device:', device)
    dtype = torch.float32
    
    # Optionally reload a previously saved model instead of training from scratch:
    # filename = 'curr_model_soumya_mat_it_es'
    # with open(filename, 'rb') as f:
    #     model = pickle.load(f)

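    # Training hyperparameters.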
    n_epochs = 5
    n_refinement = 5
    batch_size = 32
    
    model = UMWE(dtype, device, batch_size, n_epochs, n_refinement)
    model.build_model()
    # Adversarial training (discriminator fitting) of the mappings.
    model.discrim_fit()

    # Save the adversarially trained model.
    filename = 'curr_model_soumya_mat_it_es'
    with open(filename, 'wb') as f:
        pickle.dump(model, f)

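    # Evaluate cross-lingual word similarity (CLWS) and word translation
    # after adversarial training.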
    eval_ = Evaluator(model)
    print(eval_.clws('es', 'en'))
    print(eval_.clws('en', 'es'))
    print(eval_.clws('it', 'en'))
    eval_.word_translation('es', 'en')
    eval_.word_translation('en', 'es')
    eval_.word_translation('it', 'en')

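    # MPSR refinement of the multilingual mappings, then the same evaluation again.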
    model.mpsr_refine()
    print(eval_.clws('es', 'en'))
    print(eval_.clws('en', 'es'))
    print(eval_.clws('it', 'en'))
    eval_.word_translation('es', 'en')
    eval_.word_translation('en', 'es')
    eval_.word_translation('it', 'en')
    
    
    # Save the MPSR-refined model.
    filename = 'curr_model_soumya_mpsr_it_es'
    with open(filename, 'wb') as f:
        pickle.dump(model, f)
    
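    # Export the embeddings for every source language in text format.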
    for lang in model.src_langs.values():
        model.export_embeddings(lang, model.embs, "txt", "20th")
Example #2
import pickle

import torch

# UMWE and Evaluator come from this project's own modules; adjust the import
# to the actual module layout, e.g.:
# from umwe import UMWE, Evaluator


def main():
    USE_GPU = True
    if USE_GPU and torch.cuda.is_available():
        torch.cuda.empty_cache()
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
        
    print('using device:', device)
    dtype = torch.float32
    
    # Optionally reload a previously saved model instead of training from scratch:
    # filename = 'curr_model'
    # with open(filename, 'rb') as f:
    #     model = pickle.load(f)

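    # Positional arguments after dtype and device follow Example #1's order:
    # presumably batch size 32 and 2 training epochs.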
    model = UMWE(dtype, device, 32, 2)
    model.build_model()
    # Adversarial training and model checkpointing are skipped in this run:
    # model.discrim_fit()
    # filename = 'curr_model'
    # with open(filename, 'wb') as f:
    #     pickle.dump(model, f)

    # MPSR refinement of the multilingual mappings.
    model.mpsr_refine()

    # The export could also loop over every source language:
    # for lang in model.src_langs.values():
    #     model.export_embeddings(lang, model.embs, "txt")
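
    # Export the mapped Spanish embeddings to a text file, then evaluate
    # cross-lingual word similarity and word translation for es-en.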
    model.export_embeddings('es', model.embs, "txt")
    eval_ = Evaluator(model)
    print(eval_.clws('es', 'en'))
    eval_.word_translation('es', 'en')