Beispiel #1
0
def main(arg, min_edge_distance=0.5, max_edge_distance=1.5):
    """Configure train/test dataset parameters and evaluate a model.

    ``arg`` is expected to hold at least three entries: a train-dataset
    spec, a model spec, and a test-dataset spec.  An optional fourth
    entry overrides the model's layer count.
    """
    dataset_param = arg2dataset_param(arg[0])
    model_param = arg2model_param(arg[1])
    test_dataset_param = arg2dataset_param(arg[2])
    # Optional fourth argument overrides the layer count; otherwise
    # the model's own configuration is used below.
    layer_num = int(arg[3]) if len(arg) > 3 else None

    dataset_param.size = 100000
    dataset_param.min_num_node = 4
    dataset_param.num_num_node = 30

    test_dataset_param.size = 1000
    test_dataset_param.min_num_node = 4
    test_dataset_param.num_num_node = 30
    test_dataset_param.min_edge_distance = min_edge_distance
    test_dataset_param.max_edge_distance = max_edge_distance

    metric_name_list = get_metric_names(test_dataset_param.dataset_name)

    # Batch size shrinks with graph size and layer depth, clamped to [1, 50].
    effective_layers = layer_num or model_param.layer_num
    raw_batch = int(16 * 1000 / test_dataset_param.min_num_node * 10 /
                    effective_layers)
    batch_size = max(min(raw_batch, 50), 1)

    test_model(dataset_param,
               model_param,
               test_dataset_param,
               batch_size=batch_size,
               metric_name_list=metric_name_list,
               layer_num=layer_num)
Beispiel #2
0
def ping_ML():
    """Accept an uploaded image, run it through the model, and report the result.

    Expects a multipart form field named 'testImage'.  The upload is saved
    to a local file whose path is forwarded to ``main.test_model``.
    Returns a human-readable result string either way.
    """
    # Save image, send local URL to model
    print("pinging model")
    image = request.files.get('testImage')
    # Bug fix: request.files.get returns None when the field is absent,
    # which previously raised AttributeError on image.filename.
    if image is None or image.filename == '':
        return "Missing an image there mate!"
    image_filename = image.filename
    # NOTE(review): the filename comes straight from the client — consider
    # werkzeug.utils.secure_filename to avoid path traversal before saving.
    image.save(image_filename)
    text, prob = main.test_model(image_filename)
    return "Text: {}, Probability: {}".format(text, prob)
Beispiel #3
0
        # Restore the self-aligned sub-model weights; map_location forces
        # every tensor onto CPU regardless of the device it was saved from.
        torch.load(os.path.join(args.model_dir,
                                "self_aligned_model_epoch49.pth"),
                   map_location=lambda storage, loc: storage))
    # Restore the remaining sub-models from the same checkpoint epoch (49).
    compare_model.load_state_dict(
        torch.load(os.path.join(args.model_dir, "compare_model_epoch49.pth"),
                   map_location=lambda storage, loc: storage))
    aggregate_model.load_state_dict(
        torch.load(os.path.join(args.model_dir, "aggregate_model_epoch49.pth"),
                   map_location=lambda storage, loc: storage))
    # The five sub-models are passed around as an ordered list; downstream
    # code (main.test_model / main.model_inference) presumably relies on
    # this exact ordering — TODO confirm against those functions.
    model_list = [
        attend_model, self_attend_model, self_aligned_model, compare_model,
        aggregate_model
    ]

    # Test on test set
    main.test_model(test_loader, model_list, 1, False, char_embedding)
    # Get the output for dataloader
    # Accumulators filled while walking error_loader (appends happen
    # below this excerpt).
    true_labels = []
    predictions = []
    div1_matrix_list = []
    div2_matrix_list = []
    # NOTE: `iter` shadows the builtin of the same name within this loop.
    for iter, (sentence_data_index_a, sentence_data_index_b,
               label) in tqdm(enumerate(error_loader)):
        # Wrap batch tensors in autograd Variables (legacy PyTorch API).
        sentence_data_index_a, sentence_data_index_b, label_batch = \
            Variable(sentence_data_index_a), Variable(sentence_data_index_b), Variable(label)
        outputs = main.model_inference(model_list, sentence_data_index_a,
                                       sentence_data_index_b, char_embedding,
                                       False)
        # Use the code below when you want to get attention matrix
        # outputs, div1, div2 = main.model_inference(model_list, sentence_data_index_a,
        #                                                  sentence_data_index_b, char_embedding, False)
Beispiel #4
0
    # Load the pre-trained encoder/decoder pair and the vocabulary that
    # maps words to indices.
    print("Loading models...")
    encoder = torch.load(config.encoder_model)
    decoder = torch.load(config.decoder_model)
    vocabulary = joblib.load(config.vocabulary)
    # GloVe vectors are optional: used only when enabled in config AND the
    # vector file actually exists on disk.
    if config.use_pretrained_embedding is True and os.path.exists(config.glove_vectors):
        glove_vectors = joblib.load(config.glove_vectors)
        print("Pretrained embeddings loaded!")
    else:
        glove_vectors = None
        print("No pretrained embeddings loaded.")
    # Fall back to CPU when CUDA is unavailable, whatever config requests.
    dev = torch.device(config.device if torch.cuda.is_available() else "cpu")

    print("Chat service initiated!")
    print()

    # Interactive REPL: read a sentence, generate a response, repeat until
    # the user types 'q' or 'quit'.
    while True:
        text = input("Please enter a sentence: ")
        text = text.lower().strip()
        if text == 'q' or text == 'quit':
            break

        try:
            # (text, text) pair: the same sentence stands in for both input
            # and target so the existing training-data pipeline can be
            # reused for inference — TODO confirm against generate_training_data.
            input_elems, output_elems = generate_training_data([(text, text)], vocabulary)
        except KeyError as ke:
            # Out-of-vocabulary word: tell the user instead of crashing.
            print("Oops - seems like I don't know the following word: {}".format(str(ke)))
            continue
        response = test_model(encoder, decoder, input_elems, output_elems, vocabulary, dev,
                              config.use_pretrained_embedding, glove_vectors, chatting=True)
        print("Response: {}".format(response))
        print()
Beispiel #5
0
 def post(self):
     """Load the YAML config named in the JSON request body and run the model test.

     Expects a JSON body with a 'model' key naming a config file under
     ./configs/ (e.g. {"model": "foo"} -> ./configs/foo.yml).
     """
     body = json.loads(self.request.body.decode('utf-8'))
     # NOTE(review): body['model'] comes from the client and is used to
     # build a filesystem path — validate it to prevent path traversal.
     configFile = "./configs/{0}.yml".format(body['model'])
     # safe_load avoids arbitrary Python object construction; yaml.load
     # without an explicit Loader is unsafe on untrusted input and requires
     # a Loader argument in PyYAML >= 6.  The with-block also closes the
     # file handle deterministically (the original leaked it).
     with codecs.open(configFile, encoding="utf-8") as config_stream:
         configs = yaml.safe_load(config_stream)
     main.test_model(configs)
Beispiel #6
0
    # Log per-epoch training metrics to TensorBoard (ep = epoch index).
    writer.add_scalar("Loss/train", avg_train_loss, ep)
    writer.add_scalar("Recall/train", avg_train_recall, ep)
    writer.add_scalar("Precision/train", avg_train_prec, ep)
    writer.add_scalar("F1/train", avg_train_f1, ep)

    # Validation pass for this epoch, then log its metrics.
    avg_val_loss, avg_val_recall, avg_val_prec, avg_val_f1 = validate_model(
        rec_sys_model, exec_device, data_type, config_param['batch_size'],
        loss_func, A, valid_loader, ep, top_k, val_display_step)
    writer.add_scalar("Loss/val", avg_val_loss, ep)
    writer.add_scalar("Recall/val", avg_val_recall, ep)
    writer.add_scalar("Precision/val", avg_val_prec, ep)
    writer.add_scalar("F1/val", avg_val_f1, ep)

    # Test pass for this epoch (same signature as validate_model), then log.
    avg_test_loss, avg_test_recall, avg_test_prec, avg_test_f1 = test_model(
        rec_sys_model, exec_device, data_type, config_param['batch_size'],
        loss_func, A, test_loader, ep, top_k, test_display_step)
    writer.add_scalar("Loss/test", avg_test_loss, ep)
    writer.add_scalar("Recall/test", avg_test_recall, ep)
    writer.add_scalar("Precision/test", avg_test_prec, ep)
    writer.add_scalar("F1/test", avg_test_f1, ep)

    # Snapshot model + optimizer state (plus the run's lr/seed for
    # reproducibility) and write a checkpoint for this epoch.
    state = {
        'state_dict': rec_sys_model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr': args.lr,
        'seed': args.seed
    }
    check_point.save_ckpt(state, args.model_name, output_dir, ep)
    # New best test F1 — body continues past this excerpt; presumably
    # records best-score details into score_matrix.
    if (avg_test_f1 > f1_max):
        score_matrix = []