Example #1
def test_a_epoch(name, data, model, result_file, label2id, lr, batch_size, en):
    model.eval()  # eval() propagates to submodules; setting .training directly does not

    full_img_paths = []
    evaluator = Evaluator(name, label2id)

    for images, labels, img_paths in tqdm(data, desc=name, total=len(data)):
        inp = Variable(images.cuda())

        seq_out = model(inp)

        pred = argmax(seq_out)
        evaluator.append_data(0, pred, labels)

        full_img_paths.extend(img_paths)

    evaluator.gen_results()
    evaluator.print_results()
    evaluator.write_results(
        result_file, "epoch = {2}; GOOGLE NET; lr={0}; batch_size={1}".format(
            lr, batch_size, en))

    return evaluator, full_img_paths
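
These examples rely on a few imports and helper functions (argmax, to_scalar) defined elsewhere in the source project. Below is a minimal sketch of what they are assumed to look like, so the snippets read standalone; the bodies are assumptions, not the original implementation.

import sys

import torch
from torch.autograd import Variable
from tqdm import tqdm


def argmax(seq_out):
    # Index of the highest-scoring class for each row of the batch output,
    # returned as a plain Python list so it can be extended into pred_list.
    _, indices = torch.max(seq_out, 1)
    return indices.data.cpu().numpy().tolist()


def to_scalar(var):
    # Convert a single-element tensor/Variable (e.g. the loss) to a float.
    return var.item() if hasattr(var, "item") else float(var.data[0])
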
Example #2
def train_a_epoch(name, data, model, optimizer, criterion, res_file, lr,
                  batch_size, label2id, en):
    model = model.train(True)

    evaluator = Evaluator(name, label2id)
    print("evaluator loaded")
    i = 0
    pbar = tqdm(data, desc=name, total=len(data))
    for images, labels, _ in pbar:
        # zero the parameter gradients
        sys.stdout.flush()
        optimizer.zero_grad()
        if i == 0:
            pbar.total = len(data)
        inp = Variable(images.cuda())
        seq_out = model(inp)
        pred = argmax(seq_out)

        loss = criterion(seq_out, Variable(torch.cuda.LongTensor(labels)))
        pbar.set_description("{0} loss: {1}".format(name, to_scalar(loss)))
        evaluator.append_data(to_scalar(loss), pred, labels)

        loss.backward()
        optimizer.step()
        i += 1

    print("Training Done")
    evaluator.gen_results()
    evaluator.print_results()
    evaluator.write_results(
        res_file, "epoch = {2}; GOOGLE NET; lr={0}; batch_size={1}".format(
            lr, batch_size, en))

    return model
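
Every example builds an Evaluator and calls the same four methods on it. The class itself is project-specific; this minimal stand-in only mirrors the interface the snippets depend on, and the plain-accuracy metric is an assumption.

class Evaluator:
    def __init__(self, name, label2id, pure_labels=None):
        self.name = name
        self.label2id = label2id
        self.pure_labels = pure_labels
        self.losses, self.preds, self.trues = [], [], []
        self.results = ""

    def append_data(self, loss, pred, labels):
        # Accumulate the per-batch loss, predicted labels and gold labels.
        self.losses.append(loss)
        self.preds.extend(list(pred))
        self.trues.extend(list(labels))

    def gen_results(self):
        # Overall accuracy and mean loss across everything appended so far.
        correct = sum(int(p == t) for p, t in zip(self.preds, self.trues))
        acc = correct / max(len(self.trues), 1)
        mean_loss = sum(self.losses) / max(len(self.losses), 1)
        self.results = "{0}: accuracy={1:.4f} mean_loss={2:.4f}".format(
            self.name, acc, mean_loss)

    def print_results(self):
        print(self.results)

    def write_results(self, path, header):
        # Append a header line (run settings) plus the metrics to a text file.
        with open(path, "a") as f:
            f.write(header + "\n" + self.results + "\n")
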
Example #3
def test_a_epoch(name, data, model, result_file, cfg, en):
    pred_list = []
    true_list = []
    evaluator = Evaluator(name,
                          label2id=cfg['LABEL_2_ID'],
                          pure_labels=cfg['PURE_LABELS'])

    for images, labels in tqdm(data, desc=name, total=len(data)):
        seq_out = model(Variable(torch.from_numpy(images).float().cuda()))

        pred = argmax(seq_out)

        evaluator.append_data(0, pred, labels)

        pred_list.extend(pred)
        true_list.extend(labels)

    evaluator.gen_results()
    evaluator.print_results()
    evaluator.write_results(
        result_file, "epoch = {2}; GOOGLE NET; lr={0}; batch_size={1}".format(
            cfg['LEARNING_RATE'], cfg['BATCH_SIZE'], en))

    return evaluator, pred_list, true_list
Example #4
def test_a_epoch(name, data, model, result_file, label2id):
    model.eval()  # eval() propagates to submodules; setting .training directly does not

    pred_list = []
    true_list = []
    evaluator = Evaluator(name, label2id)

    for images, labels in tqdm(data, desc=name, total=len(data)):
        inp = Variable(images.cuda())

        seq_out = model(inp)

        pred = argmax(seq_out)
        evaluator.append_data(0, pred, labels)

        pred_list.extend(pred)
        true_list.extend(labels)

    evaluator.gen_results()
    evaluator.print_results()
    evaluator.write_results(result_file, '')

    return evaluator, pred_list, true_list
Example #5
def train_a_epoch(name, data, model, optimizer, criterion, res_file, lr,
                  batch_size, label2id, en):
    model.train()  # train() propagates training mode to all submodules

    evaluator = Evaluator(name, label2id)
    print("evaluator loaded")
    i = 0
    for images, labels, _ in tqdm(data, desc=name, total=len(data)):
        # zero the parameter gradients
        sys.stdout.flush()
        optimizer.zero_grad()
        model.zero_grad()
        # image = crop(sample.image, cfg.MAX_IMG_SIZE)

        inp = Variable(images.cuda())
        # inp = torch.transpose(inp, 1, 3)
        seq_out = model(inp)

        pred = argmax(seq_out)
        loss = criterion(seq_out, Variable(torch.cuda.LongTensor(labels)))

        evaluator.append_data(to_scalar(loss), pred, labels)
        loss.backward()
        optimizer.step()
        i += 1

    print("Training Done")
    evaluator.gen_results()
    evaluator.print_results()
    evaluator.write_results(
        res_file, "epoch = {2}; GOOGLE NET; lr={0}; batch_size={1}".format(
            lr, batch_size, en))

    return model
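
To show how these functions fit together, here is a hypothetical driver loop using the signatures from Examples #1 and #2; the loader names, result file names and hyperparameters are placeholders, not part of the original project.

import torch.nn as nn
import torch.optim as optim


def run_training(train_loader, test_loader, model, label2id,
                 lr=0.01, batch_size=32, epochs=10):
    # Train for one epoch, then evaluate, writing metrics for both phases.
    model = model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    for en in range(epochs):
        model = train_a_epoch("train", train_loader, model, optimizer,
                              criterion, "train_results.txt", lr,
                              batch_size, label2id, en)
        test_a_epoch("test", test_loader, model, "test_results.txt",
                     label2id, lr, batch_size, en)
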