def evaluate(model, test_loader):
    """Validate a three-head model and report mean loss plus macro recall.

    Iterates ``test_loader`` under ``torch.no_grad``, sums the per-head
    cross-entropy losses, and scores the concatenated epoch predictions
    with ``macro_recall``.

    Returns:
        (mean_loss, recall): summed three-head loss averaged over batches,
        and the ``macro_recall`` score over the whole epoch.
    """
    model.eval()

    total_loss = 0
    epoch_preds = []
    epoch_targets = []

    with torch.no_grad():
        for img, label1, label2, label3 in test_loader:
            img = img.to(device)
            label1 = label1.to(device)
            label2 = label2.to(device)
            label3 = label3.to(device)

            pred_g, pred_v, pred_c = model(img)

            # Logits concatenated along class dim; targets stacked per head.
            epoch_preds.append(torch.cat((pred_g, pred_v, pred_c), dim=1))
            epoch_targets.append(torch.stack((label1, label2, label3), dim=1))

            batch_loss = (F.cross_entropy(pred_g, label1)
                          + F.cross_entropy(pred_v, label2)
                          + F.cross_entropy(pred_c, label3))
            total_loss += batch_loss

        recall = macro_recall(torch.cat(epoch_preds),
                              torch.cat(epoch_targets))

        return total_loss / len(test_loader), recall
# ---- Example 2 ----
def evaluate(dataset, data_loader, model, optimizer, label_name):
    """Evaluate ``model`` on one prediction head and report loss and recall.

    Args:
        dataset: dataset backing ``data_loader``; used only to size the
            tqdm progress bar.
        data_loader: iterable of batch dicts with keys ``'image'`` and
            ``label_name``.
        model: network returning one logit tensor per image batch.
        optimizer: unused during evaluation; kept for interface
            compatibility with the training loop.
        label_name: dict key of the target column to score.

    Returns:
        (mean_loss, macro_recall_score): loss averaged over batches seen,
        and the ``macro_recall`` score for this head over the epoch.
    """
    model.eval()
    # Fix: the original initialised final_loss twice; once is enough.
    final_loss = 0
    counter = 0
    final_outputs = []
    final_targets = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader),
                          total=int(len(dataset) / data_loader.batch_size)):
            counter = counter + 1
            image = d['image']
            target = d[label_name]

            image = image.to(DEVICE, dtype=torch.float)
            target = target.to(DEVICE, dtype=torch.long)

            # Fix: removed optimizer.zero_grad() — gradients are neither
            # computed (torch.no_grad) nor consumed during evaluation.
            output = model(image)
            loss = loss_fn(output, target)

            final_loss += loss

            final_outputs.append(output)
            # unsqueeze so targets match the (batch, 1) layout macro_recall expects
            final_targets.append(torch.unsqueeze(target, dim=1))

        final_outputs = torch.cat(final_outputs)
        final_targets = torch.cat(final_targets)

        # Fix: banner typo "Evalutions" -> "Evaluations".
        print("=================Evaluations=================")
        macro_recall_score = macro_recall(final_outputs, final_targets,
                                          label_name)

    return final_loss / counter, macro_recall_score
# ---- Example 3 ----
def train(model, train_loader, optimizer, epoch):
    """Train a three-head model for one epoch with 50% mixup augmentation.

    Each batch is trained either on a mixup of the images (alpha=0.4,
    chosen with probability 0.5) using ``mixup_criterion``, or on the
    plain batch with the sum of per-head cross-entropy losses. Prints
    epoch accuracy, mean loss, and macro recall; returns nothing.

    Args:
        model: network returning three logit tensors per image batch.
        train_loader: yields (img, label1, label2, label3) batches.
        optimizer: optimizer stepped once per batch.
        epoch: unused in the body; kept for interface compatibility.
    """
    model.train()
    loss = 0.0
    acc = 0.0

    final_loss = 0.0
    final_preds = []
    final_targets = []

    for idx, (img, label1, label2, label3) in enumerate(train_loader):
        img = img.to(device)
        label1 = label1.to(device)
        label2 = label2.to(device)
        label3 = label3.to(device)

        optimizer.zero_grad()

        if np.random.rand() < 0.5:
            images, targets = mixup(img, label1, label2, label3, 0.4)
            output1, output2, output3 = model(images)
            loss = mixup_criterion(output1, output2, output3, targets)
        else:
            output1, output2, output3 = model(img)
            loss_grapheme = F.cross_entropy(output1, label1)
            loss_vowel = F.cross_entropy(output2, label2)
            loss_consonan = F.cross_entropy(output3, label3)
            loss = loss_grapheme + loss_vowel + loss_consonan

        # Fix: detach before storing — keeping graph-attached outputs for a
        # whole epoch retains every batch's autograd graph (memory leak).
        # NOTE(review): on mixup batches the recorded targets are the
        # original labels, so the epoch recall is only approximate there —
        # confirm this is intended.
        final_preds.append(
            torch.cat((output1, output2, output3), dim=1).detach())
        final_targets.append(torch.stack((label1, label2, label3), dim=1))

        # Fix: accumulate a Python float, not a graph-holding tensor.
        final_loss += loss.item()

        acc += (output1.argmax(1) == label1).float().mean()
        acc += (output2.argmax(1) == label2).float().mean()
        acc += (output3.argmax(1) == label3).float().mean()

        loss.backward()
        optimizer.step()

    final_preds = torch.cat(final_preds)
    final_targets = torch.cat(final_targets)
    recall, _ = macro_recall(final_preds, final_targets)

    print('acc : {:.2f}% , loss : {:.4f}, Recall : {:.4f}'.format(
        acc / (len(train_loader) * 3), final_loss / len(train_loader), recall))
def train(dataset, data_loader, model, optimizer):
    """One training epoch for a three-head model.

    Args:
        dataset: dataset backing ``data_loader``; used only to size the
            tqdm progress bar.
        data_loader: iterable of batch dicts with keys ``'image'``,
            ``'grapheme_root'``, ``'vowel_diacritic'``,
            ``'consonant_diacritic'``.
        model: network returning a 3-tuple of logit tensors.
        optimizer: optimizer stepped once per batch.

    Returns:
        (mean_loss, macro_recall_score) for the epoch.
    """
    model.train()
    final_loss = 0
    counter = 0
    final_outputs = []
    final_targets = []
    # Fix: hoisted out of the loop — was re-assigned on every batch.
    DEVICE = "cuda"

    for bi, d in tqdm(enumerate(data_loader),
                      total=int(len(dataset) / data_loader.batch_size)):
        counter = counter + 1
        image = d["image"]
        grapheme_root = d["grapheme_root"]
        vowel_diacritic = d["vowel_diacritic"]
        consonant_diacritic = d["consonant_diacritic"]
        image = image.to(DEVICE, dtype=torch.float)
        grapheme_root = grapheme_root.to(DEVICE, dtype=torch.long)
        vowel_diacritic = vowel_diacritic.to(DEVICE, dtype=torch.long)
        consonant_diacritic = consonant_diacritic.to(DEVICE, dtype=torch.long)

        optimizer.zero_grad()
        outputs = model(image)
        targets = (grapheme_root, vowel_diacritic, consonant_diacritic)
        loss = loss_fn(outputs, targets)

        loss.backward()
        optimizer.step()

        # Fix: detach — accumulating / storing graph-attached tensors across
        # a training epoch retains every batch's autograd graph (memory leak).
        final_loss += loss.detach()

        o1, o2, o3 = outputs
        t1, t2, t3 = targets
        final_outputs.append(torch.cat((o1, o2, o3), dim=1).detach())
        final_targets.append(torch.stack((t1, t2, t3), dim=1))

    final_outputs = torch.cat(final_outputs)
    final_targets = torch.cat(final_targets)

    print("=================Train=================")
    macro_recall_score = macro_recall(final_outputs, final_targets)

    return final_loss / counter, macro_recall_score
# ---- Example 5 ----
def evaluate(dataset, data_loader, model, optimizer):
    """Evaluate a three-head model and report mean loss plus macro recall.

    Args:
        dataset: dataset backing ``data_loader``; used only to size the
            tqdm progress bar.
        data_loader: iterable of batch dicts with keys ``'image'``,
            ``'grapheme_root'``, ``'vowel_diacritic'``,
            ``'consonant_diacritic'``.
        model: network returning a 3-tuple of logit tensors.
        optimizer: unused during evaluation; kept for interface
            compatibility with the training loop.

    Returns:
        (mean_loss, macro_recall_score) averaged over the batches seen.
    """
    model.eval()
    # Fix: the original initialised final_loss twice; once is enough.
    final_loss = 0
    counter = 0
    final_outputs = []
    final_targets = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader),
                          total=int(len(dataset) / data_loader.batch_size)):
            counter = counter + 1
            image = d['image']
            grapheme_root = d['grapheme_root']
            vowel_diacritic = d['vowel_diacritic']
            consonant_diacritic = d['consonant_diacritic']

            image = image.to(DEVICE, dtype=torch.float)
            grapheme_root = grapheme_root.to(DEVICE, dtype=torch.long)
            vowel_diacritic = vowel_diacritic.to(DEVICE, dtype=torch.long)
            consonant_diacritic = consonant_diacritic.to(DEVICE, dtype=torch.long)

            # Fix: removed optimizer.zero_grad() — gradients are neither
            # computed (torch.no_grad) nor consumed during evaluation.
            outputs = model(image)
            targets = (grapheme_root, vowel_diacritic, consonant_diacritic)
            loss = loss_fn(outputs, targets)

            final_loss += loss

            o1, o2, o3 = outputs
            t1, t2, t3 = targets
            final_outputs.append(torch.cat((o1, o2, o3), dim=1))
            final_targets.append(torch.stack((t1, t2, t3), dim=1))

        final_outputs = torch.cat(final_outputs)
        final_targets = torch.cat(final_targets)

        # Fix: banner typo "Evalutions" -> "Evaluations".
        print("=================Evaluations=================")
        macro_recall_score = macro_recall(final_outputs, final_targets)

    return final_loss / counter, macro_recall_score
def train(model, train_loader, optimizer, epoch):
    """Train a three-head model for one epoch and print metrics.

    Each batch optimises the sum of the three per-head cross-entropy
    losses. Prints epoch accuracy, mean loss, and macro recall; returns
    nothing.

    Args:
        model: network returning three logit tensors per image batch.
        train_loader: yields (img, label1, label2, label3) batches.
        optimizer: optimizer stepped once per batch.
        epoch: unused in the body; kept for interface compatibility.
    """
    model.train()
    loss = 0.0
    acc = 0.0

    final_loss = 0.0
    final_preds = []
    final_targets = []

    for idx, (img, label1, label2, label3) in enumerate(train_loader):
        img = img.to(device)
        label1 = label1.to(device)
        label2 = label2.to(device)
        label3 = label3.to(device)

        optimizer.zero_grad()
        pred_grapheme, pred_vowel, pred_consonan = model(img)

        # Fix: detach before storing — keeping graph-attached outputs for a
        # whole epoch retains every batch's autograd graph (memory leak).
        final_preds.append(
            torch.cat((pred_grapheme, pred_vowel, pred_consonan),
                      dim=1).detach())
        final_targets.append(torch.stack((label1, label2, label3), dim=1))

        loss_grapheme = F.cross_entropy(pred_grapheme, label1)
        loss_vowel = F.cross_entropy(pred_vowel, label2)
        loss_consonan = F.cross_entropy(pred_consonan, label3)

        loss = loss_grapheme + loss_vowel + loss_consonan
        # Fix: accumulate a Python float, not a graph-holding tensor.
        final_loss += loss.item()

        acc += (pred_grapheme.argmax(1) == label1).float().mean()
        acc += (pred_vowel.argmax(1) == label2).float().mean()
        acc += (pred_consonan.argmax(1) == label3).float().mean()

        loss.backward()
        optimizer.step()

    final_preds = torch.cat(final_preds)
    final_targets = torch.cat(final_targets)
    recall = macro_recall(final_preds, final_targets)

    print('acc : {:.2f}% , loss : {:.4f}, Recall : {:.4f}'.format(
        acc / (len(train_loader) * 3), final_loss / len(train_loader), recall))
# ---- Example 7 ----
def train(dataset, data_loader, models, optimizers):
    """One training epoch with a separate model/optimizer per head.

    Args:
        dataset: dataset backing ``data_loader``; used only to size the
            tqdm progress bar.
        data_loader: iterable of batch dicts with keys ``'image'``,
            ``'grapheme_root'``, ``'vowel_diacritic'``,
            ``'consonant_diacritic'``.
        models: 3-tuple (grapheme, vowel, consonant) of networks, each
            returning one logit tensor per image batch.
        optimizers: 3-tuple of optimizers, one per model, each stepped
            once per batch on its own head's loss.

    Returns:
        (final_losses, macro_recall_score) where ``final_losses`` is a
        3-tuple of per-head mean losses for the epoch.
    """
    counter = 0
    final_outputs = []
    final_targets = []

    g_final_loss = 0
    v_final_loss = 0
    c_final_loss = 0

    g_model, v_model, c_model = models
    g_optimizer, v_optimizer, c_optimizer = optimizers

    g_model.train()
    v_model.train()
    c_model.train()

    for bi, d in tqdm(enumerate(data_loader),
                      total=int(len(dataset) / data_loader.batch_size)):
        counter += 1
        image = d['image']
        grapheme_root = d['grapheme_root']
        vowel_diacritic = d['vowel_diacritic']
        consonant_diacritic = d['consonant_diacritic']

        image = image.to(DEVICE, dtype=torch.float)
        grapheme_root = grapheme_root.to(DEVICE, dtype=torch.long)
        vowel_diacritic = vowel_diacritic.to(DEVICE, dtype=torch.long)
        consonant_diacritic = consonant_diacritic.to(DEVICE, dtype=torch.long)

        g_optimizer.zero_grad()
        v_optimizer.zero_grad()
        c_optimizer.zero_grad()

        g = g_model(image)
        v = v_model(image)
        c = c_model(image)

        outputs = (g, v, c)
        targets = (grapheme_root, vowel_diacritic, consonant_diacritic)
        # loss_fn is expected to return one loss per head here.
        g_loss, v_loss, c_loss = loss_fn(outputs, targets)

        g_loss.backward()
        g_optimizer.step()

        v_loss.backward()
        v_optimizer.step()

        c_loss.backward()
        c_optimizer.step()

        # Fix: detach — accumulating / storing graph-attached tensors across
        # a training epoch retains every batch's autograd graph (memory leak).
        g_final_loss += g_loss.detach()
        v_final_loss += v_loss.detach()
        c_final_loss += c_loss.detach()

        o1, o2, o3 = outputs
        t1, t2, t3 = targets

        final_outputs.append(torch.cat((o1, o2, o3), dim=1).detach())
        final_targets.append(torch.stack((t1, t2, t3), dim=1))

    final_outputs = torch.cat(final_outputs)
    final_targets = torch.cat(final_targets)

    final_losses = (g_final_loss / counter, v_final_loss / counter,
                    c_final_loss / counter)

    print("=================Train=================")
    macro_recall_score = macro_recall(final_outputs, final_targets)

    return final_losses, macro_recall_score
# ---- Example 8 ----
            # A TP for this one is in essence a tn for all other classifications
            for core_value in arff.attributes[arff.attr_position["class"]][1]:
                if core_value != classification:
                    confusion_matrices[core_value]["tn"] += 1
        else:
            # This is a false positive for this classification
            confusion_matrices[classification]["fp"] += 1
            # A FP for this one is a FN for the correct one
            confusion_matrices[record[arff.attr_position["class"]]]["fn"] += 1
    
    results_dict = {}
    results_dict["mp"] = utils.micro_precision(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["mr"] = utils.micro_recall(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["mf1"] = utils.micfo_f1(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["Mp"] = utils.macro_precision(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["Mr"] = utils.macro_recall(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["Mf1"] = utils.macro_f1(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    results_dict["ac"] = utils.accuracy(arff.attributes[arff.attr_position["class"]][1], confusion_matrices)
    
    print("Micro Precision  " + str(run_num) + ": " + str(results_dict["mp"]))
    print("Micro Recall     " + str(run_num) + ": " + str(results_dict["mr"]))
    print("Micro F1         " + str(run_num) + ": " + str(results_dict["mf1"]))
    print("Macro Precision  " + str(run_num) + ": " + str(results_dict["Mp"]))
    print("Macro Recall     " + str(run_num) + ": " + str(results_dict["Mr"]))
    print("Macro F1         " + str(run_num) + ": " + str(results_dict["Mf1"]))
    print("Accuracy         " + str(run_num) + ": " + str(results_dict["ac"]))

    validation_results.append(results_dict)

    # Push the test data back into the training data
    arff.data.extend(training_records)