# Common imports assumed by every snippet on this page (the original listing
# omits them); `utils` is the torchvision detection reference helper module.
import math
import sys
import time

import numpy as np
import torch

import utils


def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None  # placeholder for an optional warmup scheduler; stays None in this snippet
    for images, labels, _ in metric_logger.log_every(data_loader, print_freq, header):
        images = list(img.to(device, dtype=torch.float32) for img in images)
    targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()} for t in labels)

        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())

        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
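For context, `utils` here is the torchvision detection reference's helper module; a close (non-verbatim) sketch of its `reduce_dict`, which averages each logged loss across distributed workers so the printed value is rank-independent:

import torch
import torch.distributed as dist

def reduce_dict(input_dict, average=True):
    # single-process runs: nothing to reduce
    if not (dist.is_available() and dist.is_initialized()) or dist.get_world_size() < 2:
        return input_dict
    world_size = dist.get_world_size()
    with torch.no_grad():
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.all_reduce(values)  # sum across all processes
        if average:
            values /= world_size
        return dict(zip(names, values))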
Example #2
def train_one_epoch(model, model_name, optimizer, data_loader, device, epoch, print_freq, loss_list, risk=True):
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None
    i = 0
    path_save = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\Kode\Predictions_FRCNN'
    num_boxes = []
    num_boxes_pred = []
    for (images, labels, masks) in metric_logger.log_every(data_loader, print_freq, header):
        images = list(img.to(device, dtype=torch.float32) for img in images)
        # skip samples whose target is None (images without annotations),
        # keeping the image and target lists aligned
        images = [img for img, t in zip(images, labels) if t is not None]
        targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()}
                       for t in labels if t is not None)

        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())

        loss_value = losses_reduced.item()
        loss_list.append(loss_value)
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print("Target was: ", targets)
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        ids = [t['image_id'].cpu() for t in targets]

        # every 5th epoch, save a few prediction samples from the first 5 batches
        if risk and i < 5 and epoch % 5 == 0:
            samples = []
            model.eval()
            with torch.no_grad():
                outputs = model(images)
            num_boxes.append(np.mean([len(targets[j]['boxes']) for j in range(len(ids))]))
            num_boxes_pred.append(np.mean([len(outputs[k]['boxes']) for k in range(len(ids))]))
            model.train()
            samples.append((images, masks, targets, outputs))
            get_samples(samples, model_name, ids, N=epoch, path_save=path_save, train=True)
        i += 1

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    return model, np.mean(np.array(loss_list)), np.mean(np.array(num_boxes_pred)), np.mean(np.array(num_boxes))
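The None-filtering above implies a dataset that can return label=None for images without annotations; a minimal sketch of the collate_fn such a loader would need (an assumption, the original page does not show it):

def collate_fn(batch):
    # batch is a list of (image, label_dict_or_None, mask) samples;
    # transpose it into ([images], [labels], [masks]) as the loop expects
    return tuple(zip(*batch))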
Example #3
def train_one_epoch(model,
                    model_name,
                    optim_name,
                    lr,
                    optimizer,
                    layers,
                    data_loader,
                    device,
                    epoch,
                    print_freq,
                    loss_list,
                    save_folder,
                    risk=True,
                    HPC=True):
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter(
        'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    i = 0
    path_save = save_folder
    num_boxes = []
    num_boxes_pred = []
    for (images, labels,
         masks) in metric_logger.log_every(data_loader, print_freq, header):
        images = list(img.to(device, dtype=torch.float32) for img in images)
        targets = list({k: v.to(device) for k, v in t.items()} for t in labels)

        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())

        loss_value = losses_reduced.item()
        loss_list.append(loss_value)
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print("Target was: ", targets)
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        i += 1

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # note: num_boxes / num_boxes_pred are never filled in this variant, so the
    # last two return values are NaN (np.mean of an empty array)
    return model, np.mean(np.array(loss_list)), np.mean(
        np.array(num_boxes_pred)), np.mean(np.array(num_boxes))
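A hypothetical driver for the variant above; get_frcnn_model, train_loader and num_epochs are placeholder names, not from the source:

model = get_frcnn_model(num_classes=2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
loss_list = []
for epoch in range(num_epochs):
    model, mean_loss, mean_pred_boxes, mean_gt_boxes = train_one_epoch(
        model, 'frcnn', 'SGD', 0.005, optimizer, 'all', train_loader,
        device, epoch, print_freq=10, loss_list=loss_list,
        save_folder='./predictions')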
Example #4
def evaluate(model, data_loader, device, N, risk=True):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    path_save = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\Kode\Predictions_FRCNN'
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    num_boxes_val = []
    num_boxes_pred = []
    i = 0
    for (image, labels, masks) in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in image)
        targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()} for t in labels)

        torch.cuda.synchronize()
        model_time = time.time()
        with torch.no_grad():
            outputs = model(images)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time
        ids = [t['image_id'].cpu().numpy() for t in targets]

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

        samples = []
        num_boxes_val.append([len(t['boxes']) for t in targets])
        num_boxes_pred.append([len(o['boxes']) for o in outputs])
        samples.append((images, masks, targets, outputs))
        if risk and i < 10:
            get_samples(samples, ids, N=N, path_save=path_save, train=False)
        i += 1

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator, np.mean(np.array(num_boxes_pred)), np.mean(np.array(num_boxes_val))
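_get_iou_types is the torchvision reference helper; paraphrased, it selects which COCO metrics to run from the model class:

import torchvision

def _get_iou_types(model):
    # unwrap DistributedDataParallel to inspect the underlying model class
    m = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
    iou_types = ['bbox']
    if isinstance(m, torchvision.models.detection.MaskRCNN):
        iou_types.append('segm')
    if isinstance(m, torchvision.models.detection.KeypointRCNN):
        iou_types.append('keypoints')
    return iou_types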
Example #5
def evaluate(model, model_name, data_loader, device, N, risk=True):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    model.eval()
    path_save = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\Kode\Predictions_FRCNN'
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    num_boxes_val = []
    num_boxes_pred = []
    i = 0
    mAP = []
    mAP2 = []
    for (image, labels, masks) in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in image)
        targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()} for t in labels)

        torch.cuda.synchronize()
        model_time = time.time()
        with torch.no_grad():
            outputs = model(images)
        outputs = [{k: v.to(device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time
        ids = [t['image_id'].cpu() for t in targets]

        metric_logger.update(model_time=model_time)
        for j in range(len(ids)):
            iou, index, selected_iou = get_iou2(boxes=outputs[j]['boxes'].cpu(), target=targets[j]['boxes'].cpu())
            df, AP, AP2 = get_map2(outputs[j]['boxes'], targets[j]['boxes'], outputs[j]['scores'],
                                   iou_list=iou, threshold=0.3)
            mAP.append(AP)
            mAP2.append(AP2)
        samples = []
        num_boxes_val.append(np.mean([len(t['boxes']) for t in targets]))
        num_boxes_pred.append(np.mean([len(o['boxes']) for o in outputs]))
        samples.append((images, masks, targets, outputs))
        if risk and i < 10 and N % 5 == 0:
            get_samples(samples, model_name, ids, N=N, path_save=path_save, train=False)
        i += 1

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    print("mean Average Precision for epoch {}: ".format(N), np.mean(mAP))
    # accumulate predictions from all images
    torch.set_num_threads(n_threads)
    return np.mean(mAP), np.mean(mAP2), np.mean(np.array(num_boxes_pred)), np.mean(np.array(num_boxes_val))
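get_iou2 and get_map2 are project-specific helpers not shown on this page; for orientation, the pairwise IoU matrix they build on can be computed with the stock torchvision op:

from torchvision.ops import box_iou

pred_boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
gt_boxes = torch.tensor([[0., 0., 10., 10.]])
iou = box_iou(pred_boxes, gt_boxes)  # shape [2, 1]; iou[0, 0] == 1.0, iou[1, 0] ≈ 0.143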
Example #6
def evaluate(model, data_loader, device, file_names):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    for (image, labels, masks) in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()} for t in labels)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        ids = [targets[i]['image_id'].numpy() for i in range(len(targets))]
        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
        # visualize the predicted boxes drawn onto the first mask of the batch
        boxes = outputs[0]['boxes'].numpy()
        bmask1 = get_bbox_mask(mask=masks[0], bbox=boxes)
        Image.fromarray(bmask1).show()

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
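get_bbox_mask is another project helper; a minimal stand-in that burns box outlines into a uint8 mask might look like this (an assumption, not the project's code; boxes are taken to lie inside the mask):

import numpy as np

def get_bbox_mask(mask, bbox):
    out = np.asarray(mask, dtype=np.uint8).copy()
    for x1, y1, x2, y2 in bbox.astype(int):
        out[y1:y2, [x1, x2 - 1]] = 255  # left/right edges
        out[[y1, y2 - 1], x1:x2] = 255  # top/bottom edges
    return out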
Example #7
def evaluate(model,
             model_name,
             optim_name,
             lr,
             layers,
             data_loader,
             device,
             N,
             loss_list,
             save_folder,
             risk=True,
             HPC=True,
             multi=False,
             scale=True,
             threshold=0.3):
    model.eval()
    if HPC:
        path_save = save_folder
    else:
        path_save = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\Kode\Predictions_FRCNN'
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    num_boxes_val = []
    num_boxes_pred = []
    i = 0
    mIoU = []
    mAP = []
    mAP2 = []
    # per-image counts, kept separately for NMS-filtered and raw predictions
    empty_counts = {
        "true_positives": 0, "false_positives": 0,
        "true_negatives": 0, "false_negatives": 0,
        "total_num_defects": 0, "good_leather": 0, "bad_leather": 0,
    }
    conf_matrix = dict(empty_counts)   # with NMS (do_nms below)
    conf_matrix2 = dict(empty_counts)  # without NMS
    with torch.no_grad():
        for (image, labels,
             masks) in metric_logger.log_every(data_loader, 100, header):
            images = list(img.to(device) for img in image)
            targets = list(
                {k: v.to(device, dtype=torch.long)
                 for k, v in t.items()} for t in labels)

            torch.cuda.synchronize()
            model_time = time.time()
            outputs = model(images)
            outputs = [{k: v.to(device)
                        for k, v in t.items()} for t in outputs]
            model_time = time.time() - model_time
            ids = [t['image_id'].cpu() for t in targets]

            for j in range(len(ids)):
                if multi:
                    iou, label_list = iou_multi(
                        boxes=outputs[j]['boxes'].cpu(),
                        targets=targets[j]['boxes'].cpu(),
                        pred=outputs[j]['labels'].cpu(),
                        labels=targets[j]['labels'].cpu())
                    df, AP, AP2, c = get_class_iou(
                        iou_list=iou,
                        label_list=label_list,
                        scores=outputs[j]['scores'].cpu(),
                        target=targets[j]['boxes'].cpu(),
                        labels=targets[j]['labels'].cpu(),
                        preds=outputs[j]['labels'].cpu(),
                        threshold=threshold)
                    mAP.append(AP)
                    mAP2.append(AP2)
                    if N % 50 == 0:
                        df2, _, _ = get_map2(outputs[j]['boxes'],
                                             targets[j]['boxes'],
                                             outputs[j]['scores'],
                                             outputs[j]['labels'].cpu(),
                                             targets[j]['labels'].cpu(),
                                             iou_list=iou,
                                             threshold=threshold,
                                             print_state=True)
                else:

                    boxes = outputs[j]['boxes'].cpu()
                    scores = outputs[j]['scores'].cpu()
                    preds = outputs[j]['labels'].cpu()

                    new_boxes, new_scores, new_preds = do_nms(boxes,
                                                              scores,
                                                              preds,
                                                              threshold=0.2)
                    expand = 42 if scale else 21
                    iou_target, iou_pred = get_iou_targets(
                        boxes=new_boxes,
                        targets=targets[j]['boxes'].cpu(),
                        preds=new_preds,
                        labels=targets[j]['labels'].cpu(),
                        image=images[j],
                        expand=expand)
                    acc_dict = classifier_metric(iou_target, iou_pred,
                                                 new_scores,
                                                 targets[j]['boxes'].cpu(),
                                                 targets[j]['labels'].cpu())

                    conf_matrix["true_positives"] += acc_dict["Detected"]
                    conf_matrix["false_negatives"] += acc_dict[
                        "Defects"] - acc_dict["Detected"]
                    conf_matrix["false_positives"] += acc_dict["FP"]
                    conf_matrix["total_num_defects"] += acc_dict["Defects"]
                    if acc_dict["Defects"] == 0:
                        conf_matrix["good_leather"] += 1
                        if acc_dict["FP"] == 0:
                            conf_matrix["true_negatives"] += 1
                    else:
                        conf_matrix["bad_leather"] += 1

                    iou_target2, iou_pred2 = get_iou_targets(
                        boxes=boxes,
                        targets=targets[j]['boxes'].cpu(),
                        preds=preds,
                        labels=targets[j]['labels'].cpu(),
                        image=images[j],
                        expand=expand)
                    acc_dict2 = classifier_metric(iou_target2, iou_pred2,
                                                  scores,
                                                  targets[j]['boxes'].cpu(),
                                                  targets[j]['labels'].cpu())

                    conf_matrix2["true_positives"] += acc_dict2["Detected"]
                    conf_matrix2["false_negatives"] += acc_dict2[
                        "Defects"] - acc_dict2["Detected"]
                    conf_matrix2["false_positives"] += acc_dict2["FP"]
                    conf_matrix2["total_num_defects"] += acc_dict2["Defects"]
                    if acc_dict2["Defects"] == 0:
                        conf_matrix2["good_leather"] += 1
                        if acc_dict2["FP"] == 0:
                            conf_matrix2["true_negatives"] += 1
                    else:
                        conf_matrix2["bad_leather"] += 1
                    iou, _, _ = get_iou2(boxes=new_boxes,
                                         targets=targets[j]['boxes'].cpu(),
                                         pred=new_preds,
                                         labels=targets[j]['labels'].cpu())
                    df, _, AP = get_map2(new_boxes,
                                         targets[j]['boxes'],
                                         new_scores,
                                         new_preds,
                                         targets[j]['labels'].cpu(),
                                         iou_list=iou,
                                         threshold=threshold)
                    iou2, _, _ = get_iou2(boxes=outputs[j]['boxes'].cpu(),
                                          targets=targets[j]['boxes'].cpu(),
                                          pred=outputs[j]['labels'].cpu(),
                                          labels=targets[j]['labels'].cpu())
                    df2, _, AP2 = get_map2(outputs[j]['boxes'],
                                           targets[j]['boxes'],
                                           outputs[j]['scores'],
                                           outputs[j]['labels'].cpu(),
                                           targets[j]['labels'].cpu(),
                                           iou_list=iou2,
                                           threshold=threshold)
                    mAP.append(AP)
                    mAP2.append(AP2)
                    IoU = mask_iou(boxes=new_boxes,
                                   mask=masks[j],
                                   targets=targets[j]['boxes'].cpu())
                    mIoU.append(IoU[1])

            num_boxes_val.append(np.mean([len(t['boxes']) for t in targets]))
            num_boxes_pred.append(np.mean([len(o['boxes']) for o in outputs]))
            if N % 40 == 0:
                metric_logger.update(model_time=model_time)

            if risk:
                # recompute the training-mode loss on this batch so a
                # validation loss can be logged alongside the metrics
                model.train()
                images = list(
                    img.to(device, dtype=torch.float32) for img in image)
                targets = list({k: v.to(device)
                                for k, v in t.items()} for t in labels)

                loss_dict = model(images, targets)

                # reduce losses over all GPUs for logging purposes
                loss_dict_reduced = utils.reduce_dict(loss_dict)
                losses_reduced = sum(loss
                                     for loss in loss_dict_reduced.values())

                loss_value = losses_reduced.item()
                loss_list.append(loss_value)

                model.eval()
            i += 1

        # gather the stats from all processes
        metric_logger.synchronize_between_processes()
        # identical summary in both modes; on HPC it is printed only every 40th epoch
        if not HPC or N % 40 == 0:
            print("Averaged stats:", metric_logger)
            print("mean Average Precision for epoch {} with nms: ".format(N),
                  np.mean(mAP))
            print("mean Average Precision without nms {}: ".format(N),
                  np.mean(mAP2))
            print("mean IoU with nms: ", np.mean(mIoU))
            print("TP: ", conf_matrix["true_positives"])
            print("FP: ", conf_matrix["false_positives"])
            print("TN: ", conf_matrix["true_negatives"])
            print("FN: ", conf_matrix["false_negatives"])
            print("Total number of defects: ",
                  conf_matrix["total_num_defects"])
            print("Images with good leather: ", conf_matrix["good_leather"])
            print("Images with bad leather: ", conf_matrix["bad_leather"])

    return np.mean(mAP), np.mean(mAP2), np.mean(loss_list), np.mean(
        np.array(num_boxes_pred)), np.mean(np.array(
            num_boxes_val)), conf_matrix, conf_matrix2, np.nanmean(mIoU)
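The confusion-matrix dicts returned above translate directly into precision and recall; a small helper sketch over the keys used in this snippet:

def precision_recall(cm):
    tp = cm['true_positives']
    fp = cm['false_positives']
    fn = cm['false_negatives']
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall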
Example #8
            iou_list = np.zeros(3)
        else:
            target = np.ones(7)
            iou_list = np.array([1 / (1 + i), 1 / (2 + i), 1 / (3 + i)])
        acc_image, acc_def, acc1, acc2 = classifier_metric(iou_list, scores, target)
        acimg.append(acc_image)
        acdef.append(acc_def)
        ac1 += acc1
        ac2 += acc2

if __name__ == '__main__':
    from object_detect.load_data import val_loader
    device = torch.device('cpu')
    epoch = 1
    data_loader = val_loader
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    i = 0
    total_num_defects = 0
    true_positives = 0
    false_negatives = 0
    false_positives = 0
    true_negatives = 0
    for (images, labels, masks) in metric_logger.log_every(data_loader, 1, header):
        images = list(img.to(device, dtype=torch.float32) for img in images)
        targets = list({k: v.to(device, dtype=torch.long) for k, v in t.items()} for t in labels)

        # hand-crafted boxes used as stand-in predictions
        outputs = torch.tensor([[50, 50, 120, 100],
                                [70, 110, 90, 140],
                                [150, 60, 270, 190]], dtype=torch.float32)