Exemple #1
0
def main():
    """Train YOLOv1 on Pascal VOC, printing train mAP each epoch, then
    optionally visualise test-set predictions.

    Relies on module-level config (DEVICE, LEARNING_RATE, EPOCHS, ...) and
    helpers (get_bboxes, mean_average_precision, train_fn, ...).
    """
    model = Yolov1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss()
    train_dataset = VOCDataset("data/train.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    test_dataset = VOCDataset("data/test.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=1,
                              pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=1,
                             pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)

    for epoch in range(EPOCHS):
        # Evaluate on the training set before this epoch's weight update.
        pred_boxes, target_boxes = get_bboxes(train_loader, model, iou_threshold=0.5, threshold=0.4)
        mAP = mean_average_precision(pred_boxes, target_boxes, iou_threshold=0.5)
        print(f"Train mAP:{mAP}")
        train_fn(train_loader, model, optimizer, loss_fn)

    # BUG FIX: the visualisation below was mis-indented — `x = x.to(DEVICE)`
    # sat at the same level as its `for x, y in test_loader:` header (a
    # SyntaxError) and the decode was redone for every idx.
    # NOTE(review): `epoch` here is the loop variable left over from the
    # training loop; it is undefined if EPOCHS == 0.
    if epoch > 99:
        for x, y in test_loader:
            x = x.to(DEVICE)
            # Decode once per batch, then NMS/plot each of the first 16 images.
            bboxes = cellboxes_to_boxes(model(x))
            for idx in range(16):
                kept = non_max_suppression(bboxes[idx], iou_threshold=0.5, threshold=0.4)
                plot_image(x[idx].permute(1, 2, 0).to("cpu"), kept)
# Script entry point (FIX: removed the doubled space in `__name__  ==`).
if __name__ == "__main__":
    main()
 def _calc_map(self, x, y, pred):
     """Decode raw YOLO head output into box coordinates and return mAP@0.5.

     Args:
         x: input batch, forwarded untouched to ``get_bboxes``.
         y: ground-truth targets, forwarded untouched to ``get_bboxes``.
         pred: raw network output; the last dim holds
             [tx, ty, tw, th, objectness, ...].
             NOTE(review): the `.view` broadcast patterns below imply a 5-D
             layout of (batch, grid_y, grid_x, anchor, channels) on a 13x13
             grid — confirm against the model's actual output shape.

     Returns:
         Mean average precision at IoU 0.5 over the decoded boxes.
     """
     self.anchor_boxes = self.anchor_boxes.to(self.device)
     # Hard 0/1 mask from the sigmoid-activated objectness channel.
     exist_mask = torch.round(torch.sigmoid(pred[..., 4:5]))
     # Grid cell indices for a 13x13 feature map.
     cell_idx = torch.arange(13, device=self.device)
     # b{x,y}: sigmoid offset within the cell plus the cell's grid index,
     # zeroed wherever no object was predicted.
     bx = exist_mask * torch.sigmoid(
         pred[..., 0:1]) + exist_mask * cell_idx.view([1, 1, -1, 1, 1])
     by = exist_mask * torch.sigmoid(
         pred[..., 1:2]) + exist_mask * cell_idx.view([1, -1, 1, 1, 1])
     # b{w,h}: anchor prior scaled by exp of the raw width/height outputs.
     bw = (exist_mask * self.anchor_boxes[:, 2].view([1, 1, 1, -1, 1]) *
           exist_mask * torch.exp(pred[..., 2:3]))
     bh = (exist_mask * self.anchor_boxes[:, 3].view([1, 1, 1, -1, 1]) *
           exist_mask * torch.exp(pred[..., 3:4]))
     # Overwrite the raw coordinates in-place with the decoded ones
     # (note: mutates the caller's `pred` tensor).
     pred[..., :4] = torch.cat([bx, by, bw, bh], dim=-1)
     pred_boxes, target_boxes = get_bboxes(
         x=x,
         y=y,
         predictions=pred,
         iou_threshold=0.45,
         threshold=0.005,
         S=self.S,
         B=self.B,
         device=self.device,
     )
     mean_avg_prec = mAP(pred_boxes, target_boxes, iou_threshold=0.5)
     return mean_avg_prec
Exemple #3
0
    def _calc_map(self, y, pred):
        """Preprocess the three YOLO output scales and return mAP@0.5.

        ``pred`` is a 3-tuple of raw head outputs ordered by grid size
        (13, 26, 52); the matching anchor triplet is sliced and rescaled
        to grid units for each scale.
        """
        # (anchor slice, grid size) per output scale, matching pred's order.
        scale_cfg = ((slice(6, 9), 13), (slice(3, 6), 26), (slice(0, 3), 52))
        processed = tuple(
            self._preprocess(raw,
                             self.anchor_boxes[sl] / (416 / grid),
                             S=grid)
            for raw, (sl, grid) in zip(pred, scale_cfg)
        )

        pred_boxes, target_boxes = get_bboxes(
            y=y,
            predictions=processed,
            iou_threshold=0.5,
            threshold=0.5,
            S=[13, 26, 52],
            B=3,
            device=self.device,
        )

        return mAP(pred_boxes, target_boxes, iou_threshold=0.5)
def main():
    """Train YOLOv1 on the 100-example VOC subset, printing train mAP per epoch."""
    model = Yolov1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss()

    if LOAD_MODEL:
        load_checkpoint(torch.load(LOAD_MODEL_FILE), model, optimizer)

    # Shared construction arguments keep the two datasets/loaders in sync.
    dataset_common = dict(transform=transform, img_dir=IMG_DIR,
                          label_dir=LABEL_DIR)
    train_dataset = VOCDataset("data/100examples.csv", **dataset_common)
    test_dataset = VOCDataset("data/test.csv", **dataset_common)

    loader_common = dict(batch_size=BATCH_SIZE, num_workers=NUM_WORKERS,
                         pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)
    train_loader = DataLoader(dataset=train_dataset, **loader_common)
    test_loader = DataLoader(dataset=test_dataset, **loader_common)  # built for parity; unused below

    for epoch in range(EPOCHS):
        boxes_pred, boxes_true = get_bboxes(
            train_loader, model, iou_threshold=0.5, threshold=0.4
        )
        mean_avg_prec = mean_average_precision(
            boxes_pred, boxes_true, iou_threshold=0.5, box_format="midpoint"
        )
        print(f"Train mAP in {epoch}: {mean_avg_prec}")
        train_fn(train_loader, model, optimizer, loss_fn)
Exemple #5
0
 def _calc_map(self, x, y, pred):
     """Match predictions against targets and return mAP at IoU 0.5."""
     boxes_pred, boxes_true = get_bboxes(
         x=x, y=y, predictions=pred,
         iou_threshold=0.5, threshold=0.4,
         S=self.S, device=self.device,
     )
     return mAP(boxes_pred, boxes_true, iou_threshold=0.5)
Exemple #6
0
def main():
    """Load a trained YOLOv2-lite model, report test-set mAP, and plot the
    ground-truth boxes for one randomly chosen test image."""
    model = YOLOv2_lite(grid_size=GRID_SIZE,
                        num_boxes=NUM_BOXES,
                        num_classes=NUM_CLASSES).to(DEVICE)
    model.load_state_dict(torch.load(MODEL_PATH))
    model.eval()

    test_loader = DataLoader(
        dataset=YOLOVOCDataset(
            DATA_CSV,
            transform=transform,
            img_dir=IMG_DIR,
            label_dir=LABEL_DIR,
            S=GRID_SIZE,
            B=NUM_BOXES,
            C=NUM_CLASSES,
            hflip_prob=0.5,
        ),
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=False,
    )

    images, predictions, labels = get_bboxes(
        test_loader,
        model,
        iou_threshold=0.5,
        prob_threshold=0.4,
        S=GRID_SIZE,
        C=NUM_CLASSES,
        mode="all",
        return_images=True,
    )

    mAP = mean_average_precision(predictions, labels, num_classes=NUM_CLASSES)
    print("Mean Average Precision: %.3f" % (mAP))

    # Pick a random image; boxes are [image_index, class, x, y, w, h, ...],
    # so strip the leading image index before plotting.
    img_to_plot = np.random.randint(0, len(images))
    image = images[img_to_plot]
    img_preds = [box[1:] for box in predictions if box[0] == img_to_plot]
    img_labels = [box[1:] for box in labels if box[0] == img_to_plot]

    # plot_detections(image, img_preds)  # prediction overlay kept disabled
    plot_detections(image, img_labels)
Exemple #7
0
def main():
    """Train YOLOv1 on an 8-example VOC subset, printing train mAP per epoch.

    Uses module-level config (device, learning_rate, wd, epochs, ...) and
    helpers (get_bboxes, mean_average_precision, train_fn, ...).
    """
    model = YOLOv1(split_size=7, num_boxes=2, num_classes=20).to(device)
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=wd)
    loss_fn = YoloLoss()
    if load_model:
        load_checkpoint(torch.load(load_model_file), model, optimizer)

    train_dataset = VOCDataset("data/8examples.csv",
                               transform=transform,
                               img_dir=img_dir,
                               label_dir=label_dir)
    test_dataset = VOCDataset("data/test.csv",
                              transform=transform,
                              img_dir=img_dir,
                              label_dir=label_dir)

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=bs,
                              num_workers=num_workers,
                              pin_memory=pin_mem,
                              shuffle=True,
                              drop_last=False)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=bs,
                             num_workers=num_workers,
                             pin_memory=pin_mem,
                             shuffle=True,
                             drop_last=True)

    for epoch in range(epochs):
        pred_boxes, target_boxes = get_bboxes(train_loader,
                                              model,
                                              iou_threshold=0.5,
                                              threshold=0.4)
        mean_avg_prec = mean_average_precision(pred_boxes,
                                               target_boxes,
                                               iou_threshold=0.5,
                                               box_format="midpoint")
        print(f"Train mAP: {mean_avg_prec}")

        train_fn(train_loader, model, optimizer, loss_fn)
        # BUG FIX: removed a stray `joblib.load(model_fname + '_svc.pickle')`
        # that reloaded an unrelated SVC pickle on every epoch and discarded
        # the result (a copy-paste artifact from a different project).

# Batch-process the test images: run vehicle detection, then write each
# debug stage (raw boxes, heatmaps, labeled components, final boxes) to disk.
import os  # FIX: needed for separator-safe basename extraction below

file_list = sorted(glob.glob('test_images/*.jpg'))
output_dir = 'output_images/'
for f in file_list:
    time_start = time.time()
    # BUG FIX: `f[f.rindex('/') + 1:]` raised ValueError for paths without a
    # '/' and broke on Windows separators; os.path.basename handles both.
    fname = os.path.basename(f)
    print('Processing', fname)
    img = cv2.imread(f)
    boxes = utils.detect_vehicles_parallel(img, scaler, clf)
    cv2.imwrite(output_dir + 'raw_boxes_' + fname,
                utils.draw_boxes(img, boxes))
    # Unthresholded heatmap for debugging, then a thresholded one (>= 3 hits).
    heatmap = utils.get_heatmap(img.shape, boxes, 1)
    cv2.imwrite(output_dir + 'hm_no_thresh_' + fname,
                (heatmap * 255).astype(np.uint8))
    heatmap = utils.get_heatmap(img.shape, boxes, 3)
    cv2.imwrite(output_dir + 'hm_thresh_' + fname,
                (255 * heatmap).astype(np.uint8))
    hmlbl, lbls = utils.get_labels(heatmap)
    if lbls:
        # Spread the label ids across 0-255 so each component is visible.
        cv2.imwrite(output_dir + 'labeled_hm_' + fname,
                    (hmlbl * (255 // lbls)).astype(np.uint8))
    else:
        cv2.imwrite(output_dir + 'labeled_hm_' + fname,
                    hmlbl.astype(np.uint8))
    bboxes = utils.get_bboxes(hmlbl, lbls)
    img_final = utils.draw_boxes(img, bboxes)
    cv2.imwrite(output_dir + 'final_' + fname, img_final)
    time_end = time.time()
    print(time_end - time_start)
def train_loop(cfg_path, gpu_n='0'):
    """Run the YOLO training loop described by a YAML config file.

    Args:
        cfg_path: path to the YAML configuration file.
        gpu_n: CUDA device index (string) used when GPU training is enabled.
    """
    # get configs
    with open(cfg_path, 'r') as stream:
        config = yaml.safe_load(stream)
    # BUG FIX: `torch.cuda.is_available` was referenced without calling it;
    # a bound method is always truthy, so 'cuda' was selected even on
    # machines with no GPU whenever config['GPU'] was set.
    device = torch.device('cuda:{}'.format(gpu_n) if config['GPU']
                          and torch.cuda.is_available() else 'cpu')
    dtype = torch.float32  # TODO: find out how it affects speed and accuracy
    MODEL = config['MODEL']
    LOAD_MODEL = config['LOAD_MODEL']
    LOAD_MODEL_FILE = config['LOAD_MODEL_FILE']
    SAVE_MODEL = config['SAVE_MODEL']
    SAVE_MODEL_N = config['SAVE_MODEL_N']
    SAVE_MODEL_DIR = config['SAVE_MODEL_DIR']
    DATASET_DIR = config['DATASET_DIR']
    L_RATE = config['LEARNING_RATE']
    DECAY_RATE = config['DECAY_RATE']
    DECAY_EPOCHS = config['DECAY_EPOCHS']
    WEIGHT_DECAY = config['WEIGHT_DECAY']
    EPOCHS = config['EPOCHS']
    BATCH_SIZE = config['BATCH_SIZE']
    NUM_WORKERS = config['NUM_WORKERS']
    PIN_MEMORY = config['PIN_MEMORY']
    CSV_TRAIN = config['CSV_TRAIN']
    CSV_VAL = config['CSV_VAL']

    # set up model
    if MODEL == 'Darknet':
        # BUG FIX: was `.to(DEVICE)` — DEVICE is not defined in this scope;
        # the torch.device computed above is the local `device`.
        model = YoloV1(grid_size=7, num_boxes=2, num_classes=20).to(device)
    elif MODEL == 'VGG':
        pass  # add here VGG backbone

    # BUG FIX: start_epoch was only assigned in the else-branch, so resuming
    # (LOAD_MODEL=True, with the checkpoint load still commented out) raised
    # NameError at the epoch loop below.
    start_epoch = 0
    if LOAD_MODEL:
        # TODO: load backbone
        # cfg_cp, start_epoch = load_checkpoint(LOAD_MODEL_FILE, model)
        val = input(
            'Do you want to use config from checkpoint? Answer "yes" or "no": '
        )
        # if val == 'yes':  (NOTE(review): original compared 'val' == 'yes',
        # a string literal that is always False)
        #     L_RATE = cfg_cp['LEARNING_RATE']
        #     DECAY_RATE = cfg_cp['DECAY_RATE']
        #     DECAY_EPOCHS = cfg_cp['DECAY_EPOCHS']
        #     WEIGHT_DECAY = cfg_cp['WEIGHT_DECAY']
        #     BALANCED = cfg_cp['BALANCED_DATASET']
        #     BATCH_SIZE = cfg_cp['BATCH_SIZE']
        #     NUM_WORKERS = cfg_cp['NUM_WORKERS']
        #     PIN_MEMORY = cfg_cp['PIN_MEMORY']
        #     MIN_IMAGES = cfg_cp['MIN_IMAGES']
        #     LOSS = cfg_cp['LOSS']
    else:
        model = init_weights(model)

    optimizer = optim.Adam(model.parameters(),
                           lr=L_RATE,
                           weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss()
    loader_params = BATCH_SIZE, NUM_WORKERS, PIN_MEMORY, DATASET_DIR, CSV_TRAIN, CSV_VAL
    loader = get_dataloader(loader_params)

    # create folder to save models
    if SAVE_MODEL:
        if not os.path.exists('{}/{}'.format(SAVE_MODEL_DIR, MODEL)):
            os.makedirs('{}/{}'.format(SAVE_MODEL_DIR, MODEL))
    losses, accuracies = {
        'train': [],
        'validate': []
    }, {
        'train': [],
        'validate': []
    }

    for epoch in range(start_epoch, EPOCHS + start_epoch):
        t = time()
        # Step-decay the learning rate by rebuilding the optimizer.
        if (epoch + 1) % DECAY_EPOCHS == 0:
            L_RATE *= (1 - DECAY_RATE)
            optimizer = optim.Adam(model.parameters(),
                                   lr=L_RATE,
                                   weight_decay=WEIGHT_DECAY)

        # print epoch number
        print_report(part='start', epoch=epoch)
        # train loop
        train_epoch(loader['train'], model, optimizer, device, loss_fn)

        # print metrics
        pred_bb, target_bb = get_bboxes(loader['train'],
                                        model,
                                        iou_threshold=0.5,
                                        threshold=0.4)
        train_map = mean_average_precision(pred_bb,
                                           target_bb,
                                           iou_threshold=0.5,
                                           box_format='midpoint')

        v_pred_bb, v_target_bb = get_bboxes(loader['val'],
                                            model,
                                            iou_threshold=0.5,
                                            threshold=0.4)
        val_map = mean_average_precision(v_pred_bb,
                                         v_target_bb,
                                         iou_threshold=0.5,
                                         box_format='midpoint')

        # Placeholder -1 losses: per-epoch loss collection is not wired up yet.
        metrics = -1, -1, train_map, val_map
        print_report(part='accuracy', metrics=metrics)
        # collect metrics
        # losses['train'].append(train_loss)
        # losses['validate'].append(val_loss)
        # accuracies['train'].append(train_acc)
        # accuracies['validate'].append(val_acc)

        # save models
        # if SAVE_MODEL:
        #     save_checkpoint(model=model, cfg=cfg, epoch=epoch, loss=round(val_loss, 3))

        # print time
        print_report(part='end', t=int(time() - t))
def main():
    """Train YOLOv1 on a face-detection dataset, logging train mAP per epoch."""
    model = Yolov1(split_size=S, num_boxes=B, num_classes=C).to(DEVICE)
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss(S=S, B=B, C=C)

    if LOAD_MODEL:
        load_checkpoint(torch.load(LOAD_MODEL_FILE), model, optimizer)

    train_dataset = VOCDataset(
        training_path='/home/mt/Desktop/For_github/computer_vision_projects/face_recognition/data',
        S=3,
        C=2,
        transform=transform,
    )

    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=BATCH_SIZE,
        num_workers=NUM_WORKERS,
        pin_memory=PIN_MEMORY,
        shuffle=True,
        drop_last=True,
    )

    # NOTE: the tutorial's test dataset/loader, the per-batch visualisation
    # snippet, and the >0.9-mAP checkpoint-saving snippet are intentionally
    # not enabled in this variant.
    for _epoch in range(EPOCHS):
        boxes_pred, boxes_true = get_bboxes(train_loader,
                                            model,
                                            iou_threshold=0.5,
                                            threshold=0.4)
        mean_avg_prec = mean_average_precision(boxes_pred,
                                               boxes_true,
                                               iou_threshold=0.5,
                                               box_format="midpoint")
        print(f"Train mAP: {mean_avg_prec}")

        train_fn(train_loader, model, optimizer, loss_fn)
Exemple #11
0
def main():
    """Train YOLOv1 on a small VOC subset; checkpoint once train mAP > 0.9."""
    model = Yolov1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    loss_fn = YoloLoss()

    if LOAD_MODEL:
        load_checkpoint(torch.load(LOAD_MODEL_FILE), model, optimizer)

    # CSV options: test.csv, 8examples.csv, 100examples.csv
    dataset_common = dict(transform=transform, img_dir=IMG_DIR,
                          label_dir=LABEL_DIR)
    train_dataset = VOCDataset("data/8examples.csv", **dataset_common)
    test_dataset = VOCDataset("data/test.csv", **dataset_common)

    loader_common = dict(batch_size=BATCH_SIZE, num_workers=NUM_WORKERS,
                         pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)
    train_loader = DataLoader(dataset=train_dataset, **loader_common)
    test_loader = DataLoader(dataset=test_dataset, **loader_common)

    for epoch in range(EPOCHS):
        pred_boxes, target_boxes = get_bboxes(
            train_loader, model, iou_threshold=0.5, threshold=0.4, device=DEVICE,
        )
        mean_avg_prec = mean_average_precision(
            pred_boxes, target_boxes, iou_threshold=0.5, box_format="midpoint"
        )

        # Persist a checkpoint once the model fits the small train set well,
        # then pause briefly so the save can be interrupted safely.
        if mean_avg_prec > 0.9:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            save_checkpoint(checkpoint, filename=LOAD_MODEL_FILE)
            import time
            time.sleep(10)

        print(f"Train mAP: {mean_avg_prec}")

        train_fn(train_loader, model, optimizer, loss_fn)
Exemple #12
0
def main():
    """Train YOLOv1 on a mask dataset with periodic mAP eval and checkpoints.

    Appends metrics to ``train_mAPs``, ``val_mAPs`` and ``epochs_recorded``
    — presumably module-level lists; TODO confirm they are defined at module
    scope before calling this function.
    """
    model = YOLOv1(grid_size=GRID_SIZE,
            num_boxes=NUM_BOXES, num_classes=NUM_CLASSES).to(DEVICE)
    optimizer = optim.Adam(
            model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY
    )
    loss_fn = YoloLoss(S=GRID_SIZE, B=NUM_BOXES, C=NUM_CLASSES)

    epochs_passed = 0

    if LOAD_CHECKPOINT:
        checkpoint = torch.load(LOAD_CHECKPOINT_PATH)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Resume training with a reduced learning rate.
        for param_group in optimizer.param_groups:
            param_group['lr'] = 2e-6
        epochs_passed = checkpoint['epoch']

    mask_dataset = YOLOVOCDataset(
            TRAINING_DATA, transform=train_transform,
            img_dir=IMG_DIR, label_dir=LABEL_DIR,
            S=GRID_SIZE, B=NUM_BOXES, C=NUM_CLASSES,
            hflip_prob=0.5,
            random_crops=0.25,
    )

    # Deterministic train/val split.
    train_dataset, val_dataset = random_split(
            mask_dataset, [TRAIN_SIZE, VAL_SIZE],
            generator=torch.Generator().manual_seed(42),
    )

    # IMPROVED: the CUDA/CPU branches previously duplicated the entire
    # DataLoader construction; only num_workers/pin_memory differed (they
    # were omitted on CPU), so build that difference once.
    loader_extra = {}
    if torch.cuda.is_available():
        loader_extra = dict(num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY)

    train_loader = DataLoader(
            dataset=train_dataset,
            batch_size=BATCH_SIZE,
            shuffle=True,
            drop_last=DROP_LAST,
            **loader_extra,
    )
    val_loader = DataLoader(
            dataset=val_dataset,
            batch_size=BATCH_SIZE,
            shuffle=True,
            drop_last=False,
            **loader_extra,
    )

    for epoch in range(EPOCHS):
        # Evaluate on every other epoch.
        if epochs_passed % 2 == 0:
            train_pred_boxes, train_target_boxes = get_bboxes(
                    train_loader, model, iou_threshold=0.35, prob_threshold=0.4,
                    S=GRID_SIZE, C=NUM_CLASSES, mode="batch",
                    device=DEVICE,
            )
            val_pred_boxes, val_target_boxes = get_bboxes(
                    val_loader, model, iou_threshold=0.35, prob_threshold=0.4,
                    S=GRID_SIZE, C=NUM_CLASSES, mode="batch",
                    device=DEVICE,
            )

            # map function takes predicted boxes and ground truth
            # boxes in form [[],[],[],...] where each sublist is a bounding box
            # of form [image_index, class_pred, x_mid, y_mid, w, h, prob]
            train_mAP = single_map(
                    train_pred_boxes, train_target_boxes,
                    box_format="midpoint", num_classes=NUM_CLASSES,
                    iou_threshold=0.35,
            )
            val_mAP = single_map(
                    val_pred_boxes, val_target_boxes,
                    box_format="midpoint", num_classes=NUM_CLASSES,
                    iou_threshold=0.35,
            )

            train_mAPs.append(train_mAP)
            val_mAPs.append(val_mAP)
            epochs_recorded.append(epochs_passed)

            print("Train mAP: %f"%(train_mAP))
            print("Val mAP: %f"%(val_mAP))

        # Save a checkpoint every 10th epoch (including epoch 0 / on resume).
        if epochs_passed % 10 == 0:
            save_path = SAVE_CHECKPOINT_PATH + ("checkpoint_%de"%(epochs_passed))+".pt"
            checkpoint = {
                'epoch': epochs_passed,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(checkpoint, save_path)
            print("Trained for %d epochs"%(epochs_passed))

        train_fn(train_loader, model, optimizer, loss_fn)
        epochs_passed += 1
Exemple #13
0
def main():
    """Fine-tune a ResNet-backed YOLOv1 on 100 VOC examples with TensorBoard logging."""

    IMG_DIR = "../datasets/voc/images"
    LABEL_DIR = "../datasets/voc/labels"
    MAPPING_FILE = "../datasets/voc/100examples.csv"
    BATCH_SIZE = 8
    NUM_WORKERS = 2
    PIN_MEMORY = True
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    LEARNING_RATE = 4e-4
    WEIGHT_DECAY = 0.0
    EPOCHS = 100
    LOAD_MODEL_FILE = "retrained_grad_true.pth.tar"
    S = 7
    C = 20
    B = 2
    PRETRAINED = True

    print("Using Device:", DEVICE)

    class Compose(object):
        # Applies a chain of image-only transforms; bboxes pass through
        # unchanged (resize keeps relative box coordinates valid).
        def __init__(self, transforms):
            self.transforms = transforms

        def __call__(self, image, bboxes):
            for t in self.transforms:
                image = t(image)
            return image, bboxes

    # Types of preprocessing transforms we want to apply
    convert_transform = transforms.ToTensor()
    resize_transform = transforms.Resize((448, 448))
    # normalize_transform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform = Compose([convert_transform, resize_transform])

    train_dataset = VOCDataset(MAPPING_FILE, S=S, C=C, B=B, transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)

    val_dataset = VOCDataset(MAPPING_FILE, S=S, C=C, B=B, transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
    val_loader = DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=True)

    model = YoloV1(S=S, B=B, C=C, pretrained=PRETRAINED).to(DEVICE)

    # Unfreeze the whole backbone for fine-tuning.
    for param in model.resnet.parameters():
        param.requires_grad = True
    # BUG FIX: `model.resnet.fc.requires_grad = True` only set a plain
    # attribute on the Module object — requires_grad must be set on each
    # Parameter for gradients to flow.
    for param in model.resnet.fc.parameters():
        param.requires_grad = True

    for counter, param in enumerate(model.parameters()):
        print(counter, param.requires_grad)

    parameters = [param.numel() for param in model.parameters()]
    print("Number of model parameters", sum(parameters))

    parameters = [param.numel() for param in model.resnet.parameters()]
    print("Number of resnet parameters", sum(parameters))

    # Only trainable parameters are handed to the optimizer.
    parameter_list = filter(lambda p: p.requires_grad, model.parameters())

    optimizer = optim.Adam(parameter_list, lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    yolo_loss = YoloLoss(S=S, C=C, B=B)
    writer = SummaryWriter()
    current_time = time.time()
    mean_loss_list = []

    for epoch in range(EPOCHS):

        mean_loss = []

        pred_boxes, target_boxes = get_bboxes(train_loader, model, iou_threshold=0.5, threshold=0.4, device=DEVICE)
        mean_avg_prec = mean_average_precision(pred_boxes, target_boxes, iou_threshold=0.5, box_format="midpoint")

        # Save the model
        if mean_avg_prec > 0.99 or epoch == (EPOCHS - 1):
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            print("=> Saving checkpoint")
            torch.save(checkpoint, LOAD_MODEL_FILE)
            time.sleep(10)

        for batch_idx, (x, y) in enumerate(train_loader):

            x, y = x.to(DEVICE), y.to(DEVICE)
            y_p = model(x)
            # ROBUSTNESS: infer the batch dim instead of hard-coding
            # BATCH_SIZE (equivalent here because drop_last=True, but safe
            # if that ever changes).
            y_p = torch.reshape(y_p, (-1, S, S, C + 5 * B))
            loss = yolo_loss(y_p, y)
            mean_loss.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Calculate average loss
        mean_loss = sum(mean_loss) / len(mean_loss)
        delta_time = time.time() - current_time
        current_time = time.time()

        writer.add_scalar("Average Loss: ", mean_loss, epoch)
        writer.add_scalar("Mean Average Precision: ", mean_avg_prec, epoch)
        writer.add_scalar("Epoch Duration [s]", delta_time, epoch)

        print("Epoch:", epoch)
        mean_loss_list.append(mean_loss)

    writer.close()