Example #1
    def __init__(self, opt, train=True):
        super().__init__()
        self.data_dir = opt.data_dir
        self.train = train

        if train:
            self.data_dir = osp.join(self.data_dir, "VisDrone2019-DET-train")
        else:
            self.data_dir = osp.join(self.data_dir, "VisDrone2019-DET-val")

        self.img_dir = osp.join(self.data_dir, 'images', '{}.jpg')
        self.den_dir = osp.join(self.data_dir, 'DensityMask', '{}.png')
        self.im_ids = self._load_image_set_index()

        self.img_number = len(self.im_ids)

        # transform
        self.train_dtf = transforms.Compose([
            dtf.ImgFixedResize(size=opt.input_size),
            dtf.RandomColorJeter(0.3, 0.3, 0.3, 0.3),
            dtf.RandomHorizontalFlip(),
            dtf.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            dtf.ToTensor()])
        self.test_dtf = transforms.Compose([
            dtf.ImgFixedResize(crop_size=opt.input_size),  # default = 513
            dtf.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            dtf.ToTensor()])
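
The snippet assumes the usual `import os.path as osp` alias and a `_load_image_set_index` helper that is not shown. A minimal sketch of that helper, under the assumption that every `.jpg` below `images/` is one sample (only the method name and directory layout come from the snippet; the body is hypothetical):

import glob
import os.path as osp

def _load_image_set_index(self):
    # Hypothetical: collect the stem of every .jpg under <data_dir>/images
    # so it can be substituted into self.img_dir / self.den_dir later.
    paths = glob.glob(osp.join(self.data_dir, 'images', '*.jpg'))
    return [osp.splitext(osp.basename(p))[0] for p in paths]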
Example #2
def main(args):
    num_digits = 6
    prefix = '%s_%s_' % (args.filename_prefix, args.split)
    img_template = '%s%%0%dd.png' % (prefix, num_digits)
    scene_template = '%s%%0%dd.json' % (prefix, num_digits)
    blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
    arr_template = '%s%%0%dd.npz' % (prefix, num_digits)

    img_template = osp.join(args.output_dir, 'train/images', img_template)
    scene_template = osp.join(args.output_dir, 'train/scenes', scene_template)
    blend_template = osp.join(args.output_dir, 'train/blend', blend_template)
    arr_template = osp.join(args.output_dir, 'train/arr', arr_template)

    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)

    if not os.path.isdir(osp.join(args.output_dir, 'train/images')):
        os.makedirs(osp.join(args.output_dir, 'train/images'))

    if not os.path.isdir(osp.join(args.output_dir, 'train/scenes')):
        os.makedirs(osp.join(args.output_dir, 'train/scenes'))

    if not os.path.isdir(osp.join(args.output_dir, 'train/arr')):
        os.makedirs(osp.join(args.output_dir, 'train/arr'))

    if args.save_blendfiles == 1 and not os.path.isdir(
            osp.join(args.output_dir, 'train/blend')):
        os.makedirs(osp.join(args.output_dir, 'train/blend'))

    all_scene_paths = []
    for i in range(args.num_images):

        print("GENERATING FRAME {0}/{1}".format(i, args.num_images))

        img_path = img_template % (i + args.start_idx)
        scene_path = scene_template % (i + args.start_idx)
        arr_path = arr_template % (i + args.start_idx)

        all_scene_paths.append(scene_path)
        blend_path = None
        if args.save_blendfiles == 1:
            blend_path = blend_template % (i + args.start_idx)
        num_objects = random.randint(args.min_objects, args.max_objects)

        render_scene(args,
                     num_objects=num_objects,
                     output_index=(i + args.start_idx),
                     output_split=args.split,
                     output_image=img_path,
                     output_scene=scene_path,
                     output_arr=arr_path,
                     output_blendfile=blend_path,
                     scene_idx=i)

    exit()
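
The four `isdir`/`makedirs` checks above can be collapsed: since Python 3.2, `os.makedirs` accepts `exist_ok=True`, so a loop over the subdirectory names (taken from the snippet) is equivalent:

import os
import os.path as osp

def ensure_output_dirs(output_dir, save_blendfiles=0):
    # Create the output tree; directories that already exist are left alone.
    subdirs = ['train/images', 'train/scenes', 'train/arr']
    if save_blendfiles == 1:
        subdirs.append('train/blend')
    for sub in subdirs:
        os.makedirs(osp.join(output_dir, sub), exist_ok=True)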
Example #3
def main(args):
  num_digits = 6
  prefix = '%s_%s_' % (args.filename_prefix, args.split)
  img_template = '%s%%0%dd.png' % (prefix, num_digits)
  scene_template = '%s%%0%dd.json' % (prefix, num_digits)
  blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
  arr_template = '%s%%0%dd.npz' % (prefix, num_digits)
  
  img_template = osp.join(args.output_dir, 'train/images', img_template)
  scene_template = osp.join(args.output_dir, 'train/scenes', scene_template)
  blend_template = osp.join(args.output_dir, 'train/blend', blend_template)
  arr_template = osp.join(args.output_dir, 'train/arr', arr_template)

  if not os.path.isdir(args.output_dir):
    os.makedirs(args.output_dir)

  if not os.path.isdir(osp.join(args.output_dir, 'train/images')):
    os.makedirs(osp.join(args.output_dir, 'train/images'))

  if not os.path.isdir(osp.join(args.output_dir, 'train/scenes')):
    os.makedirs(osp.join(args.output_dir, 'train/scenes'))

  if not os.path.isdir(osp.join(args.output_dir, 'train/arr')):
    os.makedirs(osp.join(args.output_dir, 'train/arr'))

  if args.save_blendfiles == 1 and not os.path.isdir(osp.join(args.output_dir, 'train/blend')):
    os.makedirs(osp.join(args.output_dir, 'train/blend'))

  num_objects = random.randint(args.min_objects, args.max_objects)

  dir_list = args.output_dir.split('/')[-2].split('_')
  directions = {}
  for i, direction in enumerate(dir_list):
    directions[i] = direction.split('-')
  print(directions)
  render_scene(args,
    num_objects=num_objects,
    num_images=args.num_images,
    output_split=args.split,
    image_template=img_template,
    scene_template=scene_template,
    arr_template=arr_template,
    directions=directions
  )

  exit()
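
The `'%s%%0%dd.png'` templates above go through two rounds of %-formatting: the first bakes the prefix and digit count into a pattern, the second fills in the frame index. A quick check with illustrative values (the prefix here is made up):

prefix = 'CLEVR_train_'  # illustrative prefix
num_digits = 6
img_template = '%s%%0%dd.png' % (prefix, num_digits)
print(img_template)      # CLEVR_train_%06d.png
print(img_template % 7)  # CLEVR_train_000007.png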
Example #4
def single_gpu_test(args, cfg, model):
    testImgList = os.listdir(args.testImgpath)
    for imgfile in testImgList:
        imgfile = imgfile.strip()
        img = Image.open(os.path.join(args.testImgpath, imgfile))
        image = img.convert('RGB')
        img = np.array(image)
        height, width, channel = img.shape  # ndarray shape is (rows, cols, channels)
        rows = int(math.ceil(1.0 * (height - args.cropsize) / args.stride)) + 1
        cols = int(math.ceil(1.0 * (width - args.cropsize) / args.stride)) + 1
        multi_bboxes = list()
        multi_scores = list()
        for row in range(rows):
            if height > args.cropsize:
                y_start = min(row * args.stride, height - args.cropsize)
                y_end = y_start + args.cropsize
            else:
                y_start = 0
                y_end = height
            for col in range(cols):
                if width > args.cropsize:
                    x_start = min(col * args.stride, width - args.cropsize)
                    x_end = x_start + args.cropsize
                else:
                    x_start = 0
                    x_end = width
                subimg = copy.deepcopy(img[y_start:y_end, x_start:x_end, :])
                h, w, c = np.shape(subimg)  # rows, cols, channels
                outimg = np.zeros((args.cropsize, args.cropsize, 3))
                outimg[0:h, 0:w, :] = subimg
                result = inference_detector(model, outimg)  # 15 per-class arrays
                bboxes = np.vstack(result)
                labels = [  # 1-based labels: column 0 is reserved for background
                    np.full(bbox.shape[0], i + 1, dtype=np.int32)
                    for i, bbox in enumerate(result)
                ]
                labels = np.concatenate(labels)
                if len(bboxes) > 0:
                    # image = draw_boxes_with_label_and_scores(outimg, bboxes[:, :5], bboxes[:, 5], labels - 1, 1)
                    # image.save(os.path.join(args.patchImgPath, imgfile[:-4]+'_'+str(y_start)+'_'+str(x_start)+'.png'))
                    bboxes[:, :2] += [x_start, y_start]
                    multi_bboxes.append(bboxes[:, :5])
                    scores = np.zeros(
                        (bboxes.shape[0], len(ODAI_LABEL_MAP.keys())))  # one column per class, incl. background
                    for i, j in zip(range(bboxes.shape[0]), labels):
                        scores[i, j] = bboxes[i, 5]
                    multi_scores.append(scores)
        crop_num = len(multi_bboxes)
        if crop_num > 0:
            multi_bboxes = np.vstack(multi_bboxes)
            multi_scores = np.vstack(multi_scores)
            multi_bboxes = torch.Tensor(multi_bboxes)
            multi_scores = torch.Tensor(multi_scores)
            score_thr = 0.3
            nms = dict(type='nms', iou_thr=0.5)
            max_per_img = 2000
            det_bboxes, det_labels = multiclass_nms(multi_bboxes, multi_scores,
                                                    score_thr, nms,
                                                    max_per_img)
            if det_bboxes.shape[0] > 0:
                det_bboxes = np.array(det_bboxes)
                det_labels = np.array(det_labels)  #0-14
                image = draw_boxes_with_label_and_scores(
                    img, det_bboxes[:, :5], det_bboxes[:, 5], det_labels, 1)
                image.save(os.path.join(args.saveImgpath, imgfile))

                CLASS_DOTA = ODAI_LABEL_MAP.keys()
                LABEL_NAME_MAP = get_label_name_map()
                write_handle_r = {}
                os.makedirs(args.saveTxtpath, exist_ok=True)  # ensure the txt output dir exists

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back-ground':
                        continue
                    write_handle_r[sub_class] = open(
                        os.path.join(args.saveTxtpath,
                                     'Task1_%s.txt' % sub_class), 'a+')
                """
                :det_bboxes: format [x_c, y_c, w, h, theta, score]
                :det_labels: [label]
                """
                boxes = []

                for rect in det_bboxes[:, :5]:
                    # convert [x_c, y_c, w, h, theta] to the 4 corner points
                    box = cv2.boxPoints(
                        ((rect[0], rect[1]), (rect[2], rect[3]), -rect[4]))
                    box = box.reshape(-1)
                    boxes.append(box.tolist())

                rboxes = np.array(boxes, dtype=np.float32)

                for i, rbox in enumerate(rboxes):
                    line = '%s %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f\n' % (
                        imgfile[:-4], det_bboxes[i, 5], rbox[0], rbox[1],
                        rbox[2], rbox[3], rbox[4], rbox[5], rbox[6], rbox[7])
                    write_handle_r[LABEL_NAME_MAP[int(det_labels[i]) + 1]].write(line)

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back-ground':
                        continue
                    write_handle_r[sub_class].close()
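
The row/column arithmetic above slides a `cropsize` window with step `stride` and clamps the last window to the image border. The same start offsets can be computed directly; a standalone sketch of that indexing:

import math

def window_starts(length, cropsize, stride):
    # Start offsets of a sliding window along one axis, clamped so the
    # final window ends exactly at the border (mirrors the loops above).
    if length <= cropsize:
        return [0]
    n = int(math.ceil(1.0 * (length - cropsize) / stride)) + 1
    return [min(k * stride, length - cropsize) for k in range(n)]

print(window_starts(1000, 512, 256))  # [0, 256, 488]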
Example #5


if __name__ == '__main__':
    args = parse_args()
    model = init_detector(args.config, args.checkpoint, device='cuda:0')
    config = mmcv.Config.fromfile(args.config) if isinstance(
        args.config, str) else args.config
    os.makedirs(args.saveTxtpath, exist_ok=True)
    os.makedirs(args.saveImgpath, exist_ok=True)
    os.makedirs(args.patchImgPath, exist_ok=True)

    single_gpu_test(args, config, model)
Example #6
    from PIL import Image

    config_file = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/configs/myconfig/test_cascade_rcnn_x101_64x4d_fpn_1x_origin.py'
    checkpoint_file = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/checkpoints/cascade_rcnn_x101_64x4d_fpn_1x_trainval_e40_934.pth'

    # build the model from a config file and a checkpoint file
    model = init_detector(config_file, checkpoint_file, device='cuda:0')

    # test a single image and show the results
    # img = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/data/test_sp/images/P0063__1__0___0.png'  # or img = mmcv.imread(img), which will only load it once
    # imgpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/data/rpn15_512/crop512_256'
    # imgpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/data/rpn15_512/crop1024_512'
    imgpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/data/rpn15/test'
    saveimgpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/cascaded_x101/origin/img2'
    savetxtpath = '/mnt/lustre/yanhongchang/project/one-rpn/mmdetection/demo/work_dirs/out_img/cascaded_x101/origin/txt2'
    os.makedirs(saveimgpath, exist_ok=True)
    os.makedirs(savetxtpath, exist_ok=True)
    imglist = os.listdir(imgpath)
    for imgname in imglist:
        singleimgpath = os.path.join(imgpath, imgname)
        result = inference_detector(model, singleimgpath)
        bboxes = np.vstack(result)
        labels = [  # 0-based class index for each detection
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(result)
        ]
        labels = np.concatenate(labels)
        if len(bboxes) > 0:

            outimg = Image.open(singleimgpath).convert('RGB')
            outimg = np.array(outimg)
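
`inference_detector` in mmdetection 1.x returns one `(n_i, 5)` array of `[x1, y1, x2, y2, score]` rows per class; the stack-and-label idiom above can be wrapped in a helper (a sketch, assuming that return format):

import numpy as np

def flatten_result(result):
    # result: list of (n_i, 5) arrays, one per class.
    bboxes = np.vstack(result)
    labels = np.concatenate([
        np.full(r.shape[0], i, dtype=np.int32) for i, r in enumerate(result)
    ])
    return bboxes, labels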
Example #7
total = outputs[0].sum()  # normalising constant: the sum of all class scores
class_names = [
    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
    'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
s2 = 0
with open(sys.argv[2] + '_res.txt', 'a') as the_file:
    for i, cl in enumerate(class_names):
        score = outputs[0, i].item() / total.item()
        the_file.write('%s %s\n' % (cl, score))
        s2 += score
print('s2 should be 1:', s2)
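
Dividing raw scores by their sum only gives probabilities when the scores are non-negative; for logits the usual normalisation is a softmax. A sketch, assuming `outputs` is a torch tensor of shape (1, 20):

import torch

outputs = torch.randn(1, 20)          # stand-in for the model output
probs = torch.softmax(outputs[0], 0)  # non-negative, sums to 1
print(probs.sum().item())             # ~1.0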
Example #8
    def saveContour(prediction, dataset, idx, saveLoc, labelList=None,
                    gap=4, size=(512, 512), img_suffix=".jpg"):
        """
        Deprecated, use Saver instead.
        Save contours from a model prediction using OpenCV.

        Arguments:
            prediction(dict): output with "boxes", "labels" and "masks"
            dataset(torch.utils.data.Dataset)
            idx(int)
            saveLoc(str)
            labelList(list)
        """
        os.makedirs(saveLoc, exist_ok=True)
        boxes = prediction["boxes"]
        labels = prediction["labels"]
        masks = prediction["masks"]
        count = masks.shape[0]
        label = labels.cpu().numpy()
        shapes = []
        if labelList is None:
            labelList = dataset.Labels
        print(f"{idx} Masks: {count}")

        imname = dataset.imgs[idx]
        imfilename = imname[:imname.rfind(img_suffix)]
        for c in range(count):
            mask = masks[c, 0]
            label_name = labelList[label[c]]
            imdata = mask.mul(255).byte().cpu().numpy()
            # im = Image.fromarray(imdata)
            # display(im)
            ret, thresh = cv.threshold(imdata, 127, 255, 0)
            contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE,
                                                  cv.CHAIN_APPROX_SIMPLE)
            if len(contours) == 0:
                continue
            # reshape as a list
            contour = contours[0].reshape(-1, 2)
            if len(contour) > gap * 5:
                contour = contour[0::gap]
            if len(contour) < 2:
                continue
            shapes.append({
                "label": label_name,
                "points": contour.tolist(),
                "group_id": None,
                "shape_type": "polygon",
                "flags": {}
            })
        imageData = LabelFile.load_image_file(osp.join(dataset.root, imname))
        imageData = base64.b64encode(imageData).decode("utf-8")
        data = {
            "version": "4.5.5",
            "flags": {},
            "shapes": shapes,
            "imagePath": imname,
            "imageHeight": size[1],
            "imageWidth": size[0],
            "imageData": imageData
        }
        with open(osp.join(saveLoc, imfilename + ".json"), "w") as f:
            json.dump(data, f, indent=2)
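
The mask-to-polygon step above (threshold, `findContours`, keep every `gap`-th point) also works standalone; a minimal sketch with a synthetic mask:

import cv2 as cv
import numpy as np

mask = np.zeros((64, 64), np.uint8)
cv.circle(mask, (32, 32), 20, 255, -1)  # synthetic filled-circle mask

_, thresh = cv.threshold(mask, 127, 255, 0)
contours, _ = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
polygon = contours[0].reshape(-1, 2)[::4]  # subsample points (gap=4)
print(polygon.shape)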
Example #9
def main(args):
    num_digits = 6
    prefix = '%s_%s_' % (args.filename_prefix, args.split)
    img_template = '%s%%0%dd.png' % (prefix, num_digits)
    scene_template = '%s%%0%dd.json' % (prefix, num_digits)
    blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
    arr_template = '%s%%0%dd.npz' % (prefix, num_digits)

    img_template = osp.join(args.output_dir, 'train/images', img_template)
    scene_template = osp.join(args.output_dir, 'train/scenes', scene_template)
    blend_template = osp.join(args.output_dir, 'train/blend', blend_template)
    arr_template = osp.join(args.output_dir, 'train/arr', arr_template)

    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)

    if not os.path.isdir(osp.join(args.output_dir, 'train/images')):
        os.makedirs(osp.join(args.output_dir, 'train/images'))

    if not os.path.isdir(osp.join(args.output_dir, 'train/scenes')):
        os.makedirs(osp.join(args.output_dir, 'train/scenes'))

    if not os.path.isdir(osp.join(args.output_dir, 'train/arr')):
        os.makedirs(osp.join(args.output_dir, 'train/arr'))

    if args.save_blendfiles == 1 and not os.path.isdir(
            osp.join(args.output_dir, 'train/blend')):
        os.makedirs(osp.join(args.output_dir, 'train/blend'))

    # all_scene_paths = []
    # for i in range(args.num_images):

    #   img_path = img_template % (i + args.start_idx)
    #   scene_path = scene_template % (i + args.start_idx)
    #   arr_path = arr_template % (i + args.start_idx)

    #   all_scene_paths.append(scene_path)
    #   blend_path = None
    #   if args.save_blendfiles == 1:
    #     blend_path = blend_template % (i + args.start_idx)
    num_objects = random.randint(args.min_objects, args.max_objects)

    dir_list = args.output_dir.split('/')[-2].split('_')
    directions = {}
    for i, direction in enumerate(dir_list):
        directions[i] = direction.split('-')
    print(directions)
    render_scene(args,
                 num_objects=num_objects,
                 num_images=args.num_images,
                 output_split=args.split,
                 image_template=img_template,
                 scene_template=scene_template,
                 arr_template=arr_template,
                 directions=directions)

    # After rendering all images, combine the JSON files for each scene into a
    # single JSON file.
    # all_scenes = []
    # for scene_path in all_scene_paths:
    #   with open(scene_path, 'r') as f:
    #     all_scenes.append(json.load(f))
    # output = {
    #   'info': {
    #     'date': args.date,
    #     'version': args.version,
    #     'split': args.split,
    #     'license': args.license,
    #   },
    #   'scenes': all_scenes
    # }

    # with open(args.output_scene_file, 'w') as f:
    #   json.dump(output, f)

    exit()
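
The `directions` dict above is parsed from the second-to-last path component of `output_dir`, with `_` separating groups and `-` separating the members of each group. For example (illustrative path, not from the source):

output_dir = '../output/left-right_front-behind/'  # illustrative value
dir_list = output_dir.split('/')[-2].split('_')
directions = {i: d.split('-') for i, d in enumerate(dir_list)}
print(directions)  # {0: ['left', 'right'], 1: ['front', 'behind']}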
Example #10
def train(multi_scale=False, freeze_backbone=False, mode='train'):

    # config parameter
    device, gpu_num = torch_utils.select_device(is_head=True)
    start_epoch = 0
    cutoff = 10  # freeze backbone endpoint
    best = osp.join(opt.save_folder, opt.backbone + '_best.pt')
    latest = osp.join(opt.save_folder, opt.backbone + '_latest.pt')
    best_loss = float('inf')
    train_best_loss = float('inf')
    used_mulgpu = False

    #visualization
    if opt.visdom:
        vis = visdom.Visdom()
        vis_legend = ['correct', 'loss', 'F1']
        # epoch_plot = create_vis_plot(vis, 'Epoch', 'Loss', 'train loss', [vis_legend[0],])
        batch_plot = create_vis_plot(vis, 'Batch', 'Loss', 'batch loss',
                                     [vis_legend[0]])
        test_plot = create_vis_plot(vis, 'Epoch', 'Loss', 'test loss',
                                    vis_legend)

    # dataset load
    dataset = DogCat(opt.trainset_path, opt.img_size, mode)
    dataloader = DataLoader(dataset,
                            batch_size=opt.batch_size,
                            num_workers=opt.num_workers,
                            shuffle=True,
                            pin_memory=True,
                            collate_fn=dataset.collate_fn)

    # model and optimizer create , init, load checkpoint
    if opt.backbone == 'resnet':
        model = resnet101(pretrained=opt.pretrained)
    elif opt.backbone == 'vgg':
        model = vgg16(pretrained=opt.pretrained)
    optimizer = optim.SGD(model.parameters(),
                          lr=hyp['lr0'],
                          momentum=hyp['momentum'],
                          weight_decay=hyp['weight_decay'])
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[round(opt.epochs * x) for x in (0.8, 0.9)],
        gamma=hyp['lr_gamma'])
    scheduler.last_epoch = start_epoch - 1

    # resume
    if opt.resume:
        try:
            model, best_loss, start_epoch, optimizer = resume_load_weights(
                model, optimizer, latest)
        except Exception:
            print('load checkpoint failure, the file might be corrupted.\n'
                  'Now, training from epoch 0...')
    # gpu set
    if opt.gpu > 1 and gpu_num > 1:
        device_id = list(range(min(opt.gpu, gpu_num)))
        model = torch.nn.DataParallel(model, device_ids=device_id)
        model.to(device)
        used_mulgpu = True
    else:
        model.to(device)

    # Loss
    criterion = nn.CrossEntropyLoss().to(device)

    # train
    model.hyp = hyp
    model_info(model)
    batch_number = len(dataloader)
    n_burnin = min(round(batch_number / 5 + 1), 1000)  # burn-in batches
    total_time = time.time()
    for epoch in range(start_epoch, opt.epochs):
        print(('%10s' * 4) % ('Epoch', 'Batch', 'Loss', 'Time'))
        model.train()
        scheduler.step()

        # Freeze backbone at epoch 0, unfreeze at epoch 1 (optional)
        if freeze_backbone and epoch < 2:
            for name, p in model.named_parameters():
                if int(name.split('.')[1]) < cutoff:
                    p.requires_grad = False if epoch == 0 else True

        for i, (imgs, label, file_) in enumerate(dataloader):
            imgs = imgs.to(device)
            label = label.to(device)
            start_time = time.time()

            # Multi-Scale training
            if multi_scale:
                if (i + 1 + batch_number * epoch) % 10 == 0:
                    img_size = random.choice(
                        range(img_size_min, img_size_max + 1)) * 32
                    print('img_size = %g' % img_size)
                scale_factor = img_size / max(imgs.shape[-2:])
                imgs = F.interpolate(imgs,
                                     scale_factor=scale_factor,
                                     mode='bilinear',
                                     align_corners=False)

            # SGD burn-in
            if epoch == 0 and i <= n_burnin:
                lr = hyp['lr0'] * (i / n_burnin)**4
                for x in optimizer.param_groups:
                    x['lr'] = lr

            # run model and compute loss
            pred = model(imgs)

            #loss = compute_loss(pred, loss)
            loss = criterion(pred, label[:, 1].view(-1))
            train_best_loss = min(train_best_loss, loss.item())

            if torch.isnan(loss):
                print('WARNING: nan loss detected, ending training')
                return

            loss.backward()

            if (i + 1) % opt.accumulate == 0 or (i + 1) == batch_number:
                optimizer.step()
                optimizer.zero_grad()
            end_time = time.time()

            if opt.visdom:
                update_vis_plot(vis, batch_number * epoch + i, [loss.cpu()],
                                batch_plot, 'append')

            summary = ('%8s%12s' + '%10.3g' * 2) % (
                '%g/%g' % (epoch, opt.epochs), '%g/%g' %
                (i, batch_number), loss, end_time - start_time)

            print(summary)

        if not opt.notest or epoch == opt.epochs - 1:
            with torch.no_grad():
                result = val.val(opt=opt, model=model,
                                 mode='test')  # correct, loss, F1

        if not osp.exists('result_log'):
            os.makedirs('result_log')
        with open(osp.join('result_log', opt.backbone + 'result.txt'),
                  'a') as file:
            file.write(summary + '%10.3g' * 3 % result + '\n')

        test_loss = result[1]
        if test_loss < best_loss:
            best_loss = test_loss

        # visdom
        if opt.visdom:
            # update_vis_plot(vis, epoch, [train_best_loss], epoch_plot, 'append')
            update_vis_plot(vis, epoch, result, test_plot, 'append')

        save = (not opt.nosave) or (epoch == opt.epochs - 1)
        if save:
            # Create checkpoint
            chkpt = {
                'epoch': epoch,
                'best_loss': best_loss,
                'model': model.module.state_dict()
                if used_mulgpu else model.state_dict(),
                'optimizer': None if used_mulgpu else optimizer.state_dict()
            }
            if not osp.exists(opt.save_folder):
                os.makedirs(opt.save_folder)
            torch.save(chkpt, latest)
            if best_loss == test_loss:
                torch.save(chkpt, best)

            backup = False
            if backup and epoch > 0 and epoch % 10 == 0:
                torch.save(
                    chkpt,
                    osp.join(opt.save_folder,
                             opt.backbone + 'backup_%g.pt' % epoch))
            # Delete checkpoint
            del chkpt

    total_time = (time.time() - total_time) / 3600
    print("%g epochs completed in %.3f hours.%" %
          (epoch - start_epoch + 1, total_time))
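
The SGD burn-in above ramps the learning rate as lr0 * (i / n_burnin)**4 over the first batches of epoch 0; the curve is easy to inspect in isolation:

lr0, n_burnin = 0.01, 1000  # illustrative hyper-parameters
for i in (0, 250, 500, 1000):
    print(i, lr0 * (i / n_burnin) ** 4)
# 0 -> 0.0, 250 -> ~3.9e-05, 500 -> 6.25e-04, 1000 -> 0.01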