Example #1
0
                    dest='lock_base',
                    help='Lock base')
# Remaining CLI flags, then construction of the COCO eval/train datasets.
parser.add_argument("--weights_only",
                    dest='weights_only',
                    action='store_true',
                    default=False,
                    help='Resume only weights from loaded model')
parser.add_argument("--loc", default=None, type=str, help="Dataset location")

args = parser.parse_args()

# Fixed square input resolution fed to the network.
height = 300
width = 300

# Both splits share the same image root under the dataset location.
images_root = args.loc + "images/"

# Validation split: deterministic crop preprocessing, order preserved.
cocoEvalSet = cd.CocoDetection(
    root=images_root,
    annFile=args.loc + "annotations/instances_val2014.json",
    preprocess=Crop(size=(height, width)))

cocoEvalLoader = DataLoader(
    cocoEvalSet,
    args.batch,
    shuffle=False,
    num_workers=8,
    pin_memory=True,
    collate_fn=cd.collate)

# Training split: random augmentation instead of a plain crop.
cocoTrainSet = cd.CocoDetection(
    root=images_root,
    annFile=args.loc + "annotations/instances_train2014.json",
    preprocess=Augment(size=(height, width)))

cocoTrainLoader = DataLoader(cocoTrainSet,
Example #2
0
print("Preparing validation dataset")

import json

# Fixed square input resolution fed to the network.
height = 300
width = 300

# Read the COCO category list and keep only the names; `small` is the
# surveillance-relevant subset, `head` the single-class head detector label.
with open('ssd/lib/data/coco_labels.json') as f:
    coco = [category["name"] for category in json.load(f)["categories"]]
small = ["person", "bicycle", "car", "motorcycle", "bus", "train", "truck"]
head = ["head"]

# surveillance dataset for SSD — validation split filtered to `small` classes
cocoEvalSet = cd.CocoDetection(
    root=args.loc + "images/",
    annFile=args.loc + "annotations/instances_val2014.json",
    preprocess=Crop(size=(height, width)),
    classes=small)

# Batch size 1, no shuffling: evaluation must be deterministic.
evalLoader = DataLoader(
    cocoEvalSet,
    1,
    shuffle=False,
    num_workers=1,
    pin_memory=True,
    collate_fn=cd.collate)

# HollywoodHead dataset for SSDTC
# hheadsTrainSet =  hhd.HHeadsDetection(root=args.loc + "JPEGImages/",
#                                       annFile=args.loc+"annotations/val.json",
#                                       preprocess=CropChunk(size=(height, width), eval=True),
#                                       chunk=5,