Example #1
                                annFile=args.loc +
                                "annotations/instances_train2014.json",
                                preprocess=Augment(size=(height, width)))

cocoTrainLoader = DataLoader(cocoTrainSet,
                             batch_size=args.batch,
                             shuffle=True,
                             num_workers=8,
                             pin_memory=True,
                             collate_fn=cd.collate)

ssd = SSD(resume=args.resume,
          resume_head=args.resume_head,
          lock_base=args.lock_base,
          weights_only=args.weights_only,
          size=16,
          classes=81,
          base="vgg",
          evalLoader=cocoEvalLoader,
          trainLoader=cocoTrainLoader,
          loss_balance=args.loss_balance,
          save_dir=args.export,
          learning_rate=args.lr,
          batch_size=args.batch,
          save_interval=args.save_interval,
          eval_interval=args.eval_interval,
          height=height,
          width=width)

ssd.train_model(args.epochs)
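# The loader above passes a custom collate_fn (cd.collate) because each
# image carries a different number of ground-truth boxes, so PyTorch's
# default stacking collate would fail on the targets. A minimal sketch of
# such a collate function (the real cd.collate is not shown in this
# excerpt, so the details here are assumptions):
import torch

def collate(batch):
    # images share a fixed size after Augment, so they stack cleanly;
    # targets stay as a plain list of variable-length per-image tensors
    images = torch.stack([item[0] for item in batch], dim=0)
    targets = [item[1] for item in batch]
    return images, targets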
Example #2
                                     preprocess=Augment(size=(height, width)))

trainLoader = DataLoader(hheadsTrainSet,
                         batch_size=args.batch,
                         shuffle=False,
                         num_workers=1,
                         pin_memory=True,
                         collate_fn=cd.collate)

ssd = SSD(
    resume=args.resume,
    resume_head=args.resume_head,
    lock_base=True,
    lock_extras=True,
    weights_only=args.weights_only,
    classes=2,
    base="resnet",
    size=34,
    # evalLoader=cocoEvalLoader,
    trainLoader=trainLoader,
    loss_balance=args.loss_balance,
    save_dir=args.export,
    learning_rate=args.lr,
    batch_size=args.batch,
    save_interval=args.save_interval,
    eval_interval=args.eval_interval,
    height=height,
    width=width)

ssd.train_model(args.epochs)
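# lock_base=True and lock_extras=True freeze the pretrained backbone and
# the extra feature layers, so only the detection heads are fine-tuned for
# the two-class (head vs. background) task. A minimal sketch of that
# locking behaviour, assuming the model exposes `base` and `extras`
# submodules (names assumed, not confirmed by this snippet):

def lock_submodule(module):
    # stop gradient updates for every parameter in the submodule
    for param in module.parameters():
        param.requires_grad = False

# e.g. lock_submodule(model.base); lock_submodule(model.extras)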
Example #3
# HollywoodHeads dataset for SSDTC
# hheadsTrainSet = hhd.HHeadsDetection(root=args.loc + "JPEGImages/",
#                                      annFile=args.loc + "annotations/val.json",
#                                      preprocess=CropChunk(size=(height, width), eval=True),
#                                      chunk=5,
#                                      eval=True
#                                      )
# evalLoader = DataLoader(hheadsTrainSet, 1, shuffle=False, num_workers=1, pin_memory=True, collate_fn=hhd.collate_chunk_s)

# HollywoodHeads dataset for SSD
# hheadsTrainSet = hhd.HHeadsDetection(root=args.loc + "JPEGImages/",
#                                      annFile=args.loc + "annotations/val.json",
#                                      preprocess=Crop(size=(height, width))
#                                      )
#
# evalLoader = DataLoader(hheadsTrainSet, 1, shuffle=False, num_workers=1, pin_memory=True, collate_fn=cd.collate)

# NOTE: one of the two eval loaders above must be uncommented, otherwise
# the evalLoader reference below is undefined.

ssd = SSD(resume=args.resume,
          classes=8,
          size=args.size,
          base=args.net,
          evalLoader=evalLoader,
          batch_size=args.batch,
          width=width,
          height=height,
          weights_only=True,
          save_dir="saves")

ssd.export_detections(small)
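# export_detections itself is not shown in this excerpt; a sketch of the
# pattern it implies, walking an eval loader and emitting one COCO-style
# result record per detection (the record fields, the loader's yield
# shape, and the inference return format are assumptions):
import json

def export_detections_sketch(model, loader, out_path, conf_threshold=0.2):
    results = []
    for image_id, (images, _) in enumerate(loader):
        detections = model.inference(images, conf=conf_threshold)
        if not detections:
            continue
        boxes, labels, scores = detections
        for box, label, score in zip(boxes, labels, scores):
            x1, y1, x2, y2 = [float(v) for v in box]
            results.append({"image_id": image_id,
                            "category_id": int(label),
                            "bbox": [x1, y1, x2 - x1, y2 - y1],
                            "score": float(score)})
    with open(out_path, "w") as f:
        json.dump(results, f)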
Example #4
small = ["PERSON", "BICYCLE", "CAR", "MOTOCYCLE", "BUS", "TRAIN", "TRUCK"]
head = ["Head"]

# set dataset
dataset = small

video = cv2.VideoCapture(args.video)

w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))

# set classes
ssd = SSD(args.weights,
          classes=len(dataset) + 1,
          phase='infer',
          width=w,
          height=h,
          size=args.size,
          base=args.net)

frame = 0
predictions = {}
timestamp = time.time()
batch = []

# for ssdtc
predictions[0] = []
predictions[1] = []

while video.isOpened():
    # leave last 4 frames if ssdtc
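    # Assumed continuation (the loop body is cut off in this excerpt):
    # read a frame, run inference with the signature used in the other
    # examples, and record the per-frame detections. The SSDTC path would
    # additionally buffer the last 4 frames to form 5-frame chunks.
    ok, img = video.read()
    if not ok:
        break
    detections = ssd.inference([img], conf=0.2)
    predictions[frame] = detections if detections else []
    frame += 1

video.release()
print("%d frames in %.1f s" % (frame, time.time() - timestamp))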
Example #5
# load images
for subdir, dirs, files in os.walk(args.dir):
    for image_file in files:
        image = cv2.imread(os.path.join(subdir, image_file))
        if image is not None:  # skip unreadable / non-image files
            pics.append(image)

width = 300
height = 300

FPS = []

# SSDTC
ssd = SSD(resume=args.weights + "ssdtc_c.pth",
          classes=2,
          base="ssdtc",
          width=width,
          height=height,
          phase='infer')

timestamp = time.time()
process(ssd, args.dir, args.batch)
duration = time.time() - timestamp
print("ssdtc: " + str(7500. / duration))
ssd = None
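# process() is defined elsewhere in this file; a minimal sketch of a
# batched inference benchmark matching the call above (the signature
# mirrors the call, the body is assumed and reuses the preloaded `pics`
# list rather than re-reading image_dir):

def process(model, image_dir, batch_size):
    # run inference over the preloaded images in fixed-size batches
    for start in range(0, len(pics), batch_size):
        model.inference(pics[start:start + batch_size], conf=0.2)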

# SSD

ssd = SSD(resume=args.weights + "hhead.pth",
          classes=2,
          size=34,
          base="resnet",
Example #6
coco = [x["name"] for x in coco]  # keep just the COCO category names

shapes = ["circle", "square", "triangle", "rectangle", "ellipse"]

small = ["PERSON", "BICYCLE", "CAR", "MOTOCYCLE", "BUS", "TRAIN", "TRUCK"]

# set dataset
dataset = small

w, h = 300, 300

# set the network type and number of classes
ssd = SSD(args.resume,
          classes=len(dataset) + 1,
          phase='infer',
          width=w,
          height=h,
          size=34,
          base="xception_H")

augment = Crop(size=(h, w))
for subdir, dirs, files in os.walk(args.dir):
    for image_file in files:
        image = cv2.imread(os.path.join(subdir, image_file))
        if image is None:  # skip unreadable / non-image files
            continue
        image, _ = augment(image)

        detections = ssd.inference([image], conf=0.2)
        t = ""

        if detections:
            loc, labels, conf = detections
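            # Assumed continuation (the snippet ends mid-branch above):
            # draw each detection, assuming `loc` holds pixel-space corner
            # boxes and `labels` indexes into `dataset` with background = 0.
            for box, label, score in zip(loc, labels, conf):
                x1, y1, x2, y2 = [int(v) for v in box]
                t = "%s %.2f" % (dataset[int(label) - 1], float(score))
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(image, t, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)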