def __init__(self,
                 img_size,
                 weights,
                 device='',
                 half=True,
                 conf_ths=0.4,
                 iou_ths=0.5,
                 augmented_inference=False,
                 classes_subset=None,
                 agnostic_nms=False):
        self._img_size = img_size
        self._weights = weights
        self._half = half
        self._augmented_inference = augmented_inference
        self._conf_ths = conf_ths
        self._iou_ths = iou_ths
        self._classes_subset = classes_subset
        self._agnostic_nms = agnostic_nms

        # Initialize
        self._device = torch_utils.select_device(device)

        self._model = attempt_load(self._weights, map_location=self._device)  # load FP32 model
        if self._half:
            self._model.half()  # switch to FP16 for faster CUDA inference
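A minimal usage sketch of this wrapper (the enclosing class name is not shown in the snippet, so `Yolov5Detector` below is a placeholder):

detector = Yolov5Detector(img_size=640,
                          weights='yolov5s.pt',  # path to a YOLOv5 checkpoint
                          device='0',            # '' lets select_device pick automatically
                          half=True,             # FP16 inference
                          conf_ths=0.4,
                          iou_ths=0.5)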
Example 2
import os
import time
import random

import cv2
import numpy as np
import torch

from utils.utils import torch_utils, non_max_suppression


def yolov5_detect(YOLOV5_CFG, reid):
    device = torch_utils.select_device(YOLOV5_CFG.device)
    model = torch.load(YOLOV5_CFG.weights, map_location=device)['model']
    #model = torch.load(YOLOV5_CFG.weights, map_location=device)
    model.to(device).eval()
    names = model.names if hasattr(model, 'names') else model.module.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    
    if os.path.isfile(YOLOV5_CFG.source):
        cap = cv2.VideoCapture(YOLOV5_CFG.source)
        # source video properties: width, height, fps, total frame count
        w0, h0 = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps, max_index = int(cap.get(cv2.CAP_PROP_FPS)), int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        w1, h1 = YOLOV5_CFG.im_size  # network input size
        if YOLOV5_CFG.output:
            out = cv2.VideoWriter(YOLOV5_CFG.output, cv2.VideoWriter_fourcc(*'XVID'), fps, (w0, h0))
        for i in range(max_index):
            ret, im = cap.read()
            if not ret:
                break
            # resize, scale to [0, 1], HWC -> CHW, reshape to (1, 3, h1, w1)
            net_input = np.transpose(cv2.resize(im, (w1, h1)) / 255, (2, 0, 1)).reshape((-1, 3, h1, w1))
            net_input = torch.from_numpy(net_input).to(device).type(torch.float32)
            t = time.time()
            pred = model(net_input, augment=YOLOV5_CFG.augment)[0]
            pred = non_max_suppression(pred, YOLOV5_CFG.conf_thres, YOLOV5_CFG.iou_thres,
                                       fast=True, classes=YOLOV5_CFG.classes,
                                       agnostic=YOLOV5_CFG.agnostic_nms)
            if pred is None or pred[0] is None:
                # no detections: show/write the raw frame and move on
                cv2.imshow('A', im)
                if YOLOV5_CFG.output:
                    out.write(im)
                continue
            det = pred[0]
            bboxes = det[:, :4].cpu().detach().numpy()
            confs = det[:, 4].cpu().detach().numpy()
            cats = det[:, 5].cpu().detach().numpy()
            # rescale boxes from network input resolution back to the source frame
            bboxes[:, [0, 2]] *= w0 / w1
            bboxes[:, [1, 3]] *= h0 / h1
            list_bbox, list_conf = [], []
            for bbox, conf, cat in zip(bboxes.astype(int), confs, cats.astype(int)):
                if (names[cat]=='person'):
                    #p_min,p_max = (bbox[0],bbox[1]),(bbox[2],bbox[3])
                    #im = cv2.rectangle(im, p_min, p_max, (255,0,123), 1, cv2.LINE_AA)
                    #im = cv2.putText(im, '%s %.2f'%(names[cat],conf), p_min, cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
                    (x1, y1, x2, y2) = bbox
                    # xyxy -> (cx, cy, w, h) for the ReID tracker
                    bbox_ = (int(abs((x1 + x2) * 0.5)), int(abs((y1 + y2) * 0.5)),
                             int(abs(x1 - x2)), int(abs(y1 - y2)))
                    list_bbox.append(bbox_)
                    list_conf.append(conf)
            # Cross-video ReID: associate person detections with track identities
            out_bboxs, identities, confidence, color, frame = reid.cross_video(list_bbox, list_conf, im)
            display(out_bboxs, im, identities, confidence, color, t)
            t = time.time()
            if YOLOV5_CFG.output:
                out.write(im)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        if YOLOV5_CFG.output:
            out.release()
        cv2.destroyAllWindows()
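The function above reads all of its settings off a single config object; a minimal sketch of one, assuming `types.SimpleNamespace` and illustrative values:

from types import SimpleNamespace

YOLOV5_CFG = SimpleNamespace(
    device='0',             # CUDA device id, or '' / 'cpu'
    weights='yolov5s.pt',   # checkpoint dict containing a 'model' entry
    source='input.avi',     # path to the source video
    im_size=[640, 384],     # [w1, h1] network input size
    output='out.avi',       # output video path, or False to skip writing
    augment=False,          # test-time augmentation flag
    conf_thres=0.4,         # confidence threshold for NMS
    iou_thres=0.5,          # IoU threshold for NMS
    classes=None,           # optional class-index filter
    agnostic_nms=False,     # class-agnostic NMS
)
yolov5_detect(YOLOV5_CFG, reid)  # `reid` is the cross-video ReID tracker instance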
Example 3
import json
import os

import numpy as np
import torch

from old_bridge import Bridge, crop_boxes, calcu_dist
from cityperson import CityPersonDetection, json2tensor, res2json
from detection import Detection
from reidentification import Reid
from utils.datasets import letterbox
from utils.utils import torch_utils, scale_coords, xyxy2ltxywh

# data_loader = LoadImages("data/samples")
data_loader = CityPersonDetection(image_set="val")
device = torch_utils.select_device(force_cpu=False)

detector = Detection(data_loader=data_loader, device=device)
reid = Reid(device=device)
bridge = Bridge(detector.conf_thres, detector.nms_thres, anom_thres=1.8)

query_feats = torch.Tensor()
result = []
with torch.no_grad():
    for i, (path, img0, target) in enumerate(data_loader):
        # for (_, _, img0, _) in data_loader:
        # Images are loaded one at a time; batch_size handling is not implemented yet.
        # Resize, make memory contiguous, BGR2RGB, HWC2CHW, normalize.
        # Padded resize
        img, *_ = letterbox(
            img0, new_shape=640)  # img: smallest padded input rectangle, e.g. (416, 320, 3)
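        # A sketch (assumed, not from the original snippet) of the remaining
        # preprocessing steps the comments above describe:
        img = img[:, :, ::-1].transpose(2, 0, 1)   # BGR -> RGB, HWC -> CHW
        img = np.ascontiguousarray(img, dtype=np.float32) / 255.0  # normalize to [0, 1]
        img = torch.from_numpy(img).unsqueeze(0).to(device)        # add batch dimension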
Example 4
import time, random, copy
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import utils.utils

from utils.utils import torch_utils

device = torch_utils.select_device('0')

epsilon = 1e-7
si_c = 1e-5
SI_parameters = {
    'previous_task': {},   # parameter values consolidated at the end of the last task
    'old_parameters': {},  # parameter values at the last optimizer step
    'omega': {},           # per-parameter importance estimates
    'W': {}                # running path-integral contribution for the current task
}

# --- Synaptic Intelligence (SI) Specific Functions ---
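# A minimal sketch (assumed, not from the original listing) of how the buffers
# above are typically used in SI (Zenke et al., 2017): `W` accumulates the
# per-parameter path integral -grad * delta_theta after each optimizer step,
# `omega` is consolidated once per task, and a surrogate loss scaled by `si_c`
# penalizes drift away from the consolidated weights.

def si_accumulate_W(model, grads):
    # grads: dict mapping pname -> gradient captured before optimizer.step()
    for n, p in model.named_parameters():
        pname = n.replace('.', '_')
        if pname in grads and pname in SI_parameters['old_parameters']:
            delta = p.detach() - SI_parameters['old_parameters'][pname]
            SI_parameters['W'][pname] = SI_parameters['W'].get(pname, 0.) - grads[pname] * delta
            SI_parameters['old_parameters'][pname] = p.detach().clone()

def si_consolidate_omega(model):
    # End of task: omega += W / ((theta - theta_prev_task)^2 + epsilon), then reset W.
    for n, p in model.named_parameters():
        pname = n.replace('.', '_')
        if pname in SI_parameters['W']:
            prev = SI_parameters['previous_task'].get(pname, torch.zeros_like(p))
            change = p.detach() - prev
            SI_parameters['omega'][pname] = (SI_parameters['omega'].get(pname, 0.)
                                             + SI_parameters['W'][pname] / (change ** 2 + epsilon))
            SI_parameters['previous_task'][pname] = p.detach().clone()
            SI_parameters['W'][pname] = torch.zeros_like(p)

def si_surrogate_loss(model):
    # Quadratic penalty si_c * sum(omega * (theta - theta_tilde)^2) added to the task loss.
    loss = torch.zeros(1, device=device)
    for n, p in model.named_parameters():
        pname = n.replace('.', '_')
        if pname in SI_parameters['omega']:
            prev = SI_parameters['previous_task'][pname]
            loss = loss + (SI_parameters['omega'][pname] * (p - prev) ** 2).sum()
    return si_c * loss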


def si_register_parameters(model):
    # Snapshot every trainable parameter so SI can track its path integral.
    for n, p in model.named_parameters():
        pname = n.replace('.', '_')  # dots are not allowed in buffer names
        if 'YOLOLayer' in pname: