def __init__(self, net, netfile_name, cfgfile=None):
    self.net = net
    self.netfile_name = netfile_name
    print(cfgfile)
    if cfgfile is not None:
        self.cfginit(cfgfile)
    print(1)
    print(SAVE_DIR)
    utils.makedir(SAVE_DIR)
    parser = argparse.ArgumentParser(description="base class for network training")
    self.args = self.argparser(parser)
    net_savefile = "{0}.{1}".format(self.netfile_name, NETFILE_EXTENTION)
    self.save_dir = os.path.join(SAVE_DIR, "nets")
    utils.makedir(self.save_dir)
    self.save_path = os.path.join(self.save_dir, net_savefile)
    self.savepath_epoch = os.path.join(SAVEDIR_EPOCH, net_savefile)
    if os.path.exists(self.save_path) and CONTINUETRAIN:
        try:
            self.net.load_state_dict(torch.load(self.save_path))
            print("net param load successful")
        except Exception:
            self.net = torch.load(self.save_path)
            print("net load successful")
    else:
        self.net.paraminit()
        print("param initial complete")
    if ISCUDA:
        self.net = self.net.to(DEVICE)
    if NEEDTEST:
        self.detecter = Detector()
    self.logdir = os.path.join(SAVE_DIR, "log")
    utils.makedir(self.logdir)
    self.logfile = os.path.join(self.logdir, "{0}.txt".format(self.netfile_name))
    if not os.path.exists(self.logfile):
        with open(self.logfile, 'w') as f:
            print("%.2f %d " % (0.00, 0), end='\r', file=f)
        print("logfile created")
    self.optimizer = optim.Adam(self.net.parameters())

    # loss function definitions
    self.conf_loss_fn = nn.BCEWithLogitsLoss()    # confidence loss
    self.center_loss_fn = nn.BCEWithLogitsLoss()  # center-point offset loss
    self.wh_loss_fn = nn.MSELoss()                # width/height loss
    # self.cls_loss_fn = torch.nn.CrossEntropyLoss()  # cross-entropy classification loss
    self.cls_loss_fn = nn.CrossEntropyLoss()

    self.detecter = Detector()
    print("initial complete")
def run_camera(args, ctx):
    assert args.batch_size == 1, "only batch size of 1 is supported"
    logging.info("Detection threshold is {}".format(args.thresh))
    iter = CameraIterator(frame_resize=parse_frame_resize(args.frame_resize))
    class_names = parse_class_names(args.class_names)
    mean_pixels = (args.mean_r, args.mean_g, args.mean_b)
    data_shape = int(args.data_shape)
    batch_size = int(args.batch_size)
    detector = Detector(
        get_symbol(args.network, data_shape, num_classes=len(class_names)),
        network_path(args.prefix, args.network, data_shape),
        args.epoch,
        data_shape,
        mean_pixels,
        batch_size,
        ctx
    )
    for frame in iter:
        logging.info("Frame info: shape %s type %s", frame.shape, frame.dtype)
        logging.info("Generating batch")
        data_batch = detector.create_batch(frame)
        logging.info("Detecting objects")
        detections_batch = detector.detect_batch(data_batch)
        # detections = [mx.nd.array((1, 1, 0.2, 0.2, 0.4, 0.4))]
        detections = detections_batch[0]
        logging.info("%d detections", len(detections))
        for det in detections:
            obj = det.asnumpy()
            (klass, score, x0, y0, x1, y1) = obj
            if score > args.thresh:
                draw_detection(frame, obj, class_names)
        cv2.imshow('frame', frame)
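# A minimal usage sketch for run_camera, not part of the original source. It
# builds an argparse-style namespace with exactly the fields run_camera reads
# above; the network name, model prefix, camera settings, and the string format
# expected by parse_class_names are assumptions, and the helpers (CameraIterator,
# get_symbol, network_path, ...) are assumed to be in scope from the same module.
import argparse
import mxnet as mx

args = argparse.Namespace(
    batch_size=1,
    thresh=0.5,
    frame_resize=None,               # assumed: no resizing of camera frames
    class_names='aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, '
                'cow, diningtable, dog, horse, motorbike, person, pottedplant, '
                'sheep, sofa, train, tvmonitor',
    mean_r=123, mean_g=117, mean_b=104,
    data_shape=300,
    network='vgg16_reduced',         # assumed network name
    prefix='./model/ssd',            # assumed checkpoint prefix
    epoch=0,
)
run_camera(args, mx.cpu())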
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx,
                 nms_thresh=0.5, force_nms=True):
    """
    wrapper for initializing a detector

    Parameters:
    ----------
    net : str
        test network name
    prefix : str
        load model prefix
    epoch : int
        load model epoch
    data_shape : int
        resize image shape
    mean_pixels : tuple (float, float, float)
        mean pixel values (R, G, B)
    ctx : mx.ctx
        running context, mx.cpu() or mx.gpu(?)
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : bool
        force suppress different categories
    """
    sys.path.append(os.path.join(os.getcwd(), 'symbol'))
    if net is not None:
        net = importlib.import_module("symbol_" + net) \
            .get_symbol(len(CLASSES), nms_thresh, force_nms)
    detector = Detector(net, prefix + "_" + str(data_shape), epoch,
                        data_shape, mean_pixels, ctx=ctx)
    return detector
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class,
                 nms_thresh=0.5, force_nms=True, nms_topk=400):
    """
    wrapper for initializing a detector

    Parameters:
    ----------
    net : str
        test network name
    prefix : str
        load model prefix
    epoch : int
        load model epoch
    data_shape : int
        resize image shape
    mean_pixels : tuple (float, float, float)
        mean pixel values (R, G, B)
    ctx : mx.ctx
        running context, mx.cpu() or mx.gpu(?)
    num_class : int
        number of classes
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : bool
        force suppress different categories
    nms_topk : int
        apply NMS to the top k detections
    """
    if net is not None:
        net = get_symbol(net, data_shape, num_classes=num_class,
                         nms_thresh=nms_thresh, force_nms=force_nms,
                         nms_topk=nms_topk)
    detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx)
    return detector
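# A minimal usage sketch, not part of the original source: the network name,
# checkpoint prefix, epoch, and data shape are placeholder assumptions; the call
# matches the get_detector signature defined directly above, and get_symbol /
# Detector are assumed to be importable from the surrounding module.
import mxnet as mx

detector = get_detector(net='vgg16_reduced',        # assumed symbol name
                        prefix='./model/ssd_300',   # assumed checkpoint prefix
                        epoch=0,
                        data_shape=300,
                        mean_pixels=(123, 117, 104),
                        ctx=mx.cpu(),
                        num_class=20,               # e.g. the 20 PASCAL VOC classes
                        nms_thresh=0.5,
                        force_nms=True)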
def initialize(self, alias_data):
    from detect.data.response import Response
    self.data_response = Response()
    self.data_response.open_connection()
    self.alias_data = alias_data
    self.param_extractor = ParamExtractor(self)
    self.path_extractor = PathExtractor(self)
    self.entity_factory = EntityFactory(self.alias_data)
    self.brute_detector = Detector(self.alias_data)
def peopleDetect():
    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
               'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    cap = cv2.VideoCapture(1)
    net = None
    prefix = os.path.join(os.getcwd(), 'model', 'yolo2_darknet19_416')
    epoch = 0
    mean_pixels = (123, 117, 104)
    ctx = mx.gpu(0)
    global numPeople
    global isNotQuit
    count = 0
    ret1, frame1 = cap.read()
    # data_shape and batch are module-level globals defined elsewhere
    detector = Detector(net, prefix, epoch, data_shape, mean_pixels,
                        ctx=ctx, batch_size=batch)
    while isNotQuit:
        count += 1
        ret, frame = cap.read()
        ims = [cv2.resize(frame, (data_shape, data_shape)) for i in range(batch)]
        data = get_batch(ims)
        start = timer()
        det_batch = mx.io.DataBatch(data, [])
        detector.mod.forward(det_batch, is_train=False)
        detections = detector.mod.get_outputs()[0].asnumpy()
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        time_elapsed = timer() - start
        # print("Detection time for {} images: {:.4f} sec , fps : {:.4f}".format(batch * 1, time_elapsed, (batch * 1 / time_elapsed)))
        numPeople, numChair = detector.show_result(frame, det, CLASSES, 0.5,
                                                   batch * 1 / time_elapsed)
        # if count > 40:
        #     isNotQuit = False
        #     break
    cap.release()
    cv2.destroyAllWindows()
def evaluate_net(net, dataset, devkit_path, mean_pixels, data_shape,
                 model_prefix, epoch, ctx, year=None, sets='test',
                 batch_size=1, nms_thresh=0.5, force_nms=False):
    """
    Evaluate entire dataset, basically a simple wrapper for detections

    Parameters:
    ----------
    dataset : str
        name of dataset to evaluate
    devkit_path : str
        root directory of dataset
    mean_pixels : tuple of float
        (R, G, B) mean pixel values
    data_shape : int
        resize input data shape
    model_prefix : str
        load model prefix
    epoch : int
        load model epoch
    ctx : mx.ctx
        running context, mx.cpu() or mx.gpu(0)...
    year : str or None
        evaluate on which year's data
    sets : str
        evaluation set
    batch_size : int
        batch size used for evaluation
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : bool
        force suppress different categories
    """
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if dataset == "pascal":
        if not year:
            year = '2007'
        imdb = PascalVoc(sets, year, devkit_path, shuffle=False, is_train=False)
        data_iter = DetIter(imdb, batch_size, data_shape, mean_pixels,
                            rand_samplers=[], rand_mirror=False,
                            is_train=False, shuffle=False)
        sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))
        net = importlib.import_module("symbol_" + net) \
            .get_symbol(imdb.num_classes, nms_thresh, force_nms)
        model_prefix += "_" + str(data_shape)
        detector = Detector(net, model_prefix, epoch, data_shape, mean_pixels,
                            batch_size, ctx)
        logger.info("Start evaluation with {} images, be patient...".format(imdb.num_images))
        detections = detector.detect(data_iter)
        imdb.evaluate_detections(detections)
    else:
        raise NotImplementedError("No support for dataset: " + dataset)
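# A minimal usage sketch, not part of the original source: the devkit path,
# checkpoint prefix, and epoch are placeholder assumptions; the argument order
# follows the evaluate_net signature defined directly above, and PascalVoc,
# DetIter, and Detector are assumed to be in scope from the surrounding module.
import mxnet as mx

evaluate_net(net='vgg16_reduced',               # assumed symbol name
             dataset='pascal',
             devkit_path='./data/VOCdevkit',    # assumed VOC root directory
             mean_pixels=(123, 117, 104),
             data_shape=300,
             model_prefix='./model/ssd',        # assumed checkpoint prefix
             epoch=0,
             ctx=mx.cpu(),
             year='2007',
             sets='test',
             batch_size=1)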
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx,
                 nms_thresh=0.5, force_nms=True):
    sys.path.append(os.path.join(os.getcwd(), 'symbol'))
    net = importlib.import_module("symbol_" + net) \
        .get_symbol(len(CLASSES), nms_thresh, force_nms)
    detector = Detector(net, prefix + "_" + str(data_shape), epoch,
                        data_shape, mean_pixels, ctx=ctx)
    return detector
def get_mxnet_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, batch_size=1):
    detector = Detector(net, prefix, epoch, data_shape, mean_pixels,
                        ctx=ctx, batch_size=batch_size)
    return detector
min_face_size = 24
stride = 2
slide_window = False
shuffle = False

detectors = []
prefix = ['detect/MTCNN_model/PNet_landmark/PNet',
          'detect/MTCNN_model/RNet_landmark/RNet',
          'detect/MTCNN_model/ONet_landmark/ONet']
epoch = [18, 14, 16]
batch_size = [2048, 256, 16]
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
detectors.append(FcnDetector(P_Net, model_path[0]))
detectors.append(Detector(R_Net, 24, batch_size[1], model_path[1]))
detectors.append(Detector(O_Net, 48, batch_size[2], model_path[2]))
mtcnn_detector = MtcnnDetector(detectors=detectors,
                               min_face_size=min_face_size,
                               stride=stride,
                               threshold=thresh,
                               slide_window=slide_window)

# Init another version of MtcnnDetector
print('Creating networks and loading parameters')
minsize = 20                 # minimum size of face
threshold = [0.6, 0.7, 0.7]  # three steps' thresholds
factor = 0.709               # scale factor
margin = 44
with tf.Graph().as_default():
import os

import redis
from tld import get_fld  # get_fld comes from the tld package
from multiprocessing import Process
from urllib.parse import urlparse

from capture.har import Har
from capture.chrome import Chrome
from database.observer import Observer
from detect.detector import Detector

REDIS_SERVER = os.environ['REDIS_SERVER']
REDIS_PASSWORD = os.environ['REDIS_PASSWORD']
REDIS_TOPIC_OBSERVER_URLS = os.environ['REDIS_TOPIC_OBSERVER_URLS']

redis = redis.StrictRedis(host=REDIS_SERVER, password=REDIS_PASSWORD)

har = Har()
observer = Observer()
chrome = Chrome()
detector = Detector(observer)


def get_origin(url):
    if url.startswith('http'):
        origin = get_fld(url)
    else:
        origin = get_fld('http://' + url)
    return origin


# def do_observe(id, observer_url, language):
#
#     # origin = get_origin(observer_url)
#
#     # data = har.capture(observer_url)
#     # print(data)
#
import torch
from PIL import Image
from matplotlib import pyplot

from detect.detector import Detector

if __name__ == '__main__':
    image_path = r'E:\PyCharmProject\mtcnn\src\images\2.jpg'
    p_net_param = r'E:\PyCharmProject\mtcnn\config\p.pt'
    r_net_param = r'E:\PyCharmProject\mtcnn\config\r.pt'
    o_net_param = r'E:\PyCharmProject\mtcnn\config\o.pt'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    detector = Detector(p_net_param, r_net_param, o_net_param, device)
    with Image.open(image_path) as img:
        print(img.size)
        boxes = detector.detect(img)
        print(boxes)
        for box in boxes:
            x1 = int(box[0])
            y1 = int(box[1])
            x2 = int(box[2])
            y2 = int(box[3])
            pyplot.gca().add_patch(
                pyplot.Rectangle((x1, y1), width=x2 - x1, height=y2 - y1,
img = './data/demo/dog.jpg'
net = 'darknet19_yolo'
sys.path.append(os.path.join(os.getcwd(), 'symbol'))
net = importlib.import_module("symbol_" + net) \
    .get_symbol(len(CLASSES), nms_thresh=0.5, force_nms=True)
prefix = os.path.join(os.getcwd(), 'model', 'yolo2_darknet19_416')
epoch = 0
data_shape = 608
mean_pixels = (123, 117, 104)
ctx = mx.gpu(0)
batch = 3
detector = Detector(net, prefix, epoch, data_shape, mean_pixels,
                    ctx=ctx, batch_size=batch)
ims = [cv2.resize(cv2.imread(img), (data_shape, data_shape)) for i in range(batch)]


def get_batch(imgs):
    img_len = len(imgs)
    l = []
    for i in range(batch):
        if i < img_len:
            img = np.swapaxes(imgs[i], 0, 2)