Example 1
def evaluate_net(net, dataset, devkit_path, mean_pixels, data_shape,
                 model_prefix, epoch, ctx, year=None, sets='test',
                 batch_size=1, nms_thresh=0.5, force_nms=False):
    """
    Evaluate entire dataset, basically simple wrapper for detections

    Parameters:
    ---------
    dataset : str
        name of dataset to evaluate
    devkit_path : str
        root directory of dataset
    mean_pixels : tuple of float
        (R, G, B) mean pixel values
    data_shape : int
        resize input data shape
    model_prefix : str
        load model prefix
    epoch : int
        load model epoch
    ctx : mx.ctx
        running context, mx.cpu() or mx.gpu(0)...
    year : str or None
        evaluate on which year's data
    sets : str
        evaluation set
    batch_size : int
        using batch_size for evaluation
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : bool
        force suppress different categories
    """
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    if dataset == "pascal":
        if not year:
            year = '2007'
        imdb = PascalVoc(sets, year, devkit_path, shuffle=False, is_train=False)
        data_iter = DetIter(imdb, batch_size, data_shape, mean_pixels,
                            rand_samplers=[], rand_mirror=False, is_train=False, shuffle=False)
        sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))
        net = importlib.import_module("symbol_" + net) \
            .get_symbol(imdb.num_classes, nms_thresh, force_nms)
        model_prefix += "_" + str(data_shape)
        detector = Detector(net, model_prefix, epoch, data_shape, mean_pixels, batch_size, ctx)
        logger.info("Start evaluation with {} images, be patient...".format(imdb.num_images))
        detections = detector.detect(data_iter)
        imdb.evaluate_detections(detections)
    else:
        raise NotImplementedError("No support for dataset: " + dataset)
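
For context, a minimal invocation might look like the sketch below. It is assumed usage, not part of the original script: the paths, model prefix and network name are placeholders, and it presumes MXNet plus this project's symbol modules are importable.

import mxnet as mx

# Hypothetical call -- every path, prefix and name here is a placeholder.
evaluate_net("vgg16_reduced",              # resolved to symbol/symbol_vgg16_reduced.py
             dataset="pascal",
             devkit_path="/data/VOCdevkit",
             mean_pixels=(123, 117, 104),  # common (R, G, B) means for VOC-style training
             data_shape=300,
             model_prefix="model/ssd",     # the function appends "_300" before loading
             epoch=10,
             ctx=mx.gpu(0))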
Example 2
class Detect(RequestHandler):
    brute_detector = None
    alias_data = None
    data_response = None
    param_extractor = None
    path_extractor = None
    entity_factory = None

    def data_received(self, chunk):
        pass

    def initialize(self, alias_data):
        from detect.data.response import Response
        self.data_response = Response()
        self.data_response.open_connection()
        self.alias_data = alias_data
        self.param_extractor = ParamExtractor(self)
        self.path_extractor = PathExtractor(self)
        self.entity_factory = EntityFactory(self.alias_data)
        self.brute_detector = Detector(self.alias_data)

    def on_finish(self):
        pass

    @asynchronous
    def post(self, *args, **kwargs):
        self.set_header('Content-Type', 'application/json')

        detection_id = ObjectId()

        app_log.info(
            "app=detection,function=detect,detection_id=%s,application_id=%s,session_id=%s,q=%s",
            detection_id, self.param_extractor.application_id(),
            self.param_extractor.session_id(), self.param_extractor.query())

        if False:  # wit.ai branch disabled; the brute-force path below always runs
            url = "%smessage?v=%s&q=%s&msg_id=%s" % (
                WIT_URL, WIT_URL_VERSION,
                url_escape(self.param_extractor.query()), str(detection_id))
            r = HTTPRequest(url,
                            headers={"Authorization": "Bearer %s" % WIT_TOKEN})
            client = AsyncHTTPClient()
            client.fetch(r, callback=self.wit_call_back)
        else:
            date = datetime.now()
            outcomes = self.brute_detector.detect(self.param_extractor.query())
            self.data_response.insert(self.param_extractor.user_id(),
                                      self.param_extractor.application_id(),
                                      self.param_extractor.session_id(),
                                      detection_id,
                                      "brute",
                                      date,
                                      self.param_extractor.query(),
                                      outcomes=outcomes)

            self.set_status(202)
            self.set_header("Location", "/%s" % str(detection_id))
            self.set_header("_id", str(detection_id))
            self.finish()

            Worker(self.param_extractor.user_id(),
                   self.param_extractor.application_id(),
                   self.param_extractor.session_id(),
                   detection_id,
                   date,
                   self.param_extractor.query(),
                   self.param_extractor.skip_slack_log(),
                   detection_type="wit",
                   outcomes=outcomes).start()

    @asynchronous
    def get(self, detection_id, *args, **kwargs):
        data = self.data_response.get(
            self.path_extractor.detection_id(detection_id))
        if data is not None:
            self.set_header('Content-Type', 'application/json')
            self.set_status(200)
            self.finish(
                dumps({
                    "type": data["type"],
                    "q": data["q"],
                    "outcomes": data["outcomes"],
                    "_id": data["_id"],
                    "version": data["version"],
                    "timestamp": data["timestamp"]
                }))
        else:
            self.set_status(404)
            self.finish()

    def wit_call_back(self, response):
        data = json_decode(response.body)
        outcomes = []
        date = datetime.now()
        for outcome in data["outcomes"]:
            entities = []
            for _type in outcome["entities"].keys():
                if _type not in ["polite"]:
                    for value in outcome["entities"][_type]:
                        suggested = value.get("suggested", False)
                        key = (value["value"]["value"]
                               if isinstance(value["value"], dict)
                               else value["value"])
                        entity = self.entity_factory.create(
                            _type, key, suggested)

                        # TODO: this should be moved elsewhere, preferably into a separate service call
                        entities.append(entity)

            outcomes.append({
                "confidence": outcome["confidence"] * 100,
                "intent": outcome["intent"],
                "entities": entities
            })

        self.data_response.insert(self.param_extractor.user_id(),
                                  self.param_extractor.application_id(),
                                  self.param_extractor.session_id(),
                                  ObjectId(data["msg_id"]),
                                  "wit",
                                  date,
                                  self.param_extractor.query(),
                                  outcomes=outcomes)

        self.set_status(202)
        self.set_header("Location", "/%s" % data["msg_id"])
        self.set_header("_id", data["msg_id"])
        self.finish()

        Worker(self.param_extractor.user_id(),
               self.param_extractor.application_id(),
               self.param_extractor.session_id(),
               ObjectId(data["msg_id"]),
               date,
               self.param_extractor.query(),
               self.param_extractor.skip_slack_log(),
               detection_type="wit",
               outcomes=outcomes).start()
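
A handler like this is wired into a Tornado application at startup. The sketch below is an assumed, minimal setup (route patterns, port and the alias_data source are placeholders); it mainly shows how the dict in the route spec feeds initialize(alias_data=...).

from tornado.ioloop import IOLoop
from tornado.web import Application

alias_data = {}  # placeholder -- normally loaded from the project's alias store

app = Application([
    # kwargs in the third element of each route are passed to Detect.initialize()
    (r"/detect/?", Detect, dict(alias_data=alias_data)),
    (r"/detect/([0-9a-f]{24})", Detect, dict(alias_data=alias_data)),  # GET by ObjectId
])

if __name__ == "__main__":
    app.listen(8888)
    IOLoop.current().start()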
Example 3
import torch
from PIL import Image
from matplotlib import pyplot

from detect.detector import Detector

if __name__ == '__main__':

    image_path = r'E:\PyCharmProject\mtcnn\src\images\2.jpg'

    p_net_param = r'E:\PyCharmProject\mtcnn\config\p.pt'
    r_net_param = r'E:\PyCharmProject\mtcnn\config\r.pt'
    o_net_param = r'E:\PyCharmProject\mtcnn\config\o.pt'

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    detector = Detector(p_net_param, r_net_param, o_net_param, device)

    with Image.open(image_path) as img:
        print(img.size)
        boxes = detector.detect(img)
        print(boxes)
        pyplot.imshow(img)  # display the image so the boxes have something to overlay
        for box in boxes:
            x1 = int(box[0])
            y1 = int(box[1])
            x2 = int(box[2])
            y2 = int(box[3])

            pyplot.gca().add_patch(
                pyplot.Rectangle((x1, y1),
                                 width=x2 - x1,
                                 height=y2 - y1,
                                 edgecolor='red',
                                 facecolor='none'))
            # Landmark points, if the detector returns them, could be drawn with:
            # pyplot.scatter(int(box[5]), int(box[6]), color='green', marker='.')
            # pyplot.scatter(int(box[7]), int(box[8]), color='green', marker='.')

        pyplot.show()
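
To keep the annotated result rather than only displaying it, the figure can also be written to disk. A small assumed follow-up; these lines would go just before pyplot.show(), and the output path is a placeholder:

# Save the current figure with the drawn boxes; the path is a placeholder.
pyplot.axis('off')
pyplot.savefig('detections_2.jpg', bbox_inches='tight', pad_inches=0)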
Example 4
class Yolov3Trainer:
    def __init__(self, net, netfile_name, cfgfile=None):
        self.net = net
        self.netfile_name = netfile_name
        if cfgfile is not None:
            self.cfginit(cfgfile)
        utils.makedir(SAVE_DIR)
        parser = argparse.ArgumentParser(description="base class for network training")
        self.args = self.argparser(parser)

        net_savefile = "{0}.{1}".format(self.netfile_name, NETFILE_EXTENTION)
        self.save_dir = os.path.join(SAVE_DIR, "nets")
        utils.makedir(self.save_dir)
        self.save_path = os.path.join(self.save_dir, net_savefile)
        self.savepath_epoch = os.path.join(SAVEDIR_EPOCH, net_savefile)

        if os.path.exists(self.save_path) and CONTINUETRAIN:
            try:
                # Checkpoint saved as a state dict.
                self.net.load_state_dict(torch.load(self.save_path))
                print("net param load successful")
            except Exception:
                # Fall back to a checkpoint saved as a whole module.
                self.net = torch.load(self.save_path)
                print("net load successful")
        else:
            self.net.paraminit()
            print("param initial complete")

        if ISCUDA:
            self.net = self.net.to(DEVICE)


        self.logdir = os.path.join(SAVE_DIR, "log")

        utils.makedir(self.logdir)
        self.logfile = os.path.join(self.logdir, "{0}.txt".format(self.netfile_name))
        if not os.path.exists(self.logfile):
            with open(self.logfile, 'w') as f:
                print("%.2f %d    " % (0.00, 0), end='\r', file=f)
                print("logfile created")

        self.optimizer = optim.Adam(self.net.parameters())

        # Loss functions.
        self.conf_loss_fn = nn.BCEWithLogitsLoss()    # objectness / confidence loss
        self.center_loss_fn = nn.BCEWithLogitsLoss()  # center-offset loss
        self.wh_loss_fn = nn.MSELoss()                # width/height loss
        self.cls_loss_fn = nn.CrossEntropyLoss()      # classification loss

        self.detecter = Detector()

        print("initial complete")

    def cfginit(self, cfgfile):
        # Override module-level defaults with values from the config file,
        # trying int, float and bool parses before falling back to a string.
        config = configparser.ConfigParser()
        config.read(cfgfile)
        for key, value in config.items(self.netfile_name):
            name = key.upper()
            if name in globals():
                for getter in (config.getint, config.getfloat, config.getboolean, config.get):
                    try:
                        globals()[name] = getter(self.netfile_name, name)
                        break
                    except ValueError:
                        continue


    def argparser(self, parser):
        """default argparse, please customize it by yourself. """

        parser.add_argument("-e", "--epoch", type=int, default=EPOCH, help="number of epochs")
        parser.add_argument("-b", "--batch_size", type=int, default=BATCHSIZE, help="mini-batch size")
        parser.add_argument("-n", "--num_workers", type=int, default=NUMWORKERS,
                            help="number of threads used during batch generation")
        parser.add_argument("-l", "--lr", type=float, default=LR, help="learning rate for gradient descent")
        parser.add_argument("-r", "--record_point", type=int, default=RECORDPOINT, help="print frequency")
        parser.add_argument("-t", "--test_point", type=int, default=TESTPOINT,
                            help="interval between evaluations on validation set")
        parser.add_argument("-a", "--alpha", type=float, default=ALPHA, help="ratio of conf and offset loss")
        parser.add_argument("-d", "--threshold", type=float, default=THREHOLD, help="threhold")

        return parser.parse_args()

    def _loss_fn(self, output, target, alpha):
        # Reshape (N, C, H, W) -> (N, H, W, 3, 5 + num_classes): three anchors per cell.
        output = output.permute(0, 2, 3, 1)
        output = output.reshape(output.size(0), output.size(1), output.size(2), 3, -1)

        # Split cells that contain an object from empty ones via the confidence channel.
        mask_obj = target[..., 0] > 0
        mask_noobj = target[..., 0] == 0

        output_obj, target_obj = output[mask_obj], target[mask_obj]

        loss_obj_conf = self.conf_loss_fn(output_obj[:, 0], target_obj[:, 0])
        loss_obj_center = self.center_loss_fn(output_obj[:, 1:3], target_obj[:, 1:3])
        loss_obj_wh = self.wh_loss_fn(output_obj[:, 3:5], target_obj[:, 3:5])
        loss_obj_cls = self.cls_loss_fn(output_obj[:, 5:], target_obj[:, 5].long())
        loss_obj = loss_obj_conf + loss_obj_center + loss_obj_wh + loss_obj_cls

        output_noobj, target_noobj = output[mask_noobj], target[mask_noobj]
        loss_noobj = self.conf_loss_fn(output_noobj[:, 0], target_noobj[:, 0])

        loss = alpha * loss_obj + (1 - alpha) * loss_noobj
        return loss

    def logging(self, result, dataloader_len, RECORDPOINT):
        # The first line of the logfile holds a running batch counter;
        # result dicts are appended after it.
        with open(self.logfile, "r+") as f:
            if f.readline() == "":
                batchcount = 0
                f.seek(0, 0)
                print("%.2f %d        " % (0.00, 0), end='\r', file=f)
            else:
                f.seek(0, 0)
                batchcount = int(f.readline().split()[-1].strip()) + RECORDPOINT

            f.seek(0, 0)
            print("%.2f %d " % (batchcount / dataloader_len, batchcount), end='', file=f)
            f.seek(0, 2)
            print(result, file=f)

    def getstatistics(self):
        # Result lines start with '{'; the counter line starts with a digit.
        datalist = []
        with open(self.logfile) as f:
            for line in f.readlines():
                if not line[0].isdigit():
                    datalist.append(eval(line))
        return datalist

    def scalarplotting(self, datalist, key):
        save_dir = os.path.join(SAVE_DIR, key)
        utils.makedir(save_dir)
        save_name = "{0}.jpg".format(key)

        save_file = os.path.join(save_dir, save_name)
        values = []
        for data_dict in datalist:
            if data_dict:
                values.append(data_dict[key])
        if len(values) != 0:
            plt.plot(values)
            plt.savefig(save_file)
            plt.show()

    def FDplotting(self, net):
        save_dir = os.path.join(SAVE_DIR, "params")
        utils.makedir(save_dir)
        save_name = "{0}_param.jpg".format(self.netfile_name)
        save_file = os.path.join(save_dir, save_name)
        params = []
        for param in net.parameters():
            params.extend(param.view(-1).cpu().detach().numpy())
        params = np.array(params)
        histo = np.histogram(params, 10, range=(np.min(params), np.max(params)))
        plt.plot(histo[1][1:], histo[0])
        plt.savefig(save_file)
        plt.show()

    def train(self):
        dataset = YoloDataset(LABEL_PATH, PIC_DIR)
        train_loader = data.DataLoader(dataset, batch_size=self.args.batch_size, shuffle=True,
                                       num_workers=self.args.num_workers,
                                       drop_last=True)
        dataloader_len = len(train_loader)

        start_time = time.time()

        if os.path.exists(self.logfile):
            with open(self.logfile) as f:
                if f.readline() != "":
                    f.seek(0, 0)
                    batch_count = int(float(f.readline().split()[1]))

        for i in range(self.args.epoch):

            for j, (target13, target26, target52, img_data) in enumerate(train_loader):

                self.net.train()
                if ISCUDA:
                    target13 = target13.to(DEVICE)
                    target26 = target26.to(DEVICE)
                    target52 = target52.to(DEVICE)
                    img_data = img_data.to(DEVICE)

                output_13, output_26, output_52 = self.net(img_data)

                loss_13 = self._loss_fn(output_13, target13, alpha=ALPHA)
                loss_26 = self._loss_fn(output_26, target26, alpha=ALPHA)
                loss_52 = self._loss_fn(output_52, target52, alpha=ALPHA)
                loss = loss_13 + loss_26 + loss_52
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                if j % self.args.record_point == 0:

                    checktime = time.time() - start_time

                    result = "{'epoch':%d,'batch':%d,'loss':%.5f,'loss_13':%.5f,'loss_26':%.5f,'loss_52':%.5f,'total_time':%.2f,'time':%s}" % (
                        i, j, loss, loss_13, loss_26, loss_52, checktime,
                        time.strftime("%Y%m%d%H%M%S", time.localtime()))
                    print(result)

                    # self.logging(result, dataloader_len, self.args.record_point)
                    if NEEDSAVE:
                        # torch.save(self.net.state_dict(), self.save_path)
                        torch.save(self.net, self.save_path)
                        print("net save successful")

                if NEEDTEST and j % self.args.test_point == 0:
                    self.net.eval()

                    batch_count = i
                    self.test(batch_count, j)
            if NEEDSAVE:
                torch.save(self.net.state_dict(), self.savepath_epoch)
                # torch.save(self.net, self.savepath_epoch)
                # print("an epoch save successful")

    def test(self, batch_count, j):
        with torch.no_grad():
            self.net.eval()

            img = Image.open(TEST_IMG)
            # img_ = cv2.imread(TEST_IMG)

            last_boxes = self.detecter.detect(img, self.args.threshold, net=self.net)

            draw = ImageDraw.Draw(img)
            font = ImageFont.truetype(font="arial.ttf", size=10, encoding="utf-8")

            if np.any(last_boxes):
                for box in last_boxes:
                    xybox = box[:4].astype("i4")
                    text_x, text_y = box[0], box[1] - 10
                    text_conf = box[0] + 30
                    draw.text((text_x, text_y), cfg.COCO_DICT[int(box[5])], fill=(255, 0, 0), font=font)
                    draw.text((text_conf, text_y), "%.2f" % box[4], fill=(255, 0, 0), font=font)
                    draw.rectangle(list(xybox), outline="green", width=2)

            # img.show()
            if NEEDSAVE:
                testpic_savedir = os.path.join(SAVE_DIR, "testpic", self.netfile_name)
                utils.makedir(testpic_savedir)
                testpic_savefile = os.path.join(testpic_savedir, "{0}_{1}.jpg".format(batch_count, j))
                img.save(testpic_savefile)

            if NEEDSHOW:
                plt.clf()
                plt.axis("off")
                plt.imshow(img)
                plt.pause(0.1)
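
For completeness, driving this trainer might look like the sketch below. It is assumed usage: Yolov3Net, the config section name and the cfg path are placeholders, and the module-level constants (SAVE_DIR, LABEL_PATH, DEVICE, ...) are expected to come from the project's config module.

# Hypothetical entry point -- class name, section name and cfg path are placeholders.
if __name__ == '__main__':
    net = Yolov3Net()  # the darknet-style model this trainer expects
    trainer = Yolov3Trainer(net, "yolov3_coco", cfgfile="train.cfg")
    trainer.train()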