Example 1
import argparse
import os

import torch

from configs.CC import Config
from tqdm import tqdm
from utils.core import *

parser = argparse.ArgumentParser(description='M2Det Testing')
parser.add_argument('-c', '--config', default='configs/m2det320_vgg.py', type=str)
parser.add_argument('-d', '--dataset', default='COCO', help='VOC or COCO version')
parser.add_argument('-m', '--trained_model', default=None, type=str, help='Trained state_dict file path to open')
parser.add_argument('--test', action='store_true', help='to submit a test file')
args = parser.parse_args()

print_info('----------------------------------------------------------------------\n'
           '|                       M2Det Evaluation Program                     |\n'
           '----------------------------------------------------------------------', ['yellow','bold'])
global cfg
cfg = Config.fromfile(args.config)
if not os.path.exists(cfg.test_cfg.save_folder):
    os.mkdir(cfg.test_cfg.save_folder)
anchor_config = anchors(cfg)
print_info('The Anchor info: \n{}'.format(anchor_config))
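# PriorBox builds the default (anchor) boxes described by the anchor config;
# they depend only on the config, not on the input image, so they are
# generated once here and reused for every test image.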
priorbox = PriorBox(anchor_config)
with torch.no_grad():
    priors = priorbox.forward()
    if cfg.test_cfg.cuda:
        priors = priors.cuda()

def test_net(save_folder, net, detector, cuda, testset, transform, max_per_image=300, thresh=0.005):
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    num_images = len(testset)
Example 2
                difficult = False
            else:
                difficult = int(obj.find('difficult').text) == 1
            if not self.keep_difficult and difficult:
                continue
            name = obj.find('name').text.lower().strip()
            bbox = obj.find('bndbox')

            pts = ['xmin', 'ymin', 'xmax', 'ymax']
            bndbox = []
            for i, pt in enumerate(pts):
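                # VOC bounding boxes are stored with 1-based pixel coordinates;
                # subtracting 1 converts them to the 0-based coordinates used here.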
                cur_pt = int(bbox.find(pt).text) - 1
                bndbox.append(cur_pt)
            label_idx = self.class_names.index(name)
            bndbox.append(label_idx)
            res = np.vstack((res,bndbox))  # [xmin, ymin, xmax, ymax, label_ind]
        return res  # [[xmin, ymin, xmax, ymax, label_ind], ... ]



if __name__ == '__main__':
    from configs.CC import Config
    from utils.core import config_compile
    from data import preproc
    cfg = Config.fromfile("./configs/m2det320_resnet101.py")
    config_compile(cfg)
    _preproc = preproc(cfg.model.input_size, cfg.model.rgb_means, cfg.model.p)
    dataset = GeneralImageDataset(root_dir="./dataset",
                                  class_names=cfg.model.m2det_config.class_names,
                                  transforms=_preproc)
    print(dataset[0])
Example 3
                                 v,
                                 kernel_size=3,
                                 stride=stride[k],
                                 padding=padding[k])
                ]
            else:
                layers += [
                    conv_relu(in_channels,
                              v,
                              kernel_size=3,
                              stride=stride[k],
                              padding=padding[k])
                ]
        in_channels = v

    return layers


if __name__ == "__main__":
    """Testing
    """
    cfg = Config.fromfile('configs/Pelee_VOC.py')
    model = ShuffleNet("test", 304, cfg.model)
    #init_net(model, cfg, None)
    dummy_input = torch.rand([10, 3, 304, 304])
    #with SummaryWriter(comment='ShuffleNet') as w:
    #  w.add_graph(model, (dummy_input,), verbose=True)
    y = model(dummy_input)
    g = make_dot(y)
    g.view()
Example 4
from configs.CC import Config

global cfg
cfg = Config.fromfile("m2det320_vgg.py")

print(cfg.model.input_size)
print(cfg.model.m2det_config)
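# Note: the file passed to Config.fromfile is itself plain Python; the loader
# runs it and exposes its top-level dicts (model, train_cfg, test_cfg, ...) as
# attribute-style fields. A minimal illustrative sketch of such a file -- the
# values below are placeholders, not the contents of the real m2det320_vgg.py:
#
#   model = dict(
#       input_size=320,                     # read above as cfg.model.input_size
#       m2det_config=dict(num_classes=81),  # read above as cfg.model.m2det_config
#   )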
Example 5
    def Model_Params(self, model_dir="output", use_gpu=True):
        '''
        User Function - Set Model Params

        Args:
            model_dir (str): Directory containing the trained model and its final config
            use_gpu (bool): If True, run inference on the GPU; otherwise run on the CPU
        Returns:
            None
        '''

        with open(model_dir + "/config_final.py", 'r') as f:
            lines = f.read()

        if not use_gpu:
            lines = lines.replace("cuda=True", "cuda=False")

        with open(model_dir + "/config_test.py", 'w') as f:
            f.write(lines)


        print("Loading model for inference");
        self.system_dict["cfg"] = Config.fromfile(model_dir +"/config_test.py")
        anchor_config = anchors(self.system_dict["cfg"].model)
        self.system_dict["priorbox"] = PriorBox(anchor_config)
        self.system_dict["net"] = build_net('test', self.system_dict["cfg"].model.input_size, self.system_dict["cfg"].model)
        init_net(self.system_dict["net"], self.system_dict["cfg"], model_dir + "/VOC/Final_Pelee_VOC_size304.pth")
        print_info('===> Finished constructing and loading model', ['yellow', 'bold'])
        self.system_dict["net"].eval()

        with torch.no_grad():
            self.system_dict["priors"] = self.system_dict["priorbox"].forward()
            if self.system_dict["cfg"].test_cfg.cuda:
                self.system_dict["net"] = self.system_dict["net"].cuda()
                self.system_dict["priors"] = self.system_dict["priors"].cuda()
                cudnn.benchmark = True
            else:
                self.system_dict["net"] = self.system_dict["net"].cpu()
        self.system_dict["_preprocess"] = BaseTransform(self.system_dict["cfg"].model.input_size, 
                                        self.system_dict["cfg"].model.rgb_means, (2, 0, 1))
        self.system_dict["num_classes"] = self.system_dict["cfg"].model.num_classes
        self.system_dict["detector"] = Detect(self.system_dict["num_classes"],
                                                self.system_dict["cfg"].loss.bkg_label, anchor_config)
                
        print("Done....");


        print("Loading other params");
        base = int(np.ceil(pow(self.system_dict["num_classes"], 1. / 3)))
        self.system_dict["colors"] = [self._to_color(x, base)
                  for x in range(self.system_dict["num_classes"])]
        cats = ['__background__'];
        f = open(self.system_dict["class_list"]);
        lines = f.readlines();
        f.close();
        for i in range(len(lines)):
            if(lines != ""):
                cats.append(lines[i][:len(lines[i])-1])
        self.system_dict["labels"] = cats;
        print("Done....");
Example 6
import rospy
import roslib
import torch

from configs.CC import Config
from utils.core import *
from darknet_dnn.msg import *
from cv_bridge import CvBridge, CvBridgeError

#parameters
show_result = rospy.get_param("/m2det_ros/show_result")
subscribe_image = rospy.get_param("/m2det_ros/subscribe_image")

print_info(
    ' -------------------------------------------------------------\n'
    '|                       M2Det ROS                            |\n'
    ' -------------------------------------------------------------',
    ['blue', 'bold'])

global cfg
#cfg = Config.fromfile(args.config)
cfg = Config.fromfile(
    roslib.packages.get_pkg_dir("m2det_ros") + "/configs/m2det512_vgg.py")
anchor_config = anchors(cfg)
print_info('The Anchor info: \n{}'.format(anchor_config))
priorbox = PriorBox(anchor_config)
net = build_net('test',
                size=cfg.model.input_size,
                config=cfg.model.m2det_config)
#init_net(net, cfg, args.trained_model)
init_net(
    net, cfg,
    roslib.packages.get_pkg_dir("m2det_ros") + "/weights/m2det512_vgg.pth")

print_info('===> Finished constructing and loading model', ['yellow', 'bold'])
net.eval()
with torch.no_grad():
    priors = priorbox.forward()
Example 7
import cv2
from configs.CC import Config
import time
#import boto3

#endpoint='m2det-endpoint'
#runtime_client = boto3.client('sagemaker-runtime')
#response = runtime_client.invoke_endpoint(EndpointName=endpoint,
#           Body=img_encoded.tostring(), ContentType='text/csv')
##files= {'image': open(sys.argv[1], 'rb')}
##r = requests.post(url, files=files)
#print(response['Body'].read().decode('ascii'))

global cfg

cfg = Config.fromfile('configs/m2det512_vgg.py')


def _to_color(indx, base):
    """ return (b, r, g) tuple"""
    base2 = base * base
    b = 2 - indx // base2            # integer division keeps the channel values integral
    r = 2 - (indx % base2) // base
    g = 2 - (indx % base2) % base
    return b * 127, r * 127, g * 127


print(cfg.model.m2det_config.num_classes)
base = int(np.ceil(pow(cfg.model.m2det_config.num_classes, 1. / 3)))
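# base is the cube root of the class count, so _to_color spreads the class
# indices over a base x base x base grid of BGR values, giving each class a
# visually distinct color.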
colors = [
    _to_color(x, base) for x in range(cfg.model.m2det_config.num_classes)
]
Example 8
    def setup(self):
        f = open(
            "Monk_Object_Detection/15_pytorch_peleenet/lib/configs/Pelee_VOC.py"
        )
        lines = f.read()
        f.close()

        lines = lines.replace(
            "save_epochs=10",
            "save_epochs=" + str(self.system_dict["params"]["num_epochs"]))
        lines = lines.replace(
            "print_epochs=10",
            "print_epochs=" + str(self.system_dict["params"]["num_epochs"]))
        lines = lines.replace(
            "weights_save='weights/'", "weights_save='" +
            self.system_dict["params"]["model_output_dir"] + "/'")
        if (self.system_dict["params"]["use_gpu"]):
            lines = lines.replace("cuda=True", "cuda=True")
        else:
            lines = lines.replace("cuda=True", "cuda=False")
        lines = lines.replace(
            "per_batch_size=64",
            "per_batch_size=" + str(self.system_dict["params"]["batch_size"]))
        lines = lines.replace(
            "num_workers=8",
            "num_workers=" + str(self.system_dict["params"]["num_workers"]))

        f = open("config.py", 'w')
        f.write(lines)
        f.close()

        self.system_dict["local"]["cfg"] = Config.fromfile("config.py")

        print_info('===> Loading Dataset...', ['yellow', 'bold'])
        self.system_dict["local"]["dataset"] = get_dataloader(
            self.system_dict["local"]["cfg"],
            train_img_dir=self.system_dict["params"]["train_img_dir"],
            train_anno_dir=self.system_dict["params"]["train_anno_dir"],
            class_file=self.system_dict["params"]["class_file"])
        print_info('===> Done...', ['yellow', 'bold'])

        print_info('===> Setting up epoch details...', ['yellow', 'bold'])
        self.system_dict["local"]["epoch_size"] = len(
            self.system_dict["local"]["dataset"]) // (
                self.system_dict["local"]["cfg"].train_cfg.per_batch_size *
                self.system_dict["params"]["ngpu"])

        self.system_dict["local"]["max_iter"] = self.system_dict["local"][
            "epoch_size"] * self.system_dict["params"]["num_epochs"]

        self.system_dict["local"]["stepvalues"] = [
            self.system_dict["local"]["max_iter"] // 3,
            2 * self.system_dict["local"]["max_iter"] // 3
        ]

        f = open("config.py")
        lines = f.read()
        f.close()

        lines = lines.replace(
            "step_lr=[80000, 100000, 120000,160000]",
            "step_lr=" + str(self.system_dict["local"]["stepvalues"]))
        lines = lines.replace(
            "num_classes=21", "num_classes=" +
            str(len(self.system_dict["local"]["dataset"].class_to_ind)))
        lines = lines.replace("lr=5e-3",
                              "lr=" + str(self.system_dict["params"]["lr"]))
        lines = lines.replace(
            "gamma=0.1", "gamma=" + str(self.system_dict["params"]["gamma"]))
        lines = lines.replace(
            "momentum=0.9",
            "momentum=" + str(self.system_dict["params"]["momentum"]))
        lines = lines.replace(
            "weight_decay=0.0005",
            "weight_decay=" + str(self.system_dict["params"]["weight_decay"]))

        f = open("config_final.py", 'w')
        f.write(lines)
        f.close()
        print_info('===> Done...', ['yellow', 'bold'])

        self.system_dict["local"]["cfg"] = Config.fromfile("config_final.py")
        #print(self.system_dict["local"]["cfg"])

        self.system_dict["local"]["net"] = build_net(
            'train', self.system_dict["local"]["cfg"].model.input_size,
            self.system_dict["local"]["cfg"].model)

        if (self.system_dict["params"]["resume_net"]):
            init_net(self.system_dict["local"]["net"],
                     self.system_dict["local"]["cfg"],
                     self.system_dict["params"]
                     ["resume_net"])  # init the network with pretrained
        if self.system_dict["params"]["ngpu"] > 1:
            self.system_dict["local"]["net"] = torch.nn.DataParallel(
                self.system_dict["local"]["net"])
        if self.system_dict["local"]["cfg"].train_cfg.cuda:
            self.system_dict["local"]["net"].cuda()
            cudnn.benchmark = True

        self.system_dict["local"]["optimizer"] = set_optimizer(
            self.system_dict["local"]["net"], self.system_dict["local"]["cfg"])
        self.system_dict["local"]["criterion"] = set_criterion(
            self.system_dict["local"]["cfg"])
        self.system_dict["local"]["priorbox"] = PriorBox(
            anchors(self.system_dict["local"]["cfg"].model))

        with torch.no_grad():
            self.system_dict["local"]["priors"] = self.system_dict["local"][
                "priorbox"].forward()
            if self.system_dict["local"]["cfg"].train_cfg.cuda:
                self.system_dict["local"]["priors"] = self.system_dict[
                    "local"]["priors"].cuda()
Example 9
def detect_parking_spaces(dir,
                          threshold=0.2,
                          save=False,
                          show=False,
                          cam=-1,
                          gpu=False,
                          config='training/m2det/configs/m2det512_vgg.py',
                          weights='training/m2det/weights/m2det512_vgg.pth'):
    print('Detect Parking Spaces Program')
    cfg = Config.fromfile(config)
    anchor_config = anchors(cfg)

    priorbox = PriorBox(anchor_config)
    net = build_net('test',
                    size=cfg.model.input_size,
                    config=cfg.model.m2det_config)
    init_net(net, cfg, weights)
    net.eval()
    if not gpu:
        cfg.test_cfg.cuda = False

    with torch.no_grad():
        priors = priorbox.forward()
        if cfg.test_cfg.cuda:
            net = net.cuda()
            priors = priors.cuda()
            cudnn.benchmark = True
        else:
            net = net.cpu()
    print_info('===> Finished constructing and loading model')

    _preprocess = BaseTransform(cfg.model.input_size, cfg.model.rgb_means,
                                (2, 0, 1))
    detector = Detect(cfg.model.m2det_config.num_classes, cfg.loss.bkg_label,
                      anchor_config)

    base = int(np.ceil(pow(cfg.model.m2det_config.num_classes, 1. / 3)))
    colors = [
        _to_color(x, base) for x in range(cfg.model.m2det_config.num_classes)
    ]
    cats = [
        _.strip().split(',')[-1]
        for _ in open('training/m2det/data/coco_labels.txt', 'r').readlines()
    ]
    labels = tuple(['__background__'] + cats)

    im_path = dir + '/images'
    if cam >= 0:
        capture = cv2.VideoCapture(cam)
    im_fnames = sorted((fname for fname in os.listdir(im_path)
                        if os.path.splitext(fname)[-1] == '.jpg'))
    im_fnames = (os.path.join(im_path, fname) for fname in im_fnames)
    im_iter = iter(im_fnames)

    save_dir = dir + '/detection_images'
    os.makedirs(save_dir, exist_ok=True)
    locs_list = {}
    while True:
        if cam < 0:
            try:
                fname = next(im_iter)
            except StopIteration:
                break
            image = cv2.imread(fname, cv2.IMREAD_COLOR)
        else:
            ret, image = capture.read()
            if not ret:
                cv2.destroyAllWindows()
                capture.release()
                break

        loop_start = time.time()
        w, h = image.shape[1], image.shape[0]
        img = _preprocess(image).unsqueeze(0)
        if cfg.test_cfg.cuda:
            img = img.cuda()
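        # Detections come back in normalized [0, 1] coordinates, so they are
        # rescaled by (w, h, w, h) below to recover pixel coordinates.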
        scale = torch.Tensor([w, h, w, h])
        out = net(img)
        if not gpu:
            priors = priors.cpu()

        boxes, scores = detector.forward(out, priors)
        boxes = (boxes[0] * scale).cpu().numpy()
        scores = scores[0].cpu().numpy()
        allboxes = []
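        # For each foreground class (index 0 is the background class): keep
        # detections above the score threshold, run NMS, and retain at most
        # keep_per_class boxes per class.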

        for j in range(1, cfg.model.m2det_config.num_classes):
            inds = np.where(scores[:, j] > cfg.test_cfg.score_threshold)[0]
            if len(inds) == 0:
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack(
                (c_bboxes, c_scores[:, np.newaxis])).astype(np.float32,
                                                            copy=False)
            soft_nms = cfg.test_cfg.soft_nms
            keep = nms(
                c_dets, cfg.test_cfg.iou, force_cpu=soft_nms
            )  #min_thresh, device_id=0 if cfg.test_cfg.cuda else None)
            keep = keep[:cfg.test_cfg.keep_per_class]
            c_dets = c_dets[keep, :]
            allboxes.extend([_.tolist() + [j] for _ in c_dets])

        loop_time = time.time() - loop_start
        allboxes = np.array(allboxes)
        if allboxes.size == 0:
            # No detections passed the score threshold for this frame.
            continue
        boxes = allboxes[:, :4]
        scores = allboxes[:, 4]
        cls_inds = allboxes[:, 5]
        # print('\n'.join(['pos:{}, ids:{}, score:{:.3f}'.format('(%.1f,%.1f,%.1f,%.1f)' % (o[0],o[1],o[2],o[3]) \
        #         ,labels[int(oo)],ooo) for o,oo,ooo in zip(boxes,cls_inds,scores)]))
        fps = 1.0 / float(loop_time) if cam >= 0 else -1
        im2show, loc = draw_detection(image,
                                      boxes,
                                      scores,
                                      cls_inds,
                                      fps,
                                      threshold,
                                      colors=colors,
                                      labels=labels)
        locs_list[fname] = loc

        if im2show.shape[0] > 1100:
            im2show = cv2.resize(im2show, (int(
                1000. * float(im2show.shape[1]) / im2show.shape[0]), 1000))
        if show:
            cv2.imshow('test', im2show)
            if cam < 0:
                cv2.waitKey(1000)
            else:
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    capture.release()
                    break
        if save:
            name = fname.split('.')[0]
            name = name.split('/')[-1]
            cv2.imwrite(f"{save_dir}/{name}.jpg", im2show)

    save_name = dir + '/labels/split.txt'
    with open(save_name, 'wb') as f:
        pickle.dump(locs_list, f)
Example 10
def train(cfg):
    cfg = Config.fromfile(cfg)
    net = build_net(
        'train',
        size=cfg.model.input_size,  # Only 320, 512, 704 and 800 are supported
        config=cfg.model.m2det_config)
    net.to(device)
    if os.path.exists(checkpoint_path):
        checkpoints = torch.load(checkpoint_path)
        net.load_state_dict(checkpoints)
        logging.info('checkpoint loaded.')

    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.train_cfg.lr[0],
                          momentum=cfg.optimizer.momentum,
                          weight_decay=cfg.optimizer.weight_decay)
    criterion = MultiBoxLoss(cfg.model.m2det_config.num_classes,
                             overlap_thresh=cfg.loss.overlap_thresh,
                             prior_for_matching=cfg.loss.prior_for_matching,
                             bkg_label=cfg.loss.bkg_label,
                             neg_mining=cfg.loss.neg_mining,
                             neg_pos=cfg.loss.neg_pos,
                             neg_overlap=cfg.loss.neg_overlap,
                             encode_target=cfg.loss.encode_target)
    priorbox = PriorBox(anchors(cfg))
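    # The priors are the fixed anchor boxes implied by the config; they are the
    # same for every image, so they are generated once (no gradients needed)
    # and reused for every batch below.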
    with torch.no_grad():
        priors = priorbox.forward().to(device)
    net.train()

    dataset = get_dataloader(cfg, 'COCO', 'train_sets')
    train_ds = DataLoader(dataset,
                          cfg.train_cfg.per_batch_size,
                          shuffle=True,
                          num_workers=0,
                          collate_fn=detection_collate)
    logging.info('dataset loaded, start to train...')

    for epoch in range(cfg.model.epochs):
        for i, data in enumerate(train_ds):
            try:
                lr = adjust_learning_rate_v2(optimizer, epoch, i, 10320, cfg)
                images, targets = data
                images = images.to(device)
                targets = [anno.to(device) for anno in targets]
                out = net(images)

                optimizer.zero_grad()
                loss_l, loss_c = criterion(out, priors, targets)
                loss = loss_l + loss_c
                loss.backward()
                optimizer.step()

                if i % 30 == 0:
                    logging.info(
                        'Epoch: {}, iter: {}, loc_loss: {}, conf_loss: {}, loss: {}, lr: {}'
                        .format(epoch, i, loss_l.item(), loss_c.item(),
                                loss.item(), lr))

                if i % 2000 == 0:
                    torch.save(net.state_dict(), checkpoint_path)
                    logging.info('model saved.')
            except KeyboardInterrupt:
                torch.save(net.state_dict(), checkpoint_path)
                logging.info('model saved.')
                exit(0)
    torch.save(net.state_dict(), checkpoint_path)