Example 1
def main_active_mode(args):
    Query_iteration = 10
    create_net = lambda num: create_mobilenetv2_ssd_lite(
        num, width_mult=args['detection_model']["width_mult"])
    config = mobilenetv1_ssd_config
    train_loader, val_loader, num_classes = dataset_loading(args, config)

    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)
    active_dataset = VIRAT_table_comm(
        args["Datasets"]["virat_seq"]["train_image_path"],
        args["Datasets"]["virat_seq"]["train_anno_path"],
        transform=test_transform,
        target_transform=target_transform,
        downpurning_ratio=0.2)

    labeled, unlabeled = train_loader.dataset.dataset_information()
    query_item = len(unlabeled) // Query_iteration

    active_dataset.Active_mode()

    for q_iter in range(Query_iteration):
        #if q_iter != 0:
        active_dataset.Active_mode()
        labeled, unlabeled = active_dataset.dataset_information()
        query_index = np.random.choice(unlabeled, query_item, replace=False)
        train_loader.dataset.setting_be_selected_sample(query_index)
        active_dataset.setting_be_selected_sample(query_index)
        train_loader.dataset.training_mode()
        train(args, train_loader)
Example 2
            def __init__(self, info):
                super(SSD, self).__init__()
                self.infer = info['infer']
                detection_metadata = info['metadatas'][1]
                if detection_metadata and 'Categories' in detection_metadata:
                    self.categories = detection_metadata['Categories']
                else:
                    self.categories = ['object']
                self.num_classes = len(self.categories) + 1
                lib.eprint('ssd: set num_classes={}'.format(self.num_classes))

                self.mode = info['params'].get('mode', 'mb2-ssd-lite')
                mb2_width_mult = info['params'].get('mb2_width_mult', 1.0)

                # adapt from train_ssd.py
                if self.mode == 'vgg16-ssd':
                    create_net = create_vgg_ssd
                    config = vgg_ssd_config
                elif self.mode == 'mb1-ssd':
                    create_net = create_mobilenetv1_ssd
                    config = mobilenetv1_ssd_config
                elif self.mode == 'mb1-ssd-lite':
                    create_net = create_mobilenetv1_ssd_lite
                    config = mobilenetv1_ssd_config
                elif self.mode == 'sq-ssd-lite':
                    create_net = create_squeezenet_ssd_lite
                    config = squeezenet_ssd_config
                elif self.mode == 'mb2-ssd-lite':
                    create_net = lambda num, is_test: create_mobilenetv2_ssd_lite(
                        num, width_mult=mb2_width_mult, is_test=is_test)
                    config = mobilenetv1_ssd_config
                elif self.mode == 'mb3-large-ssd-lite':
                    create_net = lambda num, is_test: create_mobilenetv3_large_ssd_lite(
                        num, is_test=is_test)
                    config = mobilenetv1_ssd_config
                elif self.mode == 'mb3-small-ssd-lite':
                    create_net = lambda num, is_test: create_mobilenetv3_small_ssd_lite(
                        num, is_test=is_test)
                    config = mobilenetv1_ssd_config

                config.iou_threshold = info['params'].get(
                    'iou_threshold', config.iou_threshold)
                self.prob_threshold = info['params'].get(
                    'confidence_threshold', 0.01)
                self.config = config

                self.model = create_net(self.num_classes, is_test=self.infer)
                self.criterion = MultiboxLoss(config.priors,
                                              iou_threshold=0.5,
                                              neg_pos_ratio=3,
                                              center_variance=0.1,
                                              size_variance=0.2,
                                              device=info['device'])
                self.match_prior = MatchPrior(config.priors,
                                              config.center_variance,
                                              config.size_variance, 0.5)
                self.image_mean = torch.tensor(self.config.image_mean,
                                               dtype=torch.float32).reshape(
                                                   1, 3, 1,
                                                   1).to(info['device'])
Example 3
 def __init__(self):
     self.class_names = [name.strip() for name in open(label_path).readlines()]
     self.num_classes = len(self.class_names)
     self.net = create_mobilenetv2_ssd_lite(len(self.class_names), is_test=True)
     self.net.load(model_path)
     self.predictor = create_mobilenetv2_ssd_lite_predictor(self.net, candidate_size=200,
                                                            device=torch.device("cuda:0"))
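A minimal end-to-end sketch of how a predictor built this way is typically used. Assumptions: the stock pytorch-ssd VOC label file and mb2-ssd-lite checkpoint (the same paths that appear in later examples), a placeholder input image example.jpg, and the predict(image, top_k, prob_threshold) call used in the demo scripts further down.

import cv2
import torch
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor

class_names = [name.strip() for name in open('models/voc-model-labels.txt').readlines()]
net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
net.load('models/mb2-ssd-lite-mp-0_686.pth')
predictor = create_mobilenetv2_ssd_lite_predictor(
    net, candidate_size=200,
    device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

image = cv2.cvtColor(cv2.imread('example.jpg'), cv2.COLOR_BGR2RGB)  # example.jpg is a placeholder
boxes, labels, probs = predictor.predict(image, 10, 0.4)  # top 10 boxes above probability 0.4
for i in range(boxes.size(0)):
    print(f"{class_names[labels[i]]}: {probs[i]:.2f} at {boxes[i, :].tolist()}")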
Example 4
 def __init__(self, **kwargs):
     self.__dict__.update(self._defaults) # set up default values
     self.__dict__.update(kwargs) # and update with user overrides
     self.class_names = self._get_class()
     self.net = create_mobilenetv2_ssd_lite(len(self.class_names), is_test=True)
     self.net.load(self.model_path)
     self.net.eval()
     self.net.cuda()
     self.predictor = create_mobilenetv2_ssd_lite_predictor(self.net, candidate_size=200, device='cuda')
Example 5
	def __init__(self):

		model = "v1"
		self.prob_threshold = 0.85
		self.cv_bridge = CvBridge() 
		self.labels = ['background' , # always index 0
				'person','palm']
		self.objects = []
		if model == "v2_lite":
			self.network = create_mobilenetv2_ssd_lite(len(self.labels), is_test=True) 
		elif model == "v1":
			self.network = create_mobilenetv1_ssd(len(self.labels), is_test=True) 
		elif model == "v1_lite":
			self.network = create_mobilenetv1_ssd_lite(len(self.labels), is_test=True) 

		model_path = '/home/arg_ws3/pytorch-ssd/models/argbot_person_palm_new_new/mb1-ssd-Epoch-749-Loss-1.8576.pth'
		state_dict = torch.load(os.path.join(model_path))
		self.network.load_state_dict(state_dict)
		DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
		self.network.to(DEVICE)
		if model == "v2_lite":
			self.predictor = create_mobilenetv2_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
		elif model == "v1_lite":
			self.predictor = create_mobilenetv1_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
		elif model == "v1":	
			self.predictor = create_mobilenetv1_ssd_predictor(self.network, candidate_size=200, device = DEVICE)

		#### Publisher
		self.origin = rospy.Publisher('/input', bb_input, queue_size=1)
		self.image_pub = rospy.Publisher("/predict_img", Image, queue_size = 1)
		self.pub_tracking = rospy.Publisher("/tracking_point", Marker, queue_size = 1)
		self.pub_point_array = rospy.Publisher("/person_point_array", MarkerArray, queue_size = 1)

		self.kf_x = KalmanFilter(dim_x=2, dim_z=1)
		self.kf_y = KalmanFilter(dim_x=2, dim_z=1)
		self.path = []
		self.person_list = []
		self.cv_depthimage = None
		self.start_tracking = False
		self.frame_id = 'camera_link'
		self.t_old = None 
		self.t_now = None

		info_msg = rospy.wait_for_message('/camera/color/camera_info', CameraInfo, timeout=None)
		self.fx = info_msg.P[0]
		self.fy = info_msg.P[5]
		self.cx = info_msg.P[2]
		self.cy = info_msg.P[6]

		### msg filter 
		self.is_compressed = False

		image_sub = message_filters.Subscriber('/camera/color/image_raw', Image)
		depth_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
		ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
		ts.registerCallback(self.callback)
Example 6
def main(args):

    create_net = lambda num: create_mobilenetv2_ssd_lite(
        num, width_mult=args['detection_model']["width_mult"])
    config = mobilenetv1_ssd_config

    train_loader, val_loader, num_classes = dataset_loading(args, config)

    for epoch in range(0, args['flow_control']['num_epochs']):
        train(args, train_loader)
Example 7
    def __init__(self):
        model = "v1"
        self.prob_threshold = 0.85
        self.cv_bridge = CvBridge()
        self.num_points = 8000
        self.labels = [
            'background',  # always index 0
            'person',
            'palm'
        ]
        self.objects = []
        if model == "v2_lite":
            self.network = create_mobilenetv2_ssd_lite(len(self.labels),
                                                       is_test=True)
        elif model == "v1":
            self.network = create_mobilenetv1_ssd(len(self.labels),
                                                  is_test=True)
        elif model == "v1_lite":
            self.network = create_mobilenetv1_ssd_lite(len(self.labels),
                                                       is_test=True)

        model_path = '/home/arg_ws3/pytorch-ssd/models/argbot_person_palm/mb1-ssd-Epoch-10-Loss-3.1767.pth'
        state_dict = torch.load(os.path.join(model_path))
        self.network.load_state_dict(state_dict)
        DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.network.to(DEVICE)
        if model == "v2_lite":
            self.predictor = create_mobilenetv2_ssd_lite_predictor(
                self.network, candidate_size=200, device=DEVICE)
        elif model == "v1_lite":
            self.predictor = create_mobilenetv1_ssd_lite_predictor(
                self.network, candidate_size=200, device=DEVICE)
        elif model == "v1":
            self.predictor = create_mobilenetv1_ssd_predictor(
                self.network, candidate_size=200, device=DEVICE)

        #### Publisher
        self.origin = rospy.Publisher('/input', bb_input, queue_size=1)
        self.image_pub = rospy.Publisher("/predict_img", Image, queue_size=1)
        self.mask_pub = rospy.Publisher("/predict_mask", Image, queue_size=1)

        ### msg filter
        self.is_compressed = False

        video_mode = False
        if video_mode:
            image_sub = rospy.Subscriber('/camera/color/image_raw', Image,
                                         self.video_callback)
        else:
            image_sub = message_filters.Subscriber('/camera/color/image_raw',
                                                   Image)
            depth_sub = message_filters.Subscriber(
                '/camera/aligned_depth_to_color/image_raw', Image)
            ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
            ts.registerCallback(self.callback)
Example 8
    def __init__(self):
        model = "v1"
        self.prob_threshold = 0.65
        r = rospkg.RosPack()
        self.path = r.get_path('ssd_mobile_lite')
        self.cv_bridge = CvBridge()
        self.num_points = 8000
        self.labels = [
            'background',  # always index 0
            'duckie_car',
            'house',
            'broken',
            'duck'
        ]
        self.objects = []
        if model == "v2_lite":
            self.network = create_mobilenetv2_ssd_lite(len(self.labels),
                                                       is_test=True)
            model_dir = "/home/nvidia"
            model_name = "model1.pth"
        elif model == "v1":
            self.network = create_mobilenetv1_ssd(len(self.labels),
                                                  is_test=True)
            model_name = "/models/model.pth"

        elif model == "v1_lite":
            self.network = create_mobilenetv1_ssd_lite(len(self.labels),
                                                       is_test=True)
            model_name = "/models/model.pth"

        state_dict = torch.load(self.path + model_name)
        self.network.load_state_dict(state_dict)
        DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.network.to(DEVICE)
        if model == "v2_lite":
            self.predictor = create_mobilenetv2_ssd_lite_predictor(
                self.network, candidate_size=200, device=DEVICE)
        elif model == "v1_lite":
            self.predictor = create_mobilenetv1_ssd_lite_predictor(
                self.network, candidate_size=200, device=DEVICE)
        elif model == "v1":
            self.predictor = create_mobilenetv1_ssd_predictor(
                self.network, candidate_size=200, device=DEVICE)
        print("finish load model")
        #### Publisher
        self.image_pub = rospy.Publisher("/predict_img", Image, queue_size=1)

        ### msg filter
        self.is_compressed = True

        image_sub = rospy.Subscriber('/camera/color/image_raw/compressed',
                                     CompressedImage, self.callback)
Example 9
    def init_mobile_net(self):
        print(sys.path)
        self.label_path = '/home/keita/catkin_ws/src/preprocess/scripts/models/voc-model-labels.txt'
        self.model_path = '/home/keita/catkin_ws/src/preprocess/scripts/models/mb2-ssd-lite-mp-0_686.pth'

        self.class_names = [
            name.strip() for name in open(self.label_path).readlines()
        ]
        self.num_classes = len(self.class_names)
        self.net = create_mobilenetv2_ssd_lite(len(self.class_names),
                                               is_test=True)
        self.net.load(self.model_path)
        self.predictor = create_mobilenetv2_ssd_lite_predictor(
            self.net, candidate_size=200)
Example 10
        def __init__(self, mode):

            self.config = patch_config.patch_configs[mode](
            )  # select the mode for the patch

            # load cfg file (.yaml) and override default cfg options in lib_ssd.utils.config_parse
            # cfg_from_file(self.config.cfgfile_ssds)
            # self.cfgfile_ssds = cfg

            self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
            print(torch.cuda.device_count())

            #self.darknet_model = Darknet(self.config.cfgfile)
            #self.darknet_model.load_weights(self.config.weightfile)

            #self.mbntv2_ssdlite_model, self.priorbox = create_model(self.cfgfile_ssds.MODEL) # COCO
            #self.priors = Variable(self.priorbox.forward(), volatile=True) # num_priors = grid x grid x num_anchors

            self.mbntv2_ssdlite_model = create_mobilenetv2_ssd_lite(
                21, is_test=True)  # VOC
            self.mbntv2_ssdlite_model.load(
                self.config.ssdlitembntv2_model_path)

            if use_cuda:
                #self.darknet_model = self.darknet_model.eval().to(self.device)  # Why eval? test!
                self.mbntv2_ssdlite_model = self.mbntv2_ssdlite_model.eval(
                ).to(self.device)
                self.patch_applier = PatchApplier().to(self.device)
                self.patch_transformer = PatchTransformer().to(self.device)
                #self.prob_extractor = MaxProbExtractor(0, 80, self.config).to(self.device)
                self.score_extractor_ssd = ssd_feature_output_manage(
                    15, 21, self.config).to(
                        self.device
                    )  # 15 is person class in VOC (with 21 elements)
                self.nps_calculator = NPSCalculator(self.config.printfile,
                                                    self.config.patch_size).to(
                                                        self.device)
                self.total_variation = TotalVariation().to(self.device)
            else:
                #self.darknet_model = self.darknet_model.eval()  # Why eval? test!
                self.mbntv2_ssdlite_model = self.mbntv2_ssdlite_model.eval()
                self.patch_applier = PatchApplier()
                self.patch_transformer = PatchTransformer()
                #self.prob_extractor = MaxProbExtractor(0, 80, self.config)
                self.score_extractor_ssd = ssd_feature_output_manage(
                    15, 21, self.config).to(self.device)
                self.nps_calculator = NPSCalculator(self.config.printfile,
                                                    self.config.patch_size)
                self.total_variation = TotalVariation()
Example 11
 def Init_Net(self, net_type):
     print("==========Init Net==========")
     if net_type=="mb2-ssd":
         model_path = "./models/mobilenet2-ssd.pth"
         label_path = "./models/voc-model-labels_mb2.txt"
         self.class_names = [name.strip() for name in open(label_path).readlines()]
         net = create_mobilenetv2_ssd_lite(len(self.class_names), is_test=True)
         net.load(model_path)
         self.predictor = create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200)
     elif net_type=="mb3-large-ssd":
         model_path = "./models/mobilenet3-large-ssd.pth"
         label_path = "./models/voc-model-labels_mb3.txt"
         self.class_names = [name.strip() for name in open(label_path).readlines()]
         net = create_mobilenetv3_ssd_lite("Large", len(self.class_names), is_test=True)
         net.load(model_path)
         self.predictor = create_mobilenetv3_ssd_lite_predictor(net, candidate_size=200)
Example 12
def net_select(model_type, class_names):
    if model_type == 'vgg16-ssd':
        return create_vgg_ssd(len(class_names), is_test=True)
    elif model_type == 'mb1-ssd':
        return create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif model_type == 'mb1-ssd-lite':
        return create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif model_type == 'mb2-ssd-lite':
        return create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
    elif model_type == 'sq-ssd-lite':
        return create_squeezenet_ssd_lite(len(class_names), is_test=True)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)
Example 13
 def _create_network(self, net_type):
     if net_type == 'vgg16-ssd':
         return create_vgg_ssd(len(self.class_names), is_test=True)
     elif net_type == 'mb1-ssd':
         return create_mobilenetv1_ssd(len(self.class_names), is_test=True)
     elif net_type == 'mb1-ssd-lite':
         return create_mobilenetv1_ssd_lite(len(self.class_names),
                                            is_test=True)
     elif net_type == 'mb2-ssd-lite':
         return create_mobilenetv2_ssd_lite(len(self.class_names),
                                            is_test=True)
     elif net_type == 'sq-ssd-lite':
         return create_squeezenet_ssd_lite(len(self.class_names),
                                           is_test=True)
     else:
         raise RuntimeError(
             "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
         )
Example 14
def select_net(model_path, net_type, num_classes):
    '''
    Select the model.
    :param model_path: path to the model weights
    :param net_type: model type
    :param num_classes: number of labels; label 0 is the background
    :return:
    '''
    if net_type == 'vgg16-ssd':
        net = create_vgg_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd':
        net = create_mobilenetv1_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(num_classes, is_test=True)
    elif net_type == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(num_classes,
                                          is_test=True,
                                          device=device)
    elif net_type == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(num_classes, is_test=True)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)
    net.load(model_path)
    if net_type == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net,
                                                          candidate_size=200)
    elif net_type == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net,
                                                          candidate_size=200,
                                                          device=device)
    elif net_type == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net,
                                                         candidate_size=200)
    else:
        predictor = create_vgg_ssd_predictor(net, candidate_size=200)
    return predictor
Example 15
	def __init__(self):

		model = "v1"
		r = rospkg.RosPack()
		path = r.get_path('ssd_mobile_lite')
		model_name = "Epoch-630-Loss-0.4744.pth"
		self.prob_threshold = 0.5
		self.cv_bridge = CvBridge() 

		self.labels = ['BACKGROUND', 'backpack']
		if model == "v2_lite":
			self.network = create_mobilenetv2_ssd_lite(len(self.labels), is_test=True) 
		elif model == "v1":
			self.network = create_mobilenetv1_ssd(len(self.labels), is_test=True) 	
		elif model == "v1_lite":
			self.network = create_mobilenetv1_ssd_lite(len(self.labels), is_test=True) 

		state_dict = torch.load(os.path.join(path, "weights/", model_name))
		self.network.load_state_dict(state_dict)
		DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
		self.network.to(DEVICE)
		if model == "v2_lite":
			self.predictor = create_mobilenetv2_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
		elif model == "v1_lite":
			self.predictor = create_mobilenetv1_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
		elif model == "v1":	
			self.predictor = create_mobilenetv1_ssd_predictor(self.network, candidate_size=200, device = DEVICE)

		## Publisher
		self.image_pub = rospy.Publisher("camera/predict_img/", Image, queue_size=1)
		self.BoundingBoxes_pub = rospy.Publisher("BoundingBoxes/", BoundingBoxes, queue_size = 1)

		## msg filter 
		self.depth_sub = message_filters.Subscriber(
			"camera/aligned_depth_to_color/image_raw", Image)
		self.image_sub = message_filters.Subscriber("camera/color/image_raw", Image)
		self.ts = message_filters.ApproximateTimeSynchronizer(
			[self.image_sub, self.depth_sub], 5, 5)
		self.ts.registerCallback(self.img_cb)

		print("Start Predicting image")
Example 16
    def __init__(self):
        model = "v1"
        self.prob_threshold = 0.65
        self.cv_bridge = CvBridge() 
        self.num_points = 8000
        self.labels = ['background' , # always index 0
            'duckie_car','house','broken','duck']
        self.objects = []
        if model == "v2_lite":
            self.network = create_mobilenetv2_ssd_lite(len(self.labels), is_test=True) 
            model_dir = "/home/nvidia"
            model_name = "model1.pth"
        elif model == "v1":
            self.network = create_mobilenetv1_ssd(len(self.labels), is_test=True) 
            model_dir = "/home/nvidia"
            model_name = "model.pth"    
        elif model == "v1_lite":
            self.network = create_mobilenetv1_ssd_lite(len(self.labels), is_test=True) 
            model_dir = "/home/nvidia/"
            model_name = "model.pth"

        state_dict = torch.load(os.path.join(model_dir, model_name))
        self.network.load_state_dict(state_dict)
        DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.network.to(DEVICE)
        if model == "v2_lite":
            self.predictor = create_mobilenetv2_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
        elif model == "v1_lite":
            self.predictor = create_mobilenetv1_ssd_lite_predictor(self.network, candidate_size=200, device = DEVICE)
        elif model == "v1": 
            self.predictor = create_mobilenetv1_ssd_predictor(self.network, candidate_size=200, device = DEVICE)

        #### Publisher
        self.origin = rospy.Publisher('/input', bb_input, queue_size=1) 
        self.image_pub = rospy.Publisher("/predict_img", Image, queue_size = 1)
        self.mask_pub = rospy.Publisher("/predict_mask", Image, queue_size = 1)
        self.position = rospy.Publisher("/position", Point32, queue_size = 1)
        ### msg filter 
        self.is_compressed = False

        image_sub = rospy.Subscriber('/camera/color/image_raw', Image, self.callback)
Example 17
def prepare_predictor(net_type, model_path, label_path):
    class_names = [name.strip() for name in open(label_path).readlines()]
    num_classes = len(class_names)
    if net_type == 'vgg16-ssd':
        net = create_vgg_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd':
        net = create_mobilenetv1_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(num_classes, is_test=True)
    elif net_type == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(num_classes, is_test=True)
    elif net_type == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(num_classes, is_test=True)
    else:
        raise ValueError(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )

    net.load(model_path)

    if net_type == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net,
                                                          candidate_size=200)
    elif net_type == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net,
                                                          candidate_size=200)
    elif net_type == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net,
                                                         candidate_size=200)
    else:
        raise ValueError(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )

    return class_names, predictor
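A short usage sketch for prepare_predictor, assuming the same stock mb2-ssd-lite checkpoint and VOC label file used in the earlier examples and a placeholder test.jpg:

import cv2

class_names, predictor = prepare_predictor(
    'mb2-ssd-lite',
    'models/mb2-ssd-lite-mp-0_686.pth',
    'models/voc-model-labels.txt')
image = cv2.cvtColor(cv2.imread('test.jpg'), cv2.COLOR_BGR2RGB)  # test.jpg is a placeholder
boxes, labels, probs = predictor.predict(image, 10, 0.4)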
Example 18
def Load_model_configuration(args):
    if args["flow_control"]["net"] == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args["flow_control"]["net"] == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args['detection_model']["width_mult"])
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    return create_net, config
Example 19
def select_net(net_name, mb2_width_mult=1.0):
    if net_name == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif net_name == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif net_name == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif net_name == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif net_name == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=mb2_width_mult)
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        # parser.print_help(sys.stderr)
        sys.exit(1)
    return create_net, config
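A brief sketch of how the (create_net, config) pair returned above is consumed, mirroring the training examples in this listing; the import paths follow pytorch-ssd's train_ssd.py (assumption) and num_classes is a placeholder value:

from vision.ssd.ssd import MatchPrior
from vision.ssd.data_preprocessing import TrainAugmentation, TestTransform

create_net, config = select_net('mb2-ssd-lite')
# Build the train/target/test transforms from the config, as the training examples do.
train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
target_transform = MatchPrior(config.priors, config.center_variance, config.size_variance, 0.5)
test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)
num_classes = 21  # placeholder: 20 VOC classes + background
net = create_net(num_classes)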
Example 20
def main():
    print('cuda device count: ', torch.cuda.device_count())
    DEVICE = 'cuda:0'
    class_names = [
        name.strip()
        for name in open('models/voc-model-labels.txt').readlines()
    ]

    image = torch.ones(1, 3, 300, 300).to(DEVICE)

    net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
    net.load('models/mb2-ssd-lite-mp-0_686.pth')
    net = net.to(DEVICE)

    net = net.eval()
    scores, boxes = net(image)

    print("Input shape ", image.shape)
    print("Scores shape ", scores.shape)
    print("Boxes shape ", boxes.shape)

    export_as_weights(net)
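export_as_weights() above is project-specific. As an alternative, here is a hedged sketch of exporting the same network to ONNX with torch.onnx.export. Assumptions: the network is rebuilt with onnx_compatible=True (the flag used in the training snippet near the end of this listing) and the standard checkpoint/label paths; export of the is_test post-processing is not verified here.

import torch
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite

DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class_names = [name.strip() for name in open('models/voc-model-labels.txt').readlines()]

# onnx_compatible=True swaps in ONNX-friendly ops (assumption: needed for a clean trace).
net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True, onnx_compatible=True)
net.load('models/mb2-ssd-lite-mp-0_686.pth')
net = net.eval().to(DEVICE)

dummy = torch.ones(1, 3, 300, 300).to(DEVICE)
torch.onnx.export(net, dummy, 'mb2-ssd-lite.onnx',
                  input_names=['input'], output_names=['scores', 'boxes'])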
Example 21
def main(args):
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available()
                          and args["flow_control"]["use_cuda"] else "cpu")
    #DEVICE = torch.device("cpu")
    if args["flow_control"]["use_cuda"] and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logging.info("Use Cuda.")

    timer = Timer()

    #logging.info(args)
    if args["flow_control"]["net"] == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args["flow_control"]["net"] == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args['detection_model']["width_mult"])
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    train_loader, val_loader, num_classes = dataset_loading(args, config)
    net = create_net(num_classes)
    net, criterion, optimizer, scheduler = optim_and_model_initial(
        args, net, timer, config, DEVICE)
    dataloader_display(train_loader, net, criterion, optimizer, DEVICE)
Example 22
from config import Configuration
import cv2
from src.WorkWithImage import WorkWithImage

from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor

app = Flask(__name__)
app.config.from_object(Configuration)
# to disable caching files
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

label_path = 'voc-model-labels.txt'
model_path = 'mb2-ssd-lite-mp-0_686.pth'

class_names = [name.strip() for name in open(label_path).readlines()]
net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
net.load(model_path)
predictor = create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200)


@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')


@app.route('/upload', methods=['POST'])
def detect_image():
    img = img_url = None
    try:
        img = request.files['image']
    except KeyError:
Example 23
    elif args.dataset_type == 'laptools':
        dataset = LapToolsDataset(args.root_folder, args.dataset, is_test=True)

    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(
        dataset)
    if args.net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names),
                                          width_mult=args.mb2_width_mult,
                                          is_test=True)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    timer.start("Load Model")
    # net.load(args.trained_model)
    net.load_checkpoint(args.trained_model)
    net = net.to(DEVICE)
    print(f'It took {timer.end("Load Model")} seconds to load the model.')
    if args.net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
Example 24
    logging.info(args)
    if args.net == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args.net == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args.net == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args.net == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args.net == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args.mb2_width_mult)
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)

    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)

    logging.info("Prepare training datasets.")
    datasets = []
Example 25
def main(args):
    net_type = args.net_type
    model_path = args.weights_path
    label_path = args.label_path
    class_names = [name.strip() for name in open(label_path).readlines()]
    num_classes = len(class_names)

    if args.live:
        cap = cv2.VideoCapture(0)
        cap.set(3, 640)
        cap.set(4, 480)
    else:
        cap = cv2.VideoCapture(args.video_path)

    Fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V')
    writer = cv2.VideoWriter('result.mp4',
                             fourcc=Fourcc,
                             fps=15,
                             frameSize=(640, 480))

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus else 'cpu'

    if net_type == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif net_type == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif net_type == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif net_type == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
    elif net_type == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    #elif net_type == 'mb3-ssd-lite':
    #    net = create_mobilenetv3_ssd_lite(len(class_names), is_test=True)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)

    net.load(model_path)

    if net_type == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
                                             candidate_size=20,
                                             device=device)
    elif net_type == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net,
                                                     candidate_size=20,
                                                     device=device)
    elif net_type == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net,
                                                          candidate_size=20,
                                                          device=device)
    elif net_type == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net,
                                                          candidate_size=20,
                                                          device=device)
    elif net_type == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net,
                                                         candidate_size=20,
                                                         device=device)
    #elif net_type == 'mb3-ssd-lite':
    #    predictor = create_mobilenetv3_ssd_lite_predictor(net, candidate_size=10)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)

    timer = Timer()

    while True:
        _, orig_image = cap.read()
        if orig_image is None:
            print('END')
            break

        image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
        timer.start()
        boxes, labels, probs = predictor.predict(image, 10, 0.4)
        interval = timer.end()
        print('Time: {:.2f}s, Detect Objects: {:d}.'.format(
            interval, labels.size(0)))
        for i in range(boxes.size(0)):
            box = boxes[i, :]
            label = f"{class_names[labels[i]]}: {probs[i]:.2f}"
            cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]),
                          (255, 255, 0), 4)

            cv2.putText(
                orig_image,
                label,
                (box[0] + 20, box[1] + 40),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,  # font scale
                (255, 0, 255),
                2)  # line type
        writer.write(orig_image)
        cv2.imshow('annotated', orig_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    writer.release()
    cv2.destroyAllWindows()
    if args.out_video:
        shutil.move('result.mp4', args.out_video)
    else:
        os.remove('result.mp4')
Example 26
def main(args):
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available()
                          and args['flow_control']['use_cuda'] else "cpu")

    # eval_path = pathlib.Path(args.eval_dir)
    # eval_path.mkdir(exist_ok=True)
    if not os.path.exists(args['flow_control']['eval_dir']):
        os.mkdir(args['flow_control']['eval_dir'])
    timer = Timer()
    class_names = [
        name.strip()
        for name in open(args['flow_control']['label_file']).readlines()
    ]

    _net = args['flow_control']['net']
    _dataset_type = args['flow_control']['dataset_type']

    if _dataset_type == "voc":
        raise NotImplementedError("Not implement error")
        dataset = VOCDataset(args['flow_control']['dataset'], is_test=True)
    elif _dataset_type == 'open_images':
        raise NotImplementedError("Not implement error")
        dataset = OpenImagesDataset(args['flow_control']['dataset'],
                                    dataset_type="test")
    elif _dataset_type == "coco":
        # dataset = CocoDetection("/home/wenyen4desh/datasets/coco/test2017","/home/wenyen4desh/datasets/annotations/image_info_test2017.json")
        #dataset = CocoDetection("../../dataset/datasets/coco/val2017","../../dataset/datasets/coco/annotations/instances_val2017.json")
        # dataset = CocoDetection("/home/wenyen4desh/datasets/coco/train2017","/home/wenyen4desh/datasets/coco/annotations/instances_train2017.json")
        dataset = CocoDetection(args['Datasets']['coco']['val_image_path'],
                                args['Datasets']['coco']['val_anno_path'])
    elif _dataset_type == "ecp":
        dataset = EuroCity_Dataset(args['Datasets']['ecp']['val_image_path'],
                                   args['Datasets']['ecp']['val_anno_path'])
    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(
        dataset)
    if _net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif _net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif _net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif _net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif _net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(
            len(class_names),
            width_mult=args['flow_control']['mb2_width_mult'],
            is_test=True)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    #train_transform = MatchPrior(config.priors, config.center_variance,
    #                              config.size_variance, 0.5)

    #test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)
    import pdb
    pdb.set_trace()
    ############################## automatically validation ############################################
    timer.start("Load Model")
    net.load(args['flow_control']['trained_model'])
    net = net.to(DEVICE)
    print('It took {} seconds to load the model.'.format(
        timer.end("Load Model")))
    _nms_method = args['flow_control']['nms_method']
    if _net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
                                             nms_method=_nms_method,
                                             device=DEVICE)
    elif _net == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net,
                                                     nms_method=_nms_method,
                                                     device=DEVICE)
    elif _net == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    elif _net == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    elif _net == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(
            net, nms_method=_nms_method, device=DEVICE)
    else:
        logging.fatal(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        parser.print_help(sys.stderr)
        sys.exit(1)

    results = []
    # Predict Bounding Box
    for i in range(len(dataset)):
        print("process image {}", i)
        timer.start("Load Image")
        image = dataset.get_image(i)
        print("Load Image: {:4f} seconds.".format(timer.end("Load Image")))
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        print("Prediction: {:4f} seconds.".format(timer.end("Predict")))
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(
            torch.cat(
                [
                    indexes.reshape(-1, 1),
                    labels.reshape(-1, 1).float(),
                    probs.reshape(-1, 1),
                    boxes + 1.0  # matlab's indexes start from 1
                ],
                dim=1))
    results = torch.cat(results)

    # Write the result to file
    for class_index, class_name in enumerate(class_names):
        if class_index == 0: continue  # ignore background
        file_name = "det_test_{}.txt".format(class_name)
        prediction_path = os.path.join(args['flow_control']['eval_dir'],
                                       file_name)
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id, _ = dataset.get_annotation(int(sub[i, 0]))
                f.write(
                    str(image_id) + " " + " ".join([str(v)
                                                    for v in prob_box]) + "\n")
                # image_id = dataset.ids[int(sub[i, 0])]
                # print(str(image_id) + " " + " ".join([str(v) for v in prob_box]), file=f)

    aps = []
    prcs = []
    recalls = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        file_name = "det_test_{}.txt".format(class_name)
        prediction_path = os.path.join(args['flow_control']['eval_dir'],
                                       file_name)
        # mAP@0.5 evaluation method
        ap, precision, recall = compute_average_precision_per_class(
            args, true_case_stat[class_index], all_gb_boxes[class_index],
            all_difficult_cases[class_index], prediction_path,
            args['flow_control']['iou_threshold'],
            args['flow_control']['use_2007_metric'])

        # # COCO eval

        # ap, precision, recall = coco_ap_per_class(
        #     true_case_stat[class_index],
        #     all_gb_boxes[class_index],
        #     all_difficult_cases[class_index],
        #     prediction_path,
        #     args.use_2007_metric
        # )

        aps.append(ap)
        prcs.append(precision)
        recalls.append(recall)
        print("{}: {}".format(class_name, ap))

    print("\nAverage Precision Across All Classes:{}".format(
        sum(aps[0:5]) / len(aps[0:5])))
    print("\nAverage Precision :{}".format(sum(prcs[0:5]) / len(prcs[0:5])))
    print("\nAverage Recall :{}".format(sum(recalls[0:5]) / len(recalls[0:5])))
Example 27
def main(args):
    net_type = args.net_type
    img_folder = args.img_folder
    model_path = args.weights_path
    label_path = args.label_path
    class_names = [name.strip() for name in open(label_path).readlines()]
    out_path = args.out_path
    if not os.path.exists(out_path):
        os.mkdir(out_path)

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus else 'cpu'

    if net_type == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif net_type == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif net_type == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif net_type == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
    elif net_type == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    #elif net_type == 'mb3-ssd-lite':
    #    net = create_mobilenetv3_ssd_lite(len(class_names), is_test=True)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)

    net.load(model_path)

    if net_type == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net,
                                             candidate_size=20,
                                             device=device)
    elif net_type == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net,
                                                     candidate_size=20,
                                                     device=device)
    elif net_type == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net,
                                                          candidate_size=20,
                                                          device=device)
    elif net_type == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net,
                                                          candidate_size=20,
                                                          device=device)
    elif net_type == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net,
                                                         candidate_size=20,
                                                         device=device)
    #elif net_type == 'mb3-ssd-lite':
    #    predictor = create_mobilenetv3_ssd_lite_predictor(net, candidate_size=10)
    else:
        print(
            "The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite."
        )
        sys.exit(1)

    timer = Timer()

    img_names = glob.glob(img_folder + os.sep + '*.jpg')
    #result_csv=os.path.join(out_path,'rest_result.csv')
    if len(img_names) == 0:
        print('No images found in {}'.format(img_folder))
        exit(-1)

    for img_name in img_names:
        image = cv2.imread(img_name)

        timer.start()
        boxes, labels, probs = predictor.predict(image, 10, 0.3)
        interval = timer.end()

        print('Time: {:.2f}s, Detect Objects: {:d}.'.format(
            interval, labels.size(0)))

        label_text = []
        for i in range(boxes.size(0)):
            box = boxes[i, :]
            label = f"{class_names[labels[i]]}: {probs[i]:.2f}"
            label_text.append(label)
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),
                          (255, 255, 0), 4)
            cv2.putText(image, label, (box[0] + 20, box[1] + 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)

        if args.store_result:
            new_name = '{}/{}'.format(out_path, img_name.split('/')[-1])
            cv2.imwrite(new_name, image)
            if not label_text:
                result_label = 'empty'
            else:
                result_label = label_text[0]
            with open(os.path.join(out_path, 'rest_result.csv'),
                      'a+') as result_writer:
                result_writer.write(
                    img_name.split('/')[-1] + ',' + result_label + '\n')

        cv2.imshow('result', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
Example 28
            regression_loss, classification_loss = criterion(
                confidence, locations, labels, boxes)
            loss = regression_loss + classification_loss

        running_loss += loss.item()
        running_regression_loss += regression_loss.item()
        running_classification_loss += classification_loss.item()
    return running_loss / num, running_regression_loss / num, running_classification_loss / num


if __name__ == "__main__":
    import torchvision
    import torchvision.transforms as transforms
    #         raise ValueError("????  3333 ")
    config = mobilenetv1_ssd_config
    create_net = lambda num: create_mobilenetv2_ssd_lite(
        num, width_mult=1, onnx_compatible=True)
    # create_net = create_mobilenetv1_ssd
    cub200_root = "D:\BirdRecognition\CUB_200_2011"
    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.RandomCrop(300),
        transforms.RandomHorizontalFlip(),
        # transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_transform = TrainAugmentation(config.image_size, config.image_mean,
                                        config.image_std)

    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
Example 29
def main_acitve_mode(args):
    # Device setting
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available()
                          and args["flow_control"]["use_cuda"] else "cpu")
    if args["flow_control"]["use_cuda"] and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logging.info("Use Cuda.")

    timer = Timer()

    # Model setting
    if args["flow_control"]["net"] == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args["flow_control"]["net"] == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args['detection_model']["width_mult"])
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Dataset
    train_loader, val_loader, num_classes = dataset_loading(args, config)
    net = create_net(num_classes)
    net, criterion, optimizer, scheduler = optim_and_model_initial(
        args, net, timer, config, DEVICE)

    Query_iteration = 10
    train_loader.dataset.Active_mode()
    labeled, unlabeled = train_loader.dataset.dataset_information()
    query_item = len(unlabeled) // Query_iteration
    logging.info("Query iteration: {}, ".format(Query_iteration) +
                 "per query each item : {} ".format(query_item))

    target_transform = MatchPrior(config.priors, config.center_variance,
                                  config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean,
                                   config.image_std)
    if args['flow_control']['dataset_type'] == "ecp":
        active_dataset = ECP_table_comm(
            args["Datasets"]["ecp"]["train_image_path"],
            args["Datasets"]["ecp"]["train_anno_path"],
            transform=test_transform,
            target_transform=target_transform)
    elif args['flow_control']['dataset_type'] in ["virat", "VIRAT"]:
        active_dataset = ECP_table_comm(
            args["Datasets"]["virat_seq"]["train_image_path"],
            args["Datasets"]["virat_seq"]["train_anno_path"],
            transform=test_transform,
            target_transform=target_transform)
    else:
        raise NotImplementedError("Doen't implmented")
    active_dataset.Active_mode()

    for q_iter in range(Query_iteration):
        if q_iter != 0:
            scheduler.base_lrs[0] = scheduler.base_lrs[0] * 1.1
        active_dataset.Active_mode()
        labeled, unlabeled = active_dataset.dataset_information()
        logging.info(
            "Query iteration: {}/{}, ".format(q_iter, Query_iteration) +
            "per query each item : {} ".format(query_item))
        logging.info("Fetch data...")
        # imgs_list, bboxes_list, labels_list = train_loader.dataset.data_fetch()
        logging.info("Fetch data finish...")
        _setting_sampler = args["flow_control"]["sample_method"]
        if _setting_sampler == "random":
            net.train(False)
            query_index = np.random.choice(unlabeled,
                                           query_item,
                                           replace=False)
            train_loader.dataset.setting_be_selected_sample(query_index)
            active_dataset.setting_be_selected_sample(query_index)
        elif _setting_sampler == "seqencial":
            net.train(False)
            query_index = unlabeled[:query_item]
            train_loader.dataset.setting_be_selected_sample(query_index)
            active_dataset.setting_be_selected_sample(query_index)
        elif _setting_sampler == "uncertainty_modify":
            net.train(False)
            imgs_list = active_dataset.data_fetch()
            max_num = 50
            confidences = []
            for index in range(len(imgs_list) // max_num + 1):
                with torch.no_grad():
                    begin_pointer = index * max_num
                    end_pointer = min((index + 1) * max_num, len(imgs_list))
                    sub_batch = torch.stack(
                        imgs_list[begin_pointer:end_pointer]).cuda()
                    _confidence, locations = net(sub_batch)
                    confidences.append(_confidence.data.cpu())
            confidences = torch.cat(confidences, 0)
            probability = torch.softmax(confidences, 2)
            entropy = torch.sum(probability * torch.log(probability) * -1, 2)
            mean = torch.mean(entropy, 1)
            stddev = torch.std(entropy, 1)
            criteria = mean * stddev / (mean + stddev)
            query_index = torch.argsort(-1 * criteria)[:query_item].tolist()
            train_loader.dataset.setting_be_selected_sample(query_index)
            active_dataset.setting_be_selected_sample(query_index)
        elif _setting_sampler == "uncertainty":
            net.train(False)
            imgs_list = active_dataset.data_fetch()
            max_num = 50
            confidences = []
            for index in range(len(imgs_list) // max_num + 1):
                with torch.no_grad():
                    begin_pointer = index * max_num
                    end_pointer = min((index + 1) * max_num, len(imgs_list))
                    sub_batch = torch.stack(
                        imgs_list[begin_pointer:end_pointer]).cuda()
                    _confidence, locations = net(sub_batch)
                    confidences.append(_confidence.data.cpu())
            confidences = torch.cat(confidences, 0)
            probability = torch.softmax(confidences, 2)
            entropy = torch.sum(probability * torch.log(probability) * -1, 2)
            maximum = torch.max(entropy, 1)[0]
            criteria = maximum
            query_index = torch.argsort(-1 * criteria)[:query_item].tolist()
            train_loader.dataset.setting_be_selected_sample(query_index)
            active_dataset.setting_be_selected_sample(query_index)
        elif _setting_sampler == "diversity":
            pass
        elif _setting_sampler == "balance_feature":
            pass
        else:
            raise NotImplementedError(
                "_setting_sampler : {} doesn't implement".format(
                    _setting_sampler))

        train_loader.dataset.training_mode()
        # Training process
        for epoch in range(0, args['flow_control']['num_epochs']):

            scheduler.step()
            train(args,
                  train_loader,
                  net,
                  criterion,
                  optimizer,
                  device=DEVICE,
                  debug_steps=args['flow_control']['debug_steps'],
                  epoch=epoch)

            if epoch % args['flow_control'][
                    'validation_epochs'] == 0 or epoch == args['flow_control'][
                        'num_epochs'] - 1:
                val_loss, val_regression_loss, val_classification_loss = test(
                    args, val_loader, net, criterion, DEVICE)
                logging.info("Epoch: {}, ".format(epoch) +
                             "Validation Loss: {:.4f}, ".format(val_loss) +
                             "Validation Regression Loss {:.4f}, ".format(
                                 val_regression_loss) +
                             "Validation Classification Loss: {:.4f}".format(
                                 val_classification_loss))

                if args['flow_control']['dataset_type'] not in ("ecp-random", "ecp-centroid"):
                    _postfix_infos = [args['flow_control']['dataset_type'],
                                      args["flow_control"]["net"], START_TRAINING_TIME]
                else:
                    _postfix_infos = [args['flow_control']['dataset_type'],
                                      str(args['flow_control']['dataset_ratio']),
                                      args["flow_control"]["net"], START_TRAINING_TIME]
                postfix = "_".join(_postfix_infos)
                folder_name = os.path.join(
                    args["flow_control"]["checkpoint_folder"] + "_" + postfix,
                    "query_iter_{}".format(
                        str(float(q_iter + 1) / float(Query_iteration))))
                if not os.path.exists(folder_name):
                    os.makedirs(folder_name)
                model_path = os.path.join(
                    folder_name, "{}-Epoch-{}-Loss-{}.pth".format(
                        args['flow_control']['net'], epoch, val_loss))
                net.module.save(model_path)
                logging.info("Saved model {}".format(model_path))
Example 30
def main(args):
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available()
                          and args["flow_control"]["use_cuda"] else "cpu")
    if args["flow_control"]["use_cuda"] and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logging.info("Use Cuda.")

    timer = Timer()

    #logging.info(args)
    if args["flow_control"]["net"] == 'vgg16-ssd':
        create_net = create_vgg_ssd
        config = vgg_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd':
        create_net = create_mobilenetv1_ssd
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'mb1-ssd-lite':
        create_net = create_mobilenetv1_ssd_lite
        config = mobilenetv1_ssd_config
    elif args["flow_control"]["net"] == 'sq-ssd-lite':
        create_net = create_squeezenet_ssd_lite
        config = squeezenet_ssd_config
    elif args["flow_control"]["net"] == 'mb2-ssd-lite':
        create_net = lambda num: create_mobilenetv2_ssd_lite(
            num, width_mult=args['detection_model']["width_mult"])
        config = mobilenetv1_ssd_config
    else:
        logging.fatal("The net type is wrong.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    train_loader, val_loader, num_classes = dataset_loading(args, config)
    net = create_net(num_classes)
    net, criterion, optimizer, scheduler = optim_and_model_initial(
        args, net, timer, config, DEVICE)

    for epoch in range(0, args['flow_control']['num_epochs']):
        scheduler.step()
        train(args,
              train_loader,
              net,
              criterion,
              optimizer,
              device=DEVICE,
              debug_steps=args['flow_control']['debug_steps'],
              epoch=epoch)

        if epoch % args['flow_control'][
                'validation_epochs'] == 0 or epoch == args['flow_control'][
                    'num_epochs'] - 1:
            val_loss, val_regression_loss, val_classification_loss = test(
                args, val_loader, net, criterion, DEVICE)
            logging.info("Epoch: {}, ".format(epoch) +
                         "Validation Loss: {:.4f}, ".format(val_loss) +
                         "Validation Regression Loss {:.4f}, ".format(
                             val_regression_loss) +
                         "Validation Classification Loss: {:.4f}".format(
                             val_classification_loss))

            if args['flow_control']['dataset_type'] not in ("ecp-random", "ecp-centroid"):
                _postfix_infos = [args['flow_control']['dataset_type'],
                                  args["flow_control"]["net"], START_TRAINING_TIME]
            else:
                _postfix_infos = [args['flow_control']['dataset_type'],
                                  str(args['flow_control']['dataset_ratio']),
                                  args["flow_control"]["net"], START_TRAINING_TIME]
            postfix = "_".join(_postfix_infos)
            folder_name = args["flow_control"][
                "checkpoint_folder"] + "_" + postfix
            if not os.path.exists(folder_name):
                os.makedirs(folder_name)
            model_path = os.path.join("{}-Epoch-{}-Loss-{}.pth".format(
                args['flow_control']['net'], epoch, val_loss))
            net.module.save(model_path)
            logging.info("Saved model {}".format(model_path))