Example No. 1
    def __init__(self, gpu_id, model_def, model_weights, labelmap_file, data_shape=None):
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()

        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(model_def,      # defines the structure of the model
                             model_weights,  # contains the trained weights
                             caffe.TEST)     # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        if data_shape is None:
            # take (H, W) from the net's input blob; its shape is (N, C, H, W)
            data_shape = self.net.blobs['data'].data.shape[2:]
        else:
            if isinstance(data_shape, int):
                data_shape = (data_shape, data_shape)
        self.data_shape = data_shape
        self.transformer = caffe.io.Transformer({'data': (1, 3, data_shape[0], data_shape[1])})
        # self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([127.5, 127.5, 127.5])) # mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        #self.transformer.set_raw_scale('data', 255)
        self.transformer.set_input_scale('data', 0.007843)
        self.transformer.set_raw_scale('data', 255)
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))

        # load PASCAL VOC labels
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
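Below is a minimal usage sketch for the constructor above; the enclosing class name (SSDDetector), the file paths, and the 'detection_out' output blob name are assumptions for illustration, not taken from the example.

# Hypothetical usage sketch; class name, paths, and output blob name are assumed.
detector = SSDDetector(gpu_id=0,
                       model_def='deploy.prototxt',
                       model_weights='model.caffemodel',
                       labelmap_file='labelmap_voc.prototxt',
                       data_shape=300)
image = caffe.io.load_image('example.jpg')            # HWC, RGB, values in [0, 1]
detector.net.blobs['data'].reshape(1, 3, *detector.data_shape)
detector.net.blobs['data'].data[...] = detector.transformer.preprocess('data', image)
detections = detector.net.forward()['detection_out']  # assumes an SSD-style deploy net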
Example No. 2
    def __init__(self, gpu_id, model_def, model_weights, image_resize,
                 labelmap_file):
        caffe.set_device(gpu_id)  # GPU id
        caffe.set_mode_gpu()  # GPU mode

        self.image_resize = image_resize
        # define the network
        self.net = caffe.Net(
            model_def,  # network definition
            model_weights,  # network weights
            caffe.TEST)  # test mode (no dropout)

        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))  # HWC -> CHW
        self.transformer.set_mean('data',
                                  np.array([104, 117,
                                            123]))  # per-channel VOC mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_raw_scale('data', 255)  # rescale [0,1] -> [0,255]
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR

        # load PASCAL VOC labels
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
Example No. 3
    def load_model(self):
        caffe.set_device(5)
        caffe.set_mode_gpu()
        # load labelmap
        file = open(self.labelmap_file, 'r')
        labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), labelmap)

        # load model

        net = caffe.Net(self.model_def, self.model_weights, caffe.TEST)

        # image preprocessing
        if '320' in self.model_def:
            img_resize = 320
        else:
            img_resize = 512
        net.blobs['data'].reshape(1, 3, img_resize, img_resize)
        transformer = caffe.io.Transformer(
            {'data': net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2, 0, 1))
        transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
        transformer.set_raw_scale(
            'data', 255
        )  # the reference model operates on images in [0,255] range instead of [0,1]
        transformer.set_channel_swap('data', (
            2, 1,
            0))  # the reference model has channels in BGR order instead of RGB

        # im_names = os.listdir('examples/images')

        return net, transformer, labelmap
Example No. 4
def imgfeature(img):
    # image preprocessing setup
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})  # set the input blob shape, e.g. (1, 3, 28, 28)
    transformer.set_transpose('data', (2, 0, 1))  # reorder dimensions from (28, 28, 3) to (3, 28, 28)
    transformer.set_mean('data', np.array([63.7, 79.3, 136.1]))  # subtract the mean (unnecessary if the model was trained without mean subtraction)
    # transformer.set_raw_scale('data', 255)  # rescale to [0, 255]
    # transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR

    # im = caffe.io.load_image(img)  # load the image
    net.blobs['data'].data[...] = transformer.preprocess('data', img)  # apply the preprocessing above and load the image into the blob

    # run inference
    out = net.forward()

    file = open(labels_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)
    # get the final network output
    loc = net.blobs['detection_out'].data[0][0]

    # labels = np.loadtxt(labels_filename, str, delimiter='\t')  # read the class-name file
    #
    # prob = net.blobs['softmax'].data[0].flatten()  # per-class probabilities from the last (Softmax) layer
    # print (prob)
    # order = prob.argsort()[1]  # sort the probabilities and take the index of the largest
    # print( 'the class is:', labels[order] ) # map that index to its class name and print it
    return loc, labelmap
Example No. 5
def draw(image, predictions, labelmap_file):
    if labelmap_file:
        labelmap = caffe_pb2.LabelMap()
        file = open(labelmap_file, 'r')
        text_format.Merge(str(file.read()), labelmap)
        file.close()

    for prediction in predictions:
        x, y, w, h = prediction[0]
        score = prediction[1]
        cls = prediction[2]

        x1 = max(0, np.floor(x + 0.5).astype(int))
        y1 = max(0, np.floor(y + 0.5).astype(int))

        x2 = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
        y2 = min(image.shape[0], np.floor(y + h + 0.5).astype(int))

        cv2.rectangle(image, (x1,y1), (x2,y2), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(get_label_name(labelmap, cls), score),
                    (x1, y1-6),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0,0,255), 1,
                    cv2.LINE_AA)

        print('class: {0}, score: {1:.2f}'.format(cls, score))
        print('box coordinate x, y, w, h: {0}'.format(prediction[0]))
    return image
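A hedged example of calling draw(); the image path, the predictions list (in ((x, y, w, h), score, class_id) form, as unpacked above), and the labelmap path are placeholders.

# Hypothetical call; paths and prediction values are made up for illustration.
img = cv2.imread('street.jpg')
predictions = [((48.0, 32.0, 120.0, 200.0), 0.91, 15)]   # ((x, y, w, h), score, class_id)
annotated = draw(img, predictions, 'labelmap_voc.prototxt')
cv2.imwrite('street_out.jpg', annotated)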
Example No. 6
def get_class_num():
    lm_f = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(lm_f.read()), labelmap)
    class_num = len(labelmap.item)
    lm_f.close()
    return class_num
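Since get_class_num() reads the module-level labelmap_file, a minimal usage sketch (the path is a placeholder) looks like this:

# Hypothetical usage; labelmap_file is the module-level path the function reads.
labelmap_file = 'labelmap_voc.prototxt'
print('number of classes: %d' % get_class_num())  # 21 for the standard VOC labelmap (20 classes + background)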
Example No. 7
    def __init__(self):
        caffe.set_device(0)
        caffe.set_mode_gpu()
        labelmap_file = open('data/coco/labelmap_coco_minsun.prototxt', 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(labelmap_file.read()), self.labelmap)

        # * Load the net in the test phase for inference, and configure input preprocessing.
        model_def = 'models/VGGNet/coco/SSD_300x300/deploy.prototxt'
        model_weights = 'models/VGGNet/coco/SSD_300x300/VGG_coco_SSD_300x300_iter_240000.caffemodel'

        self.net = caffe.Net(
            model_def,  # defines the structure of the model
            model_weights,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)

        # input pre-processing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117,
                                                    123]))  # mean pixel
        self.transformer.set_raw_scale(
            'data', 255
        )  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (
            2, 1,
            0))  # the reference model has channels in BGR order instead of RGB

        # ### 2. SSD detection
        # Load an image. Set net to batch size of 1
        image_resize = 300
        self.net.blobs['data'].reshape(1, 3, image_resize, image_resize)
Example No. 8
    def __init__(self, model_def, model_weights, image_resize, labelmap_file,
                 caffe_root, data_dir):
        #caffe.set_device(gpu_id)
        #caffe_root = '/home/ubuntu/caffe_ssd'
        os.chdir(caffe_root)  # change the working directory to caffe_root
        sys.path.insert(0, os.path.join(caffe_root, 'python'))
        import caffe
        from caffe.proto import caffe_pb2
        caffe.set_mode_cpu()

        self.image_resize = image_resize
        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(
            model_def,  # defines the structure of the model
            model_weights,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117,
                                                    123]))  # mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_raw_scale('data', 255)
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))

        # load PASCAL VOC labels
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
Example No. 9
def load_model():
    # load PASCAL VOC labels
    labelmap_file = 'data/VOC0712/labelmap_voc.prototxt'
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)

    model_def = 'models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt'
    model_weights = 'models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_120000.caffemodel'

    net = caffe.Net(
        model_def,  # defines the structure of the model
        model_weights,  # contains the trained weights
        caffe.TEST)  # use test mode (e.g., don't perform dropout)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
    transformer.set_raw_scale(
        'data', 255
    )  # the reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_channel_swap(
        'data',
        (2, 1,
         0))  # the reference model has channels in BGR order instead of RGB

    # set net to batch size of 1
    image_resize = 300
    net.blobs['data'].reshape(1, 3, image_resize, image_resize)
    return net, transformer, labelmap
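A sketch of a single detection pass with the returned net, transformer, and labelmap; the image path and confidence threshold are placeholders, and the row layout assumes the standard SSD 'detection_out' format.

# Hypothetical usage of load_model(); image path and threshold are placeholders.
net, transformer, labelmap = load_model()
image = caffe.io.load_image('examples/images/fish-bike.jpg')
net.blobs['data'].data[...] = transformer.preprocess('data', image)
detections = net.forward()['detection_out']
# standard SSD rows: [image_id, label, confidence, xmin, ymin, xmax, ymax] (normalized)
for det in detections[0, 0]:
    if det[2] >= 0.5:
        print('%d %.2f %s' % (int(det[1]), det[2], det[3:7]))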
Example No. 10
    def __init__(self, gpu_id, model_def, model_weights, image_resize,
                 labelmap_file):
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()

        self.image_resize = image_resize
        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(
            model_def,  # defines the structure of the model
            model_weights,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_input_scale('data', 0.017)
        self.transformer.set_mean('data', np.array([103.94, 116.78,
                                                    123.68]))  # mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_raw_scale('data', 255)
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))

        # load labels
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
Example No. 11
    def initalNetModel(self):
        caffe.set_mode_gpu()
        caffe.set_device(self.gpuId)
        self.net = caffe.Net(str(self.deployFileName),
                             str(self.modelFileName), caffe.TEST)
        with open(str(self.labelFileName), 'r') as f:
            self.label_list = caffe_pb2.LabelMap()
            text_format.Merge(str(f.read()), self.label_list)
Example No. 12
def load_labels(label_map_file_path):
    """ Load labels from labelmap file

    :param label_map_file_path: Labelmap file
    """
    label_map = caffe_pb2.LabelMap()
    label_map_serialized = open(label_map_file_path, 'rt').read()
    text_format.Merge(str(label_map_serialized), label_map)
    labels = [str(item.display_name) for item in label_map.item]
    return labels
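A hedged usage sketch; the path is a placeholder and the printed values assume the standard VOC labelmap.

# Hypothetical usage; the labelmap path is a placeholder.
labels = load_labels('data/VOC0712/labelmap_voc.prototxt')
print('%d labels, first three: %s' % (len(labels), labels[:3]))  # e.g. 21, ['background', 'aeroplane', 'bicycle']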
Example No. 13
def labelmap(labelmap_file, label_info):
    labelmap = caffe_pb2.LabelMap()
    for i in range(len(label_info)):
        labelmapitem = caffe_pb2.LabelMapItem()
        labelmapitem.name = label_info[i]['name']
        labelmapitem.label = label_info[i]['label']
        labelmapitem.display_name = label_info[i]['display_name']
        labelmap.item.add().MergeFrom(labelmapitem)
    with open(labelmap_file, 'w') as f:
        f.write(str(labelmap))
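A sketch of how the writer above could be driven; the two-entry class list and the output path are made up for illustration.

# Hypothetical usage; label_info entries mirror the keys the function expects.
label_info = [
    {'name': 'none_of_the_above', 'label': 0, 'display_name': 'background'},
    {'name': 'person', 'label': 1, 'display_name': 'person'},
]
labelmap('labelmap_custom.prototxt', label_info)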
Example No. 14
def labelmap(example, result):
    labelmap = caffe_pb2.LabelMap()
    # iterate through the sample file 
    for i in range(len(example)):
        item = caffe_pb2.LabelMapItem()
        item.name = example[i]['name']
        item.label = example[i]['label']
        labelmap.item.add().MergeFrom(item)
    # writes the mapping result to the result file
    with open(result, 'w') as file:
        file.write(str(labelmap))
Example No. 15
    def __init__(self, model_def, model_weights, image_resize, labelmap_file):
        self.image_resize = image_resize
        if not os.path.exists(model_weights):
            print("MobileNetSSD_deploy.caffemodel does not exist,")
            print("use merge_bn.py to generate it.")
            exit()
        self.net = caffe.Net(model_def, model_weights, caffe.TEST)
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
        self.colors = sns.color_palette("hls", len(self.labelmap.item))
Example No. 16
def declare_network(model_def, model_weights, labelmap_file, args):
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', np.array(args["img_mean"]))
    transformer.set_input_scale('data', args["img_input_scale"])

    return net, transformer, labelmap
Example No. 17
def main():
    global THREAD_RUNNING
    args = parse_args()
    # print('Called with args:')
    # print(args)

    # Initialize Caffe
    if args.cpu_mode:
        print('Running Caffe in CPU mode')
        caffe.set_mode_cpu()
    else:
        print('Running Caffe in GPU mode')
        caffe.set_device(0)
        caffe.set_mode_gpu()
    net = caffe.Net(DEFAULT_PROTOTXT, DEFAULT_MODEL, caffe.TEST)

    # Build the class (index/name) dictionary from labelmap file
    lm_handle = open(DEFAULT_LABELMAP, 'r')
    lm_map = caffe_pb2.LabelMap()
    text_format.Merge(str(lm_handle.read()), lm_map)
    cls_dict = {x.label: x.display_name for x in lm_map.item}

    # Open camera
    if args.use_file:
        cap = cv2.VideoCapture(args.filename)
        # ignore image width/height settings here

    elif args.use_usb:
        cap = open_cam_usb(args.video_dev, args.image_width, args.image_height)
    elif args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri, args.image_width, args.image_height,
                            args.rtsp_latency)
    else:  # By default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width, args.image_height)

    if not cap.isOpened():
        sys.exit('Failed to open camera!')

    # Start the sub-thread, which is responsible for grabbing images
    THREAD_RUNNING = True
    th = threading.Thread(target=grab_img, args=(cap, ))
    th.start()

    # Grab image and do object detection (until stopped by user)
    open_window(args.image_width, args.image_height)
    read_cam_and_detect(net, cls_dict, args.conf_th)

    # Terminate the sub-thread
    THREAD_RUNNING = False
    th.join()

    cap.release()
    cv2.destroyAllWindows()
Example No. 18
    def initCaffeModule(self):
        os.chdir(self.caffe_root)
        sys.path.insert(0, 'python')
        caffe.set_device(0)
        caffe.set_mode_gpu()
        logging.basicConfig(
            level=logging.DEBUG,
            format=
            '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
            datefmt='%a, %d %b %Y %H:%M:%S',
            filename='allCaffe.log',
            filemode='w')
        #        labelmap_file = '../caffemodule/supervisormodule/labelmap_voc.prototxt'
        file = open(self.labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
        #        model_def = '../caffemodule/supervisormodule/deploy.prototxt'
        #        model_weights = '../caffemodule/supervisormodule/VGG_VOC0712_SSD_300x300_iter_60000.caffemodel'
        self.net = caffe.Net(
            self.model_def,  # defines the structure of the model
            self.model_weights,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117,
                                                    123]))  # mean pixel
        self.transformer.set_raw_scale(
            'data', 255
        )  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (
            2, 1,
            0))  # the reference model has channels in BGR order instead of RGB
        # set net to batch size of 1
        #        image_resize = 300
        self.net.blobs['data'].reshape(1, 3, int(self.image_resize),
                                       int(self.image_resize))
        self.functions = TaskCollections(self.client_conf)

        credentials = pika.PlainCredentials('ye', '123456')
        parameters = pika.ConnectionParameters('192.168.0.23', 5672, '/',
                                               credentials)
        # a reconnect mechanism is needed here
        counectCounts = 0
        while (counectCounts < 3):
            try:
                connection = pika.BlockingConnection(parameters)
                counectCounts = 3
            except Exception, e:
                time.sleep(1)
                counectCounts += 1
                print "reconnecting..."
    def __init__(self):

        labelmap_file = '/home/davidbutra/Escritorio/caffe/data/VOC0712/labelmap_voc.prototxt'
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)

        # Net parameters necessary
        model_def = '/home/davidbutra/Escritorio/caffe/models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt'
        model_weights = '/home/davidbutra/Escritorio/caffe/models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_120000.caffemodel'

        self.net = caffe.Net(
            model_def,  # defines the structure of the model
            model_weights,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)

        status = 0

        ic = None
        # Initializing the Ice run-time.
        ic = Ice.initialize(sys.argv)
        properties = ic.getProperties()

        self.lock = threading.Lock()

        try:
            obj = ic.propertyToProxy("Numberclassifier.Camera.Proxy")
            print obj
            self.camera = jderobot.CameraPrx.checkedCast(obj)
            if self.camera:
                self.image = self.camera.getImageData("RGB8")
                self.height = self.image.description.height
                self.width = self.image.description.width
            else:
                print 'Interface camera not connected'

        except:
            traceback.print_exc()
            exit()
            status = 1

        if ic:
            # Clean up
            try:
                print("destroy")
                print ic
                ic.destroy()
                print ic
            except:
                traceback.print_exc()
                status = 1
                print("except")
Example No. 20
    def _get_label(self):
        if hasattr(self, 'labelmap') and self.labelmap:
            return self.labelmap
        fp = open(self.label_net_path, 'r')
        labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(fp.read()), labelmap)
        num_labels = len(labelmap.item)
        label_map = {}
        for idx in xrange(0, num_labels):
            label_map[labelmap.item[idx].label] = labelmap.item[idx].display_name
        # label_map = {0: u'background', 1: u'hat', 2: u'glasses', 3: u'top', 4: u'shorts', 5: u'skirt', 6: u'trousers', 7: u'bag', 8: u'shoes', 9: u'dress', 10: u'outwear', 11: u'one_piece'}
        return label_map
Example No. 21
def labelMapSet(dataSets):
    # load datasets labels
    if dataSets == 'anngic':
        labelmap_file = '/caffe/train_data/anngic/labelmap_anngic.prototxt'
    elif dataSets == 'bdd100k':
        labelmap_file = '/caffe/train_data/bdd100k/labelmap_bdd100k.prototxt'
    elif dataSets == 'voc':
        labelmap_file = '/caffe/caffe-ssd/data/VOC0712/labelmap_voc.prototxt'

    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)
    return labelmap
Example No. 22
def load_label_prototxt(prototxt_file):
    from google.protobuf import text_format
    from caffe.proto import caffe_pb2 as cpb2

    with open(prototxt_file) as f:
        labelmap_aux = cpb2.LabelMap()
        text_format.Merge(str(f.read()), labelmap_aux)
        num_labels = len(labelmap_aux.item)
        labelmap = {}
        for item in labelmap_aux.item:
            index = item.label
            label = item.display_name
            labelmap[index] = label
    return labelmap
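A minimal usage sketch; the path is a placeholder and the lookup assumes the standard VOC labelmap.

# Hypothetical usage; path and label id are placeholders.
id_to_name = load_label_prototxt('labelmap_voc.prototxt')
print(id_to_name.get(15))  # 'person' in the standard VOC labelmap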
Example No. 23
    def __init__(self, gpu_id, model_def, model_weights, image_resize,
                 labelmap_file):

        self.image_resize = image_resize
        self.net = caffe.Net(model_def, model_weights, caffe.TEST)
        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117, 123]))
        self.transformer.set_raw_scale('data', 255)
        self.transformer.set_channel_swap('data', (2, 1, 0))

        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
Example No. 24
    def __init__(self, gpu_id, model_def, model_weights, image_resize, labelmap_file):
        #caffe.set_device(gpu_id)
        #caffe.set_mode_gpu()
        caffe.set_mode_cpu()

        self.image_resize = image_resize
        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(model_def,      # defines the structure of the model
                             model_weights,  # contains the trained weights
                             caffe.TEST)     # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]

        # load PASCAL VOC labels
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)
Example No. 25
def get_classnames_from_labelmap(label_map_file):
    """Get classnames from labelmap"""
    check_if_exist('Label map file', label_map_file)

    labelmap = cpb2.LabelMap()
    with open(label_map_file, 'r') as f:
        text_format.Merge(str(f.read()), labelmap)

    classnames = []
    for item in labelmap.item:
        classnames.append(str(item.display_name))

    if len(classnames) == 0:
        print('No classnames found in labelmap: {:s}.'.format(label_map_file))
        sys.exit()

    return classnames
Example No. 26
    def __init__(self, config, gpu_id=0, tracked=False):

        # gpu preparation
        if gpu_id >= 0:
            caffe.set_device(gpu_id)
            caffe.set_mode_gpu()

        model_config = config['MODEL']

        self.model_def = os.path.join(caffe_root, model_config['DEF'])
        self.model_weights = os.path.join(caffe_root, model_config['WEIGHTS'])
        self.labelmap = os.path.join(caffe_root, model_config['LABELMAP'])

        self.is_tracked_results_ready = False
        self.tracked = tracked
        self.mot_tracker = None

        self.mean_pixel = list([104, 117, 223])

        self.num_classes = 32
        self.colors = plt.cm.hsv(np.linspace(0, 1, self.num_classes)).tolist()

        if self.tracked:
            self.mot_tracker = Sort()

        # load labelmap
        with open(self.labelmap, 'r') as f:
            labelmap = caffe_pb2.LabelMap()
            text_format.Merge(str(f.read()), labelmap)

        # load model
        self.net = caffe.Net(self.model_def, self.model_weights, caffe.TEST)

        # image preprocessing
        if '320' in self.model_def:
            img_resize = 320
        else:
            img_resize = 512

        self.net.blobs['data'].reshape(1, 3, img_resize, img_resize)

        self.transformer = caffe.io.Transformer(
            {'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data',
                                  np.array(self.mean_pixel))  # mean pixel
Example No. 27
    def init_model(self,caffe_root='/mnt/nndisk/tim/SmartVision/caffe-ssd'):
        web_path = os.getcwd()
        self.caffe_root =  os.path.join(web_path, caffe_root)
        # Make sure that caffe is on the python path:
        os.chdir(self.caffe_root)
        sys.path.insert(0, self.caffe_root + '/python')                                                 #####################
        import caffe
        self.caffe = caffe
        if svs.compute_mode == "GPU":
            self.caffe.set_device(0)
            self.caffe.set_mode_gpu()
        else:
            self.caffe.set_mode_cpu()

        ####################################################
        from caffe.proto import caffe_pb2
        self.caffe_pb2 = caffe_pb2
        # load PASCAL VOC labels
        labelmap_file = 'data/coco/labelmap_coco.prototxt'
        file = open(labelmap_file, 'r')
        self.labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self.labelmap)


        self.capability = [C.OCCUPY_FOOTWAY_BY_CATERING]

        ####################################################

        model_def = 'models/VGGNet/coco/SSD_512x512/deploy.prototxt'                                  #####################
        model_weights = 'models/VGGNet/coco/SSD_512x512/VGG_coco_SSD_512x512_iter_360000.caffemodel'  #####################

        self.net = caffe.Net(model_def,      # defines the structure of the model
                        model_weights,  # contains the trained weights
                        caffe.TEST)     # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = self.caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104,117,123])) # mean pixel
        self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB

        self.image_resize = 512   

        os.chdir(web_path)
        print("model initialization done!")
Example No. 28
def get_labelname(labelmap_file, labels):
    #pdb.set_trace()
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)
    num_labels = len(labelmap.item)
    labelnames = []
    if type(labels) is not list:
        labels = [labels]
    for label in labels:
        found = False
        for i in xrange(0, num_labels):
            if label == labelmap.item[i].label:
                found = True
                labelnames.append(labelmap.item[i].display_name)
                break
        assert found == True
    return labelnames
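A hedged usage sketch for get_labelname(); the path and label ids are placeholders and the output assumes the standard VOC labelmap.

# Hypothetical usage; path and label ids are placeholders.
names = get_labelname('labelmap_voc.prototxt', [1, 15])
print(names)  # e.g. ['aeroplane', 'person'] with the standard VOC labelmap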
Example No. 29
def show_MCDropout_result(transformed_image, resP6_mcdrop, resP5_mcdrop,
                          resP4_mcdrop, resP3_mcdrop):
    # load MC-dropout model
    model_def_bayes = os.path.abspath(
        CUR_PATH + 'bayesian_dropout_model/deploy_multiple_input.prototxt')
    model_weights_bayes = os.path.abspath(
        CUR_PATH +
        '../models/ResNet/coco/refinedet_resnet101_512x512/coco_refinedet_resnet101_512x512_iter_52000.caffemodel'
    )
    net_bayes = caffe.Net(model_def_bayes, model_weights_bayes, caffe.TEST)

    # image preprocessing
    if '320' in model_def_bayes:
        img_resize = 320
    else:
        img_resize = 512
    net_bayes.blobs['data'].reshape(1, 3, img_resize, img_resize)

    net_bayes.blobs['data'].data[...] = transformed_image
    net_bayes.blobs['dataP6'].data[...] = resP6_mcdrop
    net_bayes.blobs['dataP5'].data[...] = resP5_mcdrop
    net_bayes.blobs['dataP4'].data[...] = resP4_mcdrop
    net_bayes.blobs['dataP3'].data[...] = resP3_mcdrop

    res_bayes = net_bayes.forward()

    # load labelmap
    labelmap_file = 'data/coco/labelmap_coco.prototxt'
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)

    detections = res_bayes['detection_out']
    det_label = detections[0, 0, :, 1]
    det_conf = detections[0, 0, :, 2]
    det_xmin = detections[0, 0, :, 3] * image.shape[1]
    det_ymin = detections[0, 0, :, 4] * image.shape[0]
    det_xmax = detections[0, 0, :, 5] * image.shape[1]
    det_ymax = detections[0, 0, :, 6] * image.shape[0]
    result = np.column_stack(
        [det_xmin, det_ymin, det_xmax, det_ymax, det_conf, det_label])

    ShowResults(image, None, result, labelmap, save_fig=False, show_fig=True)
    return
Example No. 30
def init_models():
    global CLS_LABEL_LIST
    global THRESHOLD
    global URLFLAG
    global IMAGE_SIZE
    global ONE_BATCH_SIZE
    ONE_BATCH_SIZE = 16
    URLFLAG = True if args.urlfileName else False
    THRESHOLD = args.threshold
    IMAGE_SIZE = 320
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    deployName = os.path.join(args.modelBasePath, args.deployFileName)
    modelName = os.path.join(args.modelBasePath, args.modelName)
    labelName = os.path.join(args.modelBasePath, args.labelFileName)
    net_cls = caffe.Net(deployName, modelName, caffe.TEST)
    with open(labelName, 'r') as f:
        CLS_LABEL_LIST = caffe_pb2.LabelMap()
        text_format.Merge(str(f.read()), CLS_LABEL_LIST)
    return net_cls