Example #1
def detect(cfgfile, weightfile, imgfile):
    if cfgfile.find('.prototxt') >= 0:
        from caffenet import CaffeNet
        m = CaffeNet(cfgfile)
    else:
        m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'
    
    use_cuda = 1
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    
    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish-start)))

    class_names = load_class_names(namesfile)
    plot_boxes(img, boxes, 'predictions.jpg', class_names)
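
In the upstream scripts this function is usually wired to a small command-line entry point; a minimal sketch is given below (the argument order and usage string are assumptions, not the project's exact code).

# Hypothetical command-line wrapper for detect(); argument order is assumed.
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 4:
        detect(sys.argv[1], sys.argv[2], sys.argv[3])
    else:
        print('Usage: detect.py cfgfile weightfile imgfile')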
Example #2
def detect(cfgfile, weightfile, imgfile):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'
    
    use_cuda = 1
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    
    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish-start)))

    class_names = load_class_names(namesfile)
    plot_boxes(img, boxes, 'predictions.jpg', class_names)
Example #3
def main(args):
    model = Darknet(args.config)

    model.print_network()
    model.load_weights(args.weights)
    print(f'Loading weights from {args.weights}... Done!')

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model.cuda()

    size = (model.width, model.height)

    inputs = []
    if args.zip_imgs:
        inputs = _traverse_zip(args.zip_imgs)
    else:
        inputs = _traverse_image_list(args.imgfiles)

    model.eval()
    start = time.time()
    for input_ in inputs:
        img = input_['image'].convert('RGB').resize(size)
        imgfile = input_['filename']
        # used to be higher confidence threshold and nms threshold
        # boxes = detect(model, img, 0.5, 0.4, use_cuda)
        boxes = detect(model=model, img=img, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh, use_cuda=use_cuda)
        class_names = load_class_names(args.names)
        savename = f'predicted_{os.path.basename(imgfile)}'
        plot_boxes(img, boxes, savename, class_names)

    finish = time.time()
    print(f'{args.imgfiles}: Predicted in {finish - start} seconds.')
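
Example #3 depends on two helpers that are not shown, _traverse_zip and _traverse_image_list. A minimal sketch of _traverse_image_list is given below, inferred only from how the loop consumes its items (each one exposes an 'image' and a 'filename' key); it is an assumption, not the project's actual implementation.

# Hypothetical helper: yield one dict per input file with the opened PIL image
# and the original filename, matching how the main loop reads its items.
def _traverse_image_list(imgfiles):
    from PIL import Image
    for imgfile in imgfiles:
        yield {'image': Image.open(imgfile), 'filename': imgfile}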
Example #4
File: detect.py Project: yoxf/pytorch_code
def detect(cfgfile, weightfile, imgfile):
    m = Darknet(cfgfile)
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    # Select the class-names file from the model's class count
    # (namesfile is used below but was otherwise undefined).
    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    
    start = time.time()
    boxes = utils.do_detect(m, sized, 0.5, 0.4)
    finish = time.time()
    print('%s: Predicted in %f seconds.' % (imgfile, (finish-start)))

    class_names = utils.load_class_names(namesfile)
    utils.plot_boxes(img, boxes, 'predictions.jpg', class_names)
Example #5
def image_detect(i_path):
    """Starting detect object"""
    # Load the image

    print(i_path)

    img = cv2.imread(i_path)
    # Convert the image to RGB
    original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # We resize the image to the input width and height of the first layer of the network.
    resized_image = cv2.resize(original_image, (m.width, m.height))

    # Set the NMS threshold
    nms_thresh = 0.6
    # Set the IOU threshold
    iou_thresh = 0.4

    # Detect objects in the image
    boxes = utils.detect_objects(m, resized_image, iou_thresh, nms_thresh)

    img = original_image.copy()
    width = img.shape[1]
    height = img.shape[0]

    for i in range(len(boxes)):
        box = boxes[i]
        # Get the (x, y) pixel coordinates of the top-left and bottom-right corners
        # of the bounding box, relative to the size of the image.
        x1 = int(np.around((box[0] - box[2] / 2.0) * width))
        y1 = int(np.around((box[1] - box[3] / 2.0) * height))
        x2 = int(np.around((box[0] + box[2] / 2.0) * width))
        y2 = int(np.around((box[1] + box[3] / 2.0) * height))

        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%i. %s: %f' % (i + 1, class_names[cls_id], cls_conf))
            print("left top right bottom :", x1, y1, x2, y2)

    # Print the objects found and the confidence level
    utils.print_objects(boxes, class_names)
    #Plot the image with bounding boxes and corresponding object class labels
    # Set the default figure size
    plt.rcParams['figure.figsize'] = [24.0, 14.0]
    utils.plot_boxes(original_image, boxes, class_names, plot_labels=True)
Example #6
def eval_list(cfgfile, namefile, weightfile, testfile):
    m = Darknet(cfgfile)
    m.load_weights(weightfile)
    use_cuda = 1
    if use_cuda:
        m.cuda()

    class_names = load_class_names(namefile)

    file_list = []
    with open(testfile, "r") as fin:
        for f in fin:
            file_list.append(f.strip())

    for imgfile in file_list:
        img = Image.open(imgfile).convert('RGB')
        sized = img.resize((m.width, m.height))
        filename = os.path.basename(imgfile)
        filename = os.path.splitext(filename)[0]
        #print(filename, img.width, img.height, sized_width, sized_height)

        if m.width * m.height > 1024 * 2560:
            print('omit %s' % filename)
            continue

        # conf_thresh and nms_thresh are not set in this snippet; they must be defined elsewhere.
        if False:
            boxes = do_detect(m, sized, conf_thresh, nms_thresh, use_cuda)
        else:
            m.eval()
            sized = image2torch(sized).cuda()
            #output = m(Variable(sized, volatile=True)).data
            output = m(sized)
            #boxes = get_region_boxes(output, conf_thresh, m.num_classes, m.anchors, m.num_anchors, 0, 1)[0]
            boxes = get_all_boxes(output, conf_thresh, m.num_classes)[0]
            boxes = np.array(nms(boxes, nms_thresh))

        if False:
            savename = get_det_image_name(imgfile)
            print('img: save to %s' % savename)
            plot_boxes(img, boxes, savename, class_names)

        if False:
            savename = get_det_result_name(imgfile)
            print('det: save to %s' % savename)
            save_boxes(imgfile, img, boxes, savename)
Example #7
def count_person(image, frame_no, maxpeople):
    # Set the default figure size (width x height)
    plt.rcParams['figure.figsize'] = [24.0, 14.0]
    
    og_img = image
    # Load the image
#    img = cv2.imread(og_img)
    
    # Convert the image to RGB
    original_image = cv2.cvtColor(og_img, cv2.COLOR_BGR2RGB)
    
    # We resize the image to the input width and height of the first layer of the network.    
    resized_image = cv2.resize(original_image, (m.width, m.height))
    
    # Display the images
    #plt.subplot(121)
    #plt.title('Original Image')
    #plt.imshow(original_image)
    #plt.subplot(122)
    #plt.title('Resized Image')
    #plt.imshow(resized_image)
    #plt.show()
    
    # Set the NMS threshold
    nms_thresh = 0.5
    
    # Set the IOU threshold
    iou_thresh = 0.5
    
#    print("till here")
    # Detect objects in the image
    boxes = detect_objects(m, resized_image, iou_thresh, nms_thresh)
#    print("detect_object")
    # Print the objects found and the confidence level
    print_objects(maxpeople, boxes, class_names)
    
    #Plot the image with bounding boxes and corresponding object class labels
    plot_boxes(frame_no, original_image, boxes, class_names)
Example #8
#https://www.pyimagesearch.com/2018/11/12/yolo-object-detection-with-opencv/
def eval_widerface(cfgfile, weightfile, valdir, savedir):
    m = Darknet(cfgfile)
    m.load_weights(weightfile)
    use_cuda = 1
    if use_cuda:
        m.cuda()

    scale_size = 16
    class_names = load_class_names('data/names')
    for parent, dirnames, filenames in os.walk(valdir):
        if parent != valdir:
            targetdir = os.path.join(savedir, os.path.basename(parent))
            if not os.path.isdir(targetdir):
                os.mkdir(targetdir)
            for filename in filenames:
                imgfile = os.path.join(parent, filename)
                img = Image.open(imgfile).convert('RGB')
                sized_width = int(round(img.width * 1.0 / scale_size) * 16)
                sized_height = int(round(img.height * 1.0 / scale_size) * 16)
                sized = img.resize((sized_width, sized_height))
                print(filename, img.width, img.height, sized_width,
                      sized_height)
                if sized_width * sized_height > 1024 * 2560:
                    print('omit %s' % filename)
                    continue
                boxes = do_detect(m, sized, 0.05, 0.4, use_cuda)
                if True:
                    savename = os.path.join(targetdir, filename)
                    print('save to %s' % savename)
                    plot_boxes(img, boxes, savename, class_names)
                if True:
                    savename = os.path.join(
                        targetdir,
                        os.path.splitext(filename)[0] + ".txt")
                    print('save to %s' % savename)
                    save_boxes(img, boxes, savename)
Example #9
 def show_shift_samle(self, id, shift_plus=0):
     img, _ = self[id]
     _, ann = self[id - shift_plus]
     W, H = img.size
     boxes = []
     start = 0
     while ann[start + 1] > 0:
         box = [0 for i in range(7)]
         box[0] = ann[start + 1]
         box[1] = ann[start + 2]
         box[2] = ann[start + 3]
         box[3] = ann[start + 4]
         box[4] = 1
         box[5] = 1
         box[6] = ann[start]
         boxes.append(box)
         start += 5
     img = plot_boxes(img,
                      boxes,
                      None,
                      class_names=['Player', 'Goalkeeper', 'Judge', 'Goal'])
     img.show()
Example #10
 def show_sample(self, id=None):
     if id is None:
         id = random.randint(0, self.nSamples - 1)
     img, ann = self[id]
     W, H = img.size
     boxes = []
     start = 0
     while ann[start + 1] > 0:
         box = [0 for i in range(7)]
         box[0] = ann[start + 1]
         box[1] = ann[start + 2]
         box[2] = ann[start + 3]
         box[3] = ann[start + 4]
         box[4] = 1
         box[5] = 1
         box[6] = ann[start]
         boxes.append(box)
         start += 5
     img = plot_boxes(img,
                      boxes,
                      None,
                      class_names=['Player', 'Goalkeeper', 'Judge', 'Goal'])
     img.show()
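
Examples #9 and #10 walk a flat annotation buffer that stores five values per object, [cls_id, x, y, w, h], and stop at the zero padding. The helper below is an equivalent, slightly more explicit reading of that layout; its name is hypothetical and it is only a sketch.

# Hypothetical helper: split a zero-padded flat annotation into per-object tuples.
def split_annotation(ann):
    objects = []
    for start in range(0, len(ann), 5):
        cls_id, x, y, w, h = ann[start:start + 5]
        if x <= 0:  # zero padding marks the end of the real objects
            break
        objects.append((cls_id, x, y, w, h))
    return objects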
Example #11
    def detect(self,
               img,
               outputs,
               modelName="yolo2",
               conf_thresh=0.5,
               nms_thresh=0.4,
               output_img_path='predictions.jpg'):
        print('Detect ...start')
        #load detection information
        import pickle
        pklfilepath = '{}_detection_information.pkl'.format(modelName)
        detection_information = pickle.load(open(pklfilepath, 'rb'))
        num_anchors, anchors, num_classes = [
            detection_information[k] for k in detection_information.keys()
        ]
        # use the original pytorch-yolo2 module to detect objects from the outputs
        import torch
        from torch.autograd import Variable
        output = torch.FloatTensor(outputs).cuda()
        #from utils import *
        from utils import get_region_boxes, nms, load_class_names, plot_boxes
        if num_classes == 20:
            namesfile = 'data/voc.names'
        elif num_classes == 80:
            namesfile = 'data/coco.names'
        else:
            namesfile = 'data/names'

        for i in range(2):
            boxes = get_region_boxes(output, conf_thresh, num_classes, anchors,
                                     num_anchors)[0]
            boxes = nms(boxes, nms_thresh)

        class_names = load_class_names(namesfile)
        str_ = plot_boxes(img, boxes, output_img_path, class_names)
        print('Detect ... done!')
        return str_
Example #12
# Set the IOU threshold
iou_thresh = 0.4

# Set the default figure size
plt.rcParams['figure.figsize'] = [24.0, 14.0]

# Load the image
img = cv2.imread('./images/1.jpg')

# Convert the image to RGB
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# We resize the image to the input width and height of the first layer of the network.
resized_image = cv2.resize(original_image, (m.width, m.height))

# Set the IOU threshold. Default value is 0.4
iou_thresh = 0.4

# Set the NMS threshold. Default value is 0.6
nms_thresh = 0.6

# Detect objects in the image
boxes = ut.detect_objects(m, resized_image, iou_thresh, nms_thresh)

# Print the objects found and the confidence level
# print_objects(boxes, class_names)

# Plot the image with bounding boxes and corresponding object class labels
ut.plot_boxes(original_image, boxes, class_names, plot_labels=True)
                blobs[k] = v
        output_name = list(blobs.keys())[-1]
        print('output_name', output_name)
        return blobs[output_name]


if __name__ == '__main__':
    prototxt = 'tiny_yolo_nbn_reluface.prototxt'
    caffemodel = '/nfs/xiaohang/for_chenchao/tiny_yolo_nbn_reluface.caffemodel'
    imgfile = 'data/face.jpg'

    m = CaffeNet(prototxt, caffemodel)
    use_cuda = 1
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    #if m.num_classes == 20:
    #    namesfile = '../data/voc.names'
    #class_names = load_class_names(namesfile)
    class_names = ['face']
    for i in range(1):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes(img, boxes, 'predictions.jpg', class_names)