def Recall(imageList, model, sample_number=5000, thresh=0.3):
    """Compute classification recall of the detector over imageList.

    For every ground-truth object, checks whether the 7x7 grid cell that
    contains it predicts a confident object (preds[24] > thresh) of the
    same class.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model whose output is a flat 7*7*25 vector per image.
        sample_number: unused; kept for interface compatibility.
        thresh: confidence threshold for counting a prediction.

    Returns:
        float: correct predictions / total ground-truth objects
        (0.0 when there are no ground-truth objects at all).
    """
    correct = 0
    obj_num = 0
    for image in imageList:
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        out = model.predict(img)[0]
        # For each of the 49 grid cells, see whether we predicted the
        # class of each ground-truth object located in that cell.
        for i in range(49):
            preds = out[i * 25:(i + 1) * 25]
            # Integer division: plain i/7 yields a float under Python 3
            # and would break the list indexing below.
            row = i // 7
            col = i % 7
            box = image.boxes[row][col]
            if box.has_obj:
                for obj in box.objs:
                    obj_num += 1
                    true_class = obj.class_num
                    # preds[24] is the objectness/confidence score.
                    if preds[24] > thresh:
                        # preds[4:24] holds the 20 class scores.
                        predict_class = np.argmax(preds[4:24])
                        if predict_class == true_class:
                            correct += 1
    # Guard against division by zero on an empty/objectless image list.
    return correct * 1.0 / obj_num if obj_num else 0.0
def Recall(imageList, model, sample_number=5000, thresh=0.3):
    """Compute classification recall of the detector over imageList.

    For each ground-truth object, counts a hit when the grid cell holding
    it emits a confident prediction (preds[24] > thresh) whose argmax
    class matches the object's class.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model whose output is a flat 7*7*25 vector per image.
        sample_number: unused; kept for interface compatibility.
        thresh: confidence threshold for counting a prediction.

    Returns:
        float: correct predictions / total ground-truth objects
        (0.0 when there are no ground-truth objects at all).
    """
    correct = 0
    obj_num = 0
    for image in imageList:
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        out = model.predict(img)[0]
        # Walk the 49 grid cells of the 7x7 output.
        for i in range(49):
            preds = out[i * 25:(i + 1) * 25]
            # Use floor division: i / 7 is a float under Python 3 and
            # would break the grid indexing below.
            row = i // 7
            col = i % 7
            box = image.boxes[row][col]
            if box.has_obj:
                for obj in box.objs:
                    obj_num += 1
                    true_class = obj.class_num
                    # preds[24] is the objectness/confidence score.
                    if preds[24] > thresh:
                        # preds[4:24] holds the 20 class scores.
                        predict_class = np.argmax(preds[4:24])
                        if predict_class == true_class:
                            correct += 1
    # Avoid ZeroDivisionError when no ground-truth objects exist.
    return correct * 1.0 / obj_num if obj_num else 0.0
# Exemplo n.º 3
# 0
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Fraction of confident cell predictions whose class matches a
    ground-truth object in the same grid cell.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model whose output is a flat 7*7*25 vector per image.
        sample_number: unused; kept for interface compatibility.
        thresh: confidence threshold (applied to preds[24]) above which a
            cell counts as a detection.

    Returns:
        float: correct / number of confident detections
        (0.0 when no detection clears the threshold).
    """
    correct = 0
    object_num = 0
    count = 0
    for image in imageList:
        count += 1
        if count % 500 == 0:
            print('Image number: %d' % count)
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        out = model.predict(img)[0]
        for i in range(49):
            preds = out[i * 25:(i + 1) * 25]
            # preds[24] is the cell's objectness/confidence score.
            if preds[24] > thresh:
                object_num += 1
                # Floor division: i / 7 is a float under Python 3.
                row = i // 7
                col = i % 7
                # preds[4:24] holds the 20 class scores.
                class_num = np.argmax(preds[4:24])
                # Ground truth for this grid cell.
                box = image.boxes[row][col]
                if box.has_obj:
                    for obj in box.objs:
                        true_class = obj.class_num
                        if true_class == class_num:
                            correct += 1
                        # Only the first ground-truth object in the cell is
                        # compared (break is unconditional, as in the
                        # original logic).
                        break
    # Avoid ZeroDivisionError when nothing clears the threshold.
    return correct * 1.0 / object_num if object_num else 0.0
# Exemplo n.º 4
# 0
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Fraction of NMS-surviving detections whose class matches a
    ground-truth object in the originating grid cell.

    Runs the model on every image, decodes the raw output with
    convert_yolo_detections, suppresses overlaps with do_nms_sort, and
    scores each surviving detection against the ground truth.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model producing a flat 7*7*25 output vector.
        sample_number: unused; kept for interface compatibility.
        thresh: unused here -- a detection is kept when any of its class
            probabilities is non-zero (NMS zeroes suppressed entries).

    Returns:
        float: correct / number of detections (0.0 when there are none).
    """
    correct = 0
    object_num = 0
    count = 0
    for image in imageList:
        count += 1
        if count % 500 == 0:
            print('Image number: %d' % count)
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        predictions = model.predict(img)[0]
        # Post-process: decode raw output into boxes, then NMS.
        boxes = do_nms_sort(convert_yolo_detections(predictions))
        for det in boxes:
            # A detection survives if NMS left any non-zero class prob.
            if np.any(det.probs):
                object_num += 1
                # Ground-truth cell this detection originated from.
                truth_cell = image.boxes[det.row][det.col]
                if truth_cell.has_obj:
                    for obj in truth_cell.objs:
                        if obj.class_num == det.class_num:
                            correct += 1
                            break
    # Avoid ZeroDivisionError when no detections were produced.
    return correct * 1.0 / object_num if object_num else 0.0
# Exemplo n.º 5
# 0
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Fraction of NMS-surviving detections whose class matches a
    ground-truth object in the originating grid cell.

    The original body mixed tab and space indentation, which is a TabError
    under Python 3; this version uses 4-space indentation throughout.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model producing a flat 7*7*25 output vector.
        sample_number: unused; kept for interface compatibility.
        thresh: unused here -- a detection is kept when any of its class
            probabilities is non-zero (NMS zeroes suppressed entries).

    Returns:
        float: correct / number of detections (0.0 when there are none).
    """
    correct = 0
    object_num = 0
    count = 0
    for image in imageList:
        count += 1
        if count % 500 == 0:
            print('Image number: %d' % count)
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        predictions = model.predict(img)[0]
        # Post-process: decode raw output into boxes, then NMS.
        boxes = do_nms_sort(convert_yolo_detections(predictions))
        for det in boxes:
            # A detection survives if NMS left any non-zero class prob.
            if np.any(det.probs):
                object_num += 1
                # Ground-truth cell this detection originated from.
                truth_cell = image.boxes[det.row][det.col]
                if truth_cell.has_obj:
                    for obj in truth_cell.objs:
                        if obj.class_num == det.class_num:
                            correct += 1
                            break
    # Avoid ZeroDivisionError when no detections were produced.
    return correct * 1.0 / object_num if object_num else 0.0
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Write PASCAL-VOC comp4 detection files for four classes.

    Runs the detector over imageList and appends one line per confident
    detection (prob >= 0.2) of bottle (4), car (6), cat (7) or train (18)
    to the corresponding 'comp4_det_test_*.txt' file, with coordinates
    rescaled from the 448x448 network frame back to the original image.

    Args:
        imageList: iterable of image records with .imgPath.
        model: Keras model producing a flat 7*7*25 output vector.
        sample_number: unused; kept for interface compatibility.
        thresh: unused; the hard-coded 0.2 probability cut is applied.

    Returns:
        None. Side effect: writes the four detection files.
    """
    # Mode must be 'w': the original 'write' only worked on Python 2
    # because it inspected the first character, and raises ValueError
    # under Python 3.
    fcat = open('comp4_det_test_cat.txt', 'w')
    fcar = open('comp4_det_test_car.txt', 'w')
    fbottle = open('comp4_det_test_bottle.txt', 'w')
    ftrain = open('comp4_det_test_train.txt', 'w')
    # class_num -> output file (VOC ids: 4 bottle, 6 car, 7 cat, 18 train).
    out_files = {4: fbottle, 6: fcar, 7: fcat, 18: ftrain}
    try:
        count = 0
        for image in imageList:
            img = Image.open(image.imgPath)
            img_name = image.imgPath.split('/')[-1].split('.')[0]
            W, H = img.size
            count += 1
            if count % 500 == 0:
                print('Image number: %d' % count)
            # Get prediction from neural network (448x448 crop).
            imgg = crop_detection(image.imgPath, new_width=448, new_height=448)
            imgg = np.expand_dims(imgg, axis=0)
            predictions = model.predict(imgg)[0]
            # Post-process: decode raw output into boxes, then NMS.
            boxes = do_nms_sort(convert_yolo_detections(predictions))
            for det in boxes:
                prob = np.max(det.probs)
                class_num = det.class_num
                # Keep only the four tracked classes above the 0.2 cut.
                if class_num in out_files and prob >= 0.2:
                    # Center/size -> corners, still in the 448x448 frame.
                    left = det.x - det.w / 2.0
                    right = det.x + det.w / 2.0
                    up = det.y - det.h / 2.0
                    down = det.y + det.h / 2.0
                    # Clamp to the network frame.
                    if left < 0:
                        left = 0
                    if right > 448:
                        right = 447
                    if up < 0:
                        up = 0
                    if down > 448:
                        down = 447
                    # Convert the coords back to the pre-stretch image size.
                    left = left / 448.0 * W
                    right = right / 448.0 * W
                    up = up / 448.0 * H
                    down = down / 448.0 * H
                    out_files[class_num].write(
                        img_name + ' ' + str(round(prob, 6)) + ' ' +
                        str(round(left, 6)) + ' ' + str(round(up, 6)) + ' ' +
                        str(round(right, 6)) + ' ' + str(round(down, 6)) + '\n')
    finally:
        # The original leaked all four handles; always close them.
        for f in (fcat, fcar, fbottle, ftrain):
            f.close()
    return
# Exemplo n.º 7
# 0
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]

# Script section: run the trained model over every image found under
# images_path and save annotated detections via draw_detections.
# (Python 2 syntax: bare `print` statements.)
#Predict output
#image = readImg(os.path.join(os.getcwd(),'Yolo_dog.img'),h=448,w=448)
# Collect relative paths of all files under the hard-coded image directory.
images_path = '/home/media/Documents/YOLO.keras/images'
img_names = []
for root, dirs, files in os.walk(images_path):
    for name in files:
        img_names.append('images/' + name)

#img_names = ['images/eagle.jpg','images/000047.jpg','images/000009.jpg']
for img_name in img_names:
    print img_name
    # Resize/crop to the 448x448 network input and add a batch dimension.
    img = crop_detection(os.path.join(os.getcwd(), img_name),
                         new_width=448,
                         new_height=448)
    img = np.expand_dims(img, axis=0)
    out = model.predict(img)
    predictions = out[0]

    #Post process predicting results: decode the flat 7*7*25 output into
    #boxes, then suppress overlapping detections.
    boxes = convert_yolo_detections(predictions)
    boxes = do_nms_sort(boxes)

    #print len(boxes)
    # draw_detections renders the boxes onto the image; 0.2 is the
    # confidence threshold and 20 the class count.
    # NOTE(review): `model` and `labels` must be defined earlier in the
    # file for this section to run -- confirm.
    draw_detections(
        os.path.join('/home/media/Documents/YOLO.keras/', img_name), 0.2,
        boxes, 20, labels,
        img_name.split('/')[1])
    print
	return loss

# Script section: load the Tiny-YOLO model and iterate images, decoding
# the raw 7x7x25 output by hand.  (Python 2 syntax; tab-indented.)
# NOTE(review): this section appears truncated -- the final `if` has no
# body in this copy of the source; confirm against the original sample.
model = model_from_json(open('Tiny_Yolo_Architecture.json').read(),custom_objects={'custom_loss':custom_loss})
model.load_weights('weights2.hdf5')

#Predict output
#image = readImg(os.path.join(os.getcwd(),'Yolo_dog.img'),h=448,w=448)
# Collect relative paths of all files under the hard-coded image directory.
images_path = '/home/media/Documents/YOLO.keras/images'
img_names = []
for root, dirs, files in os.walk(images_path):
	for name in files:
		img_names.append('images/'+name)
#img_names = ['images/eagle.jpg','images/000047.jpg','images/000009.jpg']
for img_name in img_names:
	print img_name
	# crop_detection(save=True) also returns the resized PIL image.
	img,im = crop_detection(os.path.join(os.getcwd(),img_name),new_width=448,new_height=448,save=True)
	im.save(os.getcwd()+'/images/'+img_name.split('/')[1].split('.')[0]+'_resize.jpg')
	img = np.expand_dims(img, axis=0)
	out = model.predict(img)
	out = out[0]

	#Post process predicting results
	thresh = 0.3
	imgPath = os.path.join(os.getcwd(),img_name)
	img = Image.open(imgPath)
	img_draw = Image.open(os.getcwd()+'/images/'+img_name.split('/')[1].split('.')[0]+'_resize.jpg')
	drawable = ImageDraw.Draw(img_draw)
	labels = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
	# Walk the 49 grid cells; preds[24] is the cell confidence score.
	for i in range(49):
		preds = out[i*25:(i+1)*25]
		if(preds[24] > thresh):
# Exemplo n.º 9
# 0
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Compute mean per-image precision and recall of the detector.

    For each image: run the model, decode + NMS the detections, pack the
    ground-truth objects into box instances, then greedily assign each
    detection to the ground-truth box with highest IoU (a match needs
    IoU > 0.5 and each truth box may be matched once).

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model producing a flat 7*7*25 output vector.
        sample_number: unused; kept for interface compatibility.
        thresh: unused; matching uses the fixed 0.5 IoU cut.

    Returns:
        (ap, rec): mean over images with >=1 detection of tp/len(boxes),
        and mean over all images of the covered-truth ratio.  Returns 0.0
        components when the respective denominator would be zero.
    """
    classes = 20
    side = 7
    count = 0
    ap = 0.0
    rec = 0.0
    image_num = 0  # images that produced at least one detection
    for image in imageList:
        count += 1
        if count % 500 == 0:
            print('Image number: %d' % count)
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        predictions = model.predict(img)[0]
        # Post-process: decode raw output into boxes, then NMS.
        boxes = do_nms_sort(convert_yolo_detections(predictions))
        # Pack ground-truth objects into box instances (448x448 frame).
        truth_boxes = []
        for i in range(side):
            for j in range(side):
                cell = image.boxes[i][j]
                if cell.has_obj:
                    for obj in cell.objs:
                        new_box = box(classes)
                        new_box.row = i
                        new_box.col = j
                        new_box.x = (j + obj.x) * 64  # 448 / 7 = 64 px/cell
                        new_box.y = (i + obj.y) * 64
                        # Sizes are stored as sqrt fractions; square back.
                        new_box.h = (obj.h ** 2) * 448
                        new_box.w = (obj.w ** 2) * 448
                        new_box.class_num = obj.class_num
                        truth_boxes.append(new_box)
        # Assign detections to ground truth: best-IoU match, each truth
        # box may only be covered once.
        n_pos = len(truth_boxes)
        tp = np.zeros(len(boxes))
        gt_covered = np.zeros(n_pos)
        for i in range(len(boxes)):
            boxa = boxes[i]
            ovmax = -1
            max_index = 0
            for j in range(n_pos):
                iou = box_iou(boxa, truth_boxes[j])
                if iou > ovmax:
                    ovmax = iou
                    max_index = j
            if ovmax > 0.5 and gt_covered[max_index] == 0:
                tp[i] = 1
                gt_covered[max_index] = 1
        if len(boxes) != 0:
            ap += sum(tp) * 1.0 / len(boxes)
            image_num += 1
        # Guard: an image with no ground-truth objects previously raised
        # ZeroDivisionError here.
        if n_pos > 0:
            rec += sum(gt_covered) * 1.0 / n_pos
    # Guard the final averages against empty denominators.
    ap = ap / image_num if image_num else 0.0
    rec = rec / len(imageList) if imageList else 0.0
    return ap, rec
def Acc(imageList, model, sample_number=5000, thresh=0.3):
    """Compute mean per-image precision and recall of the detector.

    Same algorithm as the preceding variant; the original body mixed tab
    and space indentation (a TabError under Python 3) and could divide by
    zero on images without ground-truth objects -- both fixed here.

    Args:
        imageList: iterable of image records with .imgPath and .boxes
            (a 7x7 grid of ground-truth cells).
        model: Keras model producing a flat 7*7*25 output vector.
        sample_number: unused; kept for interface compatibility.
        thresh: unused; matching uses the fixed 0.5 IoU cut.

    Returns:
        (ap, rec): mean over images with >=1 detection of tp/len(boxes),
        and mean over all images of the covered-truth ratio.  Returns 0.0
        components when the respective denominator would be zero.
    """
    classes = 20
    side = 7
    count = 0
    ap = 0.0
    rec = 0.0
    image_num = 0  # images that produced at least one detection
    for image in imageList:
        count += 1
        if count % 500 == 0:
            print('Image number: %d' % count)
        # Get prediction from neural network (448x448 crop, batch of one).
        img = crop_detection(image.imgPath, new_width=448, new_height=448)
        img = np.expand_dims(img, axis=0)
        predictions = model.predict(img)[0]
        # Post-process: decode raw output into boxes, then NMS.
        boxes = do_nms_sort(convert_yolo_detections(predictions))
        # Pack ground-truth objects into box instances (448x448 frame).
        truth_boxes = []
        for i in range(side):
            for j in range(side):
                cell = image.boxes[i][j]
                if cell.has_obj:
                    for obj in cell.objs:
                        new_box = box(classes)
                        new_box.row = i
                        new_box.col = j
                        new_box.x = (j + obj.x) * 64  # 448 / 7 = 64 px/cell
                        new_box.y = (i + obj.y) * 64
                        # Sizes are stored as sqrt fractions; square back.
                        new_box.h = (obj.h ** 2) * 448
                        new_box.w = (obj.w ** 2) * 448
                        new_box.class_num = obj.class_num
                        truth_boxes.append(new_box)
        # Greedy assignment: each detection matches its best-IoU truth
        # box; a truth box can be covered at most once.
        n_pos = len(truth_boxes)
        tp = np.zeros(len(boxes))
        gt_covered = np.zeros(n_pos)
        for i in range(len(boxes)):
            boxa = boxes[i]
            ovmax = -1
            max_index = 0
            for j in range(n_pos):
                iou = box_iou(boxa, truth_boxes[j])
                if iou > ovmax:
                    ovmax = iou
                    max_index = j
            if ovmax > 0.5 and gt_covered[max_index] == 0:
                tp[i] = 1
                gt_covered[max_index] = 1
        if len(boxes) != 0:
            ap += sum(tp) * 1.0 / len(boxes)
            image_num += 1
        # Guard: images without ground truth previously raised
        # ZeroDivisionError here.
        if n_pos > 0:
            rec += sum(gt_covered) * 1.0 / n_pos
    # Guard the final averages against empty denominators.
    ap = ap / image_num if image_num else 0.0
    rec = rec / len(imageList) if imageList else 0.0
    return ap, rec