def plot_boxes(i, frame_no, img, boxes, class_names, plot_labels = True, color = None):
    """Draw bounding boxes for detections inside a hard-coded queue region,
    count them, and save/display the annotated frame.

    Args:
        i: unused index supplied by the caller (kept for interface compat).
        frame_no: frame number, used to name the saved output image.
        img: HxWxC image array (numpy); assumed RGB for matplotlib display.
        boxes: detections; each box is
            [cx, cy, w, h, obj_conf, cls_conf, cls_id] with coordinates
            normalized to the image size.
        class_names: list of class names; used to derive a per-class color.
        plot_labels: unused in this variant (label drawing was disabled).
        color: optional fixed RGB tuple (floats in [0, 1]) that overrides
            the per-class color.
    """
    queue_counter = 0

    # Palette used to interpolate a distinct color for each class id.
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1],
                                [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        # Linearly interpolate channel c of the palette at x / max_val.
        ratio = float(x) / max_val * 5
        lo = int(np.floor(ratio))
        hi = int(np.ceil(ratio))
        frac = ratio - lo
        val = (1 - frac) * colors[lo][c] + frac * colors[hi][c]
        return int(val * 255)

    height = img.shape[0]
    width = img.shape[1]

    # Create a figure and plot the image.
    fig, ax = plt.subplots(1, 1)
    ax.imshow(img)

    for box in boxes:
        # Convert normalized center/size to pixel corner coordinates.
        x1 = int(np.around((box[0] - box[2] / 2.0) * width))
        y1 = int(np.around((box[1] - box[3] / 2.0) * height))
        x2 = int(np.around((box[0] + box[2] / 2.0) * width))
        y2 = int(np.around((box[1] + box[3] / 2.0) * height))

        rgb = (1, 0, 0)  # default: red
        if len(box) >= 7 and class_names:
            cls_id = box[6]
            classes = len(class_names)
            # Scramble class id so adjacent ids get distant palette offsets.
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes) / 255
            green = get_color(1, offset, classes) / 255
            blue = get_color(0, offset, classes) / 255
            rgb = (red, green, blue) if color is None else color

        width_x = x2 - x1
        width_y = y1 - y2

        # Hard-coded quadrilateral defining the queue region; a detection
        # is counted when its lower-left corner (x1, y2) lies inside it.
        check_point = consider.check(0, 270, 530, 310, 530, 170,
                                     0, 200, x1, y2)
        if check_point == 1:
            queue_counter += 1
            # (x1, y2) is the lower-left corner of the box in pixels.
            rect = patches.Rectangle((x1, y2), width_x, width_y,
                                     linewidth=2, edgecolor=rgb,
                                     facecolor='none')
            ax.add_patch(rect)

    print('person standing in queue: ', queue_counter)
    framename = 'output' + str(frame_no)
    # dataentry.datawrite(framename, queue_counter)

    # Raw string avoids invalid-escape warnings for the Windows path;
    # the literal value is unchanged.
    out_path = r'D:\Queue Managment v1\Output\out' + str(frame_no) + '.png'
    plt.savefig(out_path)
    plt.close(fig)  # BUGFIX: the figure was leaked once per frame
    im = cv2.imread(out_path)
    imS = cv2.resize(im, (800, 700))
    cv2.imshow('output', imS)
    cv2.waitKey(2)
def plot_boxes(i, frame_no, img, boxes, class_names, r, plot_labels=True, color=None):
    """Count people whose centers fall inside the rectangular region *r*,
    annotate the frame with OpenCV, and pass the centers to the queue
    detector.

    Args:
        i: unused index supplied by the caller (kept for interface compat).
        frame_no: frame number, used to name the saved output image.
        img: HxWxC BGR image (numpy array); drawn on in place.
        boxes: detections; each box is
            [cx, cy, w, h, obj_conf, cls_conf, cls_id] with coordinates
            normalized to the image size. Only 'person' boxes are used.
        class_names: class-id -> name lookup.
        r: region corners as ((x_left, y_top), (x_right, y_bottom)) pixels.
        plot_labels: unused in this variant.
        color: unused in this variant (kept for interface compatibility).
    """
    queue_counter = 0
    rect_lst = []  # (x, y) centers of the people found inside the region

    height = img.shape[0]
    width = img.shape[1]

    # Draw the queue region once. The original redrew the identical
    # polygon for every detection and skipped it on person-free frames.
    region = np.array([[[r[0][0], r[0][1]], [r[1][0], r[0][1]],
                        [r[1][0], r[1][1]], [r[0][0], r[1][1]]]], np.int32)
    img = cv2.polylines(img, [region], True, (0, 200, 255), 5)

    for box in boxes:
        if class_names[box[6]] != 'person':
            continue

        # Convert normalized center/size to pixel corner coordinates.
        x1 = int(np.around((box[0] - box[2] / 2.0) * width))
        y1 = int(np.around((box[1] - box[3] / 2.0) * height))
        x2 = int(np.around((box[0] + box[2] / 2.0) * width))
        y2 = int(np.around((box[1] + box[3] / 2.0) * height))

        # Center of the bounding box, used for the inside-region test.
        x = int((x1 + x2) / 2)
        y = int((y1 + y2) / 2)

        check_point = consider.check(r[0][0], r[0][1], r[1][0], r[0][1],
                                     r[1][0], r[1][1], r[0][0], r[1][1],
                                     x, y)
        if check_point == 1:
            queue_counter += 1
            rect_lst.append([x, y])
            # Mark the person: vertical center line, center dot and the
            # center coordinates as text.
            img = cv2.line(img, (x, y1), (x, y2), (255, 0, 0), 2)
            img = cv2.circle(img, (x, y), 5, (0, 0, 255), 1)
            txt = str(x) + ',' + str(y)
            # BUGFIX: the original assigned the text color to the 'color'
            # parameter, clobbering it for subsequent iterations.
            cv2.putText(img, txt, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2, cv2.LINE_AA)

    # Sorted left-to-right centers feed the queue-shape detector.
    rect_lst = sorted(rect_lst)
    print('list', rect_lst)
    detector = queueDetect.Detect()  # renamed: original reused 'r'
    detector.key_values(rect_lst)

    print('person standing in queue: ', queue_counter)
    framename = 'output' + str(frame_no)
    cv2.imwrite('Output/' + str(framename) + '.jpg', img)
    cv2.imshow('draw', img)
    cv2.waitKey(2)
def plot_boxes(i, frame_no, img, boxes, class_names, plot_labels=True, color=None):
    """Count people inside a hard-coded queue region, label each with its
    tracker id, log the count via dataentry, and save/display the frame.

    Args:
        i: unused index supplied by the caller (kept for interface compat).
        frame_no: frame number, used to name the saved output image and
            passed to the tracker.
        img: HxWxC image array (numpy); assumed RGB for matplotlib display.
        boxes: detections; each box is
            [cx, cy, w, h, obj_conf, cls_conf, cls_id] with coordinates
            normalized to the image size. Only 'person' boxes are used.
        class_names: class-id -> name lookup.
        plot_labels: unused; the tracker id label is always drawn.
        color: optional fixed RGB tuple (floats in [0, 1]) that overrides
            the per-class color.
    """
    queue_counter = 0

    # Palette used to interpolate a distinct color for each class id.
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1],
                                [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def get_color(c, x, max_val):
        # Linearly interpolate channel c of the palette at x / max_val.
        ratio = float(x) / max_val * 5
        lo = int(np.floor(ratio))
        hi = int(np.ceil(ratio))
        frac = ratio - lo
        val = (1 - frac) * colors[lo][c] + frac * colors[hi][c]
        return int(val * 255)

    height = img.shape[0]
    width = img.shape[1]

    # Create a figure and plot the image.
    fig, ax = plt.subplots(1, 1)
    ax.imshow(img)

    for box in boxes:
        if class_names[box[6]] != 'person':
            continue

        # Convert normalized center/size to pixel corner coordinates.
        x1 = int(np.around((box[0] - box[2] / 2.0) * width))
        y1 = int(np.around((box[1] - box[3] / 2.0) * height))
        x2 = int(np.around((box[0] + box[2] / 2.0) * width))
        y2 = int(np.around((box[1] + box[3] / 2.0) * height))

        rgb = (1, 0, 0)  # default: red
        if len(box) >= 7 and class_names:
            cls_id = box[6]
            classes = len(class_names)
            # Scramble class id so adjacent ids get distant palette offsets.
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes) / 255
            green = get_color(1, offset, classes) / 255
            blue = get_color(0, offset, classes) / 255
            rgb = (red, green, blue) if color is None else color

        width_x = x2 - x1
        width_y = -200  # fixed drawn-box height (original replaced y1 - y2)

        # Hard-coded quadrilateral defining the queue region; a detection
        # is counted when its lower-left corner (x1, y2) lies inside it.
        check_point = consider.check(0, 150, 576, 208, 576, 300,
                                     0, 215, x1, y2)
        if check_point == 1:
            queue_counter += 1
            rect = patches.Rectangle((x1, y2), width_x, width_y,
                                     linewidth=2, edgecolor=rgb,
                                     facecolor='none')
            track_no = tracker.track(x1, x2, y1, y2, frame_no)
            ax.add_patch(rect)
            # Label the box with its tracker id, offset slightly from the
            # top-left corner (offsets scale with the image size).
            label = str(track_no)
            lxc = (img.shape[1] * 0.266) / 100
            lyc = (img.shape[0] * 1.180) / 100
            ax.text(x1 + lxc, y1 - lyc, label, fontsize=24, color='k',
                    bbox=dict(facecolor=rgb, edgecolor=rgb, alpha=0.8))

    print('person standing in queue: ', queue_counter)
    framename = 'output' + str(frame_no)
    dataentry.datawrite(framename, queue_counter)

    # Raw string avoids invalid-escape warnings for the Windows path;
    # the literal value is unchanged.
    out_path = r'D:\Queue Managment v1\Output\out' + str(frame_no) + '.png'
    plt.savefig(out_path)
    plt.close(fig)  # BUGFIX: the figure was leaked once per frame
    im = cv2.imread(out_path)
    imS = cv2.resize(im, (800, 700))
    cv2.imshow('output', imS)
    cv2.waitKey(2)