def main(args):
    """Detect a licence-plate number in the image named by ``args[1]``.

    Side effects: writes ``output/detected_<name>.png`` (image with the
    best prediction's bounding boxes drawn) and ``output/result_<name>.txt``
    (recognized labels plus rounded probabilities), then prints the number.
    Prints "NONE" and exits with status 0 when nothing is detected.
    """
    img = misc.imread(args[1])
    predictions = detect_number(img)
    # Truthiness covers both None and an empty list (original compared == None).
    if not predictions:
        print("NONE")
        sys.exit(0)
    best_prediction = find_best_prediction(predictions)
    labels, bboxes, probs = best_prediction
    # Draw one box per recognized label (original indexed bboxes by label count).
    for bbox in bboxes[:len(labels)]:
        draw_bounding_box(img, bbox, RED)
    # Base name = text between the LAST path separator and the LAST dot.
    # rfind fixes two original bugs: a bare filename lost its first character
    # (st was 0, slice started at st+1), and nested dirs / dotted names were
    # cut at the FIRST '/' and '.'; rfind returns -1 for a bare name, so
    # st + 1 == 0 starts the slice at the beginning.
    st = args[1].rfind("/")
    dt = args[1].rfind(".")
    result_name = args[1][st + 1:dt]
    misc.imsave("output/detected_" + result_name + ".png", img)
    # "with" guarantees the handle is closed (the original leaked it).
    with open("output/result_" + result_name + ".txt", "w") as out_file:
        out_file.write(''.join(labels))
        out_file.write("\n" + str(list(np.round(probs, 2))))
    print("Detected licence plate number: ", ''.join(labels))
# NOTE(review): this span was recovered from a whitespace-mangled paste; the
# indentation below is a best-effort reconstruction — verify against VCS.

# Record the current period value and the reference speed for this frame
# (delta_fr is presumably a frame offset into speed_g — TODO confirm).
Ta.append(T_v)
sa.append(speed_g[frame_itt + delta_fr])
angles_objects = []
boxes_main = []
time_start_itt = time.time()
# Detector output; each entry is used below as [class_name, pt1, pt2],
# where pt1/pt2 look like top-left / bottom-right corners — TODO confirm.
boxes_out = nn.get_features(img1)
tt_nn.append(time_tressing(time_start_itt, '\t\tneural network'))
t0 = time.time()
if debug:
    # Debug-only visualization: vehicles drawn in red, all other classes in blue.
    out = img1.copy()
    for box in boxes_out:
        cl_name = box[0]
        pt1 = box[1]
        pt2 = box[2]
        if cl_name in vehicles_classes:
            out = draw.draw_bounding_box(out, cl_name, draw.red, pt1, pt2, 3)
        else:
            out = draw.draw_bounding_box(out, cl_name, draw.blue, pt1, pt2, 3)
tt_draw_rec.append(time_tressing(t0, '\t\tbounding box drawing'))
t0 = time.time()
for box in boxes_out:
    cl_name = box[0]
    if cl_name in vehicles_classes:
        # Two bearings from pai_in_frame to box corners: (pt1.x, pt2.y) and
        # (pt2.x, pt2.y) — i.e. both use the SECOND point's y; presumably the
        # bottom edge of the box. NOTE(review): confirm this asymmetry is
        # intentional and not a pt1[1]/pt2[1] typo.
        angles_objects.append(
            np.arctan2(-pai_in_frame[1] + box[2][1], -pai_in_frame[0] + box[1][0]))
        angles_objects.append(
            np.arctan2(-pai_in_frame[1] + box[2][1], -pai_in_frame[0] + box[2][0]))
        boxes_main.append(box)
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])

# Apply non-max suppression to drop overlapping detections.
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)

# Draw a bounding box for each detection that survived NMS and keep its
# corner pair for the point-filtering pass below.
boxes_out = []
for ind in indices:
    # NOTE(review): OpenCV < 4.5.4 returns Nx1 indices (hence ind[0]);
    # newer versions return a flat array — confirm the cv2 version in use.
    i_ = ind[0]
    x, y, w, h = boxes[i_]
    img1 = draw.draw_bounding_box(img1, None, COLORS[class_ids[i_]],
                                  confidences[i_],
                                  round(x), round(y), round(x + w), round(y + h))
    # BUGFIX: the stored bottom-right y was round(y + w) — width used as
    # height — which skewed every kept box; it must be round(y + h),
    # matching the coordinates just drawn above.
    boxes_out.append([[round(x), round(y)], [round(x + w), round(y + h)]])

t0 = time.time()
# Keep only tracked point pairs whose first point lies OUTSIDE every
# detected box (o stays True when no box contains pt1).
pts1_tem = []
pts2_tem = []
for i in range(len(pts1)):
    pt1 = pts1[i]
    pt2 = pts2[i]
    o = True
    for box in boxes_out:
        box_pt1 = box[0]
        box_pt2 = box[1]
        if box_pt1[0] <= pt1[0] <= box_pt2[0] and box_pt1[1] <= pt1[1] <= box_pt2[1]:
            o = False
            break
frameWidth = image.shape[1]
frameHeight = image.shape[0]

# Time the object detector and the pose network separately.
t_y = time.time()
output_yolo = nn.get_features(image)
times_yolo.append(time.time() - t_y)
t_op = time.time()
# NOTE(review): getPose is called twice (raw maps + keypoints); confirm a
# single call cannot provide both — this doubles pose inference time.
output_opnps = op.getPose(image, True)
pp, ll = op.getPose(image)
times_openpose.append(time.time() - t_op)

# Draw YOLO boxes plus the pose skeleton on a copy of the frame and save it.
yolo_result = image.copy()
for box in output_yolo:
    # box layout used here: [name, pt1, pt2, color].
    yolo_result = draw.draw_bounding_box(yolo_result, box[0], box[3],
                                         box[1], box[2])
yolo_result = op.drawPose(yolo_result, pp, ll, 3, 1)
cv2.imwrite('output/cars&humans/{}_yolo.png'.format(itt), yolo_result)

# Normalize the first probability map to the full 0..255 range.
probMap = output_opnps[0, 0, :, :]
probMap = cv2.resize(probMap, (frameWidth, frameHeight))
probMap -= np.min(probMap)
maxM = np.max(probMap)
# BUGFIX: the original computed k = 255 / maxM but then executed
# `probMap *= 255`, so any map whose max != 1 overflowed/wrapped when cast
# to uint8. Scale by 255/maxM, guarding the all-constant (maxM == 0) case.
if maxM > 0:
    probMap *= 255.0 / maxM
probMap = np.uint8(probMap)

# Compose a green-channel visualization of the probability map.
pb = np.zeros((frameHeight, frameWidth, 3), np.uint8)
pb[:, :, 1] = probMap  # green; blue/red channels intentionally left zero
# NOTE(review): this zeroes the green channel of the ORIGINAL frame in
# place — confirm downstream code expects the mutation.
image[:, :, 1] = 0