def send_imgs(img_list):
    """Extract features from each image and publish them as Detected_Objects.

    For every image: show it in its own OpenCV window, convert it to a ROS
    image message, compute HOG and color-histogram features, and collect a
    ``Detected_Object`` per image.  The collected list is published only when
    speech was detected (``got_speech != 0``); the flag is then reset so each
    utterance triggers at most one publish.

    Args:
        img_list: sequence of 3-channel OpenCV (BGR) images; ``None`` or an
            empty sequence is a no-op.
    """
    global got_speech
    if img_list is None or len(img_list) == 0:
        return
    # One bridge is enough; the original rebuilt a CvBridge per conversion.
    bridge = CvBridge()
    detected_objects_list = list()
    for index, img in enumerate(img_list):
        cv2.imshow('Img ' + str(index), img)
        cv2.waitKey(1)  # give the HighGUI event loop a tick so the window paints
        rows, cols, d = img.shape  # shape is (height, width, channels)
        detected_object = Detected_Object()
        detected_object.id = index
        detected_object.image = bridge.cv2_to_imgmsg(img, encoding="passthrough")
        # FIX: x runs along the columns and y along the rows; the original
        # code had them swapped (center_x = rows / 2, center_y = cols / 2).
        detected_object.center_x = cols / 2
        detected_object.center_y = rows / 2
        hog, image_hog = get_img_hog(img)
        detected_object.features.hog_histogram = hog
        detected_object.hog_image = bridge.cv2_to_imgmsg(image_hog, encoding="passthrough")
        colors_histo, object_shape = getpixelfeatures(img)
        detected_object.features.colors_histogram = colors_histo.tolist()
        # detected_object.features.shape_histogram = object_shape.tolist()
        detected_objects_list.append(detected_object)
        # Dump the sentences describing the shown object (only works with one
        # object at a time and a known object).
        orientation.automatic_descriptor.automatic_descriptor(
            orientation.machinelearning.label_pred(img), detected_object)
    # Only publish when a speech trigger is pending; otherwise the features
    # were still displayed/described above, but nothing is sent downstream.
    if got_speech == 0:
        return
    detected_objects_list_publisher.publish(detected_objects_list)
    got_speech = 0  # consume the trigger: one publish per detected utterance