def send_imgs(img_list):
    """Extract features from each image and publish them as one message.

    For every image: shows it in its own window, builds a Detected_Object
    carrying the raw image, its center, HOG histogram (plus HOG image) and
    colour histogram, and runs the automatic sentence descriptor on it.
    The collected objects are published only when the global `got_speech`
    flag is set, and the flag is cleared after publishing.

    Args:
        img_list: iterable of BGR images (numpy arrays); None/empty is a no-op.
    """
    global got_speech
    # Guard clause: nothing to do for None or an empty list.
    if not img_list:
        return
    detected_objects_list = []
    for index, img in enumerate(img_list):
        cv2.imshow('Img ' + str(index), img)
        cv2.waitKey(1)
        rows, cols, d = img.shape
        detected_object = Detected_Object()
        detected_object.id = index
        detected_object.image = CvBridge().cv2_to_imgmsg(img, encoding="passthrough")
        # NOTE(review): center is computed from (rows, cols) — i.e. (y, x) order;
        # confirm downstream consumers expect that convention.
        detected_object.center_x = rows / 2
        detected_object.center_y = cols / 2
        hog, image_hog = get_img_hog(img)
        detected_object.features.hog_histogram = hog
        detected_object.hog_image = CvBridge().cv2_to_imgmsg(image_hog, encoding="passthrough")
        colors_histo, object_shape = getpixelfeatures(img)
        detected_object.features.colors_histogram = colors_histo.tolist()
        # detected_object.features.shape_histogram = object_shape.tolist()
        detected_objects_list.append(detected_object)
        # here dumps the sentences describing the shown object (only works with one object at a time and a known object)
        orientation.automatic_descriptor.automatic_descriptor(orientation.machinelearning.label_pred(img), detected_object)
    if got_speech == 0:
        return
    # BUG FIX: the publisher expects a Detected_Objects_List message (as built by
    # objects_detector), not a bare Python list — wrap the list in the message type.
    detected_objects_list_msg = Detected_Objects_List()
    detected_objects_list_msg.detected_objects_list = detected_objects_list
    detected_objects_list_publisher.publish(detected_objects_list_msg)
    got_speech = 0
def objects_detector(imgs_bgr8):
    """Crop, classify and featurize incoming BGR images, then publish them.

    For each image: keeps a clean copy for learning, crops a fixed border,
    resizes to 128x128 for classification, optionally runs the loaded
    classifier, optionally saves learn/test snapshots, and builds a
    Detected_Object (image, center, HOG, colour and shape histograms).
    The resulting Detected_Objects_List message is published only when the
    global `got_speech` flag is set; the flag is cleared after publishing.

    NOTE(review): this definition is shadowed by the later `objects_detector`
    below in this file — only the last one is bound at module load. Confirm
    which version is actually intended to be live.
    """
    global img_clean_gray_class
    global img_clean_bgr_learn
    global clf
    global n_bin
    global b_size
    global c_size
    global saving_learn
    global saved
    global saving_test
    global live
    global show
    global got_speech
    global speech
    global loaded_clf
    detected_objects_list = []
    objects_detector_time = time.time()
    for index, img_bgr8 in enumerate(imgs_bgr8):
        # Single unpack of the image shape (the original duplicated this into
        # unused width/height variables).
        w, l, d = np.shape(img_bgr8)
        img_clean_bgr_learn = img_bgr8.copy()
        # Crop a fixed border (top/left 13 px, bottom 5 px, right 8 px).
        img_bgr8 = img_bgr8[13:w - 5, 13:l - 8]
        img_clean_bgr_class = img_bgr8.copy()
        img_clean_bgr_class = cv2.resize(img_clean_bgr_class, (128, 128), interpolation=cv2.INTER_AREA)  # resize image
        img_clr = cv2.resize(img_clean_bgr_learn, (128, 128))
        img_gray = cv2.resize(cv2.cvtColor(img_clean_bgr_learn, cv2.COLOR_BGR2GRAY), (128, 128))
        edges = cv2.Canny(img_gray, 40, 100)
        cv2.imshow('Edges', edges)
        cv2.imshow('img_gray', img_gray)
        cv2.imshow('Clean' + str(index), img_clean_bgr_class)
        # BUG FIX: `final` and `confiance` were only bound inside `if loaded_clf:`
        # but read unconditionally further down, raising NameError when no
        # classifier is loaded. Initialize safe defaults first.
        final = None
        confiance = 1.0
        if loaded_clf:
            final, confiance = get_img_to_be_sent(img_clean_bgr_class)
            print("storing " + str(confiance))

            if confiance < 0.85:
                str_img(img_clean_bgr_learn)
        if saving_learn == 1:
            save_imgs_learn(img_clean_bgr_learn)
        if saving_test == 1:
            save_imgs_test(img_clean_bgr_class)
        if live == 1:
            live_learn(img_bgr8)
        if show:
            if loaded_clf == 0:
                print("Classifier not fitted, trying to load one")
                if load_classifier(1) == -1:
                    print("Could not load it, quitting")
            elif final is not None:
                cv2.imshow('Sent' + str(index), final)
        # Only trigger learning from speech when a prediction actually exists.
        if got_speech != 0 and loaded_clf and confiance < 0.7:
            learn_from_str(1)
        rows, cols, d = img_clean_bgr_class.shape
        detected_object = Detected_Object()
        detected_object.id = 1
        detected_object.image = CvBridge().cv2_to_imgmsg(img_clean_bgr_class, encoding="passthrough")
        detected_object.center_x = rows / 2
        detected_object.center_y = cols / 2
        detected_object.features.hog_histogram = get_img_hog(img_clean_bgr_class)[0]
        colors_histo, object_shape = getpixelfeatures(img_clean_bgr_class)
        detected_object.features.colors_histogram = colors_histo.tolist()
        detected_object.features.shape_histogram = object_shape.tolist()
        detected_objects_list.append(detected_object)
    if got_speech == 0:
        return
    detected_objects_list_msg = Detected_Objects_List()
    detected_objects_list_msg.detected_objects_list = detected_objects_list
    detected_objects_list_publisher.publish(detected_objects_list_msg)
    got_speech = 0
def objects_detector(uprightrects_tuples):
    """Process detected upright-rect crops once they are stable, then publish.

    Maintains a 7-slot ring buffer (`obj_history`) of recent detections keyed
    by `iterations % 7`; processing only proceeds once the buffer is full and
    `check_stability` accepts it. Each (image, center) tuple is border-cropped
    (crop size depends on `using_VGA`), resized to 128x128, optionally
    classified, optionally re-learned when confidence is low and `recording`
    is on, and turned into a Detected_Object. A Detected_Objects_List message
    is published when a classifier is loaded; `got_speech` and `recording`
    flags are cleared at the end.

    Args:
        uprightrects_tuples: iterable of (BGR image, center) tuples for the
            current frame's detections.
    """
    global last_imgs
    global img_clean_gray_class
    global img_clean_bgr_class
    global img_clean_bgr_learn
    global clf
    global n_bin
    global b_size
    global c_size
    global saving_learn
    global saved
    global saving_test
    global live
    global show
    global got_speech
    global speech
    global loaded_clf
    global using_VGA
    global iterations
    global recording
    global obj_history
    iterations += 1
    detected_objects_list = []
    objects_detector_time = time.time()
    # Fill the 7-entry history buffer first; until it is full we only record.
    if len(obj_history) < 7:
        obj_history.append(uprightrects_tuples)
        return
    else:
        # Ring-buffer overwrite: slot chosen by iteration count modulo 7.
        index_last_obj = iterations % 7
        obj_history[index_last_obj] = uprightrects_tuples
    # Skip the frame entirely unless the recent history is judged stable.
    if check_stability(obj_history) == 0:
        return
    # for indexx, snapshots in enumerate(obj_history):
    #     # print (snapshots[0][0])
    #     cv2.imshow('Good? ' + str(indexx), snapshots[0][0])
    #     cv2.waitKey(1)
    # Use one representative snapshot from the (stable) history.
    # NOTE(review): slot 2 looks arbitrary — confirm why this index is chosen.
    uprightrects_tuples = obj_history[2]
    for index, curr_tuple in enumerate(uprightrects_tuples):
        img_bgr8, center = curr_tuple
        width, height, d = np.shape(img_bgr8)
        w, l, d = np.shape(img_bgr8)
        img_clean_bgr_learn = img_bgr8.copy()
        # Border crop; the VGA capture needs a smaller margin.
        if not using_VGA:
            img_bgr8 = img_bgr8[13:w - 5, 13:l - 8]
        else:
            img_bgr8 = img_bgr8[6:w - 2, 6:l - 4]
        img_clean_bgr_class = img_bgr8.copy()
        img_clean_bgr_class = cv2.resize(img_clean_bgr_class, (128, 128), interpolation=cv2.INTER_AREA)  # resize image
        img_clr = cv2.resize(img_clean_bgr_learn, (128, 128))
        # img_gray = cv2.resize(cv2.cvtColor(img_clean_bgr_learn, cv2.COLOR_BGR2GRAY), (128, 128))
        # edges = cv2.Canny(img_gray, 40, 100)
        # cv2.imshow('Edges', edges)
        # cv2.imshow('img_gray', img_gray)
        cv2.imshow('Clean' + str(index), img_clean_bgr_class)
        if loaded_clf:
            # `final` is the image the classifier selects to be sent; `confiance`
            # is its confidence score.
            final, confiance = get_img_to_be_sent(img_clean_bgr_class)
            # Throttled confidence logging (every 50th iteration).
            if iterations % 50 == 0:
                print (confiance)
            # print("storing " + str(confiance))
            # Low confidence while recording: learn this object, then fall back
            # to sending the plain (upscaled) classification crop.
            if confiance < 0.85 and recording:
                learn_tuple(curr_tuple, obj_history)
                learn_from_str(1)
                final = cv2.resize(img_clean_bgr_class, (256, 256))
            # print("storing " + str(confiance))
        if saving_learn == 1:
            cv2.imshow('LEARN', img_clean_bgr_learn)
            cv2.waitKey(1000)
            save_imgs_learn(img_clean_bgr_learn)
        if saving_test == 1:
            cv2.imshow('TEST', img_clean_bgr_class)
            cv2.waitKey(1000)
            save_imgs_test(img_clean_bgr_class)
        if show:
            if loaded_clf == 0:
                print("Classifier not fitted, trying to load one")
                if load_classifier(1) == -1:
                    print("Could not load it, quitting")
            else:
                cv2.imshow('Sent' + str(index), final)
                cv2.waitKey(100)
        # if got_speech == 0:
        #     pass
        # else:
        # Only build/publish features when a classifier produced `final`.
        if loaded_clf:
            rows, cols, d = final.shape
            detected_object = Detected_Object()
            detected_object.id = 1
            detected_object.image = CvBridge().cv2_to_imgmsg(final, encoding="passthrough")
            detected_object.center_x = rows / 2
            detected_object.center_y = cols / 2
            detected_object.features.hog_histogram = get_img_hog(final)[0]
            colors_histo, object_shape = getpixelfeatures(final)
            detected_object.features.colors_histogram = colors_histo.tolist()
            detected_object.features.shape_histogram = object_shape.tolist()
            detected_objects_list.append(detected_object)
            if recording == 1:
                cv2.imshow('Just Sent' + str(index), final)

    # if got_speech == 0:
    #     return
    if loaded_clf:
        detected_objects_list_msg = Detected_Objects_List()
        detected_objects_list_msg.detected_objects_list = detected_objects_list
        detected_objects_list_publisher.publish(detected_objects_list_msg)
    # One-shot flags: consumed regardless of whether anything was published.
    got_speech = 0
    recording = 0