Example #1
def segment_raw(filename, save_folder_masked, save_folder_segmentation_raw):
    f_str = os.path.split(filename)[-1]
    masked_f_str = f_str.replace('aligned.h5', 'masked.h5')
    save_path = os.path.join(os.path.normpath(save_folder_segmentation_raw),
                             f_str)
    save_path_std = save_path.replace('aligned.h5', 'raw_std_dev.h5')
    save_path_roi = save_path.replace('aligned.h5', 'raw_rois.npy')
    save_path_traces = save_path.replace('aligned.h5', 'raw_traces.npy')
    data_path = os.path.join(os.path.normpath(save_folder_masked),
                             masked_f_str)
    segmentation(data_path, save_path_std, save_path_roi, save_path_traces)
Example #2
def segment_detrended(filename, save_folder_detrending,
                      save_folder_segmentation_detrended):
    f_str = os.path.split(filename)[-1]
    detr_f_str = f_str.replace('aligned.h5', 'detrended.h5')
    save_path = os.path.join(
        os.path.normpath(save_folder_segmentation_detrended), f_str)
    save_path_std = save_path.replace('aligned.h5', 'detrended_std_dev.h5')
    save_path_roi = save_path.replace('aligned.h5', 'detrended_rois.npy')
    save_path_traces = save_path.replace('aligned.h5', 'detrended_traces.npy')
    data_path = os.path.join(os.path.normpath(save_folder_detrending),
                             detr_f_str)
    segmentation(data_path, save_path_std, save_path_roi, save_path_traces)
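
Both wrappers above derive every output path by rewriting the 'aligned.h5' suffix; that pattern can be factored into one helper. A minimal sketch (the helper name is hypothetical, not part of the original code):

import os

def derived_paths(filename, out_folder, suffixes):
    # Rewrite the 'aligned.h5' suffix of `filename` once per entry in
    # `suffixes`, placing the results under `out_folder`.
    f_str = os.path.split(filename)[-1]
    base = os.path.join(os.path.normpath(out_folder), f_str)
    return [base.replace('aligned.h5', s) for s in suffixes]

# e.g. derived_paths('/data/run1_aligned.h5', '/out',
#                    ['raw_std_dev.h5', 'raw_rois.npy', 'raw_traces.npy'])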
Example #3
def pushing_data():
    clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    test = 1336
    if clientID != -1:
        while True:
            # Initialize environment
            panda = Panda(clientID)
            obj_pos, obj_ori, handles = panda.init_env()
            pointcloud = panda.get_cloud()
            nb_clutters = segmentation('data.pcd')
            print("Found %d clouds before pushing" % nb_clutters)
            push_poses = push_pose_generation(pointcloud, 30)
            # ----------------------------------------------------------------------------------------------------------
            for p_index in range(len(push_poses)):
                panda.push(push_poses[p_index])
                cloud_new = panda.get_cloud()
                nb_clutters_new = segmentation('data.pcd')
                print("Found %d clouds after pushing" % nb_clutters_new)
                if nb_clutters_new > nb_clutters:
                    result = 1
                else:
                    result = 0
                r_cloud = push_transform(push_poses[p_index], pointcloud)
                np.savetxt('forthehorde.pcd',
                           r_cloud,
                           fmt='%1.9f',
                           delimiter=' ')
                insertHeader('forthehorde.pcd')
                copyfile(
                    '/home/lou00015/cnn3d/scripts/forthehorde.pcd',
                    '/home/lou00015/dataset/push_UR5/test' + str(test) +
                    '.pcd')
                f = open('/home/lou00015/dataset/push_UR5/label.txt', "a+")
                f.write(str(result))
                f.close()
                test = test + 1
                vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)
                time.sleep(3)
                vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)
                for j in range(8):
                    vrep.simxSetObjectPosition(clientID, handles[j], -1,
                                               obj_pos[j],
                                               vrep.simx_opmode_oneshot_wait)
                    vrep.simxSetObjectOrientation(
                        clientID, handles[j], -1, obj_ori[j],
                        vrep.simx_opmode_oneshot_wait)
    else:
        print(
            'Failed to connect to simulation (V-REP remote API server). Exiting.'
        )
    exit()
Example #4
    def segmentation(self):
        input_dir = str(self.text_input.text())
        output_dir = str(self.text_output.text())

        if not os.path.isdir(input_dir):
            # "Wrong path" / "Please choose a valid path for the input folder."
            QMessageBox.information(self, u'مسیر اشتباه', u'لطفا مسیر درستی برای پوشه ورودی انتخاب کنید.')
        elif not os.path.isdir(output_dir):
            # "Wrong path" / "Please choose a valid path for the output folder."
            QMessageBox.information(self, u'مسیر اشتباه', u'لطفا مسیر درستی برای پوشه خروجی انتخاب کنید.')
        else:
            samples = []
            cores = []
            for image in os.listdir(input_dir):
                if image.split('.')[-1] in ['jpg', 'JPG', 'png', 'PNG']:
                    samples.append(os.path.join(input_dir, image))
            for sample in samples:
                cores.extend(segmentation.segmentation(sample))
            for i in range(len(cores)):
                image_name = '%s/%i.png' % (output_dir, i)
                imsave(image_name, cores[i])
            # "Done" / "Segmentation completed successfully!"
            QMessageBox.information(self, u'پایان', u'تقسیم‌ بندی با موفقیت انجام شد!')
Example #5
def main(image_file: str = "", k: int = 3, iterations: int = 5) -> None:
    # load image
    pil_image = Image.open(image_file)
    pil_image = np.array(pil_image)
    original = pil_image

    # create segmentation
    start = time.time()
    segmentation_image = segmentation(pil_image, k, iterations)
    duration = time.time() - start
    print("took:", duration, "seconds.")

    # plot original
    fig, axs = plt.subplots(1, 2)
    axs[0].imshow(original)
    title0 = axs[0].title
    title0.set_text("original")
    title0.set_position([.5, 1.17])
    axs[0].axis("off")

    # plot segmentation image
    axs[1].matshow(segmentation_image,
                   cmap=ListedColormap([
                       "y", "b", "g", "purple", "white", "gray", "cyan",
                       "pink", "orange"
                   ]))
    title1 = axs[1].title
    title1.set_text("k=" + str(k) + ", iterations=" + str(iterations))
    title1.set_position([.5, 1.0])
    axs[1].axis("off")

    # save fig
    # plt.savefig("images/k2" + str(k) + "_i" + str(iterations) + ".png")
    plt.show()
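
The segmentation function called above is not shown; a minimal sketch of a compatible implementation, assuming per-pixel k-means over the color values (the body is illustrative, not the original):

import numpy as np

def segmentation(image, k=3, iterations=5, seed=0):
    # Per-pixel k-means over color values; returns an HxW label map.
    rng = np.random.default_rng(seed)
    pixels = image.reshape(-1, image.shape[-1]).astype(float)
    # initialize centers from k distinct random pixels
    centers = pixels[rng.choice(len(pixels), size=k, replace=False)]
    for _ in range(iterations):
        # assign each pixel to its nearest center
        dists = np.linalg.norm(pixels[:, None, :] - centers[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # move each center to the mean of its assigned pixels
        for j in range(k):
            if np.any(labels == j):
                centers[j] = pixels[labels == j].mean(axis=0)
    return labels.reshape(image.shape[:2])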
Example #6
def labeling(feature, image):
    output_file = '/Users/xchen/Desktop/Segmentation/output/photo_o.jpg'
    output_label = '/Users/xchen/Desktop/Segmentation/output/photo_l.jpg'

    feature_class = read_label_batch(feature)

    forest = segmentation(image, output_file)

    forest_vote = vote(forest, feature_class)

    label_matrix = labelseg(forest, feature_class)

    image_label = Image.open(image)
    draw = ImageDraw.Draw(image_label)
    drawfont = ImageFont.truetype('/Library/Fonts/Times New Roman.ttf', 8)

    for i in forest.superpixel:
        y = i % 240
        x = i / 240
        if forest_vote[i] >= 0:
            draw.text((x, y), Label(forest_vote[i]).name, font=drawfont)
        else:
            draw.text((x, y), 'unknown', font=drawfont)

    del draw
    image_label.save(output_label)
    # print forest_vote;
    print label_matrix
    return label_matrix
Example #7
def trainingSetFromImage(trainImage, target):
    grayImage = binarize.toGrayScale(trainImage)
    binarizedImage = binarize.binarize(grayImage)

    tmpBinImage = copy.deepcopy(binarizedImage)
    height, width = binarizedImage.shape
    imageRegionSet = segmentation.segmentation(binarizedImage)

    i = 1
    font = cv2.FONT_HERSHEY_SIMPLEX
    for imageRegion in imageRegionSet:
        dim = imageRegion[2]
        (x, y, w, h) = dim
        cv2.rectangle(tmpBinImage, (x, height - y - h), (x + w, height - y), (255, 255, 255), 2)
        cv2.putText(tmpBinImage, str(i), (x, height - y - h - 20), font, 0.4, (255, 255, 255), 1, cv2.LINE_AA)
        i = i + 1

    cv2.imshow("Training Image", tmpBinImage)
    k = cv2.waitKey(0)
    cv2.destroyAllWindows()

    print 'Number of Segments : ' + str(len(imageRegionSet))
    while True:
        print "(i)Include Training Set (r)Retry (q)Quit"
        option = str(raw_input("Option : "))
        if option == 'q':
            break
        elif option == 'i':
            includeTrainingSet(imageRegionSet, target)
            break
        elif option == 'r':
            break

    return option
Example #8
def get_answer_fromdb(sentence1, questionList, ansList):

    sentence = segmentation(sentence1)
    if len(sentence) == 0:
        print('我们现在无法回答这个问题')  # "We cannot answer this question right now."
        return
    score = []
    for i in questionList:
        score.append(cos_sim(sentence, i))
    if len(set(score)) == 1:
        return '抱歉,我们现在无法回答这个问题'  # "Sorry, we cannot answer this question right now."
    else:
        # index of the highest non-NaN score
        num = 0
        maxNum = 0
        for count, s in enumerate(score):
            if not math.isnan(s) and s > maxNum:
                num = count
                maxNum = s
        return ansList[num]
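
segmentation and cos_sim are external helpers here; a minimal cos_sim sketch, assuming segmentation returns a whitespace-joined token string and similarity is cosine over bag-of-words counts:

import math
from collections import Counter

def cos_sim(sent1, sent2):
    # cosine similarity between two whitespace-tokenized sentences
    v1, v2 = Counter(sent1.split()), Counter(sent2.split())
    dot = sum(v1[w] * v2[w] for w in set(v1) & set(v2))
    norm1 = math.sqrt(sum(c * c for c in v1.values()))
    norm2 = math.sqrt(sum(c * c for c in v2.values()))
    if norm1 == 0 or norm2 == 0:
        return float('nan')  # undefined for empty input
    return dot / (norm1 * norm2)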
Example #9
def parallel_reassignment(fibers, fiber_clusters, central_index, thr):
    size_filter = 5
    large_clusters, small_clusters = [], []
    large_indices, small_indices = [], []
    large_centroids, small_centroids = [], []
    for key, indices in fiber_clusters.items():
        if len(indices) > size_filter:
            large_indices.append(int(key.split("_")[central_index]))
            c = [fibers[i] for i in indices]
            large_clusters.append(c)
            large_centroids.append(metrics.centroid_mean_align(c))
        else:
            small_indices.append(int(key.split("_")[central_index]))
            c = [fibers[i] for i in indices]
            small_clusters.append(c)
            small_centroids.append(metrics.centroid_mean_align(c))
    reassignment = seg.segmentation(21, thr, large_centroids, small_centroids,
                                    len(small_centroids), len(large_centroids))
    count = 0
    num_fibers_reass = 0
    num_discarded = 0
    for small_index, large_index in enumerate(reassignment):
        cluster_fibers = small_clusters[small_index]  # avoid shadowing the fibers argument
        if int(large_index) != -1:
            large_clusters[large_index].extend(cluster_fibers)
            num_fibers_reass += len(cluster_fibers)
            count += 1
        else:
            if len(cluster_fibers) > 2:
                recover_cluster = small_clusters[small_index]
                large_clusters.append(recover_cluster)
                large_indices.append(small_indices[small_index])
            else:
                num_discarded += 1
    return large_clusters, large_indices
Example #10
def prediction():
    characters, column_list = segmentation()
    current_dir = os.path.dirname(os.path.realpath(__file__))
    model_dir = os.path.join(current_dir, 'models/svc/svc.pkl')
    model = joblib.load(model_dir)

    classification_result = []
    for each_character in characters:
        # converts it to a 1D array
        each_character = each_character.reshape(1, -1)
        result = model.predict(each_character)
        classification_result.append(result)

    print(classification_result)

    plate_string = ''
    for eachPredict in classification_result:
        plate_string += eachPredict[0]

    print(plate_string)

    # it's possible the characters are wrongly arranged
    # since that's a possibility, the column_list will be
    # used to sort the letters in the right order

    column_list_copy = column_list[:]
    column_list.sort()
    rightplate_string = ''
    for each in column_list:
        rightplate_string += plate_string[column_list_copy.index(each)]

    print("Placa")
    print(rightplate_string)
    return (rightplate_string)
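
The copy-then-index reordering above can be expressed more directly by sorting the predictions together with their column positions (the .index() lookup also repeats a character when two columns happen to be equal). An equivalent sketch:

def order_by_column(plate_string, column_list):
    # pair each predicted character with its column, then sort by column
    pairs = sorted(zip(column_list, plate_string))
    return ''.join(char for _, char in pairs)

# e.g. order_by_column('BA21', [40, 10, 80, 60]) == 'AB12'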
Example #11
def Recognition():
    # load model
    MyLeNet = torch.load(opt.model_load_path+opt.model_load_name)
    MyLeNet = MyLeNet.to(device)

    # start
    files = glob.glob('../Dataset/MNIST/my_number/t*.png')
    files.sort()
    for fn in files:
        # load images
        img_ori = cv2.imread(fn)
        img = cv2.imread(fn, 0)
        borders, img = segmentation(img)
        img = num_img2tensor(img).to(device)
        # output of model, moved back to CPU before converting to numpy
        out = MyLeNet(img).cpu()
        # process pred label
        out = out.detach().numpy().tolist()
        result = []
        for out_i in out:
            number = (out_i.index(max(out_i)))
            result.append(number)
            print('Img {} Number : {}'.format(fn, number))
        splitShow(img_ori, borders, result)

    print('Done!!!')
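
num_img2tensor is defined elsewhere in that project; a hypothetical sketch of what it might do, assuming digit crops are normalized into an N x 1 x 28 x 28 float tensor:

import cv2
import numpy as np
import torch

def num_img2tensor(crops):
    # hypothetical helper: stack digit crops into an N x 1 x 28 x 28 tensor
    if not isinstance(crops, (list, tuple)):
        crops = [crops]
    batch = np.stack([cv2.resize(c, (28, 28)).astype(np.float32) / 255.0
                      for c in crops])
    return torch.from_numpy(batch).unsqueeze(1)  # add the channel axis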
Example #12
def extractMethods(fpath):
    config.log = open(fpath + "log", "w")

    config.init()  #	Initialize all the global variables
    adj = graph.read_graph(fpath)
    #	dotGenerator.generate_dot(adj,fpath+"Input")
    config.log.write("\n----- Main()\n")
    config.log.write("\t\t Input File Name : " + fpath + "\n")

    ctrList = pvh.read_CtrlLoc(fpath)
    pvh.InsertSource(adj)  # used in get_iter_p_index
    config.log.write("\t\tVertexHash Contents :\t" + str(config.vHash) + "\n")

    pc.precomputations(adj, ctrList)

    if config.flag['InGr'] != 0:
        config.log.write("\t\tInput Graph:\n")
        graph.write(adj, config.log)

    adj = sg.segmentation(adj, ctrList)

    #	commented the call to gp.generate(adj,fpath) method on June 10, 2019. Reason: Generates error.
    #	config.log.write("Calling argument generator\n")
    #	gp.generate(adj,fpath)	#Added on January 9, 2019

    #	output.dump_output(adj)

    #	remove_help_files(sys.argv[1])	#added on 4 July
    #	subprocess.call(["rm", "*.pyc")
    config.log.write("This is last of log.\n")
    config.log.close()

    formattedOutput(sys.argv[1])
Example #13
 def resetAll(self):
     self.exp = experiment.experiment()
     self.curve = curve.curve()
     self.ui.mainlist.clear()
     self.ui.pjlist.clear()
     self.ui.grafo.clear()
     self.noupdate = False
     self.generalsegmentation = segmentation.segmentation()
     self.populateGsegment()
Example #14
 def getCurrentSeg(self):
     s = segmentation.segmentation()
     s.slope = self.ui.sg_mm.value()
     s.mainth = self.ui.s_mth.value()
     s.window = self.ui.sg_fw.value()
     s.minlen = self.ui.s_vth.value()
     s.zmin = self.ui.plath.value()
     s.deltaF = self.ui.lasth.value()
     s.trorder = self.ui.derorder.value()
     return s
Example #15
 def __init__ ( self, parent = None ):
     QtWidgets.QMainWindow.__init__( self, parent )
     self.setWindowTitle( 'qt-ONE-View' )
     self.ui = view.Ui_facewindow()
     self.ui.setupUi( self )
     self.setConnections()
     self.exp = experiment.experiment()
     self.curve = curve.curve()
     self.noupdate = False
     self.generalsegmentation = segmentation.segmentation()
     self.populateGsegment()
Example #16
def fetchNpData(imgpath):
    npimg, gray_image, min_row, min_col, max_row, max_col = cc.cca(imgpath)
    #npimg = cv2.imread('./Number Plates/' + npname, 0)
    #print("returned image shape and data type : ", npimg.shape, type(npimg))
    npimg = npimg * 255
    img = np.array(npimg, dtype=np.uint8)
    #cv2.imshow("retured image", img)
    #cv2.waitKey(0)
    resized_img = cv2.resize(img, (250, 60))
    #print("image gathered from manish module ")
    char_list = seg.segmentation(resized_img)
    return char_list, gray_image, min_row, min_col, max_row, max_col
Example #17
def get_answer(sentence1):
    sentence1 = segmentation(sentence1)
    score = []
    for idx, sentence2 in enumerate(open('QuestionSeg.txt', 'r')):
        # print('idx: {}, sentence2: {}'.format(idx, sentence2))
        # print('idx: {}, cos_sim: {}'.format(idx, cos_sim(sentence1, sentence2)))
        score.append(cos_sim(sentence1, sentence2))
    if len(set(score)) == 1:
        print('暂时无法找到您想要的答案。')  # "Cannot find the answer you are looking for yet."
    else:
        index = score.index(max(score))
        file = open('Answer.txt', 'r').readlines()
        print(file[index])
Example #18
def get_answer(inputSentence):
    sentence1 = segmentation(inputSentence)
    #print('sentence1',len(sentence1))
    if len(sentence1) == 0:
        print('我们现在无法回答这个问题')  # "We cannot answer this question right now."
        return

    score = []
    for idx, sentence2 in enumerate(open('QuestionSeg.txt', 'r')):
        score.append(cos_sim(sentence1, sentence2))
    if len(set(score)) == 1:
        print('我们现在无法回答这个问题')  # "We cannot answer this question right now."
    else:
        index = score.index(max(score))
        file = open('Answer.txt', 'r').readlines()
        print(file[index])
Example #19
 def recognition(self, img_ori):
     img = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)
     borders, img = segmentation(img)
     if borders != []:
         img = num_img2tensor(img).to(device)
         # output of model
         out = self.MyModel(img).cpu()
         # process pred label
         out = out.detach().numpy().tolist()
         result = []
         for out_i in out:
             number = (out_i.index(max(out_i)))
             result.append(number)
         return splitShow(img_ori, borders, result)
     else:
         return []
Example #20
def canonize(img_file, depth_file, opts, params):
    img = preprocess_image(img_file)
    segmask = segmentation(depth_file, opts["segmentation"])

    img, segmask = _canonize(img, segmask, opts)

    # Determine if image is upside down.
    diff = np.sum(np.abs(img - params["img_avg"]))
    diff_upsidedown = np.sum(np.abs(img - params["img_avg_upsidedown"]))
    upsidedown = diff > diff_upsidedown
    if dataset.is_upside_down(img_file) != upsidedown:
        print "Canonization failed!"
    if upsidedown:
        img = np.fliplr(np.flipud(img))
        segmask = np.fliplr(np.flipud(segmask))

    return img, segmask
Example #21
def canonization_training(opts):
    print "# Canonization training"
    params = caching.nul_repr_dict()
    # Generate average intensity image from a subset of the dataset.
    img_avg = np.zeros(opts["img_shape"], dtype=int)
    files = dataset.training_files(opts["num_train_images"])
    for img_file, depth_file in print_progress(files):
        img = preprocess_image(img_file)
        segmask = segmentation(depth_file, opts["segmentation"])
        img, segmask = _canonize(img, segmask, opts)
        # Orient correctly if image is upside down
        if dataset.is_upside_down(img_file):
            img = np.fliplr(np.flipud(img))
        img_avg += img
    img_avg /= len(files)
    params["img_avg"] = img_avg
    params["img_avg_upsidedown"] = np.fliplr(np.flipud(img_avg))
    return params
Example #22
def main():
    config_window = config()

    # choose videos
    video = config_window.choose_video()

    # select the region for the experiment
    region = segmentation.select_region(video)
    print('select region:', region)

    # threshold configuration
    thread = MyThread(segmentation.config_segmentation, [video, region])
    thread.start()

    segmentation.stop_signal = False
    config_window.run_config()
    segmentation.stop_signal = True

    params = config_window.get_params()

    if params is not None:
        threshold = int(params)
        # get the localization of rat's head and body, return lists
        head_loc_history, body_loc_history, fps = segmentation.segmentation(
            video, threshold, region)

        # pass the measurement data to Kalman Filter -> position estimation and velocity prediction
        measurements = np.array(head_loc_history)
        kalman_estimates, filtered_state_covariances = kalman_estimation(
            measurements)
        prediction = kalman_estimates

        # save prediction to .mat
        video_name = video.split('/')[-1]
        video_name = video_name.split('.')[0]
        sio.savemat(
            '../data/' + video_name + '_data', {
                'kalman_estimates': kalman_estimates,
                'measurements': measurements,
                'filtered_state_covariances': filtered_state_covariances
            })

        # visualize the head/body positions and velocity
        visualize_tracking(video, kalman_estimates, measurements,
                           body_loc_history)
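
MyThread is not shown in this excerpt; a plausible minimal sketch, assuming it simply runs a target function with an argument list on a background thread:

import threading

class MyThread(threading.Thread):
    # hypothetical wrapper: run func(*args) on a background thread
    def __init__(self, func, args):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args
        self.daemon = True  # do not block interpreter exit

    def run(self):
        self.func(*self.args)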
Example #23
def cut(path):
    im = Image.open("../../../%s" % (path))
    mas = im.histogram()
    mas1 = im.histogram()
    mas.sort()
    mas.reverse()
    first_max_color, sec_max_color = mas1.index(mas[0]), mas1.index(mas[1])

    #CREATING NEW IMAGE

    im2 = Image.new("P", im.size, 255)
    temp = {}
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            pix = im.getpixel((x, y))
            temp[pix] = pix
            if pix != first_max_color:  # the most frequent color is assumed to be the background
                im2.putpixel((x, y), 0)

    # FILTRATION
    for x in range(im.size[0]):
        for y in range(im.size[1]):
            pix = im2.getpixel((x, y))
            if pix == 0:
                count = 0
                for i_x in range(-3, 3, 1):
                    for i_y in range(-3, 3, 1):
                        x_p = min(im2.size[0], max(0, x + i_x))
                        y_p = min(im2.size[1], max(0, y + i_y))
                        if im2.getpixel((x_p, y_p)) == 0:
                            count += 1
                if count < 2:
                    im2.putpixel((x, y), 255)

    border = segmentation()
    border.string_border(im2)
    border.letter_border(im2)

    count = 100
    for letter in border.letters:
        if letter[0] - letter[2] < 0:
            im3 = im2.crop(letter)
            im3.save("./NEW_SETS/%s.gif" % (count))
        count = count + 1
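
The despeckling pass above visits every pixel and counts black neighbors in a 6x6 window, clearing pixels with fewer than 2. A vectorized equivalent as a sketch, assuming the image is available as a numpy array:

import numpy as np
from scipy.ndimage import convolve

def despeckle(binary, window=6, min_neighbors=2):
    # clear black (0) pixels whose window holds fewer than
    # min_neighbors black pixels; everything else is unchanged
    black = (binary == 0).astype(int)
    counts = convolve(black, np.ones((window, window), dtype=int),
                      mode='nearest')
    cleaned = binary.copy()
    cleaned[(black == 1) & (counts < min_neighbors)] = 255
    return cleaned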
Example #24
def perform_ocr(raw_image, model_dict):

    # Clear out the output_images folder
    fileList = os.listdir(
        os.path.dirname(os.path.abspath(__file__)) + "/output_images")
    for fileName in fileList:
        os.remove(
            os.path.dirname(os.path.abspath(__file__)) + "/output_images/" +
            fileName)

    # Resize the selected region of interest to 100 px high; the width depends on the original resolution.
    height = raw_image.shape[0]
    width = raw_image.shape[1]
    aspectRatio = width / (height * 1.0)
    height = 100
    width = int(height * aspectRatio)
    raw_image = cv2.resize(raw_image, (width, height))

    # Preprocess the image
    preprocessed_image = preprocessing(raw_image)

    # Segment and resize the digits, preserving the aspect ratio
    all_digits = segmentation(preprocessed_image)
    all_results = ["" for _ in range(0, len(model_dict))]
    best_result = ""
    for digit in all_digits:
        current_predicted_digit = []
        '''
        plt.imshow(digit)
        plt.show()
        '''
        for key, value in model_dict.items():
            predicted = predict_image(digit, value['model'], value['graph'],
                                      value['session'])
            all_results[key] += str(predicted)
            current_predicted_digit.append(str(predicted))
        # Of all the predictions, assume the correct digit is the one predicted most often
        best_result += get_best_digit(current_predicted_digit)
    print(all_results)
    print(best_result)
    return best_result
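
get_best_digit is not listed; a minimal majority-vote sketch consistent with the comment above it:

from collections import Counter

def get_best_digit(predictions):
    # majority vote over the per-model predictions for one digit
    return Counter(predictions).most_common(1)[0][0]

# e.g. get_best_digit(['3', '3', '8']) == '3'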
Example #25
 def Segmentation(self):
     segmentation()
Example #26
import load, segmentation

load.dic_load("dict.txt".decode("utf-8"))


sen = raw_input("Input:")
while sen != "Q":
    res = segmentation.segmentation(sen.decode("gbk"))
    for w in res:
        print w.encode("gbk")
    sen = raw_input("Input:")
Example #27
# (excerpt begins inside a loop over image_sequence; sift is a cv2 feature detector)
    kp, des = sift.detectAndCompute(image, None)
    # print(type(kp))
    # print("\n\n\n")
    # print(type(des))
    # break
    image_sift_sequence.append((kp, des))
    print(len(kp))
    out_image = image
    img = cv2.drawKeypoints(image, kp, out_image)
    cv2.imshow("features", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

image_label_sequence = []
for image in image_sequence:
    labels = segmentation(image, 3)
    image_label_sequence.append(labels)
#
# np.save('segmentation.npy', np.asarray(image_label_sequence))

# image_label_sequence = np.load("segmentation.npy")
outputs = []
for i in range(len(image_sequence)):
    image = image_sequence[i]
    labels = image_label_sequence[i]
    j = 0
    for label in labels:
        outputs.append(mark_boundaries(image, label))
#         cv2.imwrite('segs_{}_{}.jpg'.format(i, j), mark_boundaries(image, label))
#         j += 1
#     break
Example #28
def match(img, img2):

    ################################## Segment ###############################
    #img = cv2.imread("assets/102_3.tif", cv2.IMREAD_GRAYSCALE)

    # img = cv2.normalize(img,img)
    out = segmentation(img, 120)  #110 for FP_DB and 45 for FP_DB2
    out2 = segmentation(img2, 120)
    # cv2.imshow("segmentation", out)
    # cv2.imshow("segmentation2", out2)
    cv2.imwrite("./seg.BMP", out)
    cv2.imwrite("./seg2.BMP", out2)

    #
    # # initialize the list of threshold methods
    # methods = [
    # 	("THRESH_BINARY", cv2.THRESH_BINARY),
    # 	("THRESH_BINARY_INV", cv2.THRESH_BINARY_INV),
    # 	("THRESH_TRUNC", cv2.THRESH_TRUNC),
    # 	("THRESH_TOZERO", cv2.THRESH_TOZERO),
    # 	("THRESH_TOZERO_INV", cv2.THRESH_TOZERO_INV)]
    #
    # # loop over the threshold methods
    # for (threshName, threshMethod) in methods:
    # 	# threshold the image and show it
    # 	(T, thresh) = cv2.threshold( img , 110 , 255, threshMethod)
    # 	cv2.imshow(threshName, thresh)
    # 	cv2.waitKey(0)
    # ################################ FpEnhancer ###############################
    #
    sourceImage = "./seg.BMP"
    sourceImage2 = "./seg2.BMP"
    # np.set_printoptions(
    #     threshold=np.inf,
    #     precision=4,
    #     suppress=True)

    print("Reading image")
    image = ndimage.imread(sourceImage, mode="L").astype("float64")
    image2 = ndimage.imread(sourceImage2, mode="L").astype("float64")
    # utils.showImage(image, "original", vmax=255.0)

    print("Normalizing")
    image = utils.normalize(image)
    image2 = utils.normalize(image2)
    #utils.showImage(image, "normalized")

    print("Finding mask")
    mask = utils.findMask(image)

    print("Applying local normalization")
    image = np.where(mask == 1.0, utils.localNormalize(image), image)
    image2 = np.where(mask == 1.0, utils.localNormalize(image2), image2)
    # utils.showImage(image, "locally normalized")

    print("Estimating orientations")
    orientations = np.where(mask == 1.0, utils.estimateOrientations(image),
                            -1.0)
    # utils.showOrientations(image, orientations, "orientations", 8)
    orientations2 = np.where(mask == 1.0, utils.estimateOrientations(image2),
                             -1.0)
    # utils.showOrientations(image2, orientations2, "orientations2", 8)

    print("Estimating frequencies")
    frequencies = np.where(mask == 1.0,
                           utils.estimateFrequencies(image, orientations),
                           -1.0)
    frequencies2 = np.where(mask == 1.0,
                            utils.estimateFrequencies(image2, orientations2),
                            -1.0)

    print("Filtering")

    image = gaborFilter(image, orientations, frequencies)
    image = np.where(mask == 1.0, image, 1.0)
    image2 = gaborFilter(image2, orientations2, frequencies2)
    image2 = np.where(mask == 1.0, image2, 1.0)
    # if options.images > 0:
    # utils.showImage(image, "gabor")

    print("Binarizing")
    image = np.where(mask == 1.0, utils.binarize(image, 16), 1.0)
    image2 = np.where(mask == 1.0, utils.binarize(image2, 16), 1.0)
    # utils.showImage(image, "binarized")

    destinationImage = "output.BMP"
    destinationImage2 = "output2.BMP"
    # save result image
    misc.imsave(destinationImage, image)
    misc.imsave(destinationImage2, image2)

    # reread the image in cv2 format
    img = cv2.imread('output.BMP', 0)
    img2 = cv2.imread('output2.BMP', 0)
    skel = skeletonize(img)
    skel2 = skeletonize(img2)
    neg = inverse(skel)
    # show all image during all processes
    neg2 = inverse(skel2)
    ################################ MnExtract ###############################
    mn1, mn2 = extractBoom(neg)
    mn3, mn4 = extractBoom(neg2)
    kp1 = MnMatcher.gatherKeyPoints(mn1)
    kp2 = MnMatcher.gatherKeyPoints(mn2)

    kp3 = MnMatcher.gatherKeyPoints(mn3)
    kp4 = MnMatcher.gatherKeyPoints(mn4)

    data = MnMatcher.checkORB(kp1, kp3, neg, neg2)
    data2 = MnMatcher.checkORB(kp2, kp4, neg, neg2)
    print(data)
    print(data2)
    result = (data + data2) / 2
    print(result > DECIDE_TRESHOLD)

    #plt.show()
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    print("result " + str(result))
    return result
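
skeletonize and inverse come from the surrounding project; minimal sketches, assuming scikit-image thinning of a dark-ridge 0/255 image and a plain photometric negative:

import numpy as np
from skimage.morphology import skeletonize as sk_skeletonize

def skeletonize(img):
    # thin a binarized 0/255 image (dark ridges) to a one-pixel skeleton
    skel = sk_skeletonize(img < 128)
    return np.where(skel, 0, 255).astype(np.uint8)

def inverse(img):
    # photometric negative of an 8-bit image
    return (255 - img).astype(np.uint8)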
Example #29
    #temp image
    tmpImage = np.copy(img)

    #Pre-processing - Improving the image
    tmpImage = filter.smoothing(tmpImage)
    tmpImage = filter.histogramEqualizing(tmpImage)

    #Pre-processing - Edge enhancement
    tmpImage = filter.laplacianOfGaussian(tmpImage)

    #Resizes image
    tmpImage = resize.resize(tmpImage)
    img = resize.resize(img)

    #Segmentation image
    tmpImage = segmentation.segmentation(tmpImage)

    #Multiplying to get segmented image
    img = np.multiply(tmpImage, img)

    #Converting image to greyscale to get descriptors from image
    imgGrey = np.zeros([img.shape[0], img.shape[1]], dtype=np.uint8)
    imgGrey[:, :] = np.uint8(
        np.floor(0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] +
                 0.114 * img[:, :, 2]))

    #Getting descriptors from image
    haralick = descriptors.texture_descriptors(imgGrey)
    angleHistogram = descriptors.gradient_descriptors(imgGrey)
    imgDescriptors = np.concatenate((haralick, angleHistogram), axis=0)
Example #30
def push():
    x_p = [[-0.125,0,0.05],[0.125,0,0.05]]
    x_n = [[0.125,0,0.05],[-0.125,0,0.05]]
    y_p = [[0,-0.125,0.05],[0,0.125,0.05]]
    y_n = [[0,0.125,0.05],[0,-0.125,0.05]]
    xy_p = [[-0.0884,-0.0884,0.05],[0.0884,0.0884,0.05]]
    xy_n = [[0.0884,0.0884,0.05],[-0.0884,-0.0884,0.05]]
    xp_yn = [[-0.0884,0.0884,0.05],[0.0884,-0.0884,0.05]]
    xn_yp = [[0.0884,-0.0884,0.05],[-0.0884,0.0884,0.05]]
    directions = [x_p,x_n,y_p,y_n,xy_p,xp_yn,xn_yp,xy_n]
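            # NOTE: the block below is excerpted from inside the original
            # simulation loop; clientID, model, and emptyBuff are defined in
            # that omitted enclosing code.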
            # add object
            object_name, object_handle = add_three_objects(clientID)
            time.sleep(5.0)
            num_obj = 3
            object_pos = []
            object_angle = []
            for obj_i in range(num_obj):
                res, pos = vrep.simxGetObjectPosition(clientID,object_handle[obj_i],-1,vrep.simx_opmode_oneshot_wait)
                res, orientation = vrep.simxGetObjectOrientation(clientID,object_handle[obj_i],-1,vrep.simx_opmode_oneshot_wait)
                object_pos.append(pos)
                object_angle.append(orientation)
            # Generate object pcd file from V-REP
            sim_ret, cam_handle = vrep.simxGetObjectHandle(clientID, 'kinect_depth', vrep.simx_opmode_blocking)
            emptyBuff = bytearray()
            res, retInts, retFloats, retStrings, retBuffer = vrep.simxCallScriptFunction(clientID, 'kinect',
                                                                                         vrep.sim_scripttype_childscript,
                                                                                         'absposition', [], [], [], emptyBuff,
                                                                                         vrep.simx_opmode_blocking)
            R = np.asarray([[retFloats[0], retFloats[1], retFloats[2], retFloats[3]],
                            [retFloats[4], retFloats[5], retFloats[6], retFloats[7]],
                            [retFloats[8], retFloats[9], retFloats[10], retFloats[11]]])
            # print('camera pose is: ',R)
            result, state, data = vrep.simxReadVisionSensor(clientID, cam_handle, vrep.simx_opmode_blocking)
            data = data[1]
            pointcloud = []
            for i in range(2, len(data), 4):
                p = [data[i], data[i + 1], data[i + 2], 1]
                pointcloud.append(np.matmul(R, p))
                # Object segmentation
            pcl = remove_clipping(pointcloud)
            # Save pcd file
            np.savetxt('data.pcd', pcl, delimiter=' ')
            insertHeader('data.pcd')
            nb_clutters = segmentation('data.pcd')
            label = [0,0,0,0,0,0,0,0]
            if nb_clutters == num_obj:
                continue
            print('Number of objects: %d' % nb_clutters)
            vg = transform(pcl)
            input_shape = vg.reshape((1,1,32,32,32))
            p = model.predict(input_shape, verbose=False)
            p = p[0]
            print('Predicted 8-dir success rate: ', p)
            best_dir = np.argmax(p)
            direction = directions[best_dir]
            print('The best direction is %d' % best_dir)
            f = open('predictions.txt', "a+")
            f.write(str(best_dir))
            f.close()
            tries = 1
            for direction in directions:
                res, target1 = vrep.simxGetObjectHandle(clientID, 'grasp', vrep.simx_opmode_oneshot_wait)
                res, target2 = vrep.simxGetObjectHandle(clientID, 'lift', vrep.simx_opmode_oneshot_wait)
                res, target3 = vrep.simxGetObjectHandle(clientID, 'lift0', vrep.simx_opmode_oneshot_wait)

                angles = [-3.14, 0, 0]
                # Set landing position
                res1 = vrep.simxSetObjectPosition(clientID, target1, -1, direction[0], vrep.simx_opmode_oneshot)
                res2 = vrep.simxSetObjectOrientation(clientID, target1, -1, angles, vrep.simx_opmode_oneshot)
                # Set pushing direction
                res3 = vrep.simxSetObjectPosition(clientID, target2, -1, direction[1], vrep.simx_opmode_oneshot)
                res4 = vrep.simxSetObjectOrientation(clientID, target2, -1, angles, vrep.simx_opmode_oneshot)
                # Set wait position
                res5 = vrep.simxSetObjectPosition(clientID, target3, -1, [direction[1][0],direction[1][1],direction[1][2]+0.15], vrep.simx_opmode_oneshot)
                res6 = vrep.simxSetObjectOrientation(clientID, target3, -1, angles, vrep.simx_opmode_oneshot)
                # Execute movements
                res, retInts, retFloats, retStrings, retBuffer = vrep.simxCallScriptFunction(clientID, 'Sphere',
                                                                                             vrep.sim_scripttype_childscript,
                                                                                             'go', [], [], [],
                                                                                             emptyBuff,
                                                                                             vrep.simx_opmode_blocking)
                print('execution signal sent')
                running = True
                while running:
                    res, signal = vrep.simxGetIntegerSignal(clientID, 'finish', vrep.simx_opmode_oneshot_wait)
                    if signal == tries:
                        running = False
                    else:
                        running = True
                print('recording data ...')
                # Recording data
                time.sleep(1.0)
                # After pushing
                result, state, new_data = vrep.simxReadVisionSensor(clientID, cam_handle, vrep.simx_opmode_blocking)
                new_data = new_data[1]
                new_pointcloud = []
                for i in range(2, len(new_data), 4):
                    p = [new_data[i], new_data[i + 1], new_data[i + 2], 1]
                    new_pointcloud.append(np.matmul(R, p))
                    # Object segmentation
                new_pcl = remove_clipping(new_pointcloud)
                np.savetxt('data_new.pcd', new_pcl, delimiter=' ')
                insertHeader('data_new.pcd')
                # v = pptk.viewer(new_pcl) # Visualize pcd if needed
                nb_clutters_new = segmentation('data_new.pcd')
                print('Number of objects: %d' % nb_clutters_new)
                if nb_clutters_new>nb_clutters:
                    dir_index = directions.index(direction)
                    print('Tried direction:', direction)
                    print('Number %d in directions list' % dir_index)
                    label[dir_index]=1
                    print('Updated label:', label)
                else:
                    print('Pushing not meaningful ...')
                for j in range(num_obj):
                    vrep.simxSetObjectPosition(clientID, object_handle[j], -1, object_pos[j], vrep.simx_opmode_oneshot_wait)
                    vrep.simxSetObjectOrientation(clientID, object_handle[j], -1, object_angle[j], vrep.simx_opmode_oneshot_wait)
                time.sleep(0.5)
                tries = tries+1
            print(label)
Example #31
def text(set, path):
    use_sym = 0
    start_time = time()
    im = Image.open("../../../%s" % (path))
    mas = im.histogram()
    mas1 = im.histogram()
    mas.sort()
    mas.reverse()
    first_max_color, sec_max_color = mas1.index(mas[0]), mas1.index(mas[1])

    #CREATING NEW IMAGE

    im2 = Image.new("P", im.size, 255)
    temp = {}
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            pix = im.getpixel((x, y))
            temp[pix] = pix
            if pix != first_max_color:  # the most frequent color is assumed to be the background
                im2.putpixel((x, y), 0)

    # FILTRATION
    for x in range(im.size[0]):
        for y in range(im.size[1]):
            pix = im2.getpixel((x, y))
            if pix == 0:
                count = 0
                for i_x in range(-3, 3, 1):
                    for i_y in range(-3, 3, 1):
                        x_p = min(im2.size[0], max(0, x + i_x))
                        y_p = min(im2.size[1], max(0, y + i_y))
                        if im2.getpixel((x_p, y_p)) == 0:
                            count += 1
                if count < 2:
                    im2.putpixel((x, y), 255)

    border = segmentation()
    border.strings, border.letters = [], []
    border.string_border(im2)
    border.letter_border(im2)

    strings, letters = border.strings, border.letters
    # SIZE OF THE GAP BETWEEN LETTERS AND BETWEEN WORDS

    delta = 0
    for letter in letters:
        delta = delta + (letter[2] - letter[0])
    delta = delta / len(letters)

    label = 0.5 * delta  # assume the inter-word gap is roughly half the average letter width

    set_label = []
    place_label = 0
    num = 0
    start, end, prev = 0, 0, 0
    for letter in letters:
        #(a+c)(b+d)=ab
        is_new_string = letter[1] > end
        if not is_new_string:
            delta = letter[0] - prev
            if delta >= label:
                set_label.append((place_label, ' '))
            else:
                set_label.append((place_label, ''))
        else:
            if start != 0 or end != 0:
                set_label.append((place_label, '\n'))
            else:
                set_label.append((place_label, ''))
        place_label += 1
        start, end, prev = letter[1], letter[3], letter[2]

    #SIMPLE RECOGNITION OF SYMBOLS

    imageset = []
    for i in set:
        for img in os.listdir('%s' % (i[1])):
            temp = Image.open('%s/%s' % (i[1], img))
            imageset.append((i[0], temp, i[2]))

    v = vector.Common_Vector_Compare()
    text_letter = []
    z = 100
    for letter in letters:
        v.count = z
        guess = []
        cutlet = im2.crop(letter)
        if use_sym == 1:
            sym_of_cutlet = symmetry(cutlet)
        else:
            sym_of_cutlet = -1
        guess = recog.symb_recog(v, (cutlet, sym_of_cutlet), imageset)
        guess.sort()
        text_letter.append(guess[len(guess) - 1][1])
        z = z + 100
    output = open('out.txt', 'w')

    number_letter = 0
    k = set_label.pop(0)
    for i in text_letter:
        if number_letter == k[0]:
            output.write(k[1])
            if len(set_label) > 0:
                k = set_label.pop(0)
        output.write(i)
        number_letter += 1
    text_letter = []
    output.close()
    end_time = time()
    print end_time - start_time
Example #32
 def __init__(self, config_type):
     package_path = os.path.dirname(os.path.abspath(__file__))
     sys.path.append(package_path)
     self.config = load_config(config_type)
     self.segmentation_class = segmentation.segmentation("keyword")
Example #33
    img = cv2.resize(img,(int(round(width*sole)),720))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # rotate the image (disabled)
    # transport = cv2.transpose(img)
    # img = cv2.flip(transport,0)

    return img

if __name__ == '__main__':
    from glob import glob
    for fn in glob('./test/*.bmp'):
        img = cv2.imread(fn)
        img = img_process(img)


        roi = square.find_squares(img)
        cv2.imshow("squares", roi)

        Num = segmentation.segmentation(roi)
        Num.reverse()

        print Num


        ch = 0xFF & cv2.waitKey()
        if ch == 27:
            break

    cv2.destroyAllWindows()
Example #34
        # Object segmentation
        pcd = remove_clipping(pointcloud)
        # v = pptk.viewer(pcd)
        # Save pcd file
        np.savetxt('data.pcd', pcd, delimiter=' ')
        insertHeader('data.pcd')
        # ----------------------------------------------------------------------------------------------------------
        # Push or grasp?
        # ----------------------------------------------------------------------------------------------------------
        # Push metrics
        push_sc = push_scores(pushing_model, pcd)
        print('Pushing metrics: ', push_sc)
        push_dir = np.where(push_sc == max(push_sc))

        # Grasp metrics
        nb_clutters = segmentation('data.pcd')
        print('Found %d objects' % nb_clutters)
        poses = GraspPoseGeneration('cloud_cluster_0.pcd')
        poses.generate_candidates()
        grasp_sc = grasp_scores(grasping_model, poses, pcd)
        print('Grasping metrics: ', grasp_sc)
        grasp_i = np.where(grasp_sc == max(grasp_sc))
        # Evaluate success rate of each action
        if poses.n_samples != 0 and max(push_sc) < max(grasp_sc):
            grasp(poses.rotm[grasp_i[0]], poses.surface[grasp_i[0]], clientID)
        else:
            push(push_dir, pcd, clientID)
            continue
    else:
        print(
            'Failed to connect to simulation (V-REP remote API server). Exiting.'
        )
Example #35
input_dir = '/media/mehdi/New/Works/Projects/Saratan/saratan/photos/'
output_dir = '/media/mehdi/New/Works/Projects/Saratan/saratan/res'
#cores = segmentation(sample)


samples = []
cores = []
C = []
cnt = 0
for image in os.listdir(input_dir):
    if (image.split('.')[-1] in ['jpg', 'JPG', 'png', 'PNG']):
        samples.append(input_dir+image)
for sample in samples:
#    print sample
    c = segmentation.segmentation(sample)
    print cnt, ': ', len(c)
    cnt += 1
    
#    print len(c)
#    for core in c:
#        print len(core)
#        print '-------------'
#        if np.all(cores[:]==core)==False:
#        print (core in cores)==False
#        if (core in cores)==False:
#            print '###########'
#            cores.append(core)
#        else:
#            print 'duplicated'
    C.append([c])
Example #36
def multiple_objects_evaluation():
    grasping_model = load_model('trained_models/grasping.h5')
    pushing_model = load_model('trained_models/pushing.h5')
    for layer in grasping_model.layers:
        layer.name = layer.name + '_grasping'
    for layer in pushing_model.layers:
        layer.name = layer.name + '_pushing'
    experiment_number = 0
    clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    if clientID != -1:
        # Initialize environment
        panda = Panda(clientID)
        num_obj = 3
        obj_pos, handles = panda.init_env()
        while num_obj > 0:
            pointcloud = panda.get_cloud()
            # ----------------------------------------------------------------------------------------------------------
            # Push or grasp?
            nb_clutters = segmentation('data.pcd')
            print('Found %d objects' % nb_clutters)
            push_poses = push_pose_generation(pointcloud, 30)
            push_sc = push_scores(pushing_model, push_poses, pointcloud)
            print('Pushing scores:')
            for scores in push_sc:
                print(scores)
            push_dir = push_poses[push_sc.index(max(push_sc))]
            pc = pypcd.PointCloud.from_path('cloud_cluster_0.pcd')
            data = [pc.pc_data['x'], pc.pc_data['y'], pc.pc_data['z']]
            data = np.transpose(data)
            # v = pptk.viewer(data)
            grasp_poses, points = grasp_pose_generation()
            grasp_sc = grasp_scores(grasping_model, grasp_poses, points, data)
            print('Grasping scores:')
            for scores in grasp_sc:
                print(scores)
            best_grasp = max(grasp_sc)
            grasp_i = grasp_sc.index(best_grasp)
            print('Highest grasping score index: ', grasp_i)
            # ----------------------------------------------------------------------------------------------------------
            # Evaluate success rate of each action
            if len(points) != 0:
                if max(grasp_sc) > max(push_sc):
                    panda.grasp(grasp_poses[grasp_i], points[grasp_i])
                else:
                    panda.push(push_dir)
            else:
                continue
        # ----------------------------------------------------------------------------------------------------------
        # print('Re-analyzing geometrics ...')
        # # # Recording data
        # label = []
            for j in range(num_obj):
                res, current_pos = vrep.simxGetObjectPosition(
                    clientID, handles[j], -1, vrep.simx_opmode_oneshot_wait)
                print(current_pos)
                if current_pos[2] > obj_pos[j][2] + 0.03:
                    # res = 1
                    vrep.simxSetObjectPosition(clientID, handles[j], -1,
                                               [2, 2, 0.5],
                                               vrep.simx_opmode_oneshot_wait)
                    num_obj = num_obj - 1
                    print('Grasp successful, %d objects left' % num_obj)
        print('test completed, starting next iteration ...')
    else:
        print(
            'Failed to connect to simulation (V-REP remote API server). Exiting.'
        )
        exit()
Example #37
def hist_creation(model_image_path, query_image_path, sensitivity_value, number_of_parts_X, number_of_parts_Y,
                  color_model, num_of_dimensions, metric):
    # Read the reference and target PCB images
    model_img = cv2.imread(model_image_path)
    query_img = cv2.imread(query_image_path)
    result_img = query_img

    # Get the image dimensions
    (model_height, model_width) = model_img.shape[:2]
    (query_height, query_width) = query_img.shape[:2]

    # Run the segmentation
    coords = segmentation((model_height, model_width), (query_height, query_width), number_of_parts_X,
                          number_of_parts_Y)
    if not coords:
        return 0

    # Build histograms for each segment and compare them with one another.
    # The defect flag records whether the board has at least one defect
    # (0 = no, 1 = yes)
    defect = 0

    # Select the color model
    if color_model == 'GRAY':
        model_temp = cv2.cvtColor(model_img, cv2.COLOR_BGR2GRAY)
        query_temp = cv2.cvtColor(query_img, cv2.COLOR_BGR2GRAY)
    elif color_model == 'HSV' or color_model == 'H' or color_model == 'HS':
        model_temp = cv2.cvtColor(model_img, cv2.COLOR_BGR2HSV)
        query_temp = cv2.cvtColor(query_img, cv2.COLOR_BGR2HSV)
    elif color_model in ('Lab', 'ab'):
        model_temp = cv2.cvtColor(model_img, cv2.COLOR_BGR2LAB)
        query_temp = cv2.cvtColor(query_img, cv2.COLOR_BGR2LAB)
    else:
        model_temp = model_img
        query_temp = query_img

    # Build histograms for the segments
    for segment in coords:
        mask = numpy.zeros(model_img.shape[:2], numpy.uint8)
        mask[segment[0][1]:segment[1][1], segment[0][0]:segment[1][0]] = 255

        # Select the histogram dimensionality and color model; based on that,
        # build the corresponding histograms, normalize them, and flatten
        # them into vectors
        if num_of_dimensions == 1 and color_model == 'GRAY':
            model_flat_h, query_flat_h = grayscale_1D(model_temp, query_temp, mask)
        elif num_of_dimensions == 1 and color_model == 'RGB':
            model_flat_h, query_flat_h = RGB_1D(model_temp, query_temp, mask)
        elif num_of_dimensions == 2 and color_model == 'RGB':
            model_flat_h, query_flat_h = RGB_2D(model_temp, query_temp, mask)
        elif num_of_dimensions == 3 and color_model == 'RGB':
            model_flat_h, query_flat_h = RGB_3D(model_temp, query_temp, mask)
        elif num_of_dimensions == 1 and color_model == 'HSV':
            model_flat_h, query_flat_h = HSV_1D(model_temp, query_temp, mask)
        elif num_of_dimensions == 2 and color_model == 'HSV':
            model_flat_h, query_flat_h = HSV_2D(model_temp, query_temp, mask)
        elif num_of_dimensions == 3 and color_model == 'HSV':
            model_flat_h, query_flat_h = HSV_3D(model_temp, query_temp, mask)
        elif color_model == 'H':
            model_flat_h, query_flat_h = H_1D(model_temp, query_temp, mask)
        elif color_model == 'HS':
            model_flat_h, query_flat_h = HS_2D(model_temp, query_temp, mask)
        elif color_model == 'ab':
            model_flat_h, query_flat_h = Lab_2D(model_temp, query_temp, mask)
        elif color_model == 'Lab':
            model_flat_h, query_flat_h = Lab_3D(model_temp, query_temp, mask)

        # Compute the distance between the histograms
        if metric == 'EUCLID':
            distance = euclid(model_flat_h, query_flat_h)
        elif metric == 'MANHATTAN':
            distance = manhattan(model_flat_h, query_flat_h)
        elif metric == 'CHEBISHEV':
            distance = chebishev(model_flat_h, query_flat_h)
        elif metric == 'HAMMING':
            distance = hamming(model_flat_h, query_flat_h)

        # Compare the distance against the threshold and mark defective
        # regions with a red rectangle. Below the threshold the segments are
        # considered identical; otherwise they differ (possible defect)
        if distance > sensitivity_value:
            result_img = cv2.rectangle(result_img, (segment[0][0], segment[0][1]), (segment[1][0], segment[1][1]), (0, 0, 255), thickness=6)
            result_img = cv2.line(result_img, (segment[0][0], segment[0][1]), (segment[1][0], segment[1][1]), (0, 0, 255), 3)
            defect = 1

    if not defect:
        return 1
    else:
        return result_img
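
The euclid, manhattan, chebishev, and hamming helpers are defined elsewhere; minimal numpy sketches of the first three, assuming flattened histogram vectors:

import numpy

def euclid(h1, h2):
    # Euclidean (L2) distance between two flattened histograms
    return float(numpy.linalg.norm(numpy.asarray(h1) - numpy.asarray(h2)))

def manhattan(h1, h2):
    # Manhattan (L1) distance
    return float(numpy.abs(numpy.asarray(h1) - numpy.asarray(h2)).sum())

def chebishev(h1, h2):
    # Chebyshev (L-infinity) distance
    return float(numpy.abs(numpy.asarray(h1) - numpy.asarray(h2)).max())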