Example #1
def segmentation_processing(img, segm_total, max_indices, i):

    # Does the heavy lifting of image segmentation: returns the image
    # segmented around the salient point selected by max_indices and i.

    img_parcial = None

    # checking whether saliency point is in previously grown region
    #print(max_indices)
    if (segm_total[int(max_indices[1][i]), int(max_indices[0][i])] == 0).all():
        # x coordinate is the second element (the indices appear to come back in (row, column) order)
        segm_current = ml.stochastic_reg_grow(img, (max_indices[1][i],\
            max_indices[0][i]),\
            30, 30, 20, 30, 60)

        segm_current[np.where(np.mean(segm_current, axis=2) != 0)] = 1
        segm_current = np.asarray(segm_current, dtype=bool)
        segm_current = np.multiply(segm_current, img)

        (T, B, L, R) = rectangle_detect(segm_current)
        # T, B, L, R will be None if there are too few or too many pixels in segm_current
        if T is not None:

            segm_current[T:B, L:R] = 1
            segm_current = np.asarray(segm_current, dtype=bool)
            segm_total[np.where(np.mean(segm_total, axis=2) != 0)] = 1
            segm_total = np.asarray(segm_total, dtype=bool)

            if (np.bitwise_and(segm_current, segm_total) == 0).all():
                segm_current[T:B, L:R] = 1
                segm_current = np.asarray(segm_current, dtype=bool)

                # updating segm_total
                segm_total = np.bitwise_or(segm_current, segm_total)
                segm_total = np.multiply(segm_total, img)

                img_parcial = np.multiply(segm_current, img)

                # build a box in the segmented image, but only if the pixel
                # counts lie between the thresholds (see the transfere function)
                img_parcial = transfere(img_parcial, img)

    return img_parcial, segm_total
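
Neither rectangle_detect nor transfere is shown in these examples. From the
call sites, rectangle_detect returns the (T, B, L, R) bounds of the grown
region, or four Nones when its pixel count falls outside some thresholds,
and transfere returns the boxed portion of the original image, or None under
the same kind of pixel-count test. A minimal sketch consistent with that
usage; the threshold values are illustrative assumptions, not the originals:

import numpy as np

def rectangle_detect(segm, min_pixels=50, max_pixels=100000):
    # Hypothetical reconstruction: bounding box of the non-zero (segmented)
    # pixels, rejecting regions that are too small or too large.
    mask = np.mean(segm, axis=2) != 0
    count = np.count_nonzero(mask)
    if count < min_pixels or count > max_pixels:
        return (None, None, None, None)
    rows = np.where(np.any(mask, axis=1))[0]
    cols = np.where(np.any(mask, axis=0))[0]
    return (rows[0], rows[-1] + 1, cols[0], cols[-1] + 1)

def transfere(img_parcial, img, min_pixels=50, max_pixels=100000):
    # Hypothetical reconstruction: crop the original image to the bounding
    # box of the segmented pixels, or give up (return None) when the region
    # is implausibly small or large.
    (T, B, L, R) = rectangle_detect(img_parcial, min_pixels, max_pixels)
    if T is None:
        return None
    return img[T:B, L:R]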
Example #2
def search_mode(sgng, dictionary, last_joint_pos, clientID, v0):

    global ior_table

    print('\n\nEntering search mode.\n')
    # make sure sgng has at least two nodes
    if len(sgng.node_list) < 2:
        print("SGNG must have at least two nodes. Run some exploration first.")
        return 0

    category = None

    while (category not in dictionary):

        category = input('\n\nWhat object category should I look for? (Type in \
\'exploration\' to return to exploration mode.)\n')

        if category == 'exploration':
            return 0
        elif category not in dictionary:
            print('\nCategory %s not in dictionary.' %(category))

    img_parcial = None
    moves = 0

    # Keep moving while max moves not attained
    while moves < max_moves:

        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,v0,0,vrep.simx_opmode_buffer)
        if res==vrep.simx_return_ok:

            # converting vrep img to numpy array bgr for proper opencv handling
            img = img2rgbnp(image, resolution)
            cv2.imshow('search', img)
            cv2.waitKey(50)

            # First, check ior_table to see whether the object category has
            # been seen before (the confirmed-category field, entry[1]).

            visited_cats = [i[1] for i in ior_table]
            if category in visited_cats:

                while category in visited_cats:

                    ior_index = visited_cats.index(category)
                    obj_pose = ior_table[ior_index][0]

                    # Now moving to obj_pose and grabbing image
                    vrep.simxSetJointTargetPosition(clientID, joint_z,\
                            obj_pose[0],\
                            vrep.simx_opmode_oneshot)

                    vrep.simxSetJointTargetPosition(clientID, joint_x,\
                            obj_pose[1],\
                            vrep.simx_opmode_oneshot)
                    # updating last_joint_pos
                    last_joint_pos = obj_pose


                    cv2.waitKey(1500)

                    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,v0,0,vrep.simx_opmode_buffer)
                    img = img2rgbnp(image, resolution)
                    cv2.imshow('search', img)
                    cv2.waitKey(50)

                    # Now processing the seed, which is at the center of the image ((256, 256) for a 512x512 resolution)
                    segm_current = ml.stochastic_reg_grow(img, (x_resolution//2,\
                        y_resolution//2),\
                        30, 30, 20, 30, 60)
                    cv2.imshow('aa', segm_current)
                    cv2.waitKey(500)
                    cv2.destroyWindow('aa')

                    segm_current[np.where(np.mean(segm_current, axis=2) != 0)] = 1
                    segm_current = np.asarray(segm_current, dtype=bool)
                    segm_current = np.multiply(segm_current, img)

                    (T, B, L, R) = rectangle_detect(segm_current)

                    if T is not None:

                        segm_current[T:B, L:R] = 1
                        segm_current = np.asarray(segm_current, dtype=bool)
                        img_parcial = np.multiply(segm_current, img)
                        img_parcial = transfere(img_parcial, img)

                        # Now checking whether segmented image is of desired category

                        #classeme = extract_single_classeme(img_parcial)
                        features = single_feature_extractor(img_parcial)

                        #winner, sec_winner, __ = sgng._bmu(classeme, 0) 
                        winner, sec_winner, __ = sgng._bmu(features, 0) 
                        bmu_label = number2name(dictionary, winner.label)
                        sbmu_label = number2name(dictionary, sec_winner.label)
                        print(bmu_label, sbmu_label)

                        if category in (bmu_label, sbmu_label):
                            
                            cv2.imshow('parc', img_parcial)
                            cv2.waitKey(2000)
                            yn = input('\n\nI think I found it. Is that correct?(y/n)\n\n')
                            if yn == 'y':
                                print('\n')
                                cv2.destroyWindow('parc')
                                cv2.destroyWindow('search')
                                #ior_table = ior_update(ior, obj_pose, category, None)
                                return last_joint_pos
                            elif yn == 'n':
                                ior_table[ior_index][1] = None
                                ior_table[ior_index][2] = category

                            cv2.destroyWindow('parc')

                    # After checking this pose, update visited_cats and check
                    # again whether the category appears elsewhere in ior_table
                    visited_cats[ior_index] = -1
            
            # if the category is not in ior_table
            else:

                segm_total = np.zeros((img.shape[0], img.shape[1], 3))

                #sal = get_saliency(img, thresh)
                sal = get_saliency(img)
                max_indices = get_n_max_sal(sal, qty_peaks)

                # Browsing through salient points

                for i in range(qty_peaks):

                    # checking whether saliency point is in previously grown region
                    if (segm_total[max_indices[1][i], max_indices[0][i]] == 0).all():
                        # x coordinate is the second element (the indices appear to come back in (row, column) order)
                        segm_current = ml.stochastic_reg_grow(img, (max_indices[1][i],\
                            max_indices[0][i]),\
                            30, 30, 20, 30, 60)

                        segm_current[np.where(np.mean(segm_current, axis=2) != 0)] = 1
                        segm_current = np.asarray(segm_current, dtype=bool)
                        segm_current = np.multiply(segm_current, img)

                        (T, B, L, R) = rectangle_detect(segm_current)
                        # T, B, L, R will be None if there are too few or too many pixels in segm_current
                        if T is not None:

                            segm_current[T:B, L:R] = 1
                            segm_current = np.asarray(segm_current, dtype=bool)
                            segm_total[np.where(np.mean(segm_total, axis=2) != 0)] = 1
                            segm_total = np.asarray(segm_total, dtype=bool)

                            # Checking whether there is intersection between the 
                            # current grown region and the total grown region of the current
                            # scene
                            if (np.bitwise_and(segm_current, segm_total) == 0).all():
                                #segm_current[T:B, L:R] = 1
                                #segm_current = np.asarray(segm_current, dtype=np.bool)

                                # updating segm_total
                                segm_total = np.bitwise_or(segm_current, segm_total)
                                segm_total = np.multiply(segm_total, img)

                                cv2.waitKey(50)
                                img_parcial = np.multiply(segm_current, img)

                                cv2.waitKey(50)
                                # build a box in the segmented image, but only if the pixel
                                # counts lie between the thresholds (see the transfere function)
                                img_parcial = transfere(img_parcial, img)
                                cv2.waitKey(50)

                                if img_parcial is not None:

                                    #cv2.waitKey(100)
                                    #cv2.imshow('parc', img_parcial)
                                    #cv2.waitKey(100)

                                    #classeme = extract_single_classeme(img_parcial)
                                    features = single_feature_extractor(img_parcial)

                                    #winner, sec_winner, __ = sgng._bmu(classeme, 0) 
                                    winner, sec_winner, __ = sgng._bmu(features, 0) 
                                    bmu_label = number2name(dictionary, winner.label)
                                    sbmu_label = number2name(dictionary, sec_winner.label)
                                    print(bmu_label, sbmu_label)
                                    #bmu_label = 'clock'
                                    #sbmu_label = 'clock'

                                    if category in (bmu_label, sbmu_label):
                                        
                                        # grabbing mid point, which will be used to calculate the 
                                        # pose
                                        #mid_point = ((B-T)//2, (R-L)//2)
                                        x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, max_indices[1][i],\
                                                last_joint_pos[0])
                                        y_joint_pos = get_y_joint_pos(persp_angle, y_resolution, max_indices[0][i],\
                                                last_joint_pos[1])
                                        obj_pose = (x_joint_pos, y_joint_pos)

                                        cv2.imshow('parc', img_parcial)
                                        cv2.waitKey(2000)
                                        yn = input('\n\nI think I found it. Is that correct?(y/n)\n\n')
                                        if yn == 'y':
                                            print('\n')
                                            cv2.destroyWindow('parc')
                                            cv2.destroyWindow('search')
                                            ior_table = ior_update(ior_table, obj_pose, category, None)
                                            return last_joint_pos
                                        elif yn == 'n':
                                            ior_table = ior_update(ior_table, obj_pose, None, category)

                                        cv2.destroyWindow('parc')
                                        """
                                        last_joint_pos = (0, 0)
                                        x_joint_pos = get_x_joint_pos(np.pi/4, 512, max_indices[1][i],\
                                                last_joint_pos[0])

                                        y_joint_pos = get_y_joint_pos(np.pi/4, 512, max_indices[0][i],\
                                                last_joint_pos[1])

                                        vrep.simxSetJointTargetPosition(clientID, joint_z,\
                                                x_joint_pos,\
                                                vrep.simx_opmode_oneshot)

                                        vrep.simxSetJointTargetPosition(clientID, joint_x,\
                                                y_joint_pos,\
                                                vrep.simx_opmode_oneshot)
                                        """
                # Didn't find it in the current scene; ask permission to move and continue the search
                yn = input('\nCouldn\'t find %s in the current scene. Continue search? (y/n)\n' %(category))
                if yn == 'n':
                    cv2.destroyWindow('search')
                    return last_joint_pos
                else:

                    # If continue, move towards the right edge of the screen
                    x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, x_resolution,\
                            last_joint_pos[0])

                    vrep.simxSetJointTargetPosition(clientID, joint_z,\
                            x_joint_pos,\
                            vrep.simx_opmode_oneshot)

                    last_joint_pos = (x_joint_pos, last_joint_pos[1])

                    cv2.waitKey(1000)

                # incrementing moves count
                moves += 1

    # reaching this point means moves has reached max_moves
    print('\nMaximum number of moves attained. Aborting search...\n\n')
    return last_joint_pos
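
The helpers get_x_joint_pos and get_y_joint_pos are also not shown. Given
their arguments (perspective angle, image resolution, a pixel coordinate
and the current joint position), a plausible sketch maps the pixel offset
from the image center to an angular offset within the camera's field of
view; this is an assumption about the geometry, not the original code:

def get_x_joint_pos(persp_angle, x_resolution, x_pixel, last_x_joint_pos):
    # Hypothetical sketch: fraction of the image width away from the
    # center, scaled by the camera's perspective angle.
    offset = (x_pixel - x_resolution / 2) / x_resolution  # in [-0.5, 0.5]
    return last_x_joint_pos + offset * persp_angle

def get_y_joint_pos(persp_angle, y_resolution, y_pixel, last_y_joint_pos):
    # Same idea for the vertical axis.
    offset = (y_pixel - y_resolution / 2) / y_resolution
    return last_y_joint_pos + offset * persp_angle

Note how search_mode calls get_x_joint_pos(persp_angle, x_resolution,
x_resolution, last_joint_pos[0]) to move toward the right edge of the
screen: the pixel argument equals the full resolution, i.e. the largest
possible offset.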
Example #3
def search_mode(sgng, dictionary, last_joint_pos, clientID, v0):

    print('\n\nEntering search mode.\n')
    # make sure sgng has at least two nodes
    if len(sgng.node_list) < 2:
        print("SGNG must have at least two nodes. Run some exploration first.")
        return 0

    category = None

    while (category not in dictionary):

        category = input(
            '\n\nWhat object category should I look for? (Type in \
\'exploration\' to return to exploration mode.)\n')

        if category == 'exploration':
            return 0
        elif category not in dictionary:
            print('\nCategory %s not in dictionary.' % (category))

    img_parcial = None
    moves = 0

    # Keep moving while max moves not attained
    while moves < max_moves:

        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, v0, 0, vrep.simx_opmode_buffer)
        if res == vrep.simx_return_ok:

            # converting vrep img to numpy array bgr for proper opencv handling
            img = img2rgbnp(image, resolution)
            cv2.imshow('search', img)
            cv2.waitKey(50)

            segm_total = np.zeros((img.shape[0], img.shape[1], 3))

            sal = get_saliency(img)
            max_indices = get_n_max_sal(sal, n)

            # Browsing through salient points

            for i in range(n):

                # checking whether saliency point is in previously grown region
                if (segm_total[max_indices[1][i],
                               max_indices[0][i]] == 0).all():
                    # x coordinate is the second element (the indices appear to come back in (row, column) order)
                    segm_current = ml.stochastic_reg_grow(img, (max_indices[1][i],\
                        max_indices[0][i]),\
                        30, 30, 5, 50, 60)

                    segm_current[np.where(
                        np.mean(segm_current, axis=2) != 0)] = 1
                    segm_current = np.asarray(segm_current, dtype=bool)
                    segm_current = np.multiply(segm_current, img)

                    (T, B, L, R) = rectangle_detect(segm_current)
                    # T, B, L, R will be None if there are too few or too many pixels in segm_current
                    if T is not None:

                        segm_current[T:B, L:R] = 1
                        segm_current = np.asarray(segm_current, dtype=bool)
                        segm_total[np.where(
                            np.mean(segm_total, axis=2) != 0)] = 1
                        segm_total = np.asarray(segm_total, dtype=bool)

                        if (np.bitwise_and(segm_current,
                                           segm_total) == 0).all():
                            #if 1 ==1:
                            #segm_current[T:B, L:R] = 1
                            #segm_current = np.asarray(segm_current, dtype=np.bool)

                            segm_total = np.bitwise_or(segm_current,
                                                       segm_total)
                            segm_total = np.multiply(segm_total, img)

                            cv2.waitKey(50)
                            img_parcial = np.multiply(segm_current, img)

                            cv2.waitKey(50)
                            # build a box in the segmented image, but only if the pixel
                            # counts lie between the thresholds (see the transfere function)
                            img_parcial = transfere(img_parcial, img)
                            cv2.waitKey(50)

                            if img_parcial is not None:

                                #cv2.waitKey(100)
                                cv2.imshow('parc', img_parcial)
                                cv2.waitKey(100)
                                classeme = extract_single_classeme(img_parcial)

                                winner, sec_winner, __ = sgng._bmu(classeme, 0)
                                print(sgng.activation(winner.weight, classeme),
                                      flush=True)
                                #print(sgng.activation(sec_winner.weight, classeme), flush=True)
                                bmu_label = number2name(
                                    dictionary, winner.label)
                                sbmu_label = number2name(
                                    dictionary, sec_winner.label)
                                print(bmu_label, sbmu_label, flush=True)
                                #bmu_label = 'clock'
                                #sbmu_label = 'clock'

                                if category in (bmu_label, sbmu_label):

                                    cv2.imshow('parc', img_parcial)
                                    cv2.waitKey(2000)
                                    yn = input(
                                        '\n\nI think I found it. Is that correct?(y/n)\n\n'
                                    )
                                    if yn == 'y':
                                        print('\n')
                                        cv2.destroyWindow('parc')
                                        return last_joint_pos
                                    cv2.destroyWindow('parc')
                                    """
                                    last_joint_pos = (0, 0)
                                    x_joint_pos = get_x_joint_pos(np.pi/4, 512, max_indices[1][i],\
                                            last_joint_pos[0])

                                    y_joint_pos = get_y_joint_pos(np.pi/4, 512, max_indices[0][i],\
                                            last_joint_pos[1])

                                    vrep.simxSetJointTargetPosition(clientID, joint_z,\
                                            x_joint_pos,\
                                            vrep.simx_opmode_oneshot)

                                    vrep.simxSetJointTargetPosition(clientID, joint_x,\
                                            y_joint_pos,\
                                            vrep.simx_opmode_oneshot)
                                    """
            # Didn't find it in the current scene; ask permission to move and continue the search
            yn = input(
                '\nCouldn\'t find %s in the current scene. Continue search? (y/n)\n'
                % (category))
            if yn == 'n':
                cv2.destroyWindow('search')
                return last_joint_pos
            else:

                # If continue, move towards the right edge of the screen
                x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, x_resolution,\
                        last_joint_pos[0])

                vrep.simxSetJointTargetPosition(clientID, joint_z,\
                        x_joint_pos,\
                        vrep.simx_opmode_oneshot)

                last_joint_pos = (x_joint_pos, last_joint_pos[1])

                cv2.waitKey(1000)

            # incrementing moves count
            moves += 1

    # reaching this point means moves has reached max_moves
    print('\nMaximum number of moves attained. Aborting search...\n\n')
    return last_joint_pos
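
Two small helpers the examples lean on can be sketched from their call
sites. img2rgbnp converts the flat list of signed bytes returned by
simxGetVisionSensorImage into an array OpenCV can display (V-REP images are
row-major with a bottom-left origin), and number2name reverse-looks-up a
numeric SGNG label in what appears to be a {name: number} dictionary. Both
are assumptions about the interface, not the original implementations:

import numpy as np

def img2rgbnp(image, resolution):
    # Wrap the signed bytes into uint8, reshape to (height, width, 3),
    # flip vertically (bottom-left origin -> top-left), and swap
    # RGB -> BGR so cv2.imshow renders the colors correctly.
    img = np.asarray(image, dtype=np.int16) & 0xFF
    img = img.astype(np.uint8).reshape((resolution[1], resolution[0], 3))
    return np.flipud(img)[:, :, ::-1]

def number2name(dictionary, label):
    # Reverse lookup: find the category name whose numeric label matches.
    for name, number in dictionary.items():
        if number == label:
            return name
    return None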
Example #4
                max_indices = get_n_max_sal(sal, n)
                max_sal_point = (max_indices[0][0], max_indices[1][0])

                #cv2.imshow('static sal', sal)
                #cv2.waitKey(500)

                # initializing segmentation
                segm_total = np.zeros((resolution[0], resolution[1], 3))

                # iterating over all salient points
                for i in range(n):

                    # x coordinate is the second element (the indices appear to come back in (row, column) order)
                    segm_current = ml.stochastic_reg_grow(img, (max_indices[1][i],\
                        max_indices[0][i]),\
                        15, 15, 10, 30, 20)

                    # transforming segm_current into binary mask
                    segm_current[np.where(
                        np.mean(segm_current, axis=2) != 0)] = 1
                    segm_current = np.asarray(segm_current, dtype=bool)

                    # transforming segm_total into binary mask
                    segm_total[np.where(np.mean(segm_total, axis=2) != 0)] = 1
                    segm_total = np.asarray(segm_total, dtype=bool)

                    # updating segm_total
                    segm_total = np.bitwise_or(segm_current, segm_total)

                segm_total = np.multiply(segm_total, img)
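
get_n_max_sal returns the coordinates of the n strongest saliency peaks.
The callers index the result as max_indices[0] (rows) and max_indices[1]
(columns), which matches the output of np.unravel_index; a minimal sketch
under that assumption:

import numpy as np

def get_n_max_sal(sal, n):
    # Indices of the n largest saliency values, strongest first, in
    # (row, column) order, which is why the x coordinate ends up being
    # the second element when the callers build a seed point.
    flat = np.argsort(sal, axis=None)[-n:][::-1]
    return np.unravel_index(flat, sal.shape)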
Example #5
def search_mode(sgng, dictionary, last_joint_pos, clientID, v0):

    global ior_table

    print('\n\nEntering search mode.\n')

    # first, remove all temporary images in temp dir
    remove_temp_files()

    # make sure sgng has at least two nodes
    if len(sgng.node_list) < 2:
        print("SGNG must have at least two nodes. Run some exploration first.")
        return 0

    category = None

    while (category not in dictionary):

        category = input(
            '\n\nWhat object category should I look for? (Type in \
\'exploration\' to return to exploration mode.)\n')

        if category == 'exploration':
            return 0
        elif category not in dictionary:
            print('\nCategory %s not in dictionary.' % (category))

    img_parcial = None
    moves = 0

    # Keep moving while max moves not attained
    while moves < max_moves:

        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, v0, 0, vrep.simx_opmode_buffer)
        if res == vrep.simx_return_ok:

            # converting vrep img to numpy array bgr for proper opencv handling
            img = img2rgbnp(image, resolution)
            cv2.imshow('search', img)
            cv2.waitKey(50)

            # First, check ior_table to see whether the object category has
            # been seen before (the confirmed-category field, entry[1]).

            visited_cats = [i[1] for i in ior_table]
            if category in visited_cats:

                while category in visited_cats:

                    ior_index = visited_cats.index(category)
                    obj_pose = ior_table[ior_index][0]

                    # Now moving to obj_pose and grabbing image
                    vrep.simxSetJointTargetPosition(clientID, joint_z,\
                            obj_pose[0],\
                            vrep.simx_opmode_oneshot)

                    vrep.simxSetJointTargetPosition(clientID, joint_x,\
                            obj_pose[1],\
                            vrep.simx_opmode_oneshot)
                    # updating last_joint_pos
                    last_joint_pos = obj_pose

                    cv2.waitKey(1500)

                    res, resolution, image = vrep.simxGetVisionSensorImage(
                        clientID, v0, 0, vrep.simx_opmode_buffer)
                    img = img2rgbnp(image, resolution)

                    cv2.imshow('search', img)
                    cv2.waitKey(50)

                    # Now processing the seed, which is at the center of the image ((256, 256) for a 512x512 resolution)
                    segm_current = ml.stochastic_reg_grow(img, (x_resolution//2,\
                        y_resolution//2),\
                        30, 30, 20, 30, 60)
                    #cv2.imshow('aa', segm_current)
                    #cv2.waitKey(500)
                    #cv2.destroyWindow('aa')

                    segm_current[np.where(
                        np.mean(segm_current, axis=2) != 0)] = 1
                    segm_current = np.asarray(segm_current, dtype=bool)
                    segm_current = np.multiply(segm_current, img)

                    (T, B, L, R) = rectangle_detect(segm_current)

                    if T is not None:

                        segm_current[T:B, L:R] = 1
                        segm_current = np.asarray(segm_current, dtype=bool)
                        img_parcial = np.multiply(segm_current, img)
                        img_parcial = transfere(img_parcial, img)

                        # Now checking whether segmented image is of desired category

                        #classeme = extract_single_classeme(img_parcial)
                        cv2.waitKey(50)
                        features = single_feature_extractor(img_parcial)

                        #winner, sec_winner, __ = sgng._bmu(classeme, 0)
                        winner, sec_winner, __ = sgng._bmu(features, 0)
                        bmu_label = number2name(dictionary, winner.label)
                        sbmu_label = number2name(dictionary, sec_winner.label)
                        print(bmu_label, sbmu_label)

                        if category in (bmu_label, sbmu_label):

                            cv2.imshow('parc', img_parcial)
                            cv2.waitKey(2000)
                            yn = input(
                                '\n\nI think I found it. Is that correct?(y/n)\n\n'
                            )
                            if yn == 'y':
                                print('\n')
                                cv2.destroyWindow('parc')
                                cv2.destroyWindow('search')
                                #ior_table = ior_update(ior, obj_pose, category, None)
                                return last_joint_pos
                            elif yn == 'n':
                                ior_table[ior_index][1] = None
                                ior_table[ior_index][2] = category

                            cv2.destroyWindow('parc')
                    """
                    bbs = generate_bbs(img)

                    for bb in bbs:

                        T, B, L, R = bb[1], bb[1] + bb[2], bb[0], bb[0] + bb[3]  
                        curr_img = img[T:B, L:R]

                        #features = single_feature_extractor(curr_img, net, transformer)
                        features = single_feature_extractor(curr_img)

                        winner, sec_winner, __ = sgng._bmu(features, 0) 
                        bmu_label = number2name(dictionary, winner.label)
                        sbmu_label = number2name(dictionary, sec_winner.label)

                        if category in (bmu_label, sbmu_label):

                            y_joint_pos = get_y_joint_pos(persp_angle, y_resolution, int((R+L)/2),\
                                    last_joint_pos[1])
                            x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, int((B+T)/2),\
                                    last_joint_pos[0])
                            obj_pose = (x_joint_pos, y_joint_pos)

                            cv2.imshow('parc', curr_img)
                            cv2.waitKey(300)
                            yn = input('\n\nI think I found it. Is that correct?(y/n)\n\n')
                            if yn == 'y':
                                print('\n')
                                cv2.destroyWindow('parc')
                                cv2.destroyWindow('search')
                                ior_table = ior_update(ior_table, obj_pose, category, None)
                                return last_joint_pos
                            elif yn == 'n':
                                ior_table = ior_update(ior_table, obj_pose, None, category)

                            cv2.destroyWindow('parc')
                    """
                    # After checking this pose, update visited_cats and check
                    # again whether the category appears elsewhere in ior_table
                    visited_cats[ior_index] = -1

            # if the category is not in ior_table
            else:

                bbs = generate_bbs(img)
                bbs = bbs[:50]

                # calculating features for each bb in parallel
                pool = Pool(processes=4)
                begin_starmap = time.time()
                features_matrix = pool.starmap(process_bb,
                                               zip(bbs, itertools.repeat(img)))
                features_matrix = np.array(features_matrix)
                pool.close()
                pool.join()
                end_starmap = time.time() - begin_starmap
                print(end_starmap)

                #begin_comp = time.time()
                #for bb in bbs:
                for i in range(bbs.shape[0]):

                    #T, B, L, R = bb[1], bb[1] + bb[2], bb[0], bb[0] + bb[3]
                    #curr_img = img[T:B, L:R]

                    #features = single_feature_extractor(curr_img)

                    features = features_matrix[i]

                    winner, sec_winner, __ = sgng._bmu(features, 0)
                    bmu_label = number2name(dictionary, winner.label)
                    sbmu_label = number2name(dictionary, sec_winner.label)

                    print(bmu_label, sbmu_label)

                    if category in (bmu_label, sbmu_label):
                        #if category == bmu_label:

                        T, B, L, R = (bbs[i][1], bbs[i][1] + bbs[i][2],
                                      bbs[i][0], bbs[i][0] + bbs[i][3])
                        curr_img = img[T:B, L:R]

                        y_joint_pos = get_y_joint_pos(persp_angle, y_resolution, int((B+T)/2),\
                                last_joint_pos[1])
                        x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, int((L+R)/2),\
                                last_joint_pos[0])
                        obj_pose = (x_joint_pos, y_joint_pos)

                        cv2.imshow('parc', curr_img)
                        cv2.waitKey(300)
                        yn = input(
                            '\n\nI think I found it. Is that correct?(y/n)\n\n'
                        )
                        if yn == 'y':
                            print('\n')
                            cv2.destroyWindow('parc')
                            cv2.destroyWindow('search')
                            ior_table = ior_update(ior_table, obj_pose,
                                                   category, None)
                            return last_joint_pos
                        elif yn == 'n':
                            ior_table = ior_update(ior_table, obj_pose, None,
                                                   category)

                        cv2.destroyWindow('parc')
                    #end_comp = time.time() - begin_comp
                    #print(end_comp)

                # Didn't find it in the current scene; ask permission to move and continue the search
                yn = input(
                    '\nCouldn\'t find %s in the current scene. Continue search? (y/n)\n'
                    % (category))
                if yn == 'n':
                    cv2.destroyWindow('search')
                    return last_joint_pos
                else:

                    # If continue, move towards the right edge of the screen
                    x_joint_pos = get_x_joint_pos(persp_angle, x_resolution, x_resolution,\
                            last_joint_pos[0])

                    vrep.simxSetJointTargetPosition(clientID, joint_z,\
                            x_joint_pos,\
                            vrep.simx_opmode_oneshot)

                    last_joint_pos = (x_joint_pos, last_joint_pos[1])

                    cv2.waitKey(1000)

                # incrementing moves count
                moves += 1

    # reaching this point means moves has reached max_moves
    print('\nMaximum number of moves attained. Aborting search...\n\n')
    return last_joint_pos
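
ior_update maintains the inhibition-of-return table. From the call sites,
each entry has the shape [obj_pose, confirmed_category, rejected_category]:
the second field is filled in when the user answers 'y', the third when the
answer is 'n'. A sketch consistent with that usage (the real function may
well do more, e.g. merge entries with nearby poses):

def ior_update(ior_table, obj_pose, yes_category, no_category):
    # Hypothetical sketch: append one [pose, confirmed, rejected] entry.
    ior_table.append([obj_pose, yes_category, no_category])
    return ior_table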
Example #6
                max_indices = get_n_max_sal(sal, n)
                max_sal_point = (max_indices[0][0], max_indices[1][0])

                #cv2.imshow('static sal', sal)
                #cv2.waitKey(500)

                # initializing segmentation
                segm_total = np.zeros((resolution[0], resolution[1], 3))

                # iterating over all salient points
                for i in range(n):

                    # x coordinate is the second element (the indices appear to come back in (row, column) order)
                    segm_current = ml.stochastic_reg_grow(img, (max_indices[1][i],\
                        max_indices[0][i]),\
                        30, 30, 5, 30, 40)

                    # transforming segm_current into binary mask
                    segm_current[np.where(
                        np.mean(segm_current, axis=2) != 0)] = 1
                    segm_current = np.asarray(segm_current, dtype=bool)

                    # transforming segm_total into binary mask
                    segm_total[np.where(np.mean(segm_total, axis=2) != 0)] = 1
                    segm_total = np.asarray(segm_total, dtype=bool)

                    # updating segm_total
                    segm_total = np.bitwise_or(segm_current, segm_total)

                    img_parcial = np.multiply(segm_current, img)
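
The accumulation pattern shared by Examples #4 and #6 (turn each grown
region into a boolean mask, then OR it into segm_total) can be seen on a
toy case:

import numpy as np

# Two overlapping 4x4 masks OR-ed into a running total; the overlapping
# pixel is counted once.
a = np.zeros((4, 4, 3), dtype=bool)
b = np.zeros((4, 4, 3), dtype=bool)
a[0:2, 0:2] = True
b[1:3, 1:3] = True
total = np.bitwise_or(a, b)
print(np.count_nonzero(total[:, :, 0]))  # -> 7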
Example #7
            # finding the most salient point
            max_sal_arg = np.unravel_index(np.argmax(sal), sal.shape)

            cv2.imshow('static sal', sal)
            cv2.waitKey(2)

            # move joints
            vrep.simxSetJointTargetPosition(clientID, joint_z,\
                    get_x_joint_pos(np.pi/4, 256, max_sal_arg[0]),\
                    vrep.simx_opmode_oneshot)

            vrep.simxSetJointTargetPosition(clientID, joint_x,\
                    get_y_joint_pos(np.pi/4, 256, max_sal_arg[1]),\
                    vrep.simx_opmode_oneshot)

            # segmentation
            segm = ml.stochastic_reg_grow(img, (105, 110), 15, 15, 6,\
                    30, 20)
            cv2.imshow('segm', segm)
            cv2.waitKey(10)


    vrep.simxFinish(clientID)



vrep.simxFinish(clientID)
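
All of these fragments read the vision sensor with simx_opmode_buffer,
which only returns simx_return_ok once the data stream has been primed.
The usual setup (standard remote-API boilerplate; the sensor name
'Vision_sensor' and the port are assumptions) looks like:

import vrep  # V-REP (CoppeliaSim) remote-API bindings

clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)
res, v0 = vrep.simxGetObjectHandle(clientID, 'Vision_sensor',
                                   vrep.simx_opmode_oneshot_wait)

# Prime the image stream once; the buffer-mode reads in the examples
# above start succeeding shortly afterwards.
vrep.simxGetVisionSensorImage(clientID, v0, 0, vrep.simx_opmode_streaming)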