Example #1
    def __init__(self, user_id=None, exp_id=None, demo_id=None):
        #collect the unique shapes, colors, and displacement labels seen in one demonstration
        self.shape_set = set()
        self.color_set = set()
        self.displacement_set = set()
        if user_id is not None and exp_id is not None and demo_id is not None:
            yaml_file = lfd.getYamlFile(user_id, exp_id, demo_id)
            #print yaml_file
            #get world data from yaml_file
            data = lfd.getYamlData(yaml_file)
            #print data
            #add objects on table
            for obj in data['objects']:
                self.shape_set.add(obj[3])  #index 3 holds the object's shape
                self.color_set.add(obj[4])  #index 4 holds the object's color
            #add task object
            self.shape_set.add(data['task_object'][1])
            self.color_set.add(data['task_object'][2])
            #add displacements
            displacements = getDisplacements(user_id, exp_id, demo_id)
            for d in displacements:
                self.displacement_set.add(d[0])
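
The __init__ above is a method lifted from a larger class whose name is not shown in the snippet. A minimal usage sketch, assuming a stand-in class name (DemoFeatureSets) and made-up IDs:

#hypothetical usage; DemoFeatureSets is a stand-in for the class this __init__ belongs to,
#and the user/experiment/demo IDs are made up for illustration
features = DemoFeatureSets(user_id=1, exp_id=2, demo_id=0)
print(features.shape_set)         #unique shapes of the table objects and the task object
print(features.color_set)         #unique colors of the table objects and the task object
print(features.displacement_set)  #unique displacement labels from getDisplacements (Example #4)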
Example #2
#'lfd' below is presumably the project's learning-from-demonstration helper module (imported elsewhere)
import numpy as np


def getNLP_LfD_LandmarkDisplacement(nlp_grounder, user_id, exp_id, n_demos):
    relation_offset = 11  #hard-coded offset that maps relation labels back into the range 0:8
    relation, object_info = nlp_grounder.predict_goal(user_id, exp_id, n_demos)
    print(len(relation), len(object_info))
    print(relation)
    #get lfd to use along with language or in place of language if ambiguous
    lfd_shape_color, lfd_displacement = lfd.getMostLikelyLandmarkDisplacement(
        user_id, exp_id, n_demos)

    #check if we have one relation and one object
    if len(relation) == 1 and len(object_info) == 1:

        #print "grounding both"
        #first check if nlp grounded landmark is actually a possible landmark
        nlp_shape_color = (object_info[0][3], object_info[0][4])
        yaml_file = lfd.getYamlFile(user_id, exp_id, 0)
        landmark_coord = lfd.getFeatureCoordinates(nlp_shape_color, yaml_file)
        if landmark_coord is None:

            #print "NLP returned invalid landmark"
            return lfd_shape_color, lfd_displacement
        else:  #use valid landmark and displacement to guess placement location

            #get shape,color
            nlp_shape_color = (object_info[0][3], object_info[0][4])
            #get displacement from learned gmm
            gmm_filename = "../data/gmm_params.yaml"
            model_data = lfd.getYamlData(gmm_filename)
            nlp_displacement = model_data['mu'][relation[0] - relation_offset]
            return nlp_shape_color, nlp_displacement
    #check if we have only one object and can ground on that
    elif len(object_info) == 1:
        #print "grounding on object only"
        nlp_shape_color = (object_info[0][3], object_info[0][4])
        nlp_displacement = lfd.getDisplacementFromLandmark(
            user_id, exp_id, n_demos, nlp_shape_color)
        return nlp_shape_color, nlp_displacement
    #if we have one relation and multiple objects, use the relation to figure out which object is the intended landmark
    elif len(relation) == 1 and len(object_info) > 0:
        #print "grounding on relation only"
        #get predicted displacement based on relationship
        gmm_filename = "../data/gmm_params.yaml"
        model_data = lfd.getYamlData(gmm_filename)
        nlp_displacement = model_data['mu'][relation[0] - relation_offset]
        #get placement data and object locations
        #TODO which demo should I pick from, what should n_demos be??
        yaml_file = lfd.getYamlFile(user_id, exp_id, n_demos)
        placement_coord = lfd.getPlacementCoordinates(yaml_file, user_id,
                                                      exp_id)
        min_dist = float('inf')  #track the smallest placement error seen so far
        for obj in object_info:
            shape_color = (obj[3], obj[4])
            landmark_coord = lfd.getFeatureCoordinates(shape_color, yaml_file)
            predicted_placement = np.array(landmark_coord) + np.array(
                nlp_displacement)
            placement_error = np.linalg.norm(predicted_placement -
                                             placement_coord)
            if placement_error < min_dist:
                min_dist = placement_error
                best_landmark = shape_color
        return best_landmark, nlp_displacement
    #TODO case where only relationship is grounded?
    #elif len(relation) == 1:
    #fall back on pure lfd as a last resort
    else:

        #print "ambiguous grounding"
        return lfd_shape_color, lfd_displacement
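
A minimal usage sketch for the function above, assuming an already-trained nlp_grounder and made-up IDs; it returns a (shape, color) landmark and a displacement, falling back on the pure-LfD estimate when the language grounding is ambiguous or invalid:

#hypothetical call; nlp_grounder and the ID/demo-count values are assumptions for illustration
shape_color, displacement = getNLP_LfD_LandmarkDisplacement(
    nlp_grounder, user_id=1, exp_id=2, n_demos=3)
print("place relative to", shape_color, "offset by", displacement)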
Example #3
#'lfd' below is presumably the project's learning-from-demonstration helper module (imported elsewhere)
import numpy as np


def getNLP_LfD_LandmarkDisplacementDoubleCheck(nlp_grounder, user_id, exp_id,
                                               n_demos, thresh):
    relation_offset = 11  #hard-coded offset that maps relation labels back into the range 0:8
    relation, object_info = nlp_grounder.predict_goal(user_id, exp_id, n_demos)
    #print len(relation), len(object_info)
    #print relation
    #get lfd to use along with language or in place of language if ambiguous
    lfd_shape_color, lfd_displacement = lfd.getMostLikelyLandmarkDisplacement(
        user_id, exp_id, n_demos)

    #check if we have one relation and one object
    grounded_prediction = False  #flag to check if we think we've found a grounding
    if len(relation) == 1 and len(object_info) == 1:

        #print "grounding both"
        #first check if nlp grounded landmark is actually a possible landmark
        nlp_shape_color = (object_info[0][3], object_info[0][4])
        yaml_file = lfd.getYamlFile(user_id, exp_id, 0)
        landmark_coord = lfd.getFeatureCoordinates(nlp_shape_color, yaml_file)
        if landmark_coord is not None:

            #get shape,color
            nlp_shape_color = (object_info[0][3], object_info[0][4])
            #get displacement from learned gmm
            gmm_filename = "../data/gmm_params.yaml"
            model_data = lfd.getYamlData(gmm_filename)
            nlp_displacement = model_data['mu'][relation[0] - relation_offset]
            grounded_prediction = True

    #check if we have only one object and can ground on that
    elif len(object_info) == 1:
        #print "grounding on object only"
        nlp_shape_color = (object_info[0][3], object_info[0][4])
        nlp_displacement = lfd.getDisplacementFromLandmark(
            user_id, exp_id, n_demos, nlp_shape_color)
        grounded_prediction = True

    #if we have one relation and multiple objects, use the relation to figure out which object is the intended landmark
    elif len(relation) == 1 and len(object_info) > 0:
        #print "grounding on relation only"
        #get predicted displacement based on relationship
        gmm_filename = "../data/gmm_params.yaml"
        model_data = lfd.getYamlData(gmm_filename)
        nlp_displacement = model_data['mu'][relation[0] - relation_offset]
        #get placement data and object locations
        #TODO which demo should I pick from, what should n_demos be??
        yaml_file = lfd.getYamlFile(user_id, exp_id, n_demos)
        placement_coord = lfd.getPlacementCoordinates(yaml_file, user_id,
                                                      exp_id)
        min_dist = float('inf')  #track the smallest placement error seen so far
        for obj in object_info:
            shape_color = (obj[3], obj[4])
            landmark_coord = lfd.getFeatureCoordinates(shape_color, yaml_file)
            predicted_placement = np.array(landmark_coord) + np.array(
                nlp_displacement)
            placement_error = np.linalg.norm(predicted_placement -
                                             placement_coord)
            if placement_error < min_dist:
                min_dist = placement_error
                best_landmark = shape_color
        nlp_shape_color = best_landmark
        grounded_prediction = True

    #TODO case where only relationship is grounded?
    #elif len(relation) == 1:

    #see if I have a grounded prediction or if I should go with pure lfd
    if grounded_prediction:
        #check if grounding prediction seems to match what's been demonstrated
        ave_error = averageDistanceError(nlp_shape_color, nlp_displacement,
                                         user_id, exp_id, 0, n_demos + 1)
        #print "ave error", ave_error
        if ave_error < thresh:
            #print "confident in grounding"
            return nlp_shape_color, nlp_displacement, relation, object_info
    #fall back on pure lfd as a last resort
    #print "ambiguous grounding"
    return lfd_shape_color, lfd_displacement, relation, object_info
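
This variant only trusts the language grounding when its average placement error over the demonstrations seen so far stays under thresh; otherwise it returns the pure-LfD estimate. A sketch of a call, where the threshold value is an assumption and not taken from the source:

#hypothetical call; the threshold (in the same units as averageDistanceError) is a made-up value
shape_color, displacement, relation, object_info = getNLP_LfD_LandmarkDisplacementDoubleCheck(
    nlp_grounder, user_id=1, exp_id=2, n_demos=3, thresh=0.1)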
Example #4
def getDisplacements(user_id, exp_id, demo_id):
    #load the displacement entries recorded for this user/experiment/demo
    yaml_file = "../data/user_" + str(user_id) + "/experiment_" + str(
        exp_id) + "/displacements" + str(demo_id) + ".yaml"
    data = lfd.getYamlData(yaml_file)
    return data['objects']
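
A usage sketch with made-up IDs; per the path built above, the data is expected at ../data/user_<user_id>/experiment_<exp_id>/displacements<demo_id>.yaml, with an 'objects' list whose first element per entry is the displacement label consumed in Example #1:

#hypothetical IDs for illustration only
displacements = getDisplacements(user_id=1, exp_id=2, demo_id=0)
for d in displacements:
    print(d[0])  #the label that Example #1 adds to displacement_set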