def get_knower_pattern(self):
    """
    Return a string summarizing this hypothesis' knower-level behavior.

    Evaluates the hypothesis on one sampled set of each size 1..9 and maps
    each response word to its numeral via word_to_number; responses that are
    None or 'undef' become 'U'. E.g. a two-knower might yield '12UUUUUUU'.
    """
    resp = [self(set(sample_sets_of_objects(n, all_objects)))
            for n in xrange(1, 10)]
    # BUGFIX: compare to the literal 'undef' with equality, not identity.
    # `x is not 'undef'` depends on CPython string interning and is not a
    # reliable comparison (SyntaxWarning on modern Python).
    return ''.join([str(word_to_number[x]) if (x is not None and x != 'undef') else 'U'
                    for x in resp])
def generate_data(data_size):
    """
    Sample `data_size` observations according to the target concept.

    Each observation pairs a sampled set of objects with an utterance:
    with probability ALPHA the true word for the set's size, otherwise a
    word drawn from WORDS by weighted_sample. Returns a list of
    FunctionData(input=[set], output=word).
    """
    # Empirical weights over set sizes 1..10 (word-frequency counts).
    size_weights = [7187, 1484, 593, 334, 297, 165, 151, 86, 105, 112]

    data = []
    for _ in range(data_size):
        # Draw how many objects are in this set.
        n_objects = weighted_sample(range(1, 10 + 1), probs=size_weights)

        # Build the concrete set of objects of that size.
        obj_set = set(sample_sets_of_objects(n_objects, all_objects))

        # Speak truthfully with probability ALPHA, else utter a noise word.
        if random() < ALPHA:
            utterance = WORDS[len(obj_set) - 1]
        else:
            utterance = weighted_sample(WORDS)

        # Store as a FunctionData observation.
        data.append(FunctionData(input=[obj_set], output=utterance))

    return data
def make_data(data_size=300, alpha=0.75):
    """
    Sample `data_size` observations according to the target concept.

    With probability `alpha` the utterance is the correct word for the
    sampled set's size; otherwise it is drawn from WORDS by
    weighted_sample. Each observation is a FunctionData carrying the
    reliability `alpha` alongside the input set and output word.
    """
    # Empirical weights over set sizes 1..10 (word-frequency counts).
    size_weights = [7187, 1484, 593, 334, 297, 165, 151, 86, 105, 112]

    data = []
    for _ in range(data_size):
        # Draw the set size, then materialize a set of that many objects.
        n_objects = weighted_sample(range(1, 10 + 1), probs=size_weights)
        obj_set = set(sample_sets_of_objects(n_objects, all_objects))

        # True word with probability alpha; otherwise a sampled noise word.
        utterance = (WORDS[len(obj_set) - 1]
                     if random() < alpha
                     else weighted_sample(WORDS))

        data.append(FunctionData(input=[obj_set], output=utterance, alpha=alpha))

    return data
def get_knower_pattern(ne):
    """
    Return a string summarizing the knower-level behavior of hypothesis `ne`.

    Evaluates `ne` on one sampled set of each size 1..9 and maps each
    response word to its numeral via word_to_number; None or 'undef'
    responses become 'U'.
    """
    # BUGFIX: removed the unused local `out`, and replaced the identity
    # comparison `x is not 'undef'` with equality -- `is` against a string
    # literal depends on CPython interning and is not reliable.
    resp = [ne(set(sample_sets_of_objects(n, all_objects)))
            for n in xrange(1, 10)]
    return ''.join([str(word_to_number[x]) if (x is not None and x != 'undef') else 'U'
                    for x in resp])
for i in range(data_size): # how many in this set set_size = weighted_sample( range(1,10+1), probs=[7187, 1484, 593, 334, 297, 165, 151, 86, 105, 112] ) # get the objects in the current set s = set(sample_sets_of_objects(set_size, all_objects)) # sample according to the target if random() < ALPHA: r = WORDS[len(s)-1] else: r = weighted_sample( WORDS ) # and append the sampled utterance data.append(FunctionData(input=[s], output=r)) # convert to "FunctionData" and store return data # compute a string describing the behavior of this knower-level def get_knower_pattern(ne): out = '' resp = [ ne(set(sample_sets_of_objects(n, all_objects))) for n in xrange(1, 10)] return ''.join([str(word_to_number[x]) if (x is not None and x is not 'undef') else 'U' for x in resp]) # ============================================================================================================ # All objects -- not very exciting #here this is really just a dummy -- one type of object, which is replicated in sample_sets_of_objects all_objects = make_all_objects(shape=['duck']) # all possible data sets on 10 objects all_possible_data = [ ('', set(sample_sets_of_objects(n, all_objects))) for n in xrange(1,10) ]
def get_knower_pattern(self):
    """
    Return a string summarizing this hypothesis' knower-level behavior.

    Evaluates the hypothesis on one sampled set of each size 1..9 and maps
    each response word to its numeral via word_to_number; responses that
    are None or "undef" become "U".
    """
    resp = [self(set(sample_sets_of_objects(n, all_objects))) for n in xrange(1, 10)]
    # BUGFIX: compare to the literal "undef" with equality, not identity.
    # `x is not "undef"` depends on CPython string interning and is not a
    # reliable comparison (SyntaxWarning on modern Python).
    return "".join([str(word_to_number[x]) if (x is not None and x != "undef") else "U" for x in resp])
s = set(sample_sets_of_objects(set_size, all_objects)) # sample according to the target if random() < alpha: r = WORDS[len(s) - 1] else: r = weighted_sample(WORDS) # and append the sampled utterance data.append(FunctionData(input=[s], output=r, alpha=alpha)) return data #here this is really just a dummy -- one type of object, which is replicated in sample_sets_of_objects all_objects = make_all_objects(shape=['duck']) # all possible data sets on 10 objects all_possible_data = [('', set(sample_sets_of_objects(n, all_objects))) for n in xrange(1, 10)] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Grammar # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ from LOTlib.Grammar import Grammar from LOTlib.Miscellaneous import q # The priors here are somewhat hierarchical by type in generation, tuned to be a little more efficient # (but the actual RR prior does not care about these probabilities) grammar = Grammar() grammar.add_rule('START', '', ['WORD'], 1.0)