def main(argv):
    """Parse the command-line flags, count matching events, and print them."""
    parsed = get_flags(argv, __doc__, argparser)
    total = get_total_events(parsed.table_id, parsed.label, parsed.action)
    print_result(total, parsed.action)
## Standardize data train_mean, train_std = get_data_info(train_images) # train_images = standardize_data(train_images, train_mean, train_std) # validation_images = standardize_data(validation_images, train_mean, train_std) # test_images = standardize_data(test_images, train_mean, train_std) data = DataSet(train_images, train_labels) test = DataSet(test_images, test_labels) val = DataSet(validation_images, validation_labels) return data, test, val if __name__ == '__main__': FLAGS = utils.get_flags() ## Set random seed for tensorflow and numpy operations tf.set_random_seed(FLAGS.seed) np.random.seed(FLAGS.seed) data, test, _ = import_mnist() ## Here we define a custom loss for dgp to show error_rate = losses.ZeroOneLoss(data.Dout) ## Likelihood like = likelihoods.Softmax() ## Optimizer optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)
# Load CS objects for single stimuli plotting iter_vars_dims = [] for iter_var in iter_vars: iter_vars_dims.append(len(iter_vars[iter_var])) print('Loading object list for single stimulus plot...'), CS_object_array = load_aggregated_object_list(iter_vars_dims, data_flag) print('...loaded.') # Plot signal and inset ax['signal_%s' % Kk_split_idx] = \ signal_plot(ax['signal_%s' % Kk_split_idx]) ax['signal_insert_%s' % Kk_split_idx] = \ signal_plot(ax['signal_insert_%s' % Kk_split_idx]) if Weber_idx == 1: mark_inset(ax['signal_%s' % Kk_split_idx], ax['signal_insert_%s' % Kk_split_idx], loc1=3, loc2=4, fc="none", ec="0.5") save_signal_discrimination_weber_law_fig(fig) if __name__ == '__main__': data_flags = get_flags() plot_signal_discrimination_weber_law( data_flags, axes_to_plot=[0, 1], projected_variable_components=dict(normal_eps_tuning_width=15))
def get_answer(question, story, sch_flag=False):
    """Answer a wh-/did-question about a story.

    Dispatches on the question's wh-word flags to a baseline sentence
    selector, then attempts a dependency-based extraction (wn_extract),
    falling back to the baseline answer when extraction finds nothing.

    Args:
        question: mapping with at least 'text' and 'type' keys.
        story: mapping with at least a 'sid' key plus the sentence data
            consumed by the *_baseline helpers.
        sch_flag: accepted for interface compatibility but deliberately
            recomputed below from the question type ('Sch' in type).

    Returns:
        The answer string chosen by the matching branch.
    """
    # mctrain500 is missing the sch data, which changes the pattern.
    # NOTE(review): flag_500 is never read in this function — confirm it
    # is still needed or remove it.
    flag_500 = story['sid'].startswith('mc500')

    qflags = utils.get_flags(question)
    # The caller-supplied value is intentionally overridden here.
    sch_flag = 'Sch' in question['type']
    whole = nltk.word_tokenize(question['text'])

    if not any(qflags[key] for key in qflags):
        # No flag fired on the whole question: fall back to per-token
        # flags. Bug fix: stop at the FIRST token that triggers a flag —
        # the old loop kept only the last token's flags, discarding any
        # earlier match.
        for token in whole:
            qflags = utils.get_flags(token)
            if any(qflags[key] for key in qflags):
                break

    def _refine(baseline_answer, sentence_idx):
        """Prefer a dependency-extracted answer, else keep the baseline."""
        best_dep = wn_extract(question, story, sentence_idx,
                              sch_flag=sch_flag)
        return best_dep if best_dep else baseline_answer

    if qflags['who']:
        # Sentence selection: resolve anaphora if necessary; similarity
        # overlap first, falling back to word overlap.
        answer, i = who_baseline(question, story, sch_flag=sch_flag)
        answer = _refine(answer, i)
    elif qflags['what']:
        # Distinguish between verb/noun/quote return types; select the
        # sentence by similarity overlap first, word overlap of sch if
        # possible.
        return_type = utils.return_type(question)
        answer, i = what_baseline(question, story, return_type,
                                  sch_flag=sch_flag)
        answer = _refine(answer, i)
    elif qflags['when']:
        kw_adds = ['Saturday', 'Sunday', 'Monday', 'Tuesday', 'Wednesday',
                   'Thursday', 'Friday']
        kw_adds += pp_filter
        answer, i = when_baseline(question, story, kw_adds, sch_flag)
        answer = _refine(answer, i)
    elif qflags['why']:
        # Add why-answer triggers to the question when looking for overlap.
        answer, i = when_baseline(question, story, why_filter, sch_flag)
        answer = _refine(answer, i)
    elif qflags['where']:
        kw_adds = ['in', 'where', 'at', 'front', 'back', 'outside', 'inside']
        answer, i = when_baseline(question, story, kw_adds,
                                  sch_flag=sch_flag)
        answer = _refine(answer, i)
    elif qflags['which']:
        # Question reformation.
        # Consistency fix: sch_flag is now forwarded to wn_extract via
        # _refine, matching every other branch.
        answer, i = when_baseline(question, story, [], sch_flag)
        answer = _refine(answer, i)
    elif qflags['did']:
        # Simple overlap; look for a negation in the selected sentence
        # and use a score threshold based on the number of key words.
        answer, i = when_baseline(question, story, [], sch_flag=sch_flag)
        answer = _refine(answer, i)
        # Bug fix: English contractions carry "n't" (and NLTK tokenizes
        # them as "n't"), not "'nt" — the old "'nt" check could never
        # match, so every did-question was answered 'yes'.
        answer = 'yes' if "n't" not in answer else 'no'
    elif qflags['how']:
        # Resolve whether the return type is an adjective or verb gerund.
        answer, i = when_baseline(question, story, [], sch_flag=sch_flag)
        answer = _refine(answer, i)
    else:
        # Dialogue questions: plain word overlap — just pick the sentence.
        answer, i = when_baseline(question, story, [], sch_flag=sch_flag)
        answer = _refine(answer, i)
    return answer