    add_graph_to_summary=False,
    train_dataset_text=train_text,
    validation_datasets=dict(valid=valid_text),
    batch_size=BATCH_SIZE,
)

# Launch one grid search per configuration.
for conf in confs:
    build_hyperparameters = dict(
        init_parameter=conf['init_parameter'],
    )
    # other_hyperparameters={'dropout': [.3, .5, .7, .8, .9, .95]},
    # Adaptive learning rate: initial value taken from the config, decayed
    # (decay=.5) when validation loss makes no progress for 10 measurements.
    other_hyperparameters = dict(
        learning_rate=dict(
            varying=dict(init=conf['learning_rate']),
            fixed=dict(
                decay=.5,
                max_no_progress_points=10,
                path_to_target_metric_storage=('valid', 'loss'),
            ),
            hp_type='built-in',
            type='adaptive_change',
        ),
    )

    tf.set_random_seed(1)

    # Continue experiment numbering from results already present in save_path.
    _, biggest_idx, _ = get_num_exps_and_res_files(save_path)
    if biggest_idx is None:
        initial_experiment_counter_value = 0
    else:
        initial_experiment_counter_value = biggest_idx + 1

    env.grid_search(
        evaluation,
        kwargs_for_building,
        build_hyperparameters=build_hyperparameters,
        other_hyperparameters=other_hyperparameters,
        initial_experiment_counter_value=initial_experiment_counter_value,
        **launch_kwargs)
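
# `get_num_exps_and_res_files` is not shown here. The sketch below is a
# hypothetical illustration of such a helper, assuming it scans `save_path`
# for numbered experiment entries so that a restarted run can continue the
# experiment counter; it is not the repository's actual implementation.
import os
import re

def get_num_exps_and_res_files(save_path):
    # Treat every entry in save_path whose name starts with an integer as one
    # previously launched experiment and remember its index.
    indices = []
    if os.path.isdir(save_path):
        for name in os.listdir(save_path):
            m = re.match(r'(\d+)', name)
            if m is not None:
                indices.append(int(m.group(1)))
    num_exps = len(indices)
    biggest_idx = max(indices) if indices else None
    num_res_files = num_exps  # stand-in for a separate count of result files
    return num_exps, biggest_idx, num_res_files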
    ),
    train_batch_kwargs=dict(
        valid_size=VALID_SIZE,
    ),
    valid_batch_kwargs=dict(
        valid_size=VALID_SIZE,
    ),
    # train_dataset_text='abc',
    validation_datasets=dict(
        valid='validation',
    ),
    batch_size=BATCH_SIZE,
)

# Launch one grid search per configuration.
for conf in confs:
    # Hyperparameters varied at graph-building time: init_parameter and rho
    # (the config specifies rho as 1 - rho).
    build_hyperparameters = dict(
        init_parameter=conf['init_parameter'],
        rho=[1. - v for v in conf['1_minus_rho']],
    )
    # other_hyperparameters={'dropout': [.3, .5, .7, .8, .9, .95]},
    other_hyperparameters = dict()

    tf.set_random_seed(1)
    env.grid_search(
        evaluation,
        kwargs_for_building,
        build_hyperparameters=build_hyperparameters,
        other_hyperparameters=other_hyperparameters,
        **launch_kwargs
    )
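
# `confs` is defined elsewhere. A minimal sketch of the shape this loop
# assumes, with illustrative (made-up) values: each entry provides the grid
# values for init_parameter and a list of 1 - rho values, which must be
# iterable because it is converted to a list of rho values above.
confs = [
    {'init_parameter': [1., 3., 5.], '1_minus_rho': [.1, .01, .001]},
]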