def __init__(self):
    FLAGS.max_epoch = 15
    FLAGS.max_max_epoch = 30
    FLAGS.max_num_threads = 20  # for a 4 GB GPU this should be OK
    FLAGS.dataset = 'one_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
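# A minimal sketch of how the FLAGS overridden by these configurations might
# be declared elsewhere (TF 1.x tf.app.flags style); the defaults and help
# strings below are assumptions, not this repository's actual definitions:
#
#   import tensorflow as tf
#   tf.app.flags.DEFINE_integer('max_epoch', 50,
#                               'epochs before the learning rate starts decaying')
#   tf.app.flags.DEFINE_integer('max_max_epoch', 100, 'total number of epochs')
#   tf.app.flags.DEFINE_string('dataset', 'sequential_oa', 'dataset name or path')
#   FLAGS = tf.app.flags.FLAGS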
def __init__(self):
    # Overwrite the user-supplied FLAGS with the flags defined in this
    # configuration.
    FLAGS.feature_type = 'app'  # flow, app or both
    FLAGS.network = 'logits'  # or logits_noisy
    FLAGS.learning_rate = 0.01
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 15  # 50
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = '../../../emerald/tmp/remote_images/continuous_expert'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.window_size = 300
    FLAGS.batch_size_fnn = 2
    FLAGS.max_num_threads = 5
def __init__(self):
    FLAGS.max_epoch = 15
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = '../../../emerald/tmp/remote_images/wall_expert'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.batchwise_learning = True
    FLAGS.finetune = True
    FLAGS.init_model_dir = '/esat/qayd/kkelchte/tensorflow/lstm_logs/cwall_batchwise'
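# When FLAGS.finetune is set, the training script presumably restores the
# weights stored in FLAGS.init_model_dir before training starts. A hedged
# TF 1.x sketch of that restore step (the actual logic is not in this file):
#
#   saver = tf.train.Saver()
#   checkpoint = tf.train.latest_checkpoint(FLAGS.init_model_dir)
#   if FLAGS.finetune and checkpoint:
#       saver.restore(session, checkpoint)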
def __init__(self):
    FLAGS.max_epoch = 15  # let the learning rate decay faster
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = 'selected'
    if FLAGS.fc_only:
        FLAGS.hidden_size = 400
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.cut_end = 20  # 100
    FLAGS.max_epoch = 50  # keep 50 instead of 10 to make sure it converges to a minimum
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    FLAGS.network = 'no_cnn_depth'
    FLAGS.dataset = 'sequential_oa_depth'  # 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 50  # keep 50 instead of 10 to make sure it converges to a minimum
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    # FLAGS.dataset = 'inc_fc_dagger'  # 'sequential_oa_rec'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 50  # 30 # 15 # 30
    FLAGS.max_max_epoch = 100  # 60 # 30 # 60
    FLAGS.max_num_threads = 20  # for a 4 GB GPU this should be OK
    # FLAGS.dataset = 'inc_lstm_dagger'  # 'sequential_oa_rec'
    # FLAGS.window_size = 20
    # FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.batch_size_fnn = 100
    FLAGS.network = 'no_cnn_depth'
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    FLAGS.dataset = 'sequential_oa_depth'  # 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.sliding_window = True
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    # FLAGS.dataset = 'sequential_oa'
    FLAGS.data_type = 'grouped'
    FLAGS.window_size = 20
    # FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
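# FLAGS.sliding_window presumably makes the data reader emit overlapping
# windows of window_size consecutive frames. A hedged sketch of that slicing
# (an assumption, not pilot_data's actual implementation):
#
#   windows = [frames[i:i + FLAGS.window_size]
#              for i in range(len(frames) - FLAGS.window_size + 1)]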
def __init__(self):
    FLAGS.max_grad_norm = 1
    # FLAGS.cut_end = 100
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    # FLAGS.dataset = 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_epoch = 15  # 50
    FLAGS.max_max_epoch = 100
    # FLAGS.hidden_size = 10  # dimensionality of cell state and output
    # FLAGS.max_epoch = 2
    # FLAGS.max_max_epoch = 5
    FLAGS.continuous = False
    FLAGS.dataset = '../../../emerald/tmp/remote_images/discrete_expert_2'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.finetune = True
    FLAGS.init_model_dir = '/esat/qayd/kkelchte/tensorflow/lstm_logs/dis_wsize_300'
def __init__(self):
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    FLAGS.dataset = 'sequential_oa'
    FLAGS.data_type = 'grouped'
    FLAGS.batchwise_learning = True
    FLAGS.sample = 5
    FLAGS.preloading = False
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    FLAGS.max_num_windows = 500
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    FLAGS.preloading = False
    FLAGS.conv_layers = True
    FLAGS.network = 'no_cnn'
    # FLAGS.normalized = True
    FLAGS.dataset = 'seq_oa_huge'  # 'sequential_oa_rec' # 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
def __init__(self):
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
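# A minimal, self-contained sketch of the configuration pattern used in this
# file: each __init__ overwrites module-level FLAGS and then loads the
# train/validate/test object lists. _SketchFlags and _get_objects below are
# stand-ins for the real FLAGS object and pilot_data.get_objects, which are
# defined elsewhere in the codebase.
class _SketchFlags(object):
    max_epoch = 13
    max_max_epoch = 55
    dataset = 'generated'

_sketch_flags = _SketchFlags()

def _get_objects():
    # Stand-in: the real pilot_data.get_objects() reads FLAGS.dataset and
    # returns the lists of training, validation and test objects.
    return ['train_obj'], ['val_obj'], ['test_obj']

class _SketchConfig(object):
    def __init__(self):
        _sketch_flags.max_epoch = 15
        _sketch_flags.max_max_epoch = 30
        _sketch_flags.dataset = 'one_oa'
        self.training_objects, self.validate_objects, self.test_objects = _get_objects()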