Example #1
def __init__(self):
    FLAGS.max_epoch = 15
    FLAGS.max_max_epoch = 30
    FLAGS.max_num_threads = 20  # should be OK for a 4 GB GPU?
    FLAGS.dataset = 'one_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #2
def __init__(self):
    # Overwrite the user's FLAGS with the values defined in this configuration.
    FLAGS.feature_type = 'app'  # 'flow', 'app' or 'both'
    FLAGS.network = 'logits'  # or 'logits_noisy'
    FLAGS.learning_rate = 0.01
    training_objects, validate_objects, test_objects = pilot_data.get_objects()
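Every example in this listing mutates a module-level FLAGS object before fetching the data splits. The flag definitions themselves are not part of the listing; as a point of reference, here is a minimal sketch of how such flags could be declared with TensorFlow 1.x's tf.app.flags (the flag names come from the examples, but the defaults and help strings are assumptions):

import tensorflow as tf

# Declared once at module level; each configuration class then overrides them.
tf.app.flags.DEFINE_integer('max_epoch', 50, 'epoch after which the learning rate starts to decay')
tf.app.flags.DEFINE_integer('max_max_epoch', 100, 'total number of training epochs')
tf.app.flags.DEFINE_string('dataset', 'sequential_oa', 'name or path of the dataset to train on')
tf.app.flags.DEFINE_string('network', 'logits', "network variant, e.g. 'logits' or 'logits_noisy'")
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'initial learning rate')

FLAGS = tf.app.flags.FLAGS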
Example #3
def __init__(self):
    FLAGS.max_epoch = 15  # 50
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = '../../../emerald/tmp/remote_images/continuous_expert'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.window_size = 300
    FLAGS.batch_size_fnn = 2
    FLAGS.max_num_threads = 5
Example #4
def __init__(self):
    FLAGS.max_epoch = 15
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = '../../../emerald/tmp/remote_images/wall_expert'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.batchwise_learning = True
    FLAGS.finetune = True
    FLAGS.init_model_dir = '/esat/qayd/kkelchte/tensorflow/lstm_logs/cwall_batchwise'
Example #5
def __init__(self):
    FLAGS.max_epoch = 15  # let the learning rate decay faster
    FLAGS.max_max_epoch = 30
    FLAGS.dataset = 'selected'
    if FLAGS.fc_only:
        FLAGS.hidden_size = 400

    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #6
def __init__(self):
    FLAGS.cut_end = 20  # 100
    FLAGS.max_epoch = 50  # keep 50 instead of 10 to make sure it converges to a minimum
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #7
def __init__(self):
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    FLAGS.network = 'no_cnn_depth'
    FLAGS.dataset = 'sequential_oa_depth'  # 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #8
def __init__(self):
    FLAGS.max_epoch = 50  # keep 50 instead of 10 to make sure it converges to a minimum
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    #FLAGS.dataset = 'inc_fc_dagger'  # 'sequential_oa_rec'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #9
def __init__(self):
    FLAGS.max_epoch = 50  # 30 #15 #30
    FLAGS.max_max_epoch = 100  # 60 #30 #60
    FLAGS.max_num_threads = 20  # should be OK for a 4 GB GPU?
    #FLAGS.dataset = 'inc_lstm_dagger'  # 'sequential_oa_rec'
    #FLAGS.window_size = 20
    #FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #10
def __init__(self):
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.batch_size_fnn = 100
    FLAGS.network = 'no_cnn_depth'
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    FLAGS.dataset = 'sequential_oa_depth'  # 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #11
def __init__(self):
    FLAGS.sliding_window = True
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    #FLAGS.dataset = 'sequential_oa'
    FLAGS.data_type = 'grouped'
    FLAGS.window_size = 20
    #FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
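Example #11 is the only one that enables FLAGS.sliding_window, pairing it with grouped data and a window_size of 20, so each sequence is presumably cut into overlapping 20-frame windows rather than consumed whole. How pilot_data implements this is not shown in the listing; a minimal NumPy sketch of the general idea (sliding_windows is a hypothetical helper, not part of pilot_data):

import numpy as np

def sliding_windows(sequence, window_size=20, step=1):
    # Stack every window_size-long slice of a [T, ...] sequence, shifted by step.
    return np.stack([sequence[i:i + window_size]
                     for i in range(0, len(sequence) - window_size + 1, step)])

frames = np.zeros((100, 72, 128, 3))   # e.g. 100 RGB frames of one trajectory
windows = sliding_windows(frames)
print(windows.shape)                   # (81, 20, 72, 128, 3)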
Example #12
def __init__(self):
    FLAGS.max_grad_norm = 1
    #FLAGS.cut_end = 100
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    FLAGS.batch_size_fnn = 100
    FLAGS.fc_only = True
    FLAGS.hidden_size = 400
    #FLAGS.dataset = 'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #13
def __init__(self):
    FLAGS.max_epoch = 15  # 50
    FLAGS.max_max_epoch = 100
    #FLAGS.hidden_size = 10  # dimensionality of cell state and output
    #FLAGS.max_epoch = 2
    #FLAGS.max_max_epoch = 5
    FLAGS.continuous = False
    FLAGS.dataset = '../../../emerald/tmp/remote_images/discrete_expert_2'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
    FLAGS.finetune = True
    FLAGS.init_model_dir = '/esat/qayd/kkelchte/tensorflow/lstm_logs/dis_wsize_300'
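Examples #4 and #13 combine FLAGS.finetune with FLAGS.init_model_dir, which suggests training resumes from a previously saved model instead of random initialization. The training loop that consumes these flags is not part of the listing; a minimal TF 1.x sketch of what such a restore step typically looks like (the session and saver wiring here is an assumption, not pilot_data's actual code):

import tensorflow as tf

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if FLAGS.finetune:
        # Restore the weights from the latest checkpoint in init_model_dir.
        checkpoint = tf.train.latest_checkpoint(FLAGS.init_model_dir)
        saver.restore(sess, checkpoint)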
Example #14
def __init__(self):
    FLAGS.max_epoch = 50
    FLAGS.max_max_epoch = 100
    FLAGS.dataset = 'sequential_oa'
    FLAGS.data_type = 'grouped'
    FLAGS.batchwise_learning = True
    FLAGS.sample = 5
    FLAGS.preloading = False
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #15
def __init__(self):
    FLAGS.max_num_windows = 500
    FLAGS.max_epoch = 100
    FLAGS.max_max_epoch = 200
    FLAGS.window_size = 20
    FLAGS.batch_size_fnn = 32
    FLAGS.preloading = False
    FLAGS.conv_layers = True
    FLAGS.network = 'no_cnn'
    #FLAGS.normalized = True
    FLAGS.dataset = 'seq_oa_huge'  # 'sequential_oa_rec' #'sequential_oa'
    self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()
Example #16
def __init__(self):
    training_objects, validate_objects, test_objects = pilot_data.get_objects()
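All sixteen examples follow the same pattern: a configuration class whose __init__ overwrites the global FLAGS and then fetches the train/validation/test splits through pilot_data.get_objects(). A minimal usage sketch under that assumption (the class name SmallConfig and the train function are hypothetical):

class SmallConfig(object):
    def __init__(self):
        FLAGS.max_epoch = 15
        FLAGS.max_max_epoch = 30
        FLAGS.dataset = 'one_oa'
        self.training_objects, self.validate_objects, self.test_objects = pilot_data.get_objects()

# Instantiating the configuration applies the overrides before training starts.
config = SmallConfig()
train(config.training_objects)  # hypothetical training entry point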