def __init__(self, source, transform, extra_params=None):
     """
     :param source: The signal source to be preprocessed
     :param transform: A function taking a list as its first argument and
                       only keyword arguments beyond that
     :param extra_params: Optional dict of keyword arguments forwarded to
                          ``transform``
     """
     params = extra_params or {}  # default to a fresh dict; avoids a shared mutable default
     Transformer.__init__(self, [source], {source.getName(): 'l'},
                          lambda l: transform(l, **params))
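
# Hypothetical usage sketch for the constructor above: `sig` stands in for any
# source object exposing getName(), and `scale` is an illustrative transform
# that takes a list plus keyword-only extras, matching the documented contract.
def scale(values, factor=1.0):
    return [v * factor for v in values]

halved = Preprocessor(sig, scale, extra_params={'factor': 0.5})
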
def __init__(self,
             input_dim,
             bw_source,
             ph_source=Source(lambda: None),
             mode=TRAINING,
             batch_size=60,
             epochs=60,
             num_sizes=5,
             encdec_optimizer='rmsprop',
             class_optimizer='adadelta',
             class_loss='categorical_crossentropy',
             drop_rate=0.001,
             gauss_base_sigma=0.001,
             gauss_sigma_factor=2,
             l1=0.0,
             l2=0.001,
             model_name='latest',
             encdecs_name=None):
     if mode is AutoTransformer.TUNING:
         raise ValueError("Can't instantiate an AutoTransformer in 'tuning' mode")
     # Source order matters: the mode expects bw_source first, then ph_source.
     self.mode = mode.instantiate(self, [bw_source, ph_source])
     self.batch_size = batch_size
     self.epochs = epochs
     self.bw_source = bw_source
     self.ph_source = ph_source
     # input_dim followed by shrinking powers of two (64, 32, ...),
     # giving num_sizes layer widths in total.
     self.layer_sizes = [input_dim] + [2**i for i in range(6, 6 - num_sizes + 1, -1)]
     self.enc_decs = []
     self.current_batch = [[] for _ in range(batch_size)]  # one empty sample list per batch slot
     self.previous_data = {}
     self.current_phase = phase_names[0]
     self.batched = 0
     self.model = None
     self.model_name = model_name
     self.encdecs_name = encdecs_name
     # Test for None explicitly: `or` raises on a multi-element numpy array.
     catalogued_maxes = self.get_from_catalog("maxes", model_name)
     self.maxes = catalogued_maxes if catalogued_maxes is not None else np.ones(input_dim)
     self.enc_opt = encdec_optimizer
     self.cls_opt = class_optimizer
     self.cls_lss = class_loss
     self.drop_rate = drop_rate
     self.sigma_base = gauss_base_sigma
     self.sigma_fact = gauss_sigma_factor
     self.best_encdecs = ("latest", 0.0)
     self.enc_use_drop = False
     self.enc_noise = False
     self.l1 = l1
     self.l2 = l2
     Transformer.__init__(self,
                          self.mode.sources,
                          self.mode.t_assignments,
                          self.mode.transform,
                          no_updates=self.mode.no_updates)
     if self.mode.name == 'training':
         self.new_encdecs()
     else:
         self.load_model()
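
# Hypothetical construction sketch: `bw` and `ph` stand in for real signal
# sources, and input_dim=128 is illustrative. With the defaults above, this
# runs in training mode and so builds fresh encoder/decoders via new_encdecs().
at = AutoTransformer(input_dim=128,
                     bw_source=bw,
                     ph_source=ph,
                     mode=TRAINING)
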
 def __init__(self, source):
     self.original_source = source
     if hasattr(source, 'select'):
         raw_source = source.select('raw')
     else:
         raw_source = source
     fixlentrans = Transformer([raw_source], {raw_source.getName(): 'l'},
                               lambda l: fix_length(l, 512))
     self.wt = Preprocessor(fixlentrans, wavelet_trans)
     self.ft = Preprocessor(fixlentrans, fourier_trans)
     self.et = Preprocessor(fixlentrans, extremes)
     Transformer.__init__(self,
                          [self.wt, self.ft, self.et],
                          {self.wt.getName(): 'w',
                           self.ft.getName(): 'f',
                           self.et.getName(): 'e'},
                          lambda w, f, e: merge(w, f, e))
     #if hasattr(source, 'initialized'):
     #    if not source.initialized:
     #        source.push()
     source.push()
     self.pull()
     # The pipeline has to be pulled at least once before output_dim is
     # reported correctly. TODO: find out why.
     self.output_dim = len(self.pull())
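
# The constructor above fans one fixed-length signal out into wavelet, Fourier,
# and extremes branches, then merges them in a single Transformer. A minimal
# sketch of the same fan-out/merge pattern, with a hypothetical `src` and plain
# list concatenation standing in for merge():
trunk = Transformer([src], {src.getName(): 'l'}, lambda l: fix_length(l, 512))
left = Preprocessor(trunk, lambda l: l)                    # identity branch
right = Preprocessor(trunk, lambda l: list(reversed(l)))   # reversed branch
combo = Transformer([left, right],
                    {left.getName(): 'x', right.getName(): 'y'},
                    lambda x, y: x + y)
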