Example #1
    def parse_config_common_sda(self, arguments):
        # parse corruption level, learning rate, batch size, epoch number and momentum
        if 'corruption_level' in arguments:
            self.corruption_levels = [float(arguments['corruption_level']) for n in range(100)]
        if 'learning_rate' in arguments:
            self.learning_rates = [float(arguments['learning_rate']) for n in range(100)]
        if 'batch_size' in arguments:
            self.batch_size = int(arguments['batch_size'])
        if 'epoch_number' in arguments:
            self.epochs = int(arguments['epoch_number'])
        if 'momentum' in arguments:
            self.momentum = float(arguments['momentum'])

        # parse DNN network structure
        if 'nnet_spec' in arguments:
            nnet_layers = arguments['nnet_spec'].split(':')
            self.n_ins = int(nnet_layers[0])
            self.hidden_layers_sizes = [int(nnet_layers[i]) for i in range(1, len(nnet_layers) - 1)]
            self.n_outs = int(nnet_layers[-1])

        # parse pre-training layer number
        self.ptr_layer_number = len(self.hidden_layers_sizes)
        if 'ptr_layer_number' in arguments:
            self.ptr_layer_number = int(arguments['ptr_layer_number'])

        # parse activation function, including maxout
        if 'hidden_activation' in arguments:
            self.hidden_activation = parse_activation(arguments['hidden_activation'])
            if arguments['hidden_activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['hidden_activation'].replace('maxout:', ''))
        if '1stlayer_reconstruct_activation' in arguments:
            self.firstlayer_reconstruct_activation = parse_activation(arguments['1stlayer_reconstruct_activation'])

        # parse sparsity setting
        if 'sparsity' in arguments:
            self.sparsity = float(arguments['sparsity'])
        if 'sparsity_weight' in arguments:
            self.sparsity_weight = float(arguments['sparsity_weight'])

        # parse various paths for model saving
        if 'cfg_output_file' in arguments:
            self.cfg_output_file = arguments['cfg_output_file']
        if 'param_output_file' in arguments:
            self.param_output_file = arguments['param_output_file']
        if 'kaldi_output_file' in arguments:
            self.kaldi_output_file = arguments['kaldi_output_file']
Example #2
    def parse_config_cnn(self, arguments, nnet_spec, conv_nnet_spec):
        self.parse_config_dnn(arguments, nnet_spec)
        # parse convolutional layer structure
        self.conv_layer_configs = parse_conv_spec(conv_nnet_spec, self.batch_size)
        # parse convolutional layer activation function (maxout not supported yet)
        if 'conv_activation' in arguments:
            self.conv_activation_text = arguments['conv_activation']
            self.conv_activation = parse_activation(arguments['conv_activation'])
        # whether we use the fast version of convolution
        if 'use_fast' in arguments:
            self.use_fast = string_2_bool(arguments['use_fast'])
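parse_conv_spec and string_2_bool are helpers defined elsewhere in the library and are not shown here. As an illustration only, a plausible stand-in for string_2_bool (the library's actual helper may behave differently) would map the usual truthy strings in the arguments dict to a boolean:

# Illustrative stand-in only -- not the library's actual string_2_bool.
def string_2_bool(value):
    # treat 'true', '1', 'yes' (case-insensitive) as True, everything else as False
    return value.strip().lower() in ('true', '1', 'yes')

print(string_2_bool('true'))  # True
print(string_2_bool('0'))     # False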
Example #3
    def parse_config_common(self, arguments):
        # parse batch_size, momentum and learning rate
        self.parse_config_common_sda(arguments)
        if 'batch_size' in arguments:
            self.batch_size = int(arguments['batch_size'])
        if 'momentum' in arguments:
            self.momentum = float(arguments['momentum'])
        if 'lrate' in arguments:
            self.lrate = parse_lrate(arguments['lrate'])
        if 'wdir' in arguments:
            self.wdir = arguments['wdir']
        if 'contraction_level' in arguments:
            self.contraction_level = arguments['contraction_level']

        # parse activation function, including maxout
        if 'activation' in arguments:
            self.activation_text = arguments['activation']
            self.activation = parse_activation(arguments['activation'])
            if arguments['activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['activation'].replace('maxout:', ''))
                self.activation_text = 'maxout'

        # parse dropout. Note that dropout can be applied to the input features only when it is
        # also applied to the hidden-layer outputs; dropout on the input features alone is not supported.
        if 'dropout_factor' in arguments:
            self.do_dropout = True
            self.dropout_factor = [float(arguments['dropout_factor']) for n in range(20)]
        if 'input_dropout_factor' in arguments:
            self.input_dropout_factor = float(arguments['input_dropout_factor'])

        # parse regularization weights
        if 'l2_reg' in arguments:
            self.l2_reg = arguments['l2_reg']
        if 'l1_reg' in arguments:
            self.l1_reg = arguments['l1_reg']

        # parse various paths for model saving
        if 'cfg_output_file' in arguments:
            self.cfg_output_file = arguments['cfg_output_file']
        if 'param_output_file' in arguments:
            self.param_output_file = arguments['param_output_file']
        if 'kaldi_output_file' in arguments:
            self.kaldi_output_file = arguments['kaldi_output_file']

        if 'model_save_step' in arguments:
            self.model_save_step = int(arguments['model_save_step'])

        # layers (given by index) whose parameters are excluded from updates
        if 'non_updated_layers' in arguments:
            layers = arguments['non_updated_layers'].split(",")
            self.non_updated_layers = [int(layer) for layer in layers]
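Two argument formats worth noting from the code above: the activation string 'maxout:<pool_size>' carries the maxout pool size after the colon, and non_updated_layers is a comma-separated list of layer indices. A standalone sketch of both parses, with made-up argument values used only for illustration:

# Sketch of the maxout and non_updated_layers parsing above; values are hypothetical.
arguments = {'activation': 'maxout:3', 'non_updated_layers': '0,1'}

pool_size = None
if arguments['activation'].startswith('maxout'):
    pool_size = int(arguments['activation'].replace('maxout:', ''))  # 3

non_updated_layers = [int(layer) for layer in arguments['non_updated_layers'].split(',')]  # [0, 1]
print(pool_size, non_updated_layers)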