Example #1
    def parse_config_common(self, arguments):
        if arguments.has_key('corruption_level'):
            self.corruption_levels = [
                float(arguments['corruption_level']) for n in xrange(100)
            ]
        if arguments.has_key('learning_rate'):
            self.learning_rates = [
                float(arguments['learning_rate']) for n in xrange(100)
            ]
        if arguments.has_key('batch_size'):
            self.batch_size = int(arguments['batch_size'])
        if arguments.has_key('epoch_number'):
            self.epochs = int(arguments['epoch_number'])
        if arguments.has_key('momentum'):
            self.momentum = float(arguments['momentum'])

        # parse DNN network structure
        nnet_layers = arguments['nnet_spec'].split(':')
        self.n_ins = int(nnet_layers[0])
        self.hidden_layers_sizes = [
            int(nnet_layers[i]) for i in range(1,
                                               len(nnet_layers) - 1)
        ]
        self.n_outs = int(nnet_layers[-1])

        # parse pre-training layer number
        self.ptr_layer_number = len(self.hidden_layers_sizes)
        if arguments.has_key('ptr_layer_number'):
            self.ptr_layer_number = int(arguments['ptr_layer_number'])

        # parse activation function
        if arguments.has_key('hidden_activation'):
            self.hidden_activation = parse_activation(
                arguments['hidden_activation'])
            if arguments['hidden_activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['hidden_activation'].replace(
                    'maxout:', ''))
        if arguments.has_key('1stlayer_reconstruct_activation'):
            self.firstlayer_reconstruct_activation = parse_activation(
                arguments['1stlayer_reconstruct_activation'])

        # parse sparsity setting
        if arguments.has_key('sparsity'):
            self.sparsity = float(arguments['sparsity'])
        if arguments.has_key('sparsity_weight'):
            self.sparsity_weight = float(arguments['sparsity_weight'])

        # parse various paths for model saving
        if arguments.has_key('cfg_output_file'):
            self.cfg_output_file = arguments['cfg_output_file']
        if arguments.has_key('param_output_file'):
            self.param_output_file = arguments['param_output_file']
        if arguments.has_key('kaldi_output_file'):
            self.kaldi_output_file = arguments['kaldi_output_file']
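
For reference, a minimal sketch (not taken from any of the projects above) of the kind of arguments dict parse_config_common expects; the key names come from the code in Example #1, while the values and layer sizes are made up for illustration.

# Illustration only: a made-up arguments dict matching the keys read by
# parse_config_common above (values are placeholders, not project defaults).
arguments = {
    'batch_size': '256',
    'epoch_number': '10',
    'momentum': '0.5',
    'learning_rate': '0.08',
    'corruption_level': '0.2',
    'nnet_spec': '250:1024:1024:1024:1920',  # n_ins : hidden sizes ... : n_outs
    'hidden_activation': 'maxout:3',         # "maxout:<pool_size>" turns on maxout
    'param_output_file': 'dnn.param',
}

# The structure string is split on ':' exactly as in the method above.
layers = arguments['nnet_spec'].split(':')
n_ins = int(layers[0])                                 # 250
hidden_layers_sizes = [int(x) for x in layers[1:-1]]   # [1024, 1024, 1024]
n_outs = int(layers[-1])                               # 1920
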
Example #2
    def parse_config_common(self, arguments):
        if arguments.has_key('corruption_level'):
            self.corruption_levels = [float(arguments['corruption_level']) for n in xrange(100)]
        if arguments.has_key('learning_rate'):
            self.learning_rates = [float(arguments['learning_rate']) for n in xrange(100)]
        if arguments.has_key('batch_size'):
            self.batch_size = int(arguments['batch_size'])
        if arguments.has_key('epoch_number'):
            self.epochs = int(arguments['epoch_number'])
        if arguments.has_key('momentum'):
            self.momentum = float(arguments['momentum'])

        # parse DNN network structure
        nnet_layers = arguments['nnet_spec'].split(':')
        self.n_ins = int(nnet_layers[0])
        self.hidden_layers_sizes = [int(nnet_layers[i]) for i in range(1, len(nnet_layers)-1)]
        self.n_outs = int(nnet_layers[-1])

        # parse pre-training layer number
        self.ptr_layer_number = len(self.hidden_layers_sizes)
        if arguments.has_key('ptr_layer_number'):
            self.ptr_layer_number = int(arguments['ptr_layer_number'])

        # parse activation function
        if arguments.has_key('hidden_activation'):
            self.hidden_activation = parse_activation(arguments['hidden_activation'])
            if arguments['hidden_activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['hidden_activation'].replace('maxout:', ''))
        if arguments.has_key('1stlayer_reconstruct_activation'):
            self.firstlayer_reconstruct_activation = parse_activation(arguments['1stlayer_reconstruct_activation'])

        # parse sparsity setting
        if arguments.has_key('sparsity'):
            self.sparsity = float(arguments['sparsity'])
        if arguments.has_key('sparsity_weight'):
            self.sparsity_weight = float(arguments['sparsity_weight'])

        # parse various paths for model saving
        if arguments.has_key('cfg_output_file'):
            self.cfg_output_file = arguments['cfg_output_file']
        if arguments.has_key('param_output_file'):
            self.param_output_file = arguments['param_output_file']
        if arguments.has_key('kaldi_output_file'):
            self.kaldi_output_file = arguments['kaldi_output_file']
Example #3
 def parse_config_cnn(self, arguments, nnet_spec, conv_nnet_spec):
     self.parse_config_dnn(arguments, nnet_spec)
     # parse convolutional layer structure
     self.conv_layer_configs = parse_conv_spec(conv_nnet_spec, self.batch_size)
     # parse convolutional layer activation
     # parse activation function, including maxout
     if arguments.has_key('conv_activation'):
         self.conv_activation_text = arguments['conv_activation']
         self.conv_activation = parse_activation(arguments['conv_activation'])
         # maxout not supported yet
     # whether we use the fast version of convolution
     if arguments.has_key('use_fast'):
         self.use_fast = string_2_bool(arguments['use_fast'])
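Example #4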
 def parse_config_cnn(self, arguments, nnet_spec, conv_nnet_spec):
     self.parse_config_dnn(arguments, nnet_spec)
     # parse convolutional layer structure
     self.conv_layer_configs = parse_conv_spec(conv_nnet_spec, self.batch_size)
     # parse convolutional layer activation
     # parse activation function, including maxout
     if arguments.has_key('conv_activation'):
         self.conv_activation_text = arguments['conv_activation']
         self.conv_activation = parse_activation(arguments['conv_activation'])
         # maxout not supported yet
     # whether we use the fast version of convolution
     if arguments.has_key('use_fast'):
         self.use_fast = string2bool(arguments['use_fast'])
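Example #5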
    def parse_config_common(self, arguments):
        # parse batch_size, momentum, learning rate and regularization
        if arguments.has_key('batch_size'):
            self.batch_size = int(arguments['batch_size'])
        if arguments.has_key('momentum'):
            self.momentum = float(arguments['momentum'])
        if arguments.has_key('lrate'):
            self.lrate = parse_lrate(arguments['lrate'])
        if arguments.has_key('l1_reg'):
            self.l1_reg = float(arguments['l1_reg'])
        if arguments.has_key('l2_reg'):
            self.l2_reg = float(arguments['l2_reg'])
        if arguments.has_key('max_col_norm'):
            self.max_col_norm = float(arguments['max_col_norm'])

        # parse activation function, including maxout
        if arguments.has_key('activation'):
            self.activation_text = arguments['activation']
            self.activation = parse_activation(arguments['activation'])
            if arguments['activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['activation'].replace('maxout:',''))
                self.activation_text = 'maxout'

        # parse dropout. note that dropout can be applied to the input features only when dropout is also
        # applied to hidden-layer outputs at the same time. that is, you cannot apply dropout only to the
        # input features
        if arguments.has_key('dropout_factor'):
            self.do_dropout = True
            factors = arguments['dropout_factor'].split(',')
            self.dropout_factor = [float(factor) for factor in factors]
            if arguments.has_key('input_dropout_factor'):
                self.input_dropout_factor = float(arguments['input_dropout_factor'])
        #Added by me. Will see how this works in practice
        if arguments.has_key('regression'):
            self.do_regression = True

        if arguments.has_key('cfg_output_file'):
            self.cfg_output_file = arguments['cfg_output_file']
        if arguments.has_key('param_output_file'):
            self.param_output_file = arguments['param_output_file']
        if arguments.has_key('kaldi_output_file'):
            self.kaldi_output_file = arguments['kaldi_output_file']

        if arguments.has_key('model_save_step'):
            self.model_save_step = int(arguments['model_save_step'])

        if arguments.has_key('non_updated_layers'):
            layers = arguments['non_updated_layers'].split(",")
            self.non_updated_layers = [int(layer) for layer in layers]
Example #6
    def parse_config_common(self, arguments):
        # parse batch_size, momentum, learning rate and regularization
        if arguments.has_key('batch_size'):
            self.batch_size = int(arguments['batch_size'])
        if arguments.has_key('momentum'):
            self.momentum = float(arguments['momentum'])
        if arguments.has_key('lrate'):
            self.lrate = parse_lrate(arguments['lrate'])
        if arguments.has_key('l1_reg'):
            self.l1_reg = float(arguments['l1_reg'])
        if arguments.has_key('l2_reg'):
            self.l2_reg = float(arguments['l2_reg'])
        if arguments.has_key('max_col_norm'):
            self.max_col_norm = float(arguments['max_col_norm'])

        # parse activation function, including maxout
        if arguments.has_key('activation'):
            self.activation_text = arguments['activation']
            self.activation = parse_activation(arguments['activation'])
            if arguments['activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['activation'].replace(
                    'maxout:', ''))
                self.activation_text = 'maxout'

        # parse dropout. note that dropout can be applied to the input features only when dropout is also
        # applied to hidden-layer outputs at the same time. that is, you cannot apply dropout only to the
        # input features
        if arguments.has_key('dropout_factor'):
            self.do_dropout = True
            factors = arguments['dropout_factor'].split(',')
            self.dropout_factor = [float(factor) for factor in factors]
            if arguments.has_key('input_dropout_factor'):
                self.input_dropout_factor = float(
                    arguments['input_dropout_factor'])

        if arguments.has_key('cfg_output_file'):
            self.cfg_output_file = arguments['cfg_output_file']
        if arguments.has_key('param_output_file'):
            self.param_output_file = arguments['param_output_file']
        if arguments.has_key('kaldi_output_file'):
            self.kaldi_output_file = arguments['kaldi_output_file']

        if arguments.has_key('model_save_step'):
            self.model_save_step = int(arguments['model_save_step'])

        if arguments.has_key('non_updated_layers'):
            layers = arguments['non_updated_layers'].split(",")
            self.non_updated_layers = [int(layer) for layer in layers]
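
As the comment in the example notes, input_dropout_factor is only read inside the dropout_factor branch, so it cannot be used on its own. A short made-up illustration of the parsing (values are placeholders):

# Illustration only: hypothetical dropout settings, parsed as in the method above.
arguments = {'dropout_factor': '0.2,0.2,0.5', 'input_dropout_factor': '0.1'}
dropout_factor = [float(f) for f in arguments['dropout_factor'].split(',')]   # [0.2, 0.2, 0.5]
# input_dropout_factor takes effect only because dropout_factor is also set;
# on its own it would never be read.
input_dropout_factor = float(arguments['input_dropout_factor'])               # 0.1
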
Example #7
 def parse_config_cldnn(self, arguments, nnet_spec, conv_nnet_spec, lstm_nnet_spec):
     self.parse_config_common(arguments)
     # parse CNN network structure
     self.conv_layer_configs = parse_conv_spec(conv_nnet_spec, self.batch_size)
     if arguments.has_key('conv_activation'):
         self.conv_activation_text = arguments['conv_activation']
         self.conv_activation = parse_activation(arguments['conv_activation'])
     if arguments.has_key('use_fast'):
         self.use_fast = string_2_bool(arguments['use_fast'])
     # parse LSTM network structure
     lstm_layers = lstm_nnet_spec.split(':')
     self.lstm_layers_sizes = [int(lstm_layers[i]) for i in range(0, len(lstm_layers))]
     # parse DNN network structure
     fc_layers = nnet_spec.split(':')
     self.hidden_layers_sizes = [int(fc_layers[i]) for i in range(0, len(fc_layers)-1)]
     self.n_outs = int(fc_layers[-1])
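
A rough illustration, with made-up layer sizes, of how the colon-separated LSTM and fully-connected spec strings are decomposed by parse_config_cldnn above; the convolutional spec is handled by parse_conv_spec and is not shown here.

# Illustration only: hypothetical spec strings, split as in parse_config_cldnn.
lstm_nnet_spec = '512:512'
nnet_spec = '1024:1024:1920'
lstm_layers_sizes = [int(x) for x in lstm_nnet_spec.split(':')]   # [512, 512]
fc_layers = nnet_spec.split(':')
hidden_layers_sizes = [int(x) for x in fc_layers[:-1]]            # [1024, 1024]
n_outs = int(fc_layers[-1])                                       # 1920
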
Example #8
    def parse_config_common(self, arguments):
        # parse batch_size, momentum and learning rate 
        if arguments.has_key('batch_size'):
            self.batch_size = int(arguments['batch_size'])
        if arguments.has_key('momentum'):
            self.momentum = float(arguments['momentum'])
        if arguments.has_key('lrate'):
            self.lrate = parse_lrate(arguments['lrate'])

        # parse activation function, including maxout
        if arguments.has_key('activation'):
            self.activation_text = arguments['activation']
            self.activation = parse_activation(arguments['activation'])
            if arguments['activation'].startswith('maxout'):
                self.do_maxout = True
                self.pool_size = int(arguments['activation'].replace('maxout:',''))
                self.activation_text = 'maxout'

        # parse dropout. note that dropout can be applied to the input features only when dropout is also
        # applied to hidden-layer outputs at the same time. that is, you cannot apply dropout only to the
        # input features
        if arguments.has_key('dropout_factor'):
            self.do_dropout = True
            factors = arguments['dropout_factor'].split(',')
            self.dropout_factor = [float(factor) for factor in factors]
            if arguments.has_key('input_dropout_factor'):
                self.input_dropout_factor = float(arguments['input_dropout_factor'])

        if arguments.has_key('cfg_output_file'):
            self.cfg_output_file = arguments['cfg_output_file'] 
        if arguments.has_key('param_output_file'):
            self.param_output_file = arguments['param_output_file']
        if arguments.has_key('kaldi_output_file'):
            self.kaldi_output_file = arguments['kaldi_output_file']

        if arguments.has_key('model_save_step'):
            self.model_save_step = int(arguments['model_save_step'])
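Example #9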
    batch_size=1000,
    drop_last=False
)

# various dimensions for autoregressive and energy nets
dim = 2  # D
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components  # K + 3M

# Create MADE
made = models.ResidualMADE(
    input_dim=dim,
    n_residual_blocks=args.n_residual_blocks_made,
    hidden_dim=args.hidden_dim_made,
    output_dim_multiplier=output_dim_multiplier,
    conditional=False,
    activation=utils.parse_activation(args.activation_made),
    use_batch_norm=args.use_batch_norm_made,
    dropout_probability=args.dropout_probability_made
).to(device)

# create energy net
energy_net = models.ResidualEnergyNet(
    input_dim=(args.context_dim + 1),
    n_residual_blocks=args.n_residual_blocks_energy_net,
    hidden_dim=args.hidden_dim_energy_net,
    energy_upper_bound=args.energy_upper_bound,
    activation=utils.parse_activation(args.activation_energy_net),
    use_batch_norm=args.use_batch_norm_energy_net,
    dropout_probability=args.dropout_probability_energy_net
).to(device)
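Example #10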
 def init_activation(self):
     self.activation = parse_activation(self.activation_text)
Example #11
 def init_activation(self):
     self.activation = parse_activation(self.activation_text)
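
None of the examples show parse_activation itself. As an assumed sketch only, not the actual implementation in any of these projects, a helper of this kind typically maps a name string to a callable; the maxout:<pool_size> case is handled by the callers, as seen above.

import numpy as np

def parse_activation_sketch(name):
    # Assumed sketch only -- not the real parse_activation used in the examples.
    # Maps an activation name to an elementwise function.
    table = {
        'sigmoid': lambda x: 1.0 / (1.0 + np.exp(-x)),
        'tanh': np.tanh,
        'relu': lambda x: np.maximum(0.0, x),
    }
    return table[name]

print(parse_activation_sketch('relu')(np.array([-1.0, 2.0])))  # [0. 2.]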