Example #1
        if multi_label:
            format_results(train_error, pred, labels, multi_label, cfg)
        else:
            format_results(train_error, pred, cfg.train_sets.label_vec, multi_label, cfg)
        # validation
        valid_error, pred2, labels = validate_by_minibatch(valid_fn, cfg)
        n_data_valid = len(pred2)
        log("- Validation:\n")
        if multi_label:
            format_results(valid_error, pred2, labels, multi_label, cfg)
        else:
            format_results(valid_error, pred2, cfg.valid_sets.label_vec, multi_label, cfg)
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp')

    # count the network parameters and report the parameter/data ratio:
    # each layer holds (fan_in + 1) * fan_out values, the +1 being the bias
    for i in range(len(cfg.hidden_layers_sizes)):
        if i == 0:
            n_params = (cfg.n_ins + 1) * cfg.hidden_layers_sizes[i]
        else:
            n_params += (cfg.hidden_layers_sizes[i-1] + 1) * cfg.hidden_layers_sizes[i]
    # the output layer is fed by the last hidden layer
    n_params += cfg.n_outs * (cfg.hidden_layers_sizes[-1] + 1)
    ratio = float(n_params) / float(n_data_train)
    log('-->> Ratio Parameters / Data : ' + str(ratio))

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
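The loop above counts trainable parameters: each layer contributes (fan_in + 1) * fan_out values, the +1 being the bias row. A self-contained sketch of the same arithmetic, with made-up layer sizes:

# Standalone sketch of the parameter count above; all sizes are hypothetical.
n_ins, n_outs = 250, 1024
hidden_layers_sizes = [1024, 1024]

n_params = (n_ins + 1) * hidden_layers_sizes[0]
for i in range(1, len(hidden_layers_sizes)):
    n_params += (hidden_layers_sizes[i - 1] + 1) * hidden_layers_sizes[i]
n_params += (hidden_layers_sizes[-1] + 1) * n_outs

print(n_params)  # (251 + 1025 + 1025) * 1024 = 2356224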
Example #2
        # one epoch of sgd training
        train_acc, train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, lrate %f, training accuracy %f' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(train_acc)) + '(%)')
        # validation
        valid_acc, valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation accuracy %f' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(valid_acc)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))

        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(cnn, filename=wdir + '/nnet.tmp_CNN_LACEA')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp_CNN_LACEA')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(cnn,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(cnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)

    # output the fully-connected part into Kaldi-compatible format
    if cfg.kaldi_output_file != '':
        log('> ... start to output the FC part')
Example #3
File: run_DRN.py Project: magic2du/dlnn
    train_fn, valid_fn = dnn.build_finetune_functions(
                (cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
                batch_size=cfg.batch_size)

    log('> ... finetuning the model')
    while (cfg.lrate.get_rate() != 0):
        # one epoch of sgd training 
        train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, training error %f ' % (cfg.lrate.epoch, 100*numpy.mean(train_error)) + '(%)')
        # validation 
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation error %f ' % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100*numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp') 

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)

    # output the model into Kaldi-compatible format
    if cfg.kaldi_output_file != '':
        dnn.write_model_to_kaldi(cfg.kaldi_output_file)
        log('> ... the final Kaldi model is ' + cfg.kaldi_output_file) 

    # remove the tmp files (generated for training resume)
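Every example loops while cfg.lrate.get_rate() != 0 and feeds the validation error to get_next_rate(). The sketch below is a hypothetical stand-in for the scheduler contract those loops rely on, not PDNN's actual learning-rate class: the rate decays once validation stops improving, and a rate of 0 ends training.

# HalvingLearningRate is a made-up illustration of the interface the
# training loops above expect: get_rate(), get_next_rate(current_error),
# and an epoch counter.
class HalvingLearningRate(object):
    def __init__(self, start_rate=0.08, min_rate=0.01, decay=0.5):
        self.rate = start_rate
        self.min_rate = min_rate
        self.decay = decay
        self.epoch = 1
        self.lowest_error = float('inf')

    def get_rate(self):
        return self.rate

    def get_next_rate(self, current_error):
        if current_error < self.lowest_error:
            self.lowest_error = current_error  # still improving: keep the rate
        else:
            self.rate *= self.decay            # plateau: decay the rate
        if self.rate < self.min_rate:
            self.rate = 0                      # 0 terminates the while loop
        self.epoch += 1
        return self.rate

# toy driver mirroring the examples' loop structure
lrate = HalvingLearningRate()
while lrate.get_rate() != 0:
    lrate.get_next_rate(current_error=9.5)  # validation error stuck: rate halves
print(lrate.epoch)  # epochs consumed before the rate reached 0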
Example #4
        log('> task %d, epoch %d, training error %f ' % (n, cfg.lrate.epoch, 100 * numpy.mean(train_error_array[n])) + '(%)')
        train_error_array[n] = []
        # perform validation, output the validation error rate, and adjust the learning rate based on it
        valid_error = validate_by_minibatch(valid_fn_array[n], cfg)
        log('> task %d, epoch %d, lrate %f, validation error %f ' % (n, cfg.lrate.epoch, cfg.lrate.get_rate(), 100 * numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
    else:
        log('> task %d, epoch %d, training error %f ' % (n, cfg.lrate.epoch, numpy.mean(train_error_array[n])) + '(%)')
        train_error_array[n] = []
        # perform validation, output the validation error rate, and adjust the learning rate based on it
        valid_error = validate_by_minibatch(valid_fn_array[n], cfg)
        log('> task %d, epoch %d, lrate %f, validation error %f ' % (n, cfg.lrate.epoch, cfg.lrate.get_rate(), numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=numpy.mean(valid_error))
    # output nnet parameters and lrate, for training resume
    _nnet2file(dnn_array[n].layers, filename=wdir + '/nnet.tmp.task' + str(n))
    _lrate2file(cfg.lrate, wdir + '/training_state.tmp.task' + str(n))
    # if the lrate of a task decays to 0, training on this task terminates and it is excluded from future training
    if cfg.lrate.get_rate() == 0:
        active_tasks_new.remove(n)
        # save the model and network configuration
        if cfg.param_output_file != '':
            _nnet2file(dnn_array[n].layers, filename=cfg.param_output_file + '.task' + str(n),
                       input_factor=cfg.input_dropout_factor, factor=cfg.dropout_factor)
            log('> ... the final PDNN model parameter is ' + cfg.param_output_file + '.task' + str(n))
        if cfg.cfg_output_file != '':
            _cfg2file(dnn_array[n].cfg, filename=cfg.cfg_output_file + '.task' + str(n))
            log('> ... the final PDNN model config is ' + cfg.cfg_output_file + '.task' + str(n))
        # output the model into Kaldi-compatible format
        if cfg.kaldi_output_file != '':
            dnn_array[n].write_model_to_kaldi(cfg.kaldi_output_file + '.task' + str(n))
            log('> ... the final Kaldi model is ' + cfg.kaldi_output_file + '.task' + str(n))
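Example #4 begins mid-loop, inside the per-task branch of a multi-task trainer; the active_tasks_new.remove(n) call only makes sense inside a loop over the active tasks. A self-contained toy version of that enclosing structure (the rates and decay rule here are made up; the real loop would call train_sgd and validate_by_minibatch per task as shown above):

# Toy reconstruction of the active-task loop implied by Example #4: each
# task runs until its own learning rate decays to 0, then drops out.
rates = [0.08, 0.04, 0.02]                 # hypothetical per-task lrates
active_tasks = list(range(len(rates)))
while len(active_tasks) > 0:
    active_tasks_new = list(active_tasks)  # copy: entries are removed mid-iteration
    for n in active_tasks:
        rates[n] /= 2                      # stand-in for one epoch of training + decay
        if rates[n] < 0.001:
            rates[n] = 0
            active_tasks_new.remove(n)     # task finished: exclude it from future epochs
    active_tasks = active_tasks_new
print('all tasks finished')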
Example #5
        # one epoch of sgd training
        train_error = train_sgd(train_fn, cfg)
        log("> epoch %d, training error %f " % (cfg.lrate.epoch, 100 * numpy.mean(train_error)) + "(%)")
        # validation
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log(
            "> epoch %d, lrate %f, validation error %f "
            % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100 * numpy.mean(valid_error))
            + "(%)"
        )
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))

        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(cnn.layers, filename=wdir + "/nnet.tmp")
            _lrate2file(cfg.lrate, wdir + "/training_state.tmp")

    # save the model and network configuration
    if cfg.param_output_file != "":
        _nnet2file(
            cnn.layers, filename=cfg.param_output_file, input_factor=cfg.input_dropout_factor, factor=cfg.dropout_factor
        )
        log("> ... the final PDNN model parameter is " + cfg.param_output_file)
    if cfg.cfg_output_file != "":
        _cfg2file(cnn.cfg, filename=cfg.cfg_output_file)
        log("> ... the final PDNN model config is " + cfg.cfg_output_file)

    # output the fully-connected part into Kaldi-compatible format
    if cfg.kaldi_output_file != "":
        cnn.fc_dnn.write_model_to_kaldi(cfg.kaldi_output_file)
        log("> ... the final Kaldi model (only FC layers) is " + cfg.kaldi_output_file)
Example #6
        # one epoch of sgd training
        train_acc, train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, lrate %f, training accuracy %f' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(train_acc)) + '(%)')
        # validation
        valid_acc, valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation accuracy %f' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(valid_acc)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))

        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(densenet, filename=wdir + '/nnet.tmp_DENSENET')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp_DENSENET')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(densenet,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(densenet.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)

    # output the fully-connected part into Kaldi-compatible format
    if cfg.kaldi_output_file != '':
        densenet.write_model_to_kaldi(cfg.kaldi_output_file)
Example #7
File: dnn_run.py Project: plaffitte/script
def dnn_run(arguments):
    
    required_arguments = ['train_data', 'valid_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print("Error: the argument %s has to be specified" % arg)
            exit(1)
    train_data_spec = arguments['train_data']
    valid_data_spec = arguments['valid_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']
    cfg = NetworkConfig()
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)
    
    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
    if resume_training:
        _file2nnet(dnn.layers, filename=wdir + '/nnet.tmp')

    # get the training and validation functions for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = dnn.build_finetune_functions((cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
                batch_size=cfg.batch_size)

    log('> ... finetuning the model')
    while (cfg.lrate.get_rate() != 0):
        # one epoch of sgd training
        train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, training error %f ' % (cfg.lrate.epoch, 100*numpy.mean(train_error)) + '(%)')
        # validation
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation error %f ' % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100*numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)
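For context, a hypothetical call to dnn_run: the dictionary keys are the ones the function checks above, while the paths and the nnet_spec value are placeholders in PDNN's usual format, not values taken from this project.

# Hypothetical invocation of dnn_run; all values below are placeholders.
arguments = {
    'train_data': 'train.pfile.gz,partition=600m,random=true',
    'valid_data': 'valid.pfile.gz,partition=600m,random=true',
    'nnet_spec': '250:1024:1024:1024',   # input:hidden:...:output sizes (assumed)
    'wdir': './wdir',
    'ptr_file': 'rbm.param',             # optional pre-training parameters
    'ptr_layer_number': '2',             # optional: layers to load from ptr_file
}
dnn_run(arguments)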
Example #8
def dnn_run(arguments):

    required_arguments = ['train_data', 'valid_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print("Error: the argument %s has to be specified" % arg)
            exit(1)
    train_data_spec = arguments['train_data']
    valid_data_spec = arguments['valid_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']
    cfg = NetworkConfig()
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if (os.path.exists(wdir + '/nnet.tmp') and
            os.path.exists(wdir + '/training_state.tmp')):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch '
            + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers,
                   set_layer_num=ptr_layer_number,
                   filename=ptr_file)
    if resume_training:
        _file2nnet(dnn.layers, filename=wdir + '/nnet.tmp')

    # get the training and validation functions for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = dnn.build_finetune_functions(
        (cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
        batch_size=cfg.batch_size)

    log('> ... finetuning the model')
    while (cfg.lrate.get_rate() != 0):
        # one epoch of sgd training
        train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, training error %f ' %
            (cfg.lrate.epoch, 100 * numpy.mean(train_error)) + '(%)')
        # validation
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation error %f ' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)