Example #1
    cfg = NetworkConfig(multi_label)
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState()
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
Example #2
    conv_nnet_spec = arguments['conv_nnet_spec']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']

    # parse network configuration from arguments, and initialize data reading
    cfg = NetworkConfig()
    cfg.model_type = 'CNN_LACEA'
    cfg.parse_config_cnn(arguments, '10:' + nnet_spec, conv_nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp_CNN_LACEA') and os.path.exists(
            wdir + '/training_state.tmp_CNN_LACEA'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp_CNN_LACEA')
        log('> ... found nnet.tmp_CNN_LACEA and training_state.tmp_CNN_LACEA, now resume training from epoch '
            + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))
    log('> ... initializing the model')
    # construct the cnn architecture
    cnn = CNN_LACEA(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    # load the pre-training networks, if any, for parameter initialization
    if resume_training:
        _file2nnet(cnn, filename=wdir + '/nnet.tmp_CNN_LACEA')

    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = cnn.build_finetune_functions(
        (cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
        batch_size=cfg.batch_size)
Example #3
    cfg = NetworkConfig()
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
Example #4
        if n > 0:
            dnn_shared = dnn_array[0]
            shared_layers = [m for m in xrange(shared_layers_num)]
        print shared_layers
        dnn = DNN_MTL(task_id=n, numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg,
                      dnn_shared=dnn_shared, shared_layers=shared_layers)

        # get the training, validation and testing function for the model
        log('> ... getting the finetuning functions for task %d' % (n))
        train_fn, valid_fn = dnn.build_finetune_functions((cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y), batch_size=cfg.batch_size)
        # add dnn and the functions to the list   
        dnn_array.append(dnn)
        train_fn_array.append(train_fn)
        valid_fn_array.append(valid_fn)
        # check the working dir to decide whether it's resuming training; if yes, load the tmp network files for initialization
        if os.path.exists(wdir + '/nnet.tmp.task' + str(n)) and os.path.exists(wdir + '/training_state.tmp.task' + str(n)):
            resume_training = True
            resume_tasks.append(n)
            cfg.lrate = _file2lrate(wdir + '/training_state.tmp.task' + str(n))
            log('> ... found nnet.tmp.task%d and training_state.tmp.task%d, now resume task%d training from epoch %d' % (n, n, n, cfg.lrate.epoch))
            _file2nnet(dnn.layers, filename=wdir + '/nnet.tmp.task' + str(n))

    # pre-training works only if we are NOT resuming training
    # we assume that we only pre-train the shared layers; thus we only use dnn_array[0] to load the parameters
    # because the parameters are shared across the tasks
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn_array[0].layers, set_layer_num=ptr_layer_number, filename=ptr_file)

    log('> ... finetuning the model')
    train_error_array = [[] for n in xrange(task_number)]
    active_tasks = [n for n in xrange(task_number)]  # the tasks with 0 learning rate are not considered
Example #5
    cfg.parse_config_cnn(arguments, "10:" + nnet_spec, conv_nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ""
    if arguments.has_key("ptr_file") and arguments.has_key("ptr_layer_number"):
        ptr_file = arguments["ptr_file"]
        ptr_layer_number = int(arguments["ptr_layer_number"])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + "/nnet.tmp") and os.path.exists(wdir + "/training_state.tmp"):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + "/training_state.tmp")
        log("> ... found nnet.tmp and training_state.tmp, now resume training from epoch " + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log("> ... initializing the model")
    # construct the cnn architecture
    cnn = CNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    # load the pre-training networks, if any, for parameter initialization
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(cnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
    if resume_training:
        _file2nnet(cnn.layers, filename=wdir + "/nnet.tmp")

    # get the training, validation and testing function for the model
    log("> ... getting the finetuning functions")
Example #6
    conv_nnet_spec = arguments['conv_nnet_spec']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']

    # parse network configuration from arguments, and initialize data reading
    cfg = NetworkConfig()
    cfg.model_type = 'CNN'
    cfg.parse_config_cnn(arguments, '10:' + nnet_spec, conv_nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp_DENSENET') and os.path.exists(
            wdir + '/training_state.tmp_DENSENET'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp_DENSENET')
        log('> ... found nnet.tmp_DENSENET and training_state.tmp_DENSENET, now resume training from epoch '
            + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))
    log('> ... initializing the model')
    # construct the cnn architecture
    densenet = DENSENET(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    # load the pre-training networks, if any, for parameter initialization
    if resume_training:
        _file2nnet(densenet, filename=wdir + '/nnet.tmp_DENSENET')

    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = densenet.build_finetune_functions(
        (cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
        batch_size=cfg.batch_size)
Example #7
def dnn_run(arguments):
    
    required_arguments = ['train_data', 'valid_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print "Error: the argument %s has to be specified" % (arg)
            exit(1)
    train_data_spec = arguments['train_data']
    valid_data_spec = arguments['valid_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']
    cfg = NetworkConfig()
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)
    
    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
    if resume_training:
        _file2nnet(dnn.layers, filename=wdir + '/nnet.tmp')

    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = dnn.build_finetune_functions((cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
                batch_size=cfg.batch_size)

    log('> ... finetuning the model')
    while (cfg.lrate.get_rate() != 0):
        # one epoch of sgd training
        train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, training error %f ' % (cfg.lrate.epoch, 100*numpy.mean(train_error)) + '(%)')
        # validation
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation error %f ' % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100*numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error = 100*numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers, filename=cfg.param_output_file, input_factor=cfg.input_dropout_factor, factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)
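All of these snippets assume an `arguments` dict built from PDNN's command-line options. As a rough, hypothetical illustration (the paths, data-spec strings, and layer sizes below are placeholders, not values taken from PDNN), `dnn_run` could be driven like this:

    # Hypothetical driver for dnn_run(); every value here is a placeholder.
    arguments = {
        'train_data': 'train.pickle.gz,partition=600m,random=true',  # placeholder data spec
        'valid_data': 'valid.pickle.gz,partition=600m,random=true',
        'nnet_spec': '330:1024:1024:1920',   # input:hidden:...:output layer sizes
        'wdir': './work',                    # where nnet.tmp / training_state.tmp are kept
        'ptr_file': './work/dnn.ptr',        # optional: pre-trained parameter file
        'ptr_layer_number': '2',             # optional: layers to copy from ptr_file
    }
    dnn_run(arguments)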
Example #8
def dnn_run(arguments):

    required_arguments = ['train_data', 'valid_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print "Error: the argument %s has to be specified" % (arg)
            exit(1)
    train_data_spec = arguments['train_data']
    valid_data_spec = arguments['valid_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']
    cfg = NetworkConfig()
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options
    # pre-training files and layer number (how many layers are set to the pre-training parameters)
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check working dir to see whether it's resuming training
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(
            wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch '
            + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters
    # if not resuming training, initialized from the specified pre-training file
    # if resuming training, initialized from the tmp model file
    if (ptr_layer_number > 0) and (resume_training is False):
        _file2nnet(dnn.layers,
                   set_layer_num=ptr_layer_number,
                   filename=ptr_file)
    if resume_training:
        _file2nnet(dnn.layers, filename=wdir + '/nnet.tmp')

    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = dnn.build_finetune_functions(
        (cfg.train_x, cfg.train_y), (cfg.valid_x, cfg.valid_y),
        batch_size=cfg.batch_size)

    log('> ... finetuning the model')
    while (cfg.lrate.get_rate() != 0):
        # one epoch of sgd training
        train_error = train_sgd(train_fn, cfg)
        log('> epoch %d, training error %f ' %
            (cfg.lrate.epoch, 100 * numpy.mean(train_error)) + '(%)')
        # validation
        valid_error = validate_by_minibatch(valid_fn, cfg)
        log('> epoch %d, lrate %f, validation error %f ' %
            (cfg.lrate.epoch, cfg.lrate.get_rate(),
             100 * numpy.mean(valid_error)) + '(%)')
        cfg.lrate.get_next_rate(current_error=100 * numpy.mean(valid_error))
        # output nnet parameters and lrate, for training resume
        if cfg.lrate.epoch % cfg.model_save_step == 0:
            _nnet2file(dnn.layers, filename=wdir + '/nnet.tmp')
            _lrate2file(cfg.lrate, wdir + '/training_state.tmp')

    # save the model and network configuration
    if cfg.param_output_file != '':
        _nnet2file(dnn.layers,
                   filename=cfg.param_output_file,
                   input_factor=cfg.input_dropout_factor,
                   factor=cfg.dropout_factor)
        log('> ... the final PDNN model parameter is ' + cfg.param_output_file)
    if cfg.cfg_output_file != '':
        _cfg2file(dnn.cfg, filename=cfg.cfg_output_file)
        log('> ... the final PDNN model config is ' + cfg.cfg_output_file)
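Stripped of the model specifics, every example above follows the same checkpoint-and-resume skeleton: look for a saved state in the working directory, reload it if present, and otherwise start fresh. A minimal, self-contained sketch of that pattern, with a hypothetical save_state/load_state pair standing in for PDNN's _lrate2file/_file2lrate and _nnet2file/_file2nnet helpers:

    import os
    import json

    def save_state(wdir, state):
        # persist the training state so an interrupted run can resume later
        with open(os.path.join(wdir, 'training_state.tmp'), 'w') as f:
            json.dump(state, f)

    def load_state(wdir):
        # reload the training state written by save_state()
        with open(os.path.join(wdir, 'training_state.tmp')) as f:
            return json.load(f)

    wdir = './work'
    if os.path.exists(os.path.join(wdir, 'training_state.tmp')):
        state = load_state(wdir)               # resume: recover epoch and lrate
    else:
        state = {'epoch': 0, 'lrate': 0.08}    # fresh run: start from scratch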