Example #1
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options: the pre-trained model file and the number
    # of layers to initialize from it
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check the working dir to see whether this is a resumed training run
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState()
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters:
    # if not resuming, initialize them from the specified pre-training file;
    # if resuming, they come from the tmp model file instead
    if (ptr_layer_number > 0) and not resume_training:
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
Example #2
# read the data splits; the training read below is an assumption, since
# `input`, `label` and `mask` are used later but never defined in this fragment
input, label, mask = read(train_data_spec)
test, label_test, mask_test = read(test_data_spec)
valid, label_valid, mask_valid = read(valid_data_spec)
# label layout: (n_batches, n_streams, len_batch, n_classes)
n_streams = np.shape(label)[1]
len_batch = np.shape(label)[2]
n_classes = np.shape(label)[3]
n_batches = len(input)
n_test = len(test)
n_valid = len(valid)
error = np.zeros((n_test, n_streams, len_batch, n_classes))

# ############################ Create instance of class RNN #########################
rng = np.random.RandomState()
seed = rng.randint(90000)
filename = None  # assumed default: no saved model to load
if os.path.exists(filesave):
    filename = filesave
    log('...found previous configuration...')
rnn = RNN(Nlayers, Ndirs, Nx, Nh, n_classes, Ah, Ay, predictPer, loss, L1reg,
          L2reg, momentum, seed, frontEnd, filename, initParams)

################################ TRAIN THE RNN #############################
train_cost = []
delta_train = 5.0
delta_valid = 10.0
old_training_error = 0.0
old_valid_error = 0.0
result = []  # list for saving all predictions made by the network
# file = 'training_pred.pickle.gz'
for k in range(n_epoch):
    correct_number_train = 0.0
    correct_number_valid = 0.0
    class_occurrence_train = np.zeros(n_classes)
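
For reference, here is a toy array in the layout that the np.shape(label) lookups above assume; the axis names come from the snippet itself, while the concrete sizes are made up for illustration:

import numpy as np

# Toy label tensor in the assumed layout:
# axis 0 = n_batches, axis 1 = n_streams, axis 2 = len_batch, axis 3 = n_classes
label = np.zeros((8, 4, 20, 5))
n_streams, len_batch, n_classes = np.shape(label)[1:4]
print(n_streams, len_batch, n_classes)  # -> 4 20 5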
Example #3
    cfg.parse_config_dnn(arguments, nnet_spec)
    cfg.init_data_reading(train_data_spec, valid_data_spec)

    # parse pre-training options: the pre-trained model file and the number
    # of layers to initialize from it
    ptr_layer_number = 0
    ptr_file = ''
    if 'ptr_file' in arguments and 'ptr_layer_number' in arguments:
        ptr_file = arguments['ptr_file']
        ptr_layer_number = int(arguments['ptr_layer_number'])

    # check the working dir to see whether this is a resumed training run
    resume_training = False
    if os.path.exists(wdir + '/nnet.tmp') and os.path.exists(wdir + '/training_state.tmp'):
        resume_training = True
        cfg.lrate = _file2lrate(wdir + '/training_state.tmp')
        log('> ... found nnet.tmp and training_state.tmp, now resume training from epoch ' + str(cfg.lrate.epoch))

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # setup model
    if cfg.do_dropout:
        dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    else:
        dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)

    # initialize model parameters:
    # if not resuming, initialize them from the specified pre-training file;
    # if resuming, they come from the tmp model file instead
    if (ptr_layer_number > 0) and not resume_training:
        _file2nnet(dnn.layers, set_layer_num=ptr_layer_number, filename=ptr_file)
Example #4
def format_results(error, pred, labels, multi_label, cfg):
    # Summarize predictions: per-class recall/precision in the multi-label
    # case, a confusion matrix in the single-label case.
    correct_number = 0.0
    confusion_matrix = numpy.zeros((cfg.n_outs, cfg.n_outs))
    class_occurrence = numpy.zeros((1, cfg.n_outs))
    if multi_label:
        recall_matrix = numpy.zeros(cfg.n_outs)
        false_pred_matrix = numpy.zeros(cfg.n_outs)
        precision_matrix = numpy.zeros(cfg.n_outs)
        N_pred_true = numpy.zeros(cfg.n_outs)
        N_pred_false = numpy.zeros(cfg.n_outs)
        [a, b, c] = numpy.shape(pred)
        N_samples = 0
        for i in range(a):
            for j in range(b):
                out = numpy.array(pred[i][j], dtype=int)
                lab = numpy.array(labels[i][j], dtype=int)
                # count positions where the binary prediction matches the label
                res = (out == lab)
                correct_number += numpy.sum(res)
                for k in range(cfg.n_outs):
                    if lab[k]:
                        class_occurrence[0, k] += 1
                        if out[k] == lab[k]:
                            N_pred_true[k] += 1
                    else:
                        if out[k] != lab[k]:
                            N_pred_false[k] += 1
                N_samples += 1
        recall_matrix = 100 * N_pred_true / class_occurrence.T[:, 0]
        precision_matrix = 100 * N_pred_true / (N_pred_false + N_pred_true)
        false_pred_matrix = 100 * N_pred_false / (N_samples -
                                                  class_occurrence.T[:, 0])
        log('Error %f ' % (100 * numpy.mean(error)) + '(%)')
        log('Accuracy (Recall) Matrix: \n\n ' +
            str(numpy.around(recall_matrix, 2)) + ' (%)\n')
        log('Precision Matrix: \n\n ' +
            str(numpy.around(precision_matrix, 2)) + ' (%)\n')
        log('False Predictions Matrix: \n\n ' +
            str(numpy.around(false_pred_matrix, 2)) + ' (%)\n')
    else:
        for i in range(len(pred)):
            p_sorted = pred[i]
            if p_sorted == labels[i]:
                correct_number += 1
                confusion_matrix[labels[i], labels[i]] += 1
            else:
                confusion_matrix[labels[i], p_sorted] += 1
            class_occurrence[0, labels[i]] += 1
        confusion_matrix = 100 * confusion_matrix / class_occurrence.T
        print(100 * numpy.mean(error), cfg.lrate.epoch)
        log('Error %f ' % (100 * numpy.mean(error)) + '(%)')
        log('Confusion Matrix \n\n ' + str(numpy.around(confusion_matrix, 2)) +
            ' (%)\n')
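
A minimal sketch of how format_results might be called on toy single-label data. The cfg stub, the log = print stand-in, and the data values are assumptions for illustration; the original module's config object and logger are not shown in this fragment.

from types import SimpleNamespace
import numpy

log = print  # stand-in for the module's logging helper (assumption)

# Stub config exposing only the fields format_results reads (assumption).
cfg = SimpleNamespace(n_outs=3, lrate=SimpleNamespace(epoch=1))

pred = [0, 2, 1, 1]                        # predicted class indices
labels = [0, 2, 2, 1]                      # ground-truth class indices
error = numpy.array([0.0, 0.0, 1.0, 0.0])  # per-sample error flags

format_results(error, pred, labels, multi_label=False, cfg=cfg)
# prints "25.0 1", then logs the error and a 3x3 confusion matrix in percent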