def train_lstm(
    # In ACE's context the word embedding can be regarded as the feature vector of each ns frame
    dim_proj=None,  # word embedding dimension and number of LSTM hidden units.
    xdim=None,
    ydim=None,
    format=None,
    patience=10,  # Number of epochs to wait before early stop if no progress
    max_epochs=500,  # The maximum number of epochs to run
    dispFreq=10,  # Display to stdout the training progress every N updates
    decay_c=0.,  # Weight decay for the classifier applied to the U weights.
    lrate=0.001,  # Learning rate for sgd (not used for adadelta and rmsprop)
    # n_words=10000,  # Vocabulary size
    optimizer=adadelta,  # sgd, adadelta and rmsprop available; sgd is very hard to use and not recommended (probably needs momentum and a decaying learning rate).
    encoder='lstm',  # TODO: can be removed, must be 'lstm'.
    dumppath='blstm_model.npz',  # The best model will be saved there
    validFreq=400,  # Compute the validation error after this number of updates.
    saveFreq=1000,  # Save the parameters after every saveFreq updates
    maxlen=None,  # Sequences longer than this get ignored
    batch_size=100,  # The batch size during training.
    valid_batch_size=100,  # The batch size used for validation/test set.
    dataset=None,

    # Parameters for extra options
    noise_std=0.,
    use_dropout=True,  # if False slightly faster, but worse test error
                       # This frequently needs a bigger model.
    reload_model=None,  # Path to a saved model we want to start from.
    test_size=-1,  # If >0, we keep only this number of test examples.
    scaling=1):

    # Model options
    model_options = locals().copy()
    print "model options", model_options

    # load_data, prepare_data = get_dataset(dataset)

    print 'Loading data'
    train, valid, test = load_data_varlen(dataset=dataset, valid_portion=0.1,
                                          test_portion=0.1, maxlen=None,
                                          scaling=scaling, robust=0,
                                          format=format, h5py=1)
    print 'data loaded'

    '''
    if test_size > 0:
        # The test set is sorted by size, but we want to keep random
        # size examples.  So we must select a random selection of the
        # examples.
        idx = numpy.arange(len(test[0]))
        numpy.random.shuffle(idx)
        idx = idx[:test_size]
        test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
    '''

    ydim = numpy.max(train[1]) + 1
    # ydim = numpy.max(train[1])
    print 'ydim = %d' % ydim

    model_options['ydim'] = ydim
    model_options['xdim'] = xdim
    model_options['dim_proj'] = dim_proj

    print 'Building model'
    # This creates the initial parameters as numpy ndarrays.
    # Dict name (string) -> numpy ndarray
    params = init_params(model_options)

    if reload_model:
        load_params('lstm_model.npz', params)

    # This creates Theano Shared Variables from the parameters.
    # Dict name (string) -> Theano Tensor Shared Variable
    # params and tparams have different copies of the weights.
    tparams = init_tparams(params)

    # use_noise is for dropout
    (use_noise, x, mask, oh_mask,
     y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)

    if decay_c > 0.:
        decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
        weight_decay = 0.
        weight_decay += (tparams['U'] ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    f_cost = theano.function([x, mask, oh_mask, y], cost, name='f_cost')

    grads = T.grad(cost, wrt=tparams.values())
    f_grad = theano.function([x, mask, oh_mask, y], grads, name='f_grad')

    lr = T.scalar(name='lr')
    f_grad_shared, f_update = optimizer(lr, tparams, grads,
                                        x, mask, oh_mask, y, cost)

    print 'Optimization'

    kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
    kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)

    print "%d train examples" % len(train[0])
    print "%d valid examples" % len(valid[0])
    print "%d test examples" % len(test[0])

    history_errs = []
    best_p = None
    bad_counter = 0

    if validFreq == -1:
        validFreq = len(train[0]) / batch_size
    if saveFreq == -1:
        saveFreq = len(train[0]) / batch_size

    uidx = 0  # the number of updates done
    estop = False  # early stop
    start_time = time.time()

    # SP contains an ordered list of (pos), ordered by chord class number [0, ydim-1]
    SP = balanced_seg.balanced_noeval(ydim, train[1])

    try:
        for eidx in xrange(max_epochs):
            n_samples = 0

            # Get new shuffled index for the training set.
            kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)

            for _, train_index in kf:
                uidx += 1
                use_noise.set_value(1.)

                # FIXME: train_index is not used, kf is not used
                bc_idx = balanced_seg.get_bc_idx(SP, ydim)

                # Select the random examples for this minibatch
                y = [train[1][t] for t in bc_idx]
                x = [train[0][t] for t in bc_idx]

                # Get the data in numpy.ndarray format.
                # This swaps the axes!
                # Returns something of shape (minibatch maxlen, n samples)
                x, mask, oh_mask, y = prepare_data(x, y, xdim=xdim, maxlen=maxlen)
                n_samples += x.shape[1]

                cost = f_grad_shared(x, mask, oh_mask, y)
                f_update(lrate)

                if numpy.isnan(cost) or numpy.isinf(cost):
                    print 'NaN or Inf cost detected'
                    return 1., 1., 1.

                if numpy.mod(uidx, dispFreq) == 0:
                    print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost

                if dumppath and numpy.mod(uidx, saveFreq) == 0:
                    print 'Saving...',

                    # save the best param set to date (best_p)
                    if best_p is not None:
                        params = best_p
                    else:
                        params = unzip(tparams)
                    numpy.savez(dumppath, history_errs=history_errs, **params)
                    # pkl.dump(model_options, open('%s.pkl' % dumppath, 'wb'), -1)
                    print 'Done'

                if numpy.mod(uidx, validFreq) == 0:
                    use_noise.set_value(0.)
                    train_err = pred_error(f_pred, prepare_data, train, kf)
                    valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
                    # test_err = pred_error(f_pred, prepare_data, test, kf_test)
                    test_err = 1

                    history_errs.append([valid_err, test_err])

                    # save params only if the validation error is less than the history minimum
                    if (uidx == 0 or
                            valid_err <= numpy.array(history_errs)[:, 0].min()):
                        best_p = unzip(tparams)
                        bad_counter = 0

                    print ('Train ', train_err, 'Valid ', valid_err,
                           'Test ', test_err)

                    # early stopping
                    if (len(history_errs) > patience and
                            valid_err >= numpy.array(history_errs)[:-patience, 0].min()):
                        bad_counter += 1
                        if bad_counter > patience:
                            print 'Early Stop!'
                            estop = True
                            break

            print 'Seen %d samples' % n_samples

            if estop:
                break

    except KeyboardInterrupt:
        print "Training interrupted"

    end_time = time.time()

    if best_p is not None:
        zipp(best_p, tparams)
    else:
        best_p = unzip(tparams)

    use_noise.set_value(0.)
    kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
    train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
    valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
    # test_err = pred_error(f_pred, prepare_data, test, kf_test)
    test_err = 1

    print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err

    if dumppath:
        numpy.savez(dumppath, train_err=train_err,
                    valid_err=valid_err, test_err=test_err,
                    history_errs=history_errs, **best_p)

    print 'The code ran for %d epochs, with %f sec/epoch' % (
        (eidx + 1), (end_time - start_time) / (1. * (eidx + 1)))
    print >> sys.stderr, ('Training took %.1fs' % (end_time - start_time))

    return train_err, valid_err, test_err
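
# Usage sketch (not from this project): train_lstm is driven entirely through the
# keyword arguments above. The hidden size, feature dimension and dataset path in
# this commented example are illustrative placeholders, not values used here.
#
#     train_err, valid_err, test_err = train_lstm(
#         dim_proj=800,                   # placeholder: LSTM hidden size
#         xdim=24,                        # placeholder: feature dimension per frame
#         dataset='path/to/dataset.mat',  # placeholder path
#         dumppath='lstm_model.npz',
#         max_epochs=500,
#         batch_size=100,
#         use_dropout=True)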


def train_blstm(
    # In ACE's context the word embedding can be regarded as the feature vector of each ns frame
    dim_proj=None,  # word embedding dimension and number of LSTM hidden units.
    xdim=None,
    ydim=None,
    format='matrix',
    n_epochs=500,  # The maximum number of epochs to run
    decay_c=0.,  # Weight decay for the classifier applied to the U weights.
    lrate=0.001,  # Learning rate for sgd (not used for adadelta and rmsprop)
    # n_words=10000,  # Vocabulary size
    optimizer=adadelta,  # sgd, adadelta and rmsprop available; sgd is very hard to use and not recommended (probably needs momentum and a decaying learning rate).
    encoder='lstm',  # TODO: can be removed, must be 'lstm'.
    trainpath='../data/cv/',
    trainlist='../cvlist/JK-ch-1234.txt',
    validset='../data/cv/C-ch.mat',
    dumppath='../model/blstm_model.npz',  # The best model will be saved there
    validFreq=-1,  # Compute the validation error after this number of updates.
    saveFreq=-1,  # Save the parameters after every saveFreq updates
    maxlen=None,  # Sequences longer than this get ignored
    batch_size=100,  # The batch size during training.
    valid_batch_size=100,  # The batch size used for validation/test set.
    dataset=None,

    # Parameters for extra options
    noise_std=0.,
    earlystop=True,
    dropout=True,  # if False slightly faster, but worse test error
                   # This frequently needs a bigger model.
    reload_model=None,  # Path to a saved model we want to start from.
    scaling=1):

    # Model options
    model_options = locals().copy()
    print "model options", model_options

    # load_data, prepare_data = get_dataset(dataset)

    print 'Loading data'
    train, valid = load_data_varlen(trainpath=trainpath, trainlist=trainlist,
                                    validset=validset)
    print 'data loaded'

    ydim = numpy.max(train[1]) + 1
    # ydim = numpy.max(train[1])
    print 'ydim = %d' % ydim

    model_options['ydim'] = ydim
    model_options['xdim'] = xdim
    model_options['dim_proj'] = dim_proj

    print 'Building model'
    # This creates the initial parameters as numpy ndarrays.
    # Dict name (string) -> numpy ndarray
    params = init_params(model_options)

    if reload_model:
        load_params('lstm_model.npz', params)

    # This creates Theano Shared Variables from the parameters.
    # Dict name (string) -> Theano Tensor Shared Variable
    # params and tparams have different copies of the weights.
    tparams = init_tparams(params)

    # use_noise is for dropout
    (use_noise, x, mask, oh_mask,
     y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)

    if decay_c > 0.:
        decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
        weight_decay = 0.
        weight_decay += (tparams['U'] ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    f_cost = theano.function([x, mask, oh_mask, y], cost, name='f_cost')

    grads = T.grad(cost, wrt=tparams.values())
    f_grad = theano.function([x, mask, oh_mask, y], grads, name='f_grad')

    lr = T.scalar(name='lr')
    f_grad_shared, f_update = optimizer(lr, tparams, grads,
                                        x, mask, oh_mask, y, cost)

    print 'Optimization'

    kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)

    print "%d train examples" % len(train[0])
    print "%d valid examples" % len(valid[0])

    best_validation_loss = numpy.inf
    history_errs = []
    best_p = None

    n_train_batches = len(train[0]) / batch_size
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is found
    done_looping = False
    improvement_threshold = 0.996  # a relative improvement of this much is considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    training_history = []

    start_time = time.time()
    for epoch in xrange(n_epochs):
        if earlystop and done_looping:
            print 'early-stopping'
            break

        # Get new shuffled index for the training set.
        kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)

        for minibatch_index, minibatch in kf:
            iter = epoch * n_train_batches + minibatch_index
            use_noise.set_value(1.)

            # Select the random examples for this minibatch
            y = [train[1][t] for t in minibatch]
            x = [train[0][t] for t in minibatch]

            # Get the data in numpy.ndarray format.
            # This swaps the axes!
            # Returns something of shape (minibatch maxlen, n samples)
            x, mask, oh_mask, y = prepare_data(x, y, xdim=xdim, maxlen=maxlen)

            cost = f_grad_shared(x, mask, oh_mask, y)
            f_update(lrate)

            if (iter + 1) % validation_frequency == 0:
                use_noise.set_value(0.)
                # this_training_loss = pred_error(f_pred, prepare_data, train, kf)
                this_validation_loss = pred_error(f_pred, prepare_data, valid, kf_valid)
                # training_history.append([iter, this_training_loss, this_validation_loss])
                training_history.append([iter, this_validation_loss])

                # print('epoch %i, minibatch %i/%i, training error %f %%' %
                #       (epoch, minibatch_index + 1, n_train_batches,
                #        this_training_loss * 100.))
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))
                print('iter = %d' % iter)
                print('patience = %d' % patience)

                if this_validation_loss < best_validation_loss:
                    # improve patience if the loss improvement is good enough
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    params = unzip(tparams)
                    numpy.savez(dumppath, training_history=training_history,
                                best_validation_loss=best_validation_loss, **params)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    print('best_validation_loss %f' % best_validation_loss)

            if patience <= iter:
                done_looping = True
                if earlystop:
                    break

    end_time = time.time()

    # final save
    numpy.savez(dumppath, training_history=training_history,
                best_validation_loss=best_validation_loss, **params)

    print(('Optimization complete with best validation score of %f %%, '
           'obtained at iteration %i, ') %
          (best_validation_loss * 100., best_iter + 1))
    print >> sys.stderr, ('The fine-tuning code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
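

# Minimal entry point (sketch). train_blstm already carries project defaults for
# trainpath, trainlist, validset and dumppath in its signature; the dim_proj and
# xdim values below are placeholders (assumptions), not settings from this project.
if __name__ == '__main__':
    train_blstm(dim_proj=800,     # placeholder: LSTM hidden size
                xdim=24,          # placeholder: feature dimension per frame
                n_epochs=500,
                dropout=True,
                earlystop=True)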