h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT * DIM), dtype='float32')

# Initial load train dataset
tr_feeder = load_data(train_feeder)

### Handling the resume option:
if RESUME:
    # Check if checkpoint from previous run is not corrupted.
    # Then overwrite some of the variables above.
    iters_to_consume, res_path, epoch, total_iters,\
        [lowest_valid_cost, corresponding_test_cost, test_cost] = \
        lib.resumable(path=FOLDER_PREFIX,
                      iter_key=iter_str,
                      epoch_key=epoch_str,
                      add_resume_counter=True,
                      other_keys=[lowest_valid_str,
                                  corresp_test_str,
                                  test_nll_str])
    # At this point we saved the pkl file.
    last_print_iters = total_iters
    print("### RESUMING JOB FROM EPOCH {}, ITER {}".format(epoch, total_iters))
    # Consumes this much iters to get to the last point in training data.
    # NOTE(review): assumes `time` was imported as `from time import time`
    # at the top of the file -- confirm (the sibling chunk uses time.time()).
    consume_time = time()
    for i in range(iters_to_consume):
        next(tr_feeder)
    consume_time = time() - consume_time
    print("Train data ready in {:.2f}secs after consuming {} minibatches."
          .format(consume_time, iters_to_consume))
    # Restore the model parameters saved in the checkpoint.
    lib.load_params(res_path)
OVERLAP, Q_LEVELS, Q_ZERO, Q_TYPE) if RESUME: # Check if checkpoint from previous run is not corrupted. # Then overwrite some of the variables above. iters_to_consume, res_path, epoch, total_iters,\ [lowest_valid_cost, corresponding_test_cost, test_cost] = \ lib.resumable(path=FOLDER_PREFIX, iter_key=iter_str, epoch_key=epoch_str, add_resume_counter=True, other_keys=[lowest_valid_str, corresp_test_str, test_nll_str]) # At this point we saved the pkl file. last_print_iters = total_iters print "### RESUMING JOB FROM EPOCH {}, ITER {}".format(epoch, total_iters) # Consumes this much iters to get to the last point in training data. consume_time = time.time() for i in xrange(iters_to_consume): tr_feeder.next() consume_time = time.time() - consume_time print "Train data ready in {:.2f}secs after consuming {} minibatches.".\ format(consume_time, iters_to_consume) lib.load_params(res_path)