        predictions_chunk = np.vstack(predictions_chunk_list)
        predictions_chunk = predictions_chunk[:chunk_length]  # cut off zeros / padding

        print("  compute average over transforms")
        predictions_chunk_avg = predictions_chunk.reshape(
            -1, len(augmentation_transforms), 37).mean(1)

        predictions_list.append(predictions_chunk_avg)

        time_since_start = time.time() - start_time
        print("  %s since start" % load_data.hms(time_since_start))

    all_predictions = np.vstack(predictions_list)

    print("Write predictions to %s" % target_path_valid)
    load_data.save_gz(target_path_valid, all_predictions)

    print("Evaluate")
    rmse_valid = analysis['losses_valid'][-1]
    rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
    print("  MSE (last iteration):\t%.6f" % rmse_valid)
    print("  MSE (augmented):\t%.6f" % rmse_augmented)

if DO_TEST:
    print()
    print("TEST SET")
    print("Compute predictions")
    predictions_list = []
    start_time = time.time()

    for e, (chunk_data, chunk_length) in enumerate(test_gen):
Example #2
else:
    try:
        print('')
        print('Re-evaluating and predicting')

        if DO_VALID:
            evalHist = winsol.evaluate([xs_valid[0], xs_valid[1]],
                                       y_valid=y_valid)
            winsol.save_loss(modelname='model_norm_metrics')
            evalHist = winsol.load_loss(modelname='model_norm_metrics')

            print('')
            predictions = winsol.predict([xs_valid[0], xs_valid[1]])

            print("Write predictions to %s" % target_path_valid)
            load_data.save_gz(target_path_valid, predictions)

    except KeyboardInterrupt:
        print "\ngot keyboard interuption"
        save_exit()
    except ValueError:
        print "\ngot value error, could be the end of the generator in the fit"
        save_exit()
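# Illustration only: the try/except above follows a save-on-interrupt pattern,
# so a Ctrl-C persists state before exiting instead of losing the run.
# run_guarded and its arguments are hypothetical names, not part of this repo.
import sys

def run_guarded(step, save_state):
    try:
        step()
    except KeyboardInterrupt:
        print("\ngot keyboard interruption")
        save_state()
        sys.exit(1)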

evalHist = winsol.load_loss(modelname='model_norm_metrics')

if np.shape(predictions) != np.shape(y_valid):
    raise ValueError(
        'prediction and validation set have different shapes, %s to %s ' %
        (np.shape(predictions), np.shape(y_valid)))
Example #5
for path, weight, weights_separate in zip(predictions_test_paths, out_s,
                                          out_s2):
    # print "  %s" % os.path.basename(path)
    predictions = load_data.load_gz(path)
    predictions_uniform = predictions * (1.0 / len(predictions_test_paths))
    predictions_separate = predictions * weights_separate[None, :]
    predictions *= weight  # inplace scaling

    if blended_predictions is None:
        blended_predictions = predictions
        blended_predictions_separate = predictions_separate
        blended_predictions_uniform = predictions_uniform
    else:
        blended_predictions += predictions
        blended_predictions_separate += predictions_separate
        blended_predictions_uniform += predictions_uniform
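# Illustration only: the loop above accumulates three blends of the per-model
# prediction matrices -- uniform (weight 1/N each), one shared scalar weight
# per model (out_s), and per-output-column weights (out_s2). A toy two-model
# version with made-up weights (all demo_ names are hypothetical):
demo_a = np.full((3, 37), 0.2)
demo_b = np.full((3, 37), 0.6)
demo_uniform = (demo_a + demo_b) / 2.0              # weight 1/N per model
demo_shared = 0.25 * demo_a + 0.75 * demo_b         # scalar weight per model
demo_w = np.linspace(0.0, 1.0, 37)                  # one weight per output column
demo_separate = demo_a * demo_w[None, :] + demo_b * (1.0 - demo_w)[None, :]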

print()
print("Storing blended predictions (shared) in %s" % TARGET_PATH)
load_data.save_gz(TARGET_PATH, blended_predictions)

print()
print("Storing blended predictions (separate) in %s" % TARGET_PATH_SEPARATE)
load_data.save_gz(TARGET_PATH_SEPARATE, blended_predictions_separate)

print()
print("Storing uniformly blended predictions in %s" % TARGET_PATH_UNIFORM)
load_data.save_gz(TARGET_PATH_UNIFORM, blended_predictions_uniform)

print()
print("Done!")
    print "SUBSET: %s" % subset
    print

    if subset == 'train':
        num_images = load_data.num_train
        ids = load_data.train_ids
    elif subset == 'test':
        num_images = load_data.num_test
        ids = load_data.test_ids

    def process(k):
        print "image %d/%d (%s)" % (k + 1, num_images, subset)
        img_id = ids[k]
        img = load_data.load_image(img_id, from_ram=True, subset=subset)
        return estimate_params(img)

    pool = mp.Pool(NUM_PROCESSES)

    estimated_params = pool.map(process, range(num_images), chunksize=100)
    pool.close()
    pool.join()

    # estimated_params = list(map(process, range(num_images)))  # no mp, for debugging

    params_array = np.array(estimated_params)

    target_path = TARGET_PATTERN % subset
    print "Saving to %s..." % target_path
    load_data.save_gz(target_path, params_array)
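# Illustration only: Pool.map above fans per-image work across NUM_PROCESSES
# workers and returns results in input order, so params_array lines up with
# ids. A self-contained toy version (all demo_ names are hypothetical):
import multiprocessing as mp

def demo_square(k):
    return k * k

if __name__ == '__main__':
    with mp.Pool(4) as pool:
        demo_results = pool.map(demo_square, range(10), chunksize=2)
    assert demo_results == [k * k for k in range(10)]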