Example #1
import os
import numpy as np
# ConfigManager, Data, Display, get_model_and_load_weights, compile_model,
# choose_file_with_stdin and the parsed `options` are project helpers defined elsewhere in the repo.

# Commented-out helper that picked the most recently modified file in train_history/:
# def get_most_recent_file_from_dir(dir_path):
#     return sorted(os.listdir('train_history/'), key=lambda x: os.path.getmtime(os.path.join('train_history', x)), reverse=True)[0]

if options.config_file:
    config_file = options.config_file
else:
    # config_file = get_most_recent_file_from_dir('train_history/')
    config_file = choose_file_with_stdin('train_history/')
print("Config file: {}".format(config_file))
cm = ConfigManager(config_file, folder='train_history')
cfg = cm.get_json()

model = get_model_and_load_weights(cfg)
compile_model(model, cfg['training'])

data = Data(options.test_path, cfg['training'], train_test='test')
print("Testing on {} samples".format(data.test_len))
data_gen = data.test_generator()

displayer = Display()
for frame_batch in data_gen:
    print('Frame batch')
    pred = model.predict_on_batch(frame_batch)
    # print pred
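    # Reshape the network's flat 9-value prediction into a 3x3 homography matrix.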
    H = np.reshape(pred, (3,3))
    print('Prediction:\n{}'.format(H))
    frame = np.array(frame_batch[0]*255).astype('uint8')

    warped = displayer.get_warp_overlay(frame, H, dsize=(300,300))
    displayer.display_vstacked(frame, warped, dsize=(600,600))
    # displayer.display_warp_overlay(frame, H, dsize=(600,600))
    print('done')
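
The Display helper is project-specific, but the warp step it performs can be sketched with OpenCV: apply the predicted 3x3 homography to the frame with cv2.warpPerspective and blend the result over the original. This is a minimal sketch under that assumption, not the repo's actual Display implementation; the function name and blending weight are illustrative.

import cv2

def warp_overlay_sketch(frame, H, dsize=(300, 300), alpha=0.5):
    # Warp the frame with the predicted homography, then blend it over a
    # resized copy of the original so both views are visible at once.
    warped = cv2.warpPerspective(frame, H, dsize)
    base = cv2.resize(frame, dsize)
    return cv2.addWeighted(base, 1.0 - alpha, warped, alpha, 0.0)
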
Example #2
import os
import numpy as np
from keras import backend as K
from keras.losses import mean_squared_error  # assumed source of the mean_squared_error used below
# ConfigManager, DataExtractor, Display, get_model_and_load_weights, compile_model
# and choose_file_with_stdin are project helpers imported elsewhere in the repo.

def evaluate(csv_file, eval_path="", s3_bucket="bsivisiondata", train_config=None, write_output_images=False):
    '''
    csv_file: a csv of path_to_image,label rows. Paths are relative to eval_path.
    eval_path: top-level directory; frames/ and annotations/ are expected underneath it. If a csv is
        specified, its paths are resolved relative to this eval_path.
    s3_bucket: name of the S3 bucket that image and label data are read from. If a csv is specified,
        labels are taken from the csv instead.
    train_config: a .json config file from train_history/ describing the training job being evaluated.
    write_output_images: bool, whether to write output images to data/eval_images/<job_name>-step<N>/.

    An example call is sketched after this function.
    '''

    if train_config:
        config_file = train_config
    else:
        # config_file = get_most_recent_file_from_dir('train_history/')
        config_file = choose_file_with_stdin('train_history/')
    print("Config file: {}".format(config_file))
    cm = ConfigManager(config_file, folder='train_history')
    cfg = cm.get_json()

    data = DataExtractor(eval_path, s3_bucket, csv_file=csv_file)
        
    # batch_size = cfg['training']['hyperparameters']['batch_size']
    batch_size = 1
    target_shape = cfg['training']['model']['target_shape']
    data_gen = data.get_processed_data_generator(True, batch_size, target_shape, True)

    print("Testing on {} samples".format(data.get_data_length()))

    displayer = Display()

    model, path_and_prefix = get_model_and_load_weights(cfg)
    compile_model(model, cfg['training'])
    ckpt_prefix = os.path.basename(path_and_prefix)
    
    total_loss = 0
    num_testing_samples = 0
    for X, y, filepaths, originals in data_gen:

        pred = model.predict_on_batch(X)

        # mean_squared_error builds a symbolic tensor here (TF1-style Keras), so it is
        # evaluated against the backend session to get a concrete numpy loss value.
        mse_out = mean_squared_error(y, pred)
        loss = mse_out.eval(session=K.get_session())
        total_loss += loss[0]
        num_testing_samples += 1
        print("Loss: {}".format(loss[0]))

        H = np.reshape(pred, (3,3))
        # print('Prediction:\n{}'.format(H))
        
        if write_output_images: 
            frame = originals[0]
            filepath = filepaths[0]
            warped = displayer.get_warp_overlay(frame, H, dsize=(1000,425))
            vstacked = displayer.get_vstacked(frame, warped, dsize=(1000,850))
            displayer.put_rectangle(vstacked, (10, 10), (800, 80))
            displayer.put_text(vstacked, "Pred: {}".format(np.array_str(pred, precision=2)), text_loc=(10,30))
            displayer.put_text(vstacked, "Truth: {}".format(np.array_str(y[0], precision=2)), text_loc=(10,50))
            displayer.put_text(vstacked, "loss: {}".format(loss[0]), text_loc=(10,70))
            step_number = path_and_prefix.split('-')[-1]
            job_name_and_step_num = cfg['sagemaker_job_info']['job_name'] + '-step' + str(step_number)
            job_name_dir = os.path.join('data/eval_images', job_name_and_step_num)
            if not os.path.exists(job_name_dir):
                os.makedirs(job_name_dir)
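            # Prefix the filename with the per-sample loss (scientific notation) so the
            # error magnitude is visible directly in the saved image's name.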
            filename = "{:.2E}-{}".format(loss[0], os.path.basename(filepath))
            displayer.save_image(os.path.join(job_name_dir, filename), vstacked)
        # displayer.display_vstacked(frame, warped, dsize=(1000,850))
        # displayer.display_warp_overlay(frame, H, dsize=(600,600))
    average_loss = total_loss / num_testing_samples
    print("Average loss: {}".format(average_loss))
    return average_loss, path_and_prefix
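
A hedged sketch of how evaluate() might be invoked; the csv, eval_path and config filenames are placeholders, not values taken from the repo.

if __name__ == '__main__':
    # Illustrative arguments; only the s3_bucket default comes from the function signature.
    avg_loss, ckpt = evaluate(
        csv_file='labels.csv',
        eval_path='data/eval/',
        s3_bucket='bsivisiondata',
        train_config='example_config.json',  # assumed to live in train_history/
        write_output_images=True,
    )
    print("Checkpoint {} scored average loss {}".format(ckpt, avg_loss))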