Esempio n. 1
0
def train(config):
    """Run one pretraining epoch of the SOL network and return the mean loss.

    NOTE(review): the ``config`` argument is unused; all settings come from
    the module-level ``pretrain_config`` (plus ``solf``/``optimizer`` and the
    other module globals) -- confirm this is intentional.
    """
    file_list = load_file_list(pretrain_config['training_set'])
    dataset = SolDataset(
        file_list,
        rescale_range=pretrain_config['sol']['training_rescale_range'],
        transform=CropTransform(pretrain_config['sol']['crop_params']))

    loader = DataLoader(
        dataset,
        batch_size=pretrain_config['sol']['batch_size'],
        shuffle=True,
        num_workers=0,
        collate_fn=sol.sol_dataset.collate)

    # Cap each "epoch" at a fixed number of batches.
    per_epoch = int(pretrain_config['sol']['images_per_epoch'] /
                    pretrain_config['sol']['batch_size'])
    loader = DatasetWrapper(loader, per_epoch)

    if not os.path.exists("snapshots/sol_train"):
        os.makedirs("snapshots/sol_train")

    solf.train()
    loss_total = 0.0
    batch_count = 0.0

    for batch_idx, batch in enumerate(loader):
        image = Variable(batch['img'].type(dtype), requires_grad=False)

        # A None 'sol_gt' means the image has no GT positions; the
        # alignment loss handles None correctly in that case.
        target = None
        if batch['sol_gt'] is not None:
            target = Variable(batch['sol_gt'].type(dtype),
                              requires_grad=False)

        predictions = solf(image)
        loss = alignment_loss(predictions, target, batch['label_sizes'],
                              alpha_alignment, alpha_backprop)

        # Dump a visualization of the predictions over the input image.
        vis = image[0].data.cpu().numpy().transpose([2, 1, 0])
        vis = ((vis + 1) * 128).astype(np.uint8)
        vis = vis.copy()
        vis = drawing.draw_sol_torch(predictions, vis)
        cv2.imwrite("snapshots/sol_train/{}.png".format(batch_idx), vis)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_total += loss.data.cpu().numpy()
        batch_count += 1
        predictions = None
        loss = None

        gc.collect()

    return loss_total / batch_count
Esempio n. 2
0
    def val():
        """Run one validation pass of the SOL network and return the mean loss.

        Uses the enclosing-scope ``solf`` model, ``pretrain_config`` and
        ``config``; writes per-image prediction visualizations to
        ``snapshots/<name>_val``.
        """
        test_set_list = load_file_list(pretrain_config['validation_set'])
        test_dataset = SolDataset(
            test_set_list,
            rescale_range=pretrain_config['sol']['validation_rescale_range'],
            transform=None)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=0,
                                     collate_fn=sol.sol_dataset.collate,
                                     pin_memory=True)

        # Derive the output folder name from the evaluation output path.
        writep = config['evaluation']['output_path'].split('_')[0]
        writep = 'snapshots/{}_val'.format(writep)
        if not os.path.exists(writep):
            os.makedirs(writep)

        solf.eval()
        sum_loss = 0.0
        steps = 0.0

        for step_i, x in enumerate(test_dataloader):
            img = x['img'].type(dtype)

            # BUGFIX: reset sol_gt every iteration. Previously an image with
            # no GT positions (sol_gt is None) either raised a NameError on
            # the first batch or silently reused the previous batch's GT.
            # None means no GT positions; the alignment loss handles None
            # correctly.
            sol_gt = None
            if x['sol_gt'] is not None:
                sol_gt = x['sol_gt'].type(dtype)

            predictions = solf(img)
            loss = alignment_loss(predictions, sol_gt, x['label_sizes'],
                                  alpha_alignment, alpha_backprop)

            # Write images to file for visualization
            org_img = img[0].data.cpu().numpy().transpose([2, 1, 0])
            org_img = ((org_img + 1) * 128).astype(np.uint8)
            org_img = org_img.copy()
            org_img = drawing.draw_sol_torch(predictions, org_img)
            cv2.imwrite(os.path.join(writep, "{}.png".format(step_i)), org_img)

            sum_loss += loss.data.cpu().numpy()
            steps += 1
            predictions = None
            loss = None

            gc.collect()

        return sum_loss / steps
Esempio n. 3
0
def training_step(config):

    train_config = config['training']

    allowed_training_time = train_config['sol']['reset_interval']
    init_training_time = time.time()

    training_set_list = load_file_list(train_config['training_set'])
    train_dataset = SolDataset(
        training_set_list,
        rescale_range=train_config['sol']['training_rescale_range'],
        transform=CropTransform(train_config['sol']['crop_params']))

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_config['sol']['batch_size'],
                                  shuffle=True,
                                  num_workers=0,
                                  collate_fn=sol_dataset.collate)

    batches_per_epoch = int(train_config['sol']['images_per_epoch'] /
                            train_config['sol']['batch_size'])
    train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

    test_set_list = load_file_list(train_config['validation_set'])
    test_dataset = SolDataset(
        test_set_list,
        rescale_range=train_config['sol']['validation_rescale_range'],
        random_subset_size=train_config['sol']['validation_subset_size'],
        transform=None)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=sol_dataset.collate)

    alpha_alignment = train_config['sol']['alpha_alignment']
    alpha_backprop = train_config['sol']['alpha_backprop']

    sol, lf, hw = init_model(config, only_load='sol')

    dtype = torch.cuda.FloatTensor

    lowest_loss = np.inf
    lowest_loss_i = 0
    epoch = -1
    while True:  #This ends on a break based on the current itme
        epoch += 1
        print "Train Time:", (
            time.time() -
            init_training_time), "Allowed Time:", allowed_training_time

        sol.eval()
        sum_loss = 0.0
        steps = 0.0
        start_time = time.time()
        for step_i, x in enumerate(test_dataloader):
            img = Variable(x['img'].type(dtype), requires_grad=False)

            sol_gt = None
            if x['sol_gt'] is not None:
                sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False)

            predictions = sol(img)
            predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
            loss = alignment_loss(predictions, sol_gt, x['label_sizes'],
                                  alpha_alignment, alpha_backprop)
            sum_loss += loss.data[0]
            steps += 1

        if epoch == 0:
            print "First Validation Step Complete"
            print "Benchmark Validation CER:", sum_loss / steps
            lowest_loss = sum_loss / steps

            sol, lf, hw = init_model(config,
                                     sol_dir='current',
                                     only_load='sol')

            optimizer = torch.optim.Adam(
                sol.parameters(), lr=train_config['sol']['learning_rate'])
            optim_path = os.path.join(train_config['snapshot']['current'],
                                      "sol_optim.pt")
            if os.path.exists(optim_path):
                print "Loading Optim Settings"
                optimizer.load_state_dict(safe_load.torch_state(optim_path))
            else:
                print "Failed to load Optim Settings"

        elif lowest_loss > sum_loss / steps:
            lowest_loss = sum_loss / steps
            print "Saving Best"

            dirname = train_config['snapshot']['best_validation']
            if not len(dirname) != 0 and os.path.exists(dirname):
                os.makedirs(dirname)

            save_path = os.path.join(dirname, "sol.pt")

            torch.save(sol.state_dict(), save_path)
            lowest_loss_i = epoch

        print "Test Loss", sum_loss / steps, lowest_loss
        print "Time:", time.time() - start_time
        print ""

        print "Epoch", epoch

        if allowed_training_time < (time.time() - init_training_time):
            print "Out of time. Saving current state and exiting..."
            dirname = train_config['snapshot']['current']
            if not len(dirname) != 0 and os.path.exists(dirname):
                os.makedirs(dirname)

            save_path = os.path.join(dirname, "sol.pt")
            torch.save(sol.state_dict(), save_path)

            optim_path = os.path.join(dirname, "sol_optim.pt")
            torch.save(optimizer.state_dict(), optim_path)
            break

        sol.train()
        sum_loss = 0.0
        steps = 0.0
        start_time = time.time()
        for step_i, x in enumerate(train_dataloader):
            img = Variable(x['img'].type(dtype), requires_grad=False)

            sol_gt = None
            if x['sol_gt'] is not None:
                sol_gt = Variable(x['sol_gt'].type(dtype), requires_grad=False)

            predictions = sol(img)
            predictions = transformation_utils.pt_xyrs_2_xyxy(predictions)
            loss = alignment_loss(predictions, sol_gt, x['label_sizes'],
                                  alpha_alignment, alpha_backprop)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            sum_loss += loss.data[0]
            steps += 1

        print "Train Loss", sum_loss / steps
        print "Real Epoch", train_dataloader.epoch
        print "Time:", time.time() - start_time
Esempio n. 4
0
import cv2
import json
import yaml
import sys
import os
import math

from utils import transformation_utils, drawing

# Load the YAML config whose path is given as the first CLI argument.
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects from tagged input -- prefer yaml.safe_load unless the config
# files are fully trusted.
with open(sys.argv[1]) as f:
    config = yaml.load(f)

sol_network_config = config['network']['sol']
pretrain_config = config['pretraining']

# Build the SOL pretraining dataset with rescale + crop augmentation.
training_set_list = load_file_list(pretrain_config['training_set'])
train_dataset = SolDataset(
    training_set_list,
    rescale_range=pretrain_config['sol']['training_rescale_range'],
    transform=CropTransform(pretrain_config['sol']['crop_params']))

train_dataloader = DataLoader(train_dataset,
                              batch_size=pretrain_config['sol']['batch_size'],
                              shuffle=True,
                              num_workers=0,
                              collate_fn=sol.sol_dataset.collate)

# Cap each epoch at a fixed number of batches via the wrapper.
batches_per_epoch = int(pretrain_config['sol']['images_per_epoch'] /
                        pretrain_config['sol']['batch_size'])
train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)
Esempio n. 5
0
def main():
    """Pre-extract handwriting line images for every page in the training
    and validation sets.

    For each page, follows the ground-truth line-follower ('lf') paths,
    samples the page image along each path with a bilinear interpolation
    grid, and writes the resulting straightened line crops to the 'hw_path'
    given in the GT json.
    """
    config_path = sys.argv[1]

    with open(config_path) as f:
        config = yaml.load(f)

    for dataset_lookup in ['training_set', 'validation_set']:
        set_list = load_file_list(config['training'][dataset_lookup])
        dataset = AlignmentDataset(set_list, None)
        dataloader = DataLoader(dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=8,
                                collate_fn=alignment_dataset.collate)

        # Bilinear interpolation weights over an output_grid_size^2 patch:
        # t varies along one axis, s = t.T along the other.
        output_grid_size = config['network']['hw']['input_height']
        t = ((np.arange(output_grid_size) + 0.5) /
             float(output_grid_size))[:, None].astype(np.float32)
        t = np.repeat(t, axis=1, repeats=output_grid_size)
        t = Variable(torch.from_numpy(t), requires_grad=False, volatile=True)
        s = t.t()

        t = t[:, :, None]
        s = s[:, :, None]

        # One weight per corner point of a path segment, in the same order
        # the corners are packed into grid_pts below.
        interpolations = torch.cat([
            (1 - t) * s,
            (1 - t) * (1 - s),
            t * s,
            t * (1 - s),
        ],
                                   dim=-1)

        for l in dataloader:
            for l_i in l:
                img = Variable(l_i['full_img'],
                               requires_grad=False,
                               volatile=True)
                renorm_matrix = transformation_utils.compute_renorm_matrix(
                    img)[None, ...]

                print l_i['img_key']

                # Group GT lf paths by their length so equal-length paths
                # can be batched together.
                all_lf_paths = defaultdict(list)
                for j, item in enumerate(l_i['gt_json']):

                    if 'lf' not in item or 'hw_path' not in item:
                        continue

                    start = time.time()
                    hw_path = item['hw_path']

                    lf_pts = item['lf']
                    if 'after_lf' in item:
                        lf_pts += item['after_lf']

                    # Each lf step becomes a 3x2 homogeneous point pair.
                    lf_path = []
                    for i, step in enumerate(lf_pts):
                        x0 = step['x0']
                        x1 = step['x1']
                        y0 = step['y0']
                        y1 = step['y1']

                        pt = torch.Tensor([[x1, x0], [y1, y0], [1, 1]])[None,
                                                                        ...]
                        pt = Variable(pt, requires_grad=False, volatile=True)
                        lf_path.append(pt)

                    all_lf_paths[len(lf_path)].append((lf_path, hw_path))

                for cnt, pairs in all_lf_paths.iteritems():
                    lf_paths = [p[0] for p in pairs]
                    hw_paths = [p[1] for p in pairs]

                    # Stack the i-th step of every path into one batched
                    # tensor per step position.
                    to_join = [[] for i in xrange(cnt)]
                    for lf_path in lf_paths:
                        for i in xrange(len(lf_path)):
                            to_join[i].append(lf_path[i])

                    for i in xrange(len(to_join)):
                        to_join[i] = torch.cat(to_join[i], dim=0)

                    lf_path = to_join

                    # Build the sampling grid segment-by-segment: each pair
                    # of consecutive steps gives 4 corner points whose
                    # interpolated positions are renormalized to [-1, 1].
                    grid_line = []
                    for i in xrange(0, len(lf_path) - 1):
                        pts_0 = lf_path[i]
                        pts_1 = lf_path[i + 1]
                        pts = torch.cat([pts_0, pts_1], dim=2)

                        grid_pts = renorm_matrix.matmul(pts)

                        grid = interpolations[None, :, :,
                                              None, :] * grid_pts[:, None,
                                                                  None, :, :]
                        grid = grid.sum(dim=-1)[..., :2]

                        grid_line.append(grid)

                    grid_line = torch.cat(grid_line, dim=1)

                    expand_img = img.expand(grid_line.size(0), img.size(1),
                                            img.size(2), img.size(3))

                    # Sample the page along the paths to get the line crops.
                    line = torch.nn.functional.grid_sample(
                        expand_img.transpose(2, 3), grid_line)
                    line = line.transpose(2, 3)

                    for line_i, line_i_path in zip(line, hw_paths):
                        # Undo the [-1, 1] normalization before writing.
                        line_i = line_i.transpose(0, 1).transpose(1, 2)
                        line_i = (line_i + 1) * 128
                        l_np = line_i.data.cpu().numpy()

                        cv2.imwrite(line_i_path, l_np)
Esempio n. 6
0
def alignment_step(config,
                   dataset_lookup=None,
                   model_mode='best_validation',
                   percent_range=None):
    """Align E2E predictions to GT lines over one pass of a dataset.

    Runs the full SOL->LF->HW pipeline on every page in
    ``config['training'][dataset_lookup]``, aligns the decoded lines to the
    GT lines, persists any per-line improvements, and (validation set only)
    grid-searches the post-processing hyperparameters.

    Returns a tuple: (best (key, mean CER) of the search or None, mean
    aligned CER, mean best-ever CER, sol, lf, hw).
    """
    set_list = load_file_list(config['training'][dataset_lookup])

    # Optionally restrict to a fractional slice of the file list.
    if percent_range is not None:
        start = int(len(set_list) * percent_range[0])
        end = int(len(set_list) * percent_range[1])
        set_list = set_list[start:end]

    dataset = AlignmentDataset(set_list, None)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=0,
                            collate_fn=alignment_dataset.collate)

    char_set_path = config['network']['hw']['char_set_path']

    with open(char_set_path) as f:
        char_set = json.load(f)

    # JSON keys are strings; convert back to int indices.
    idx_to_char = {}
    for k, v in char_set['idx_to_char'].iteritems():
        idx_to_char[int(k)] = v

    sol, lf, hw = init_model(config,
                             sol_dir=model_mode,
                             lf_dir=model_mode,
                             hw_dir=model_mode)

    e2e = E2EModel(sol, lf, hw)
    dtype = torch.cuda.FloatTensor
    e2e.eval()

    # Hyperparameter grid for the post-processing search.
    post_processing_config = config['training']['alignment'][
        'validation_post_processing']
    sol_thresholds = post_processing_config['sol_thresholds']
    sol_thresholds_idx = range(len(sol_thresholds))

    lf_nms_ranges = post_processing_config['lf_nms_ranges']
    lf_nms_ranges_idx = range(len(lf_nms_ranges))

    lf_nms_thresholds = post_processing_config['lf_nms_thresholds']
    lf_nms_thresholds_idx = range(len(lf_nms_thresholds))

    results = defaultdict(list)
    aligned_results = []
    best_ever_results = []

    prev_time = time.time()
    cnt = 0
    a = 0
    for x in dataloader:
        sys.stdout.flush()
        a += 1

        # Progress report every 100 pages.
        if a % 100 == 0:
            print a, np.mean(aligned_results)

        x = x[0]
        if x is None:
            print "Skipping alignment because it returned None"
            continue

        img = x['resized_img'].numpy()[0, ...].transpose([2, 1, 0])
        img = ((img + 1) * 128).astype(np.uint8)

        full_img = x['full_img'].numpy()[0, ...].transpose([2, 1, 0])
        full_img = ((full_img + 1) * 128).astype(np.uint8)

        gt_lines = x['gt_lines']
        gt = "\n".join(gt_lines)

        out_original = e2e(x)
        if out_original is None:
            #TODO: not a good way to handle this, but fine for now
            print "Possible Error: Skipping alignment on image"
            continue

        out_original = e2e_postprocessing.results_to_numpy(out_original)
        out_original['idx'] = np.arange(out_original['sol'].shape[0])
        e2e_postprocessing.trim_ends(out_original)
        decoded_hw, decoded_raw_hw = e2e_postprocessing.decode_handwriting(
            out_original, idx_to_char)
        # Match each decoded line to its most likely GT line.
        pick, costs = e2e_postprocessing.align_to_gt_lines(
            decoded_hw, gt_lines)

        # Track and persist the best decoding ever seen for each GT line.
        best_ever_pred_lines, improved_idxs = validation_utils.update_ideal_results(
            pick, costs, decoded_hw, x['gt_json'])
        validation_utils.save_improved_idxs(
            improved_idxs, decoded_hw, decoded_raw_hw, out_original, x,
            config['training'][dataset_lookup]['json_folder'])

        best_ever_pred_lines = "\n".join(best_ever_pred_lines)
        error = error_rates.cer(gt, best_ever_pred_lines)
        best_ever_results.append(error)

        aligned_pred_lines = [decoded_hw[i] for i in pick]
        aligned_pred_lines = "\n".join(aligned_pred_lines)
        error = error_rates.cer(gt, aligned_pred_lines)
        aligned_results.append(error)

        if dataset_lookup == "validation_set":
            # We only run the hyperparameter postprocessing search for the
            # validation set.
            for key in itertools.product(sol_thresholds_idx, lf_nms_ranges_idx,
                                         lf_nms_thresholds_idx):
                i, j, k = key
                sol_threshold = sol_thresholds[i]
                lf_nms_range = lf_nms_ranges[j]
                lf_nms_threshold = lf_nms_thresholds[k]

                # NOTE(review): copy.copy is shallow; the calls below are
                # assumed not to mutate arrays shared with out_original --
                # confirm against e2e_postprocessing.
                out = copy.copy(out_original)

                out = e2e_postprocessing.postprocess(
                    out,
                    sol_threshold=sol_threshold,
                    lf_nms_params={
                        "overlap_range": lf_nms_range,
                        "overlap_threshold": lf_nms_threshold
                    })
                order = e2e_postprocessing.read_order(out)
                e2e_postprocessing.filter_on_pick(out, order)

                e2e_postprocessing.trim_ends(out)

                preds = [decoded_hw[i] for i in out['idx']]
                pred = "\n".join(preds)

                error = error_rates.cer(gt, pred)

                results[key].append(error)

    # Summarize the search (validation set only); otherwise sum_results stays
    # None because no hyperparameter search was performed.
    sum_results = None
    if dataset_lookup == "validation_set":
        sum_results = {}
        for k, v in results.iteritems():
            sum_results[k] = np.mean(v)

        # Keep only the (key, mean_error) pair with the lowest error.
        sum_results = sorted(sum_results.iteritems(),
                             key=operator.itemgetter(1))
        sum_results = sum_results[0]

    return sum_results, np.mean(aligned_results), np.mean(
        best_ever_results), sol, lf, hw
Esempio n. 7
0
def training_step(config):
    """Run validate/train cycles for the handwriting (HW) network until the
    time budget expires, then snapshot the current model/optimizer state.

    Validation "loss" is the character error rate (CER) of naive CTC
    decoding; training minimizes CTC loss while also reporting training CER.
    On the first validation pass the model/optimizer are reloaded from the
    'current' snapshot so training resumes where it left off.
    """
    hw_network_config = config['network']['hw']
    train_config = config['training']

    allowed_training_time = train_config['hw']['reset_interval']
    init_training_time = time.time()

    char_set_path = hw_network_config['char_set_path']

    with open(char_set_path) as f:
        char_set = json.load(f)

    # JSON keys are strings; convert back to int indices.
    idx_to_char = {}
    for k, v in char_set['idx_to_char'].iteritems():
        idx_to_char[int(k)] = v

    training_set_list = load_file_list(train_config['training_set'])
    train_dataset = HwDataset(training_set_list,
                              char_set['char_to_idx'],
                              augmentation=True,
                              img_height=hw_network_config['input_height'])

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_config['hw']['batch_size'],
                                  shuffle=False,
                                  num_workers=0,
                                  collate_fn=hw_dataset.collate)

    # Cap each epoch at a fixed number of batches.
    batches_per_epoch = int(train_config['hw']['images_per_epoch'] /
                            train_config['hw']['batch_size'])
    train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

    test_set_list = load_file_list(train_config['validation_set'])
    test_dataset = HwDataset(
        test_set_list,
        char_set['char_to_idx'],
        img_height=hw_network_config['input_height'],
        random_subset_size=train_config['hw']['validation_subset_size'])

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=train_config['hw']['batch_size'],
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=hw_dataset.collate)

    # Start from the best-validation snapshot for the benchmark pass.
    hw = cnn_lstm.create_model(hw_network_config)
    hw_path = os.path.join(train_config['snapshot']['best_validation'],
                           "hw.pt")
    hw_state = safe_load.torch_state(hw_path)
    hw.load_state_dict(hw_state)
    hw.cuda()
    criterion = CTCLoss()
    dtype = torch.cuda.FloatTensor

    lowest_loss = np.inf
    lowest_loss_i = 0
    # Effectively an infinite loop; exits on the time-budget break below.
    for epoch in xrange(10000000000):
        # --- Validation pass: mean CER over the validation subset ---
        sum_loss = 0.0
        steps = 0.0
        hw.eval()
        for x in test_dataloader:
            sys.stdout.flush()
            line_imgs = Variable(x['line_imgs'].type(dtype),
                                 requires_grad=False,
                                 volatile=True)
            labels = Variable(x['labels'], requires_grad=False, volatile=True)
            label_lengths = Variable(x['label_lengths'],
                                     requires_grad=False,
                                     volatile=True)

            preds = hw(line_imgs).cpu()

            # (seq, batch, classes) -> (batch, seq, classes) for decoding.
            output_batch = preds.permute(1, 0, 2)
            out = output_batch.data.cpu().numpy()

            for i, gt_line in enumerate(x['gt']):
                logits = out[i, ...]
                pred, raw_pred = string_utils.naive_decode(logits)
                pred_str = string_utils.label2str_single(
                    pred, idx_to_char, False)
                cer = error_rates.cer(gt_line, pred_str)
                sum_loss += cer
                steps += 1

        if epoch == 0:
            # First pass benchmarks the 'best' snapshot, then reloads the
            # 'current' snapshot (and its optimizer state) for training.
            print "First Validation Step Complete"
            print "Benchmark Validation CER:", sum_loss / steps
            lowest_loss = sum_loss / steps

            hw = cnn_lstm.create_model(hw_network_config)
            hw_path = os.path.join(train_config['snapshot']['current'],
                                   "hw.pt")
            hw_state = safe_load.torch_state(hw_path)
            hw.load_state_dict(hw_state)
            hw.cuda()

            optimizer = torch.optim.Adam(
                hw.parameters(), lr=train_config['hw']['learning_rate'])
            optim_path = os.path.join(train_config['snapshot']['current'],
                                      "hw_optim.pt")
            if os.path.exists(optim_path):
                print "Loading Optim Settings"
                optimizer.load_state_dict(safe_load.torch_state(optim_path))
            else:
                print "Failed to load Optim Settings"

        if lowest_loss > sum_loss / steps:
            lowest_loss = sum_loss / steps
            print "Saving Best"

            dirname = train_config['snapshot']['best_validation']
            # NOTE(review): this condition looks inverted -- it calls
            # makedirs only when dirname is empty AND already exists, so a
            # missing snapshot dir is never created. Probably intended:
            # len(dirname) != 0 and not os.path.exists(dirname).
            if not len(dirname) != 0 and os.path.exists(dirname):
                os.makedirs(dirname)

            save_path = os.path.join(dirname, "hw.pt")

            torch.save(hw.state_dict(), save_path)
            lowest_loss_i = epoch

        print "Test Loss", sum_loss / steps, lowest_loss
        print ""

        if allowed_training_time < (time.time() - init_training_time):
            print "Out of time: Exiting..."
            break

        # --- Training pass (one batch-capped epoch) ---
        print "Epoch", epoch
        sum_loss = 0.0
        steps = 0.0
        hw.train()
        for i, x in enumerate(train_dataloader):

            line_imgs = Variable(x['line_imgs'].type(dtype),
                                 requires_grad=False)
            labels = Variable(x['labels'], requires_grad=False)
            label_lengths = Variable(x['label_lengths'], requires_grad=False)

            preds = hw(line_imgs).cpu()

            output_batch = preds.permute(1, 0, 2)
            out = output_batch.data.cpu().numpy()

            # NOTE(review): the inner 'i' below shadows the enumerate index
            # above; harmless here because the outer 'i' is not used after
            # the inner loop, but worth renaming.
            for i, gt_line in enumerate(x['gt']):
                logits = out[i, ...]
                pred, raw_pred = string_utils.naive_decode(logits)
                pred_str = string_utils.label2str_single(
                    pred, idx_to_char, False)
                cer = error_rates.cer(gt_line, pred_str)
                sum_loss += cer
                steps += 1

            # CTC needs the (uniform) sequence length for every batch item.
            batch_size = preds.size(1)
            preds_size = Variable(torch.IntTensor([preds.size(0)] *
                                                  batch_size))

            loss = criterion(preds, labels, preds_size, label_lengths)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print "Train Loss", sum_loss / steps
        print "Real Epoch", train_dataloader.epoch

    ## Save current snapshots for next iteration
    print "Saving Current"
    dirname = train_config['snapshot']['current']
    # NOTE(review): same inverted makedirs condition as above.
    if not len(dirname) != 0 and os.path.exists(dirname):
        os.makedirs(dirname)

    save_path = os.path.join(dirname, "hw.pt")
    torch.save(hw.state_dict(), save_path)

    optim_path = os.path.join(dirname, "hw_optim.pt")
    torch.save(optimizer.state_dict(), optim_path)
Esempio n. 8
0
def training_step(config):
    """Run validate/train cycles for the line-follower (LF) network until the
    time budget expires, then snapshot the current model/optimizer state.

    Validation "loss" is the CER obtained by running the (frozen) HW network
    on lines sampled along the predicted LF path; training minimizes the LF
    point loss against the GT path. On the first validation pass the LF
    model/optimizer are reloaded from the 'current' snapshot.
    """
    char_set_path = config['network']['hw']['char_set_path']

    with open(char_set_path) as f:
        char_set = json.load(f)

    # JSON keys are strings; convert back to int indices.
    idx_to_char = {}
    for k, v in char_set['idx_to_char'].iteritems():
        idx_to_char[int(k)] = v

    train_config = config['training']

    allowed_training_time = train_config['lf']['reset_interval']
    init_training_time = time.time()

    training_set_list = load_file_list(train_config['training_set'])
    train_dataset = LfDataset(training_set_list, augmentation=True)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=0,
                                  collate_fn=lf_dataset.collate)
    # Cap each epoch at a fixed number of batches.
    batches_per_epoch = int(train_config['lf']['images_per_epoch'] /
                            train_config['lf']['batch_size'])
    train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

    test_set_list = load_file_list(train_config['validation_set'])
    test_dataset = LfDataset(
        test_set_list,
        random_subset_size=train_config['lf']['validation_subset_size'])
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=lf_dataset.collate)

    # HW is only used (frozen) to score the sampled lines during validation.
    _, lf, hw = init_model(config, only_load=['lf', 'hw'])
    hw.eval()

    dtype = torch.cuda.FloatTensor

    lowest_loss = np.inf
    lowest_loss_i = 0
    # Effectively an infinite loop; exits on the time-budget break below.
    for epoch in xrange(10000000):
        # --- Validation pass: CER of HW run on LF-sampled lines ---
        lf.eval()
        sum_loss = 0.0
        steps = 0.0
        start_time = time.time()
        for step_i, x in enumerate(test_dataloader):
            if x is None:
                continue
            #Only single batch for now
            x = x[0]
            if x is None:
                continue

            positions = [
                Variable(x_i.type(dtype), requires_grad=False)[None, ...]
                for x_i in x['lf_xyrs']
            ]
            xy_positions = [
                Variable(x_i.type(dtype), requires_grad=False)[None, ...]
                for x_i in x['lf_xyxy']
            ]
            img = Variable(x['img'].type(dtype), requires_grad=False)[None,
                                                                      ...]

            #There might be a way to handle this case later,
            #but for now we will skip it
            if len(xy_positions) <= 1:
                print "Skipping"
                continue

            # Follow the line from its start position for the GT path length.
            grid_line, _, _, xy_output = lf(img,
                                            positions[:1],
                                            steps=len(positions),
                                            skip_grid=False)

            # Sample the page along the predicted path and decode with HW.
            line = torch.nn.functional.grid_sample(img.transpose(2, 3),
                                                   grid_line)
            line = line.transpose(2, 3)
            predictions = hw(line)

            out = predictions.permute(1, 0, 2).data.cpu().numpy()
            gt_line = x['gt']
            pred, raw_pred = string_utils.naive_decode(out[0])
            pred_str = string_utils.label2str_single(pred, idx_to_char, False)
            cer = error_rates.cer(gt_line, pred_str)
            sum_loss += cer
            steps += 1

        if epoch == 0:
            # First pass benchmarks the 'best' snapshot, then reloads the
            # 'current' snapshot (and its optimizer state) for training.
            print "First Validation Step Complete"
            print "Benchmark Validation Loss:", sum_loss / steps
            lowest_loss = sum_loss / steps

            _, lf, _ = init_model(config, lf_dir='current', only_load="lf")

            optimizer = torch.optim.Adam(
                lf.parameters(), lr=train_config['lf']['learning_rate'])
            optim_path = os.path.join(train_config['snapshot']['current'],
                                      "lf_optim.pt")
            if os.path.exists(optim_path):
                print "Loading Optim Settings"
                optimizer.load_state_dict(safe_load.torch_state(optim_path))
            else:
                print "Failed to load Optim Settings"

        if lowest_loss > sum_loss / steps:
            lowest_loss = sum_loss / steps
            print "Saving Best"

            dirname = train_config['snapshot']['best_validation']
            # NOTE(review): this condition looks inverted -- it calls
            # makedirs only when dirname is empty AND already exists, so a
            # missing snapshot dir is never created. Probably intended:
            # len(dirname) != 0 and not os.path.exists(dirname).
            if not len(dirname) != 0 and os.path.exists(dirname):
                os.makedirs(dirname)

            save_path = os.path.join(dirname, "lf.pt")

            torch.save(lf.state_dict(), save_path)
            # NOTE(review): sibling training steps set this to 'epoch';
            # here it is reset to 0 -- confirm which is intended.
            lowest_loss_i = 0

        test_loss = sum_loss / steps

        print "Test Loss", sum_loss / steps, lowest_loss
        print "Time:", time.time() - start_time
        print ""

        if allowed_training_time < (time.time() - init_training_time):
            print "Out of time: Exiting..."
            break

        # --- Training pass (one batch-capped epoch) ---
        print "Epoch", epoch
        sum_loss = 0.0
        steps = 0.0
        lf.train()
        start_time = time.time()
        for x in train_dataloader:
            if x is None:
                continue
            #Only single batch for now
            x = x[0]
            if x is None:
                continue

            positions = [
                Variable(x_i.type(dtype), requires_grad=False)[None, ...]
                for x_i in x['lf_xyrs']
            ]
            xy_positions = [
                Variable(x_i.type(dtype), requires_grad=False)[None, ...]
                for x_i in x['lf_xyxy']
            ]
            img = Variable(x['img'].type(dtype), requires_grad=False)[None,
                                                                      ...]

            #There might be a way to handle this case later,
            #but for now we will skip it
            if len(xy_positions) <= 1:
                continue

            # Periodically reset to the GT position during training
            # (teacher forcing every reset_interval steps).
            reset_interval = 4
            grid_line, _, _, xy_output = lf(img,
                                            positions[:1],
                                            steps=len(positions),
                                            all_positions=positions,
                                            reset_interval=reset_interval,
                                            randomize=True,
                                            skip_grid=True)

            loss = lf_loss.point_loss(xy_output, xy_positions)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            sum_loss += loss.data.item()
            steps += 1

        print "Train Loss", sum_loss / steps
        print "Real Epoch", train_dataloader.epoch
        print "Time:", time.time() - start_time

    ## Save current snapshots for next iteration
    print "Saving Current"
    dirname = train_config['snapshot']['current']
    # NOTE(review): same inverted makedirs condition as above.
    if not len(dirname) != 0 and os.path.exists(dirname):
        os.makedirs(dirname)

    save_path = os.path.join(dirname, "lf.pt")
    torch.save(lf.state_dict(), save_path)

    optim_path = os.path.join(dirname, "lf_optim.pt")
    torch.save(optimizer.state_dict(), optim_path)