Example #1
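Training entry point: prepare the Cornell datapoints (optionally fixing rectangles), split them 80/20, train a Net() with SGD, run test() after each epoch, and visualize the first ten datapoints into ./predictions.
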
def main():
    args = parse_arguments()
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}  # DataLoader kwargs; not forwarded to CornellDataLoader below

    datapoints = utils.prepare_datapoints(data_raw_path=args.data_raw_dir,
                                          intent_data_file=args.intent_data,
                                          fix_rectangles=args.fix_rectangles)
    train_datapoints, test_datapoints = \
        utils.train_test_split_datapoints(datapoints, test_size=0.2)

    train_loader = CornellDataLoader(train_datapoints)
    test_loader = CornellDataLoader(test_datapoints)

    model = Net()
    if args.cuda:
        model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        while not train(model, train_loader, optimizer, args, epoch):
            pass
        test(model, test_loader, optimizer, args)


    target_dir = os.path.abspath('./predictions')
    try:
        os.makedirs(target_dir)
    except FileExistsError:
        pass

    visualize_result(datapoints[:10], model, args.cuda, target_dir)
Example #2
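Variant of the training script with a configurable window size: the LinearNet layer layout is kept around commented out, a ConvNet is trained instead, and a train() failure triggers a parameter reset and a restart of that epoch.
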
def main():
    args = parse_arguments()
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    datapoints = utils.prepare_datapoints(data_raw_path=args.data_raw_dir)
    train_datapoints, test_datapoints = \
        utils.train_test_split_datapoints(datapoints, test_size=0.2)

    # window_size = (240, 320)
    window_size = (480, 640)
    train_loader = CornellDataLoader(train_datapoints, window_size=window_size)
    test_loader = CornellDataLoader(test_datapoints, window_size=window_size)

    linear_model_weights = [
        # 640*480,
        320*240,
        # 240*180,
        # 160*120,
        40*30,
        20*15,
        6
    ]
    # linear_model_weights = [
    #    320*240,
    #    40*30,
    #    6
    # ] 
    # model = models.LinearNet(linear_model_weights)
    model = models.ConvNet()
    if args.cuda:
        model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    print("Here's the model")
    print(model)
    for epoch in range(1, args.epochs + 1):
        while not train(model, train_loader, optimizer, args, epoch):
            model.reset_parameters()
            print('Restart train...')
        test(model, test_loader, optimizer, args)

    # import pdb; pdb.set_trace()  # leftover debugging breakpoint, disabled so the run can finish unattended

    target_dir = os.path.abspath('./predictions')
    try:
        os.makedirs(target_dir)
    except FileExistsError:
        pass

    visualize_result(datapoints[:10], model, args.cuda, target_dir, window_size)
Example #3
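Interactive labeling script: each unique datapoint is wrapped in a Cartographer, rectangles previously saved in the output file are offered for re-labeling, and the updated mappings are written back when labeling finishes or SHALL_QUIT is set.
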
def main():
    args = parse_args()
    datapoints = utils.prepare_datapoints(args.data_raw)
    unique_datapoints = utils.filter_unique_datapoints(datapoints)
    cartographers = [
        Cartographer(dp, dp.image_name) for dp in unique_datapoints
    ]

    output_file = args.output_file
    mapping = read_mappings(output_file)
    print()

    for c in cartographers:
        if c.image_name in mapping:
            ok = c.label_image(mapping[c.image_name])
        else:
            ok = c.label_image()
        if ok:
            mapping[c.image_name] = c.rectangles
        # SHALL_QUIT is assumed to be a module-level flag toggled elsewhere (e.g. by the labeling UI)
        if SHALL_QUIT:
            break

    save_mappings(mapping, output_file)
    print('Saved mappings to', output_file)
Example #4
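LinearNet training with background data: background images and their mapping are loaded, data loaders are built with a 120x160 window, the latest test score is tracked, and the trained model is saved under a timestamped filename that embeds that score.
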
def main():
    args = parse_arguments()
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    linear_model_weights = [
        # 640*480,
        # 320*240,
        160 * 120,
        40 * 30,
        20 * 15,
        6
    ]
    model = models.LinearNet(linear_model_weights)
    # model = models.ConvNet()
    if args.cuda:
        model.cuda()

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    print("Here's the model")
    print(model)

    datapoints = utils.prepare_datapoints(data_raw_path=args.data_raw_dir)
    train_datapoints, test_datapoints = \
        utils.train_test_split_datapoints(datapoints, test_size=0.2)

    bg_mapping = utils.get_background_mappings(args.data_raw_dir +
                                               '/backgroundMapping.txt')
    backgrounds = utils.read_backgrounds(args.data_raw_dir + '/backgrounds')

    window_size = (120, 160)
    # window_size = (240, 320)
    # window_size = (480, 640)
    # normalize = True
    normalize = False
    train_loader = CornellDataLoader(train_datapoints,
                                     backgrounds,
                                     bg_mapping,
                                     window_size=window_size,
                                     normalize=normalize)
    test_loader = CornellDataLoader(test_datapoints,
                                    backgrounds,
                                    bg_mapping,
                                    window_size=window_size,
                                    normalize=normalize)

    perf = 0
    for epoch in range(1, args.epochs + 1):
        while not train(model, train_loader, optimizer, args, epoch):
            model.reset_parameters()
            print('Restart train...')
        # evaluate once per epoch; the last score ends up in the saved filename below
        perf = test(model, test_loader, optimizer, args)

    target_dir = os.path.abspath('./predictions')
    try:
        os.makedirs(target_dir)
    except FileExistsError:
        pass

    visualize_result(datapoints[:10], model, args.cuda, target_dir,
                     backgrounds, bg_mapping, window_size, normalize)

    from datetime import datetime
    model_fname = '{}-{}.pth'.format(datetime.now().isoformat(), perf)
    print('saving model as', model_fname)
    torch.save(model, model_fname)
Example #5
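Inference snippet: load a saved model checkpoint, run it over every datapoint from the loader, and collect the predictions while printing progress.
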
# coding: utf-8
import torch
from torch.autograd import Variable

import utils
from train import CornellDataLoader

model = torch.load('saved-model.pth')
dps = utils.prepare_datapoints('/home/irm15006/DataRaw')
dl = CornellDataLoader(dps)

# run the model over every datapoint and collect its predictions
p = []
for v, actual in dl:
    v, actual = Variable(v), Variable(actual)
    # the checkpoint was saved from a GPU run, so evaluate on CUDA and move the result back to the CPU
    prediction = model(v.cuda()).cpu()
    p.append(prediction)
    print('\r {} / {}'.format(len(p), len(dl)), end='')
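
A minimal follow-up sketch, assuming each entry in p is a Variable of a consistent shape and that dim=0 is the batch axis (both assumptions, not confirmed by the snippet above), for gathering the collected outputs into one tensor:

# hedged sketch: concatenate the per-batch predictions (assumes consistent shapes, batch on dim 0)
predictions = torch.cat([pred.data for pred in p], dim=0)
print()  # finish the carriage-return progress line
print('collected predictions:', tuple(predictions.size()))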