Example #1
0
def cycle_runs(
    workdir,
    cycle_size=1000,
    ncycles=10,
    gpuid=0,
    batch_size_test=50,
    n_testinstances=50,
    testnet_prototxt='testnet.prototxt',
    snapshot_prefix='snapshot',
    scorelayer='score',
):
    """
    Wrapper around run and classify_from_datalayer. After each round of
    cycle_size training iterations, it runs the TEST net over the test data
    and stores the resulting score list to disk.
    """
    for cycle in range(ncycles):
        # Train for one cycle, then locate the freshest snapshot.
        run(workdir=workdir, nbr_iters=cycle_size, gpuid=gpuid)
        caffemodel = find_latest_caffemodel(workdir, snapshot_prefix=snapshot_prefix)
        net = load_model(workdir, caffemodel, gpuid=gpuid,
                         net_prototxt=testnet_prototxt, phase=caffe.TEST)
        scorelist = classify_from_datalayer(
            net, n_testinstances=n_testinstances,
            batch_size=batch_size_test, scorelayer=scorelayer)
        bmt.psave(scorelist, osp.join(workdir, 'predictions_using_' + caffemodel + '.p'))
        del net
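A hedged usage sketch: the call below is hypothetical, and it assumes workdir already contains the solver configuration and testnet.prototxt that run and load_model expect (that directory layout is an assumption, not shown in the example).

# Hypothetical call; the path and parameter values are placeholders.
cycle_runs('/path/to/workdir', cycle_size=500, ncycles=4, gpuid=0)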
Example #2
0
def write_split(basedir, workdir, region, split, labelset, scale_method,
                scale_factor):
    """
    Writes the image list, label dict, data-layer parameters, and net
    prototxt for one data split to workdir.
    """
    imlist, imdict = load_data(basedir, split, labelset, region)

    # Estimate the image mean from every 10th image.
    im_mean = bct.calculate_image_mean(imlist[::10])

    with open(osp.join(workdir, '{}list.txt'.format(split)), 'w') as fp:
        for im in imlist:
            fp.write(im + '\n')

    with open(osp.join(workdir, '{}dict.json'.format(split)), 'w') as outfile:
        json.dump(imdict, outfile)

    # Parameters consumed by the Python data layer at train/test time.
    pyparams = dict(batch_size=45,
                    imlistfile='{}list.txt'.format(split),
                    imdictfile='{}dict.json'.format(split),
                    imgs_per_batch=3,
                    crop_size=224,
                    scaling_method=scale_method,
                    scaling_factor=scale_factor,
                    rand_offset=1,
                    random_seed=100,
                    im_mean=list(im_mean),
                    dalevel='cheap')

    psave(pyparams, osp.join(workdir, split + 'pyparams.pkl'))

    # Build a VGG net reading from the RandomPointDataLayer and write its
    # prototxt for this split.
    with open(osp.join(workdir, '{}net.prototxt'.format(split)), 'w') as w:
        n = bct.vgg(pyparams, 'RandomPointDataLayer', len(labelset))
        w.write(str(n.to_proto()))
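A hedged usage sketch: every argument below is a placeholder (the paths, region name, and labelset depend on the dataset layout that load_data expects, which this example does not show).

# Hypothetical call; paths, region, and labels are placeholders.
write_split('/data/images', '/tmp/workdir', region='siteA',
            split='train', labelset=['coral', 'sand', 'algae'],
            scale_method='ratio', scale_factor=1.0)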
Example #3
0
def test_save_load(self):
    # Round-trip a dict through psave/pload and check equality.
    a = {'field': 22, 'another_field': 'value'}
    bmt.psave(a, osp.join(self.workdir, 'test.p'))
    b = bmt.pload(osp.join(self.workdir, 'test.p'))
    self.assertEqual(a, b)
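For reference, a minimal sketch of what bmt.psave and bmt.pload presumably do, assuming they are thin pickle wrappers; this is an assumption based on the round-trip test above, not the actual beijbom_misc_tools implementation.

import pickle

# Assumed behavior only: thin wrappers around pickle, consistent with how
# the test above round-trips a dict through psave/pload.
def psave(obj, path):
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def pload(path):
    with open(path, 'rb') as f:
        return pickle.load(f)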