# Example #1
# (score: 0)
def master_nonc(model_dir, viz=False):
    """Evaluate the non-cumulative tag model and dump metrics to disk.

    Loads 'validate.prototxt' from ``model_dir`` with a fixed set of trained
    weights, optionally visualizes a few ground-truth/estimate pairs, then
    writes accuracy/precision/recall for k in (1, 3, 5, 10) to
    'results_nonc.list'.

    Args:
        model_dir: Directory containing 'validate.prototxt' (trailing slash
            expected, since paths are joined by string concatenation).
        viz: When True, show matplotlib figures for a handful of batch images.

    NOTE(review): relies on module-level globals (caffe, caffe_root, tools,
    plt, np, copy, classes, check_accuracy_nonc) defined elsewhere in the file.
    """
    model_def = model_dir + 'validate.prototxt'
    model_weights = caffe_root + 'models/finetune_flickr_tag/finetune_flickr_tag_1_iter_680000.caffemodel'
    net = caffe.Net(
        model_def,  # defines the structure of the model
        model_weights,  # contains the trained weights
        caffe.TEST)  # use test mode (e.g., don't perform dropout)

    if viz:
        # SimpleTransformer adds back the mean/bias and re-shuffles the color
        # channels to RGB so the blob can be displayed as a normal image.
        transformer = tools.SimpleTransformer()
        image_index = 50  # First image in the batch.
        plt.figure()
        plt.imshow(
            transformer.deprocess(
                copy(net.blobs['data'].data[image_index, ...])))
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int is the supported equivalent.
        gtlist = net.blobs['label'].data[image_index, ...].astype(int)
        plt.title('GT: {}'.format(classes[np.where(gtlist[0, 0, :])]))
        plt.axis('off')
        plt.show()

        for image_index in range(10, 15):
            plt.figure()
            plt.imshow(
                transformer.deprocess(net.blobs['data'].data[image_index,
                                                             ...]))
            gtlist = net.blobs['label'].data[image_index,
                                             ...].astype(int)[0, 0, :]
            # Top-5 predicted tag indices, highest score first.
            estlist = net.blobs['fc8_flickr'].data[image_index,
                                                   ...].argsort()[::-1][:5]
            plt.title('GT: {} \n EST: {}'.format(classes[np.where(gtlist)],
                                                 classes[estlist]))
            plt.axis('off')
            plt.show()

    # Context manager guarantees the results file is closed even if an
    # evaluation raises part-way through (the original leaked the handle).
    with open('results_nonc.list', 'w') as fp:
        for k in (1, 3, 5, 10):
            res = check_accuracy_nonc(net, k)
            fp.write('k: %d\n' % k)
            fp.write('accuracy precision recall\n')
            np.savetxt(fp, res, fmt='%.6f')
# Example #2
# (score: 0)
def master(model_dir, viz=False):
    """Load the VGG Flickr-tag test net and run tag prediction.

    Loads 'test.prototxt' from ``model_dir`` with a fixed set of trained
    weights, optionally visualizes a few ground-truth/estimate pairs, then
    calls ``predict_tags`` on the resulting net.

    Args:
        model_dir: Directory containing 'test.prototxt' (trailing slash
            expected, since paths are joined by string concatenation).
        viz: When True, show matplotlib figures for a handful of batch images.

    NOTE(review): relies on module-level globals (caffe, caffe_root, tools,
    plt, np, copy, classes, predict_tags) defined elsewhere in the file.
    """
    model_def = model_dir + 'test.prototxt'
    model_weights = caffe_root + 'models/vgg_flickr_tag/vgg_flickr_tag_1_iter_340000.caffemodel'
    net = caffe.Net(
        str(model_def),  # defines the structure of the model
        str(model_weights),  # contains the trained weights
        caffe.TEST)  # use test mode (e.g., don't perform dropout)

    if viz:
        # SimpleTransformer adds back the mean/bias and re-shuffles the color
        # channels to RGB so the blob can be displayed as a normal image.
        transformer = tools.SimpleTransformer()
        image_index = 50  # First image in the batch.
        plt.figure()
        plt.imshow(
            transformer.deprocess(
                copy(net.blobs['data'].data[image_index, ...])))
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int is the supported equivalent.
        gtlist = net.blobs['label'].data[image_index, ...].astype(int)
        plt.title('GT: {}'.format(classes[np.where(gtlist[0, 0, :])]))
        plt.axis('off')
        plt.show()

        for image_index in range(10, 15):
            plt.figure()
            plt.imshow(
                transformer.deprocess(net.blobs['data'].data[image_index,
                                                             ...]))
            gtlist = net.blobs['label'].data[image_index,
                                             ...].astype(int)[0, 0, :]
            # Top-5 predicted tag indices, highest score first.
            estlist = net.blobs['fc8_flickr'].data[image_index,
                                                   ...].argsort()[::-1][:5]
            plt.title('GT: {} \n EST: {}'.format(classes[np.where(gtlist)],
                                                 classes[estlist]))
            plt.axis('off')
            plt.show()

    predict_tags(net)
# Example #3
# (score: 0)
## This net uses a python datalayer: 'PascalMultilabelDataLayerSync',
## which is defined in './pycaffe/layers/pascal_multilabel_datalayers.py'.
## Take a look at the code. It's quite straight-forward, and gives you full control over data and labels.
## Now we can load the caffe solver as usual.

solver = caffe.SGDSolver(osp.join(workdir, 'solver.prototxt'))
# Warm-start from the ImageNet-pretrained reference CaffeNet weights.
solver.net.copy_from(
    caffe_root +
    'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
# Share weights so the test net always evaluates the current training state.
solver.test_nets[0].share_with(solver.net)
solver.step(1)

## Let's check the data we have loaded.
# This is simply to add back the bias, re-shuffle the color channels to RGB, and so on...
# (this block is just showing, or checking, the ground truth of first image)
transformer = tools.SimpleTransformer()

image_index = 0  # First image in the batch.
plt.figure()
plt.imshow(
    transformer.deprocess(copy(solver.net.blobs['data'].data[image_index,
                                                             ...])))
# np.int was removed in NumPy 1.24; the builtin int is the supported spelling.
gtlist = solver.net.blobs['label'].data[image_index, ...].astype(int)
# Debug dumps of the raw blobs and the derived multilabel ground truth.
print(solver.net.blobs['data'].data[image_index, ...])
print(solver.net.blobs['label'].data[image_index, ...])
print(gtlist)
print(np.where(gtlist))
plt.title('GT: {}'.format(classes[np.where(gtlist)]))
plt.axis('off')

# Example #4
# (score: 0)
    data_layer_params = dict(batch_size=128,
                             im_shape=[227, 227],
                             split='val',
                             pascal_root=pascal_root)
    f.write(
        caffenet_multilabel(data_layer_params,
                            'PascalMultilabelDataLayerSync'))

solver = caffe.SGDSolver(osp.join(workdir, 'solver.prototxt'))
# Warm-start from the ImageNet-pretrained reference CaffeNet weights.
solver.net.copy_from(
    caffe_root +
    'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
# Share weights so the test net always evaluates the current training state.
solver.test_nets[0].share_with(solver.net)
solver.step(1)

transformer = tools.SimpleTransformer(
)  # This is simply to add back the bias, re-shuffle the color channels to RGB, and so on...
image_index = 0  # First image in the batch.
plt.figure()
plt.imshow(
    transformer.deprocess(copy(solver.net.blobs['data'].data[image_index,
                                                             ...])))
# np.int was removed in NumPy 1.24; the builtin int is the supported spelling.
gtlist = solver.net.blobs['label'].data[image_index, ...].astype(int)
plt.title('GT: {}'.format(classes[np.where(gtlist)]))
plt.axis('off')


def hamming_distance(gt, est):
    """Return the fraction of positions where *gt* and *est* agree.

    NOTE(review): despite the name, this is a normalized *similarity*
    (1 - normalized Hamming distance); the name is kept for existing callers.

    Args:
        gt: Ground-truth sequence.
        est: Estimated sequence; compared element-wise against ``gt`` (any
            extra trailing elements in the longer sequence are ignored, but
            the denominator is always ``len(gt)``).

    Returns:
        Matching fraction in [0, 1] as a float.

    Raises:
        ZeroDivisionError: If ``gt`` is empty (preserved original behavior).
    """
    # Generator avoids materializing a throwaway list inside sum().
    return sum(1 for g, e in zip(gt, est) if g == e) / float(len(gt))


def check_accuracy(net, num_batches, batch_size=128):