Example 1
import caffe
import numpy as np
import torch
from torch.autograd import Variable

import caffenet  # builds a PyTorch module from a Caffe prototxt/caffemodel


def load_image(imgfile):
    # Preprocess the image with Caffe's Transformer so it matches the network input.
    height, width = 700, 700
    image = caffe.io.load_image(imgfile)
    transformer = caffe.io.Transformer({'data': (1, 3, height, width)})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', np.array([104., 117., 123.]))
    transformer.set_raw_scale('data', 7.2801098892805181)
    transformer.set_channel_swap('data', (2, 1, 0))
    image = transformer.preprocess('data', image)
    image = image.reshape(1, 3, height, width)
    return image


def forward_pytorch(protofile, weightfile, image):
    # The def line was missing from the excerpt; the name and signature are
    # inferred from the body below.
    net = caffenet.CaffeNet(protofile)
    print(net)
    net.load_weights(weightfile)
    net.eval()
    image = torch.from_numpy(image)
    image = Variable(image)
    blobs = net(image)
    return blobs, net.models
Example 2
# This example reuses the same load_image() and forward_pytorch() helpers shown
# in Example 1; only the driver code below is specific to it.


protofile = '/models/testpy_val_91_500_pkg.prototxt'
weightfile = '/models/test2.caffemodel'
imgfn = '08127_image.png'

# Preprocess the image, build the network from the prototxt, load the Caffe
# weights, and run a forward pass.
image = torch.from_numpy(load_image(imgfn))
image = Variable(image)
net = caffenet.CaffeNet(protofile)
net.load_weights(weightfile)
net.eval()

blobs = net(image)
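
A quick way to sanity-check the result, assuming blobs is a dictionary mapping blob names to tensors (this depends on the caffenet implementation in use and is not confirmed by the excerpt):

# Hypothetical inspection of the forward outputs; the dict structure of blobs
# is an assumption, not part of the original example.
for name, blob in blobs.items():
    print(name, blob.data.size())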
Example 3
import os
import torch
import caffenet


def do(fname):
    # Convert a Caffe prototxt + caffemodel pair into PyTorch checkpoint files.
    # srcdir is defined elsewhere in the original script.
    net = caffenet.CaffeNet(os.path.join(srcdir, fname))
    net.load_weights(
        os.path.join(srcdir, 'vgg16_fast_rcnn_iter_80000.caffemodel'))
    torch.save(net.state_dict(), '%s.statedict.pth' % fname)
    torch.save(net, '%s.full.pth' % fname)
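
A minimal usage sketch; srcdir and the prototxt name below are placeholders, not taken from the original script.

# Hypothetical driver for do(); paths and filenames are placeholders.
srcdir = '/path/to/models'
do('test.prototxt')

# The saved state dict can be loaded back into a freshly constructed network:
net = caffenet.CaffeNet(os.path.join(srcdir, 'test.prototxt'))
net.load_state_dict(torch.load('test.prototxt.statedict.pth'))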
Example 4
import chainer
from chainer import cuda, optimizers

# Pick the network architecture; earlier branches of this chain are not shown
# in the excerpt.
if args.arch == 'alexbn':
    import alexbn
    model = alexbn.AlexBN()
elif args.arch == 'googlenet':
    import googlenet
    model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
    import googlenetbn
    model = googlenetbn.GoogLeNetBN()
elif args.arch == 'vgg11':
    import vgg
    model = vgg.VGG11()
elif args.arch == 'vgg16':
    import vgg
    model = vgg.VGG16()
elif args.arch == 'caffenet':
    import caffenet
    model = caffenet.CaffeNet()
else:
    raise ValueError('Invalid architecture name')

if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# Setup optimizer
optimizer_class = optimizers.MomentumSGD
if args.compress_gradient:
    from optimizer_sgd_compress import MomentumSGDCompress
    optimizer_class = MomentumSGDCompress
optimizer = optimizer_class(lr=args.lr, momentum=0.9)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
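
For reference, a single update step with this optimizer would look roughly like the sketch below; it assumes the usual Chainer imagenet-example convention where calling the model with a batch and its labels returns the loss, and that the model is still on the CPU (args.gpu < 0).

# Hypothetical single training step (not part of the original excerpt).
import numpy as np

insize = getattr(model, 'insize', 224)                  # input size depends on the model
x = np.zeros((1, 3, insize, insize), dtype=np.float32)  # dummy image batch
t = np.zeros((1,), dtype=np.int32)                      # dummy integer labels
optimizer.update(model, x, t)                           # forward, backward, parameter update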
Example 5
import torch
import torchvision.transforms as transforms

import caffenet
# myImageFloder is a custom dataset class defined elsewhere in the original project.

# The start of this Compose was cut off in the excerpt; only ToTensor survives.
mytransform = transforms.Compose([
    transforms.ToTensor(),  # mmb
])

# Build the test dataset and wrap it in a torch.utils.data.DataLoader;
# `dataset` avoids shadowing the built-in name (`set`) used in the original.
dataset = myImageFloder(root="./data/PETA dataset",
                        label="testdata.txt",
                        transform=mytransform)
imgLoader = torch.utils.data.DataLoader(dataset,
                                        batch_size=1,
                                        shuffle=True,
                                        num_workers=2)

print(len(dataset))

path = "./checkpoint1/checkpoint_epoch_60"
net = caffenet.CaffeNet()
net.load_state_dict(torch.load(path))
net.eval()
net.cuda()
'''weight = torch.ones(1,26)
weight[0][2] = 0.3       #18-60
weight[0][22] = 0.4      #trousers
criterion = nn.BCEWithLogitsLoss(weight = weight)          #TODO:1.learn 2. weight'''

dataiter = iter(imgLoader)

count = 0

TP = [0.0] * 35
P = [0.0] * 35
TN = [0.0] * 35
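
The excerpt stops right after initializing the per-attribute counters. A plausible continuation is sketched below, under the assumption that net(images) returns one logit per attribute and that each label vector is binary and of the same length; the 0.5 threshold and the counter updates are illustrative, not taken from the original.

# Hypothetical evaluation loop (not part of the original excerpt).
from torch.autograd import Variable

for images, labels in dataiter:
    images = Variable(images.cuda())
    scores = torch.sigmoid(net(images))        # assumed shape: (1, num_attributes)
    preds = (scores.data > 0.5).float().cpu()[0]
    labels = labels.float()[0]
    for i in range(len(TP)):
        if labels[i] == 1:
            P[i] += 1                          # ground-truth positives
            if preds[i] == 1:
                TP[i] += 1                     # correctly predicted positives
        elif preds[i] == 0:
            TN[i] += 1                         # correctly predicted negatives
    count += 1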