Example #1
def detect(cfgfile, weightfile, imgfile):
    if cfgfile.find('.prototxt') >= 0:
        from caffenet import CaffeNet
        m = CaffeNet(cfgfile)
    else:
        m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'

    use_cuda = 1
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    class_names = load_class_names(namesfile)
    plot_boxes(img, boxes, 'predictions.jpg', class_names)
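A minimal way to invoke this helper; the config, weight, and image paths below are placeholders, not files from the original repository:

detect('cfg/yolo.cfg', 'yolo.weights', 'data/dog.jpg')
# A .prototxt config would route construction through CaffeNet instead of Darknet:
# detect('deploy.prototxt', 'model.caffemodel', 'data/dog.jpg')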
Example #2
def get_model(name):
    if name == 'vgg16':
        model = VGG16()
    elif name == 'caffenet':
        model = CaffeNet()
    elif name == 'vgg_cnn_m_1024':
        model = VGG_CNN_M_1024()
    else:
        raise ValueError('Unsupported model name: %s' % name)
    S.load_hdf5('models/%s.chainermodel' % name, model)
    model.to_gpu()
    return model
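A usage sketch, assuming the matching .chainermodel file has already been placed under models/ and a GPU is available:

model = get_model('caffenet')   # loads models/caffenet.chainermodel and moves it to the GPU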
Example #3
def main():
    """Create the model and start the training."""
    args = get_arguments()

    # Default image.
    image_batch = tf.constant(0, tf.float32, shape=[1, 321, 321, 3])
    # Create network.
    net = CaffeNet({'data': image_batch})
    var_list = tf.global_variables()

    # Set up tf session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # Loading .npy weights.
        net.load(args.npy_path, sess)

        # Saver for converting the loaded weights into .ckpt.
        saver = tf.train.Saver(var_list=var_list, write_version=1)
        save(saver, sess, args.save_dir)
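The save() helper is not shown in the snippet; a hedged sketch of what it presumably does (write the restored variables out as a TF1 checkpoint under save_dir):

import os

def save(saver, sess, save_dir):
    # Sketch only: create the output directory and dump a .ckpt checkpoint there.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    checkpoint_path = os.path.join(save_dir, 'model.ckpt')
    saver.save(sess, checkpoint_path)
    print('Checkpoint written to {}'.format(checkpoint_path))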
Example #4
class EventEvaluation:

    # Init caffenet for evaluation
    net = CaffeNet()

    def __init__(self, clip):
        self.clip = clip

    # Evaluate an array of frame feature vectors from CaffeNet.
    # Returns: array with the cosine similarity of each vector to the element-wise max vector.

    def eval(self, vectors):
        maxVector = self.getMax(vectors)

        result = np.zeros(vectors.shape[0])

        for idx, vector in enumerate(vectors):
            # Compare mediaclip thumbnail concepts with frame concepts

            # Cosine similarity (1 - cosine distance)
            dist = 1 - spatial.distance.cosine(vector, maxVector)

            result[idx] = dist

        return result

    def getMean(self, vectors):
        # Calculate mean of vectors
        mean = np.mean(vectors, axis=0)
        return mean

    def getMax(self, vectors):
        # Calculate element-wise maximum of vectors
        maxVector = np.amax(vectors, axis=0)
        return maxVector
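A driving sketch, assuming `vectors` is a 2-D NumPy array holding one CaffeNet concept vector per frame (the shapes below are made up for illustration):

import numpy as np

vectors = np.random.rand(10, 1000)          # 10 frames, 1000-dim concept vectors (hypothetical)
evaluator = EventEvaluation(clip=None)      # the clip object is not needed for eval()
scores = evaluator.eval(vectors)            # similarity of each frame to the element-wise max vector
print(scores.shape)                         # (10,)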
Example #5
solver        = parse_solver(args.solver)
protofile     = solver['net']
base_lr       = float(solver['base_lr'])
momentum      = float(solver['momentum'])
weight_decay  = float(solver['weight_decay'])
test_iter     = int(solver['test_iter'])
max_iter      = int(solver['max_iter'])
test_interval = int(solver['test_interval'])
snapshot      = int(solver['snapshot'])
snapshot_prefix = solver['snapshot_prefix']

torch.manual_seed(int(time.time()))
if args.gpu:
    torch.cuda.manual_seed(int(time.time()))

net = CaffeNet(protofile)
net.set_verbose(False)
net.set_train_outputs('loss')
net.set_eval_outputs('loss', 'accuracy')
print(net)

if args.gpu:
    device_ids = args.gpu.split(',')
    device_ids = [int(i) for i in device_ids]
    print('device_ids', device_ids)
    if len(device_ids) > 1:
        print('---- Multi GPUs ----')
        net = ParallelCaffeNet(net.cuda(), device_ids=device_ids)
        #net = nn.DataParallel(net.cuda(), device_ids=device_ids)
    else:
        print('---- Single GPU ----')
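The parsed solver hyper-parameters map directly onto a torch SGD optimizer; the snippet is cut off before that step, so this is only a sketch of the usual continuation:

# Sketch: build the optimizer from the solver values parsed above.
optimizer = torch.optim.SGD(net.parameters(), lr=base_lr,
                            momentum=momentum, weight_decay=weight_decay)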
Example #6
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        #        if args.arch == 'resnet50':
        #            import resnet_model
        #            model = resnet_model.resnet50_new(pretrained=True)
        #            print('save resnet50 to resnet50.weights')
        #            model.saveas_darknet_weights('resnet50.weights')
        if args.arch == 'resnet50-pytorch':
            model = models.resnet50(pretrained=True)
        elif args.arch == 'resnet50-darknet':
            from darknet import Darknet
            model = Darknet('cfg/resnet50.cfg')
            print('load weights from resnet50.weights')
            model.load_weights('resnet50.weights')
        elif args.arch == 'resnet50-caffe2darknet':
            from darknet import Darknet
            model = Darknet('resnet50-caffe2darknet.cfg')
            print('load weights from resnet50-caffe2darknet.weights')
            model.load_weights('resnet50-caffe2darknet.weights')
        elif args.arch == 'resnet50-pytorch2caffe':
            from caffenet import CaffeNet
            model = CaffeNet('resnet50-pytorch2caffe.prototxt')
            print('load weights resnet50-pytorch2caffe.caffemodel')
            model.load_weights('resnet50-pytorch2caffe.caffemodel')
        elif args.arch == 'resnet50-darknet2caffe':
            from caffenet import CaffeNet
            model = CaffeNet('resnet50-darknet2caffe.prototxt')
            print('load weights resnet50-darknet2caffe.caffemodel')
            model.load_weights('resnet50-darknet2caffe.caffemodel')
        elif args.arch == 'resnet50-kaiming':
            from caffenet import CaffeNet
            model = CaffeNet('ResNet-50-deploy.prototxt')
            print('load weights from ResNet-50-model.caffemodel')
            model.load_weights('ResNet-50-model.caffemodel')
        elif args.arch == 'resnet50-kaiming-dk':
            from darknet import Darknet
            model = Darknet('ResNet-50-model.cfg')
            print('load weights from ResNet-50-model.weights')
            model.load_weights('ResNet-50-model.weights')
        elif args.arch == 'resnet18-caffe':
            from caffenet import CaffeNet
            model = CaffeNet('cfg/resnet-18.prototxt')
            print('load weights from resnet-18.caffemodel')
            model.load_weights('resnet-18.caffemodel')
        elif args.arch == 'resnet18-darknet':
            from darknet import Darknet
            model = Darknet('resnet-18.cfg')
            print('load weights from resnet-18.weights')
            model.load_weights('resnet-18.weights')
        elif args.arch == 'resnet50-test':
            from darknet import Darknet
            model = Darknet('test/ResNet-50-model.cfg')
            print('load weights from test/ResNet-50-model.weights')
            model.load_weights('test/ResNet-50-model.weights')
        else:
            model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        if args.arch.startswith('mobilenet'):
            model = Net()
            print(model)
        else:
            model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    if args.arch == 'resnet50-test' or args.arch == 'resnet50-kaiming' or args.arch == 'resnet50-kaiming-dk':
        normalize = transforms.Normalize(mean=[0.0, 0.0, 0.0],
                                         std=[1.0, 1.0, 1.0])
    elif args.arch == 'resnet18-darknet' or args.arch == 'resnet18-caffe':
        normalize = transforms.Normalize(
            mean=[104 / 255.0, 117 / 255.0, 123 / 255.0], std=[1.0, 1.0, 1.0])
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
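adjust_learning_rate() is referenced above but not defined in the snippet; in the stock PyTorch ImageNet example it decays the initial rate tenfold every 30 epochs, so a sketch under that assumption would be:

def adjust_learning_rate(optimizer, epoch):
    # Assumed schedule: decay args.lr by 10x every 30 epochs (standard ImageNet recipe).
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr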
Example #7
test_iter = 0
max_iter = int(solver['max_iter'])
test_interval = 99999999
snapshot = int(solver['snapshot'])
snapshot_prefix = solver['snapshot_prefix']
stepvalues = solver['stepvalue']
stepvalues = [int(item) for item in stepvalues]

if args.lr != None:
    base_lr = args.lr

#torch.manual_seed(int(time.time()))
#if args.gpu:
#    torch.cuda.manual_seed(int(time.time()))
#print(protofile)
net = CaffeNet(protofile)
if args.weights:
    net.load_weights(args.weights)
#net.set_verbose(False)
net.set_train_outputs('mbox_loss')

if args.gpu:
    device_ids = args.gpu.split(',')
    device_ids = [int(i) for i in device_ids]
    print('device_ids', device_ids)
    if len(device_ids) > 1:
        print('---- Multi GPUs ----')
        net = ParallelCaffeNet(net.cuda(), device_ids=device_ids)
    else:
        print('---- Single GPU ----')
        net.cuda()
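The stepvalues parsed from the solver suggest a Caffe-style multistep schedule. A hedged sketch of how the rate could be dropped at those iterations (the 0.1 gamma and the optimizer variable are assumptions, not part of the snippet):

def adjust_learning_rate(optimizer, itr, gamma=0.1):
    # Sketch: multiply base_lr by gamma once for every stepvalue already passed.
    lr = base_lr
    for step in stepvalues:
        if itr >= step:
            lr = lr * gamma
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr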
Example #8
class Clip:
    
    # Init caffenet for evaluation
    net = CaffeNet()
    
    def __init__(self, clip=None, vidFolder=None, thumbFolder=None):
        if clip != None:
            self.clipId = clip['id']
            self.clip = clip
        else:
            raise NameError('No clip(id) supplied')
        
        if vidFolder == None:
            self.vidFolder = 'data/videos/'
        else:
            self.vidFolder = vidFolder
        
        if thumbFolder == None:
            self.thumbFolder = 'data/thumbnails/'
        else:
            self.thumbFolder = thumbFolder
        
        self.interval = 25
        self.start = 0
        self.__tmpLocation = 'tmp/'
        
        self.cache = True
        self.concepts = None
        
    
    def hasVideo(self):
        self.videoReader = VideoReader(self.getVideoFile())
        return self.videoReader.success
    
    def getClip(self):
        return self.clip
    
    
    def getClipId(self):
        return self.clipId    
    
    
    def getVideoFile(self):
        return self.vidFolder + self.clipId + '.mp4'
    
    def getRawFrames(self):
        self.videoReader = VideoReader(self.getVideoFile())
        return self.videoReader.getFrames(self.interval, self.start, False)
    
    def getFrames(self):
        self.videoReader = VideoReader(self.getVideoFile())
        return self.videoReader.getFrames(self.interval, self.start)
            
    
    def getConcepts(self, start=None, end=None, indexes=None):
        fileName = self.__tmpLocation + 'concepts/' + self.clipId + '_' + str(self.interval) + '.concepts.npy'
        # Check if a file with concepts with this interval already exists
        if self.concepts == None:
            try:
                self.concepts = np.load(fileName)
            except (IOError):
                # No file found, so get concepts and save them
                frames = self.getFrames()
                self.concepts = self.net.classify(frames)
                np.save(fileName, self.concepts)
        
        if start != None and end != None and indexes == None:
            return self.concepts[start:end]
        
        if indexes != None:
            return np.take(self.concepts, indexes, axis=0)
            
        return self.concepts
    
    
    # Get concepts for mediaclip thumbnail image with caffenet
    def getThumbnailConcepts(self):
        thumbnail = self.getThumbnail()
        
        if thumbnail != False:
            frame = self.net.loadImage(thumbnail)
            return self.net.classify([frame])[0]
        else:
            return False
            
    
    # Returns relevant metadata
    def getMetadata(self):
        ret = []
        if 'title' in self.clip:
            ret.append(self.clip['title'])
        if 'description' in self.clip:
            ret.append(self.clip['description'])
        if 'cat' in self.clip:
            ret.append(' '.join(self.clip['cat']))
        
        return ret
    
    
    # Returns clip title
    def getTitle(self):
        if 'title' in self.clip:
            return self.clip['title']
        else:
            return 'n/a'
        
        
    # Returns relevant metadata as single text blob
    def getMetadataBlob(self):
        return ' '.join(self.getMetadata())
    
    
    def getThumbnailFrame(self):
        thumbnailUrl = self.getThumbnail()
        
        if thumbnailUrl:
            return cv2.imread(thumbnailUrl, flags=cv2.IMREAD_COLOR)
        else:
            return False
        
    
    def getThumbnail(self):
        if 'thumbnail' in self.clip:
            ext = os.path.splitext(self.clip['thumbnail']['sourcepath'])[1]
            fileName = str(self.thumbFolder) + str(self.clipId) + str(ext)
            
            if os.path.isfile(fileName) == False:
                # Does not exist, so fetch it from online
                self._downloadThumbnail(fileName)
            
            return str(self.thumbFolder) + str(self.clipId) + '.jpg'
            
        else:
            return False
        
    
    def setInterval(self, interval):
        self.interval = int(interval)
    
    def setStart(self, start):
        self.start = int(start)
    
    def setCache(self, cache):
        self.cache = bool(cache)
    
    def _downloadThumbnail(self, fileName):
        thumbnail = urllib.URLopener()
        # Cloudfront: Gets 403 Forbidden
        # url = 'https://d2vt09yn8fcn7w.cloudfront.net' + self.clip['thumbnail']['sourcepath']
        # pthumbnail:
        url = 'http://landmark.bbvms.com/mediaclip/' + self.clipId + '/pthumbnail/default/default.jpg'
        
        print(url)
        thumbnail.retrieve(url, fileName)
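A usage sketch for the class, assuming a clip record with at least an 'id' field and the default data/ folder layout (the id below is made up):

clip = Clip(clip={'id': '1234567', 'title': 'demo clip'})
clip.setInterval(50)                 # sample every 50th frame
if clip.hasVideo():
    concepts = clip.getConcepts()    # cached under tmp/concepts/ after the first call
    print(clip.getTitle(), concepts.shape)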
Example #9
cfgfile1 = 'reid.cfg'
weightfile1 = 'reid.weights'
cfgfile2 = 'reid_nbn.cfg'
weightfile2 = 'reid_nbn.weights'
cfgfile3 = 'reid_nbn.prototxt'
weightfile3 = 'reid_nbn.caffemodel'

m1 = Darknet(cfgfile1)
m1.load_weights(weightfile1)
m1.eval()

m2 = Darknet(cfgfile2)
m2.load_weights(weightfile2)
m2.eval()

m3 = CaffeNet(cfgfile3)
m3.load_weights(weightfile3)
m3.eval()

img = torch.rand(8, 3, 128, 64)
img = Variable(img)

output1 = m1(img).clone()
output2 = m2(img).clone()
output3 = m3(img).clone()
print('----- output1 ------------------')
print(output1.data.storage()[0:100])
print('----- output2 ------------------')
print(output2.data.storage()[0:100])
print('----- output3 ------------------')
print(output3.data.storage()[0:100])
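Since the three networks are meant to be equivalent conversions of the same re-id model, a natural follow-up check is how far the outputs actually diverge; a small sketch (not part of the original script):

# Maximum absolute difference between the converted models' outputs.
print('max |output1 - output2| =', (output1 - output2).abs().max().data[0])
print('max |output1 - output3| =', (output1 - output3).abs().max().data[0])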
Example #10
import Generate_train_test2
import config
from caffenet import CaffeNet
from keras.applications.imagenet_utils import decode_predictions
import numpy as np
from load_cifar10 import load_cifar10_data

if __name__ == '__main__':
    finetune = True
    img_rows, img_cols = 227, 227  # Resolution of inputs
    channel = 3
    batch_size, nb_epoch, num_classes, save_weights = config.Load()
    # Load Cifar10 data. Please implement your own load_data() module for your own dataset
    X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

    model = CaffeNet(weights='caffenet_weights_th.h5', classes=num_classes)

    # Start Fine-tuning
    if finetune:
        model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            shuffle=True,
            verbose=1,
            validation_data=(X_valid, Y_valid),
        )

    #model.save('Eggs_weights/caffenet_weights_th.h5')
    if save_weights:
        # Assumed completion (the original snippet is cut off here): persist the
        # fine-tuned weights, mirroring the commented-out path above.
        model.save('Eggs_weights/caffenet_weights_th.h5')
Example #11
parser.add_argument('--model', default='vgg_cnn_m_1024')
args = parser.parse_args()

model_name = args.model
if model_name == 'caffenet':
    model_name_capital = 'CaffeNet'
else:
    model_name_capital = model_name.upper()

param_dir = 'fast-rcnn/data/fast_rcnn_models'
param_fn = '%s/%s_fast_rcnn_iter_40000.caffemodel' % (param_dir, model_name)
model_dir = 'fast-rcnn/models/%s' % model_name_capital
model_fn = '%s/test.prototxt' % model_dir

if model_name == 'caffenet':
    model = CaffeNet()
elif model_name == 'vgg16':
    model = VGG16()
elif model_name == 'vgg_cnn_m_1024':
    model = VGG_CNN_M_1024()
else:
    raise ValueError('Unsupported model name: %s' % model_name)

net = caffe.Net(model_fn, param_fn, caffe.TEST)
for name, param in net.params.items():
    layer = getattr(model, name)

    print(name, param[0].data.shape, param[1].data.shape,
          layer.W.data.shape, layer.b.data.shape)

    assert layer.W.data.shape == param[0].data.shape
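    # Sketch of the usual continuation (not in the original snippet): after the shape
    # check, copy the Caffe blobs into the matching Chainer layer parameters.
    assert layer.b.data.shape == param[1].data.shape
    layer.W.data = param[0].data
    layer.b.data = param[1].data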
Example #12
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('mode', help='train/time/test')
parser.add_argument('--gpu', help='gpu ids e.g "0,1,2,3"')
parser.add_argument('--solver', help='the solver prototxt')
parser.add_argument('--model', help='the network definition prototxt')
parser.add_argument('--snapshot', help='the snapshot solver state to resume training')
parser.add_argument('--weights', help='the pretrained weight')

args = parser.parse_args()
if args.mode == 'time':

    protofile  = args.model
    net_info   = parse_prototxt(protofile)
    model      = CaffeNet(protofile)
    batch_size = 64

    model.print_network()
    model.eval()

    niters = 50
    total = 0
    for i in range(niters):
        x = torch.rand(batch_size, 1, 28, 28)
        x = Variable(x)
        t0 = time.time()
        output = model(x)
        t1 = time.time()
        print('iteration %d: %fs' %(i, t1-t0))
        total = total + (t1-t0)