Пример #1
0
 def __init__(self, allObjectsFile, categoryObjects, convnet):
   """Precompute ConvNet feature matrices for prior samples.

   allObjectsFile: path to a box-index file (image -> boxes) for all images.
   categoryObjects: dict image -> boxes of the target category.
   convnet: extractor exposing prepareImage / coverRegion / getActivations.

   Fills self.N (negative features) and self.P (positive features), one
   4096-d row per box.
   """
   self.allObjects  = cu.loadBoxIndexFile(allObjectsFile)
   self.categoryObjects = categoryObjects
   # Total number of boxes across all images: one feature row per box.
   negativeSamples = reduce(lambda x,y:x+y, map(len,self.allObjects.values()))
   positiveSamples = reduce(lambda x,y:x+y, map(len,self.categoryObjects.values()))
   self.N = np.zeros( (negativeSamples, 4096), np.float32 )
   self.P = np.zeros( (positiveSamples, 4096), np.float32 )
   idx = 0
   # Populate negative examples
   print '# Processing',negativeSamples,'negative prior samples'
   for key in self.allObjects.keys():
     # NOTE(review): bare except — presumably meant to catch KeyError when
     # the image has no category boxes, but it also hides unrelated errors.
     try:
       boxes = self.categoryObjects[key]
       cover = True
     except:
       boxes = self.allObjects[key]
       cover = False
     convnet.prepareImage(key)
     for box in boxes:
       if cover:
         # Mask the positive object so its features count as a negative.
         convnet.coverRegion(box)
       activations = convnet.getActivations(box)
       self.N[idx,:] = activations[config.get('convnetLayer')]
       idx += 1
   # Populate positive examples
   print '# Processing',positiveSamples,'positive prior samples'
   idx = 0
   for key in self.categoryObjects.keys():
     convnet.prepareImage(key)
     for box in self.categoryObjects[key]:
       activations = convnet.getActivations(box)
       self.P[idx,:] = activations[config.get('convnetLayer')]
       # NOTE(review): idx is never incremented in this loop, so every
       # positive feature overwrites row 0 of self.P — the snippet looks
       # truncated; confirm against the original source.
Пример #2
0
class QNetwork(ActionValueInterface):

    networkFile = config.get('networkDir') + config.get(
        'snapshotPrefix') + '_iter_' + config.get(
            'trainingIterationsPerBatch') + '.caffemodel'

    def __init__(self):
        self.net = None
        print 'QNetwork::Init. Loading ', self.networkFile
        self.loadNetwork()
        self.sampler = defaultSampler

    def releaseNetwork(self):
        if self.net != None:
            del self.net
            self.net = None

    def loadNetwork(self, definition='deploy.prototxt'):
        if os.path.isfile(self.networkFile):
            modelFile = config.get('networkDir') + definition
            self.net = caffe.Net(modelFile, self.networkFile)
            self.net.set_phase_test()
            self.net.set_mode_gpu()
            print 'QNetwork loaded'
        else:
            self.net = None
            print 'QNetwork not found'

    def getMaxAction(self, state):
        values = self.getActionValues(state)
        return np.argmax(values, 1)

    def getActionValues(self, state):
        if self.net == None or self.exploreOrExploit() == EXPLORE:
            return self.sampler()
        else:
            return self.getActivations(state)

    def getActivations(self, state):
        out = self.net.forward_all(
            **{
                self.net.inputs[0]:
                state.reshape((state.shape[0], state.shape[1], 1, 1))
            })
        return out['qvalues'].squeeze(axis=(2, 3))

    def setEpsilonGreedy(self, epsilon, sampler=None):
        if sampler is not None:
            self.sampler = sampler
        self.epsilon = epsilon

    def exploreOrExploit(self):
        if self.epsilon > 0:
            if random.random() < self.epsilon:
                return EXPLORE
        return EXPLOIT
 def __init__(self, mode):
   """Wire up environment, controller, agent, task and experiment.

   mode ('train'/'test') selects which database and ground-truth paths
   are read from the configuration.
   """
   self.mode = mode
   cu.mem('Reinforcement Learning Started')
   self.environment = BoxSearchEnvironment(config.get(mode+'Database'), mode, config.get(mode+'GroundTruth'))
   self.controller = QNetwork()
   cu.mem('QNetwork controller created')
   # No learner yet: the agent starts in pure exploration mode.
   self.learner = None
   self.agent = BoxSearchAgent(self.controller, self.learner)
   self.task = BoxSearchTask(self.environment, config.get(mode+'GroundTruth'))
   self.experiment = Experiment(self.task, self.agent)
Пример #4
0
 def loadNetwork(self):
   """Build the pretrained ImageNet ConvNet in GPU test mode and cache
   its mean image in channel-first float32 layout."""
   self.imgDim = config.geti('imageDim')
   self.cropSize = config.geti('cropSize')
   self.contextPad = config.geti('contextPad')
   definitionPath = config.get('convnetDir') + config.get('convNetDef')
   weightsPath = config.get('convnetDir') + config.get('trainedConvNet')
   self.net = wrapperv0.ImageNetClassifier(
       definitionPath,
       weightsPath,
       IMAGE_DIM=self.imgDim,
       CROPPED_DIM=self.cropSize,
       MEAN_IMAGE=config.get('meanImage'))
   self.net.caffenet.set_mode_gpu()
   self.net.caffenet.set_phase_test()
   # HWC -> CHW, float32: the layout expected when preparing input images.
   meanImage = self.net._IMAGENET_MEAN
   self.imageMean = meanImage.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
Пример #5
0
 def __init__(self, mode):
     """Assemble the box-search RL pipeline for the given mode.

     mode ('train' or 'test') selects which database and ground-truth
     files are read from the configuration.
     """
     self.mode = mode
     cu.mem('Reinforcement Learning Started')
     self.environment = BoxSearchEnvironment(
         config.get(mode + 'Database'), mode,
         config.get(mode + 'GroundTruth'))
     self.controller = QNetwork()
     cu.mem('QNetwork controller created')
     # Learner is attached later, after the exploration epochs.
     self.learner = None
     self.agent = BoxSearchAgent(self.controller, self.learner)
     self.task = BoxSearchTask(self.environment,
                               config.get(mode + 'GroundTruth'))
     self.experiment = Experiment(self.task, self.agent)
Пример #6
0
 def __init__(self, imageName, randomStart=False, groundTruth=None):
     """Initialize the search box over one image.

     Default: the box covers the whole image. With randomStart, a random
     centered box is drawn whose half-sides are at least a quarter of the
     image in each dimension.
     """
     self.imageName = imageName
     self.visibleImage = Image.open(
         config.get('imageDir') + '/' + self.imageName + '.jpg')
     if not randomStart:
         # Whole-image box: [0, 0, width-1, height-1].
         self.box = map(float, [
             0, 0, self.visibleImage.size[0] - 1,
             self.visibleImage.size[1] - 1
         ])
         self.boxW = self.box[2] + 1.0
         self.boxH = self.box[3] + 1.0
         self.aspectRatio = self.boxH / self.boxW
     else:
         # (a, b) is a random center; (c, d) are half-width/half-height.
         # Limits keep the box inside the image (Python 2: '/' is integer
         # division here since PIL sizes are ints).
         wlimit = self.visibleImage.size[0] / 4
         hlimit = self.visibleImage.size[1] / 4
         a = random.randint(wlimit, self.visibleImage.size[0] - wlimit)
         b = random.randint(hlimit, self.visibleImage.size[1] - hlimit)
         c = random.randint(wlimit, min(self.visibleImage.size[0] - a, a))
         d = random.randint(hlimit, min(self.visibleImage.size[1] - b, b))
         self.box = map(float, [a - c, b - d, a + c, b + d])
         self.boxW = 2.0 * c
         self.boxH = 2.0 * d
         self.aspectRatio = self.boxH / self.boxW
     self.splitsQueue = []
     self.actionChosen = 2  # NOTE(review): 2 looks like a default action id — confirm
     self.actionValue = 0
     self.groundTruth = groundTruth
     if self.groundTruth is not None:
         # Simulator task used to score boxes against ground truth.
         self.task = bst.BoxSearchTask()
         self.task.groundTruth = self.groundTruth
         self.task.loadGroundTruth(self.imageName)
 def __init__(self, imageName, randomStart=False, groundTruth=None):
   """Initialize the search box: full image by default, or a random
   centered box (half-sides >= a quarter of the image) with randomStart."""
   self.imageName = imageName
   self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
   if not randomStart:
     # Whole-image box: [0, 0, width-1, height-1].
     self.box = map(float, [0,0,self.visibleImage.size[0]-1,self.visibleImage.size[1]-1])
     self.boxW = self.box[2]+1.0
     self.boxH = self.box[3]+1.0
     self.aspectRatio = self.boxH/self.boxW
   else:
     # (a, b) is a random center; (c, d) are half-width/half-height.
     # '/' is Python 2 integer division on the int PIL sizes.
     wlimit = self.visibleImage.size[0]/4
     hlimit = self.visibleImage.size[1]/4
     a = random.randint(wlimit, self.visibleImage.size[0] - wlimit)
     b = random.randint(hlimit, self.visibleImage.size[1] - hlimit)
     c = random.randint(wlimit, min(self.visibleImage.size[0] - a, a) )
     d = random.randint(hlimit, min(self.visibleImage.size[1] - b, b) )
     self.box = map(float, [a-c, b-d, a+c, b+d] )
     self.boxW = 2.0*c
     self.boxH = 2.0*d
     self.aspectRatio = self.boxH/self.boxW
   self.splitsQueue = []
   self.actionChosen = 2  # NOTE(review): 2 looks like a default action id — confirm
   self.actionValue = 0
   self.groundTruth = groundTruth
   if self.groundTruth is not None:
     # Simulator task used to score boxes against ground truth.
     self.task = bst.BoxSearchTask()
     self.task.groundTruth = self.groundTruth
     self.task.loadGroundTruth(self.imageName)
Пример #8
0
 def loadNetwork(self):
     """Build the pretrained ImageNet ConvNet in GPU test mode and cache
     its mean image in channel-first float32 layout."""
     self.imgDim = config.geti('imageDim')
     self.cropSize = config.geti('cropSize')
     self.contextPad = config.geti('contextPad')
     #self.stateContextFactor = config.geti('stateContextFactor')
     modelFile = config.get('convnetDir') + config.get('convNetDef')
     networkFile = config.get('convnetDir') + config.get('trainedConvNet')
     self.net = wrapperv0.ImageNetClassifier(
         modelFile,
         networkFile,
         IMAGE_DIM=self.imgDim,
         CROPPED_DIM=self.cropSize,
         MEAN_IMAGE=config.get('meanImage'))
     self.net.caffenet.set_mode_gpu()
     self.net.caffenet.set_phase_test()
     # Mean image: HWC -> CHW, float32 (layout used to prepare inputs).
     self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(
         0, 1).astype('float32')
Пример #9
0
 def train(self):
     """Train the Q-network in three phases: pure exploration,
     epsilon-greedy annealing, and exploitation at the final epsilon.
     Snapshots are archived per epoch during exploitation."""
     networkFile = config.get('networkDir') + config.get(
         'snapshotPrefix') + '_iter_' + config.get(
             'trainingIterationsPerBatch') + '.caffemodel'
     interactions = config.geti('trainInteractions')
     minEpsilon = config.getf('minTrainingEpsilon')
     # Full image list per epoch (the '/ 1' divisor looks like a leftover
     # tuning knob — TODO confirm).
     epochSize = len(self.environment.imageList) / 1
     epsilon = 1.0
     self.controller.setEpsilonGreedy(epsilon,
                                      self.environment.sampleAction)
     epoch = 1
     exEpochs = config.geti('explorationEpochs')
     # Phase 1: explore with epsilon=1.0 (no learner attached yet).
     while epoch <= exEpochs:
         s = cu.tic()
         print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
         self.runEpoch(interactions, len(self.environment.imageList))
         self.task.flushStats()
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     self.learner = QLearning()
     self.agent.learner = self.learner
     egEpochs = config.geti('epsilonGreedyEpochs')
     # Phase 2: anneal epsilon linearly down to minEpsilon while learning.
     while epoch <= egEpochs + exEpochs:
         s = cu.tic()
         epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
         if epsilon < minEpsilon: epsilon = minEpsilon
         self.controller.setEpsilonGreedy(epsilon,
                                          self.environment.sampleAction)
         print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
     # Phase 3: keep learning at the final epsilon; archive each snapshot.
     while epoch <= maxEpochs:
         s = cu.tic()
         print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(
             epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         shutil.copy(networkFile, networkFile + '.' + str(epoch))
         epoch += 1
Пример #10
0
 def loadNetwork(self, definition='deploy.prototxt'):
     if os.path.isfile(self.networkFile):
         modelFile = config.get('networkDir') + definition
         self.net = caffe.Net(modelFile, self.networkFile)
         self.net.set_phase_test()
         self.net.set_mode_gpu()
         print 'QNetwork loaded'
     else:
         self.net = None
         print 'QNetwork not found'
 def doValidation(self, epoch):
   if epoch % config.geti('validationEpochs') != 0:
     return
   auxRL = BoxSearchRunner('test')
   auxRL.run()
   indexType = config.get('evaluationIndexType')
   category = config.get('category')
   if indexType == 'pascal':
     categories, catIndex = bse.get20Categories()
   elif indexType == 'relations':
     categories, catIndex = bse.getCategories()
   elif indexType == 'finetunedRelations':
     categories, catIndex = bse.getRelationCategories()
   catI = categories.index(category)
   scoredDetections = bse.loadScores(config.get('testMemory'), catI)
   groundTruthFile = config.get('testGroundTruth')
   ps,rs = bse.evaluateCategory(scoredDetections, 'scores', groundTruthFile)
   pl,rl = bse.evaluateCategory(scoredDetections, 'landmarks', groundTruthFile)
   line = lambda x,y,z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y,z)
   print line('Validation Scores:',ps,rs)
   print line('Validation Landmarks:',pl,rl)
 def train(self):
   """Train the Q-network in three phases: pure exploration,
   epsilon-greedy annealing, and exploitation at the final epsilon.
   Snapshots are archived per epoch during exploitation."""
   networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
   interactions = config.geti('trainInteractions')
   minEpsilon = config.getf('minTrainingEpsilon')
   # Full image list per epoch (the '/1' divisor looks like a leftover
   # tuning knob — TODO confirm).
   epochSize = len(self.environment.imageList)/1
   epsilon = 1.0
   self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
   epoch = 1
   exEpochs = config.geti('explorationEpochs')
   # Phase 1: explore with epsilon=1.0 (no learner attached yet).
   while epoch <= exEpochs:
     s = cu.tic()
     print 'Epoch',epoch,': Exploration (epsilon=1.0)'
     self.runEpoch(interactions, len(self.environment.imageList))
     self.task.flushStats()
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   self.learner = QLearning()
   self.agent.learner = self.learner
   egEpochs = config.geti('epsilonGreedyEpochs')
   # Phase 2: anneal epsilon linearly down to minEpsilon while learning.
   while epoch <= egEpochs + exEpochs:
     s = cu.tic()
     epsilon = epsilon - (1.0-minEpsilon)/float(egEpochs)
     if epsilon < minEpsilon: epsilon = minEpsilon
     self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
     print 'Epoch',epoch ,'(epsilon-greedy:{:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
   # Phase 3: keep learning at the final epsilon; archive each snapshot.
   while epoch <= maxEpochs:
     s = cu.tic()
     print 'Epoch',epoch,'(exploitation mode: epsilon={:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     shutil.copy(networkFile, networkFile + '.' + str(epoch))
     epoch += 1
Пример #13
0
    def getSensors(self):
        """Build the observation for the current search state.

        Returns a dict with the current image id, the state vector
        (ConvNet layer activations for the current box concatenated with
        the action history), and the negative-episode flag.
        """
        # Compute features of visible region (4096)
        activations = self.cnn.getActivations(self.state.box)
        # Action history (90)
        actions = np.ones((ACTION_HISTORY_SIZE)) * self.state.actionHistory

        # Concatenate all info in the state representation vector
        state = np.hstack((activations[config.get('convnetLayer')], actions))
        # First 21 'prob' outputs kept as per-class scores — presumably
        # Pascal's 20 classes plus background; TODO confirm.
        self.scores = activations['prob'][0:21].tolist()
        return {
            'image': self.imageList[self.idx],
            'state': state,
            'negEpisode': self.negativeEpisode
        }
Пример #14
0
 def coverRegion(self, box, otherImg=None):
   """Cover the given box region in the currently prepared image.

   With otherImg, the region is covered via that image's file
   (presumably using its pixels — confirm in the caffenet bindings);
   otherwise a cross of two perpendicular stripes is drawn over the box.
   Increments the per-image cover id and returns True.
   """
   if otherImg is None:
     # Cross cover: vertical + horizontal stripes centered in the box,
     # with thickness proportional to the box size (MARK_WIDTH).
     w = box[2] - box[0]
     h = box[3] - box[1]
     cx = box[0] + w*0.5
     cy = box[1] + h*0.5
     vertical = map(int, [cx - w*MARK_WIDTH, box[1], cx + w*MARK_WIDTH, box[3]])
     horizontal = map(int, [box[0], cy - h*MARK_WIDTH, box[2], cy + h*MARK_WIDTH])
     self.net.caffenet.CoverRegions([vertical, horizontal], '', self.id)
   else:
     self.net.caffenet.CoverRegions([map(int,box)], config.get('imageDir') + otherImg + '.jpg', self.id)
   self.id += 1
   return True
Пример #15
0
 def doValidation(self, epoch):
     if epoch % config.geti('validationEpochs') != 0:
         return
     auxRL = BoxSearchRunner('test')
     auxRL.run()
     indexType = config.get('evaluationIndexType')
     category = config.get('category')
     if indexType == 'pascal':
         categories, catIndex = bse.get20Categories()
     elif indexType == 'relations':
         categories, catIndex = bse.getCategories()
     elif indexType == 'finetunedRelations':
         categories, catIndex = bse.getRelationCategories()
     catI = categories.index(category)
     scoredDetections = bse.loadScores(config.get('testMemory'), catI)
     groundTruthFile = config.get('testGroundTruth')
     ps, rs = bse.evaluateCategory(scoredDetections, 'scores',
                                   groundTruthFile)
     pl, rl = bse.evaluateCategory(scoredDetections, 'landmarks',
                                   groundTruthFile)
     line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
     print line('Validation Scores:', ps, rs)
     print line('Validation Landmarks:', pl, rl)
Пример #16
0
 def writeSolverFile(self):
   """Write solver.prototxt for the Q-learner's Caffe training run.

   All hyperparameters come from the global configuration. Uses a
   context manager so the file handle is closed even if a write fails
   (the original leaked the handle on error).
   """
   with open(self.directory + '/solver.prototxt','w') as out:
     out.write('train_net: "' + self.directory + 'train.prototxt"\n')
     out.write('base_lr: ' + config.get('learningRate') + '\n')
     out.write('lr_policy: "step"\n')
     out.write('gamma: ' + config.get('gamma') + '\n')
     out.write('stepsize: ' + config.get('stepSize') + '\n')
     out.write('display: 1\n')
     out.write('max_iter: ' + config.get('trainingIterationsPerBatch') + '\n')
     out.write('momentum: ' + config.get('momentum') + '\n')
     out.write('weight_decay: ' + config.get('weightDecay') + '\n')
     out.write('snapshot: ' + config.get('trainingIterationsPerBatch') + '\n')
     out.write('snapshot_prefix: "' + self.directory + 'multilayer_qlearner"\n')
Пример #17
0
 def __init__(self, imageName, boxReset='Full', groundTruth=None):
   """Search state over one image; boxReset selects the initial box mode
   (handled by self.reset)."""
   self.imageName = imageName
   self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
   self.box = [0,0,0,0]
   self.resets = 1
   self.reset(boxReset)
   self.landmarkIndex = {}
   self.actionChosen = 2  # NOTE(review): 2 looks like a default action id — confirm
   self.actionValue = 0
   self.groundTruth = groundTruth
   if self.groundTruth is not None:
     # Simulator used to evaluate boxes against ground-truth annotations.
     self.taskSimulator = bst.BoxSearchTask()
     self.taskSimulator.groundTruth = self.groundTruth
     self.taskSimulator.loadGroundTruth(self.imageName)
   self.stepsWithoutLandmark = 0
   # Flattened action-history buffer, initialized to zeros.
   self.actionHistory = [0 for i in range(NUM_ACTIONS*config.geti('actionHistoryLength'))]
Пример #18
0
 def __init__(self, imageList, mode, groundTruthFile=None):
     """Box-search environment over a list of episode images.

     imageList: path to a text file with one image id per line.
     mode: 'train' or 'test'; training shuffles images and enables
       negative episodes with the configured probability.
     groundTruthFile: optional box-index file with positive annotations.

     Fix over original: both input files are opened with context
     managers instead of leaking the handles.
     """
     self.mode = mode
     self.cnn = cn.ConvNet()
     self.testRecord = None
     self.idx = -1
     with open(imageList) as f:
         self.imageList = [x.strip() for x in f]
     self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
     #self.imageList = self.rankImages()
     #self.imageList = self.imageList[0:10]
     # Images without ground-truth boxes are the negative-sample pool.
     with open(config.get('allImagesList')) as f:
         allImgs = set([x.strip() for x in f])
     self.negativeSamples = list(
         allImgs.difference(set(self.groundTruth.keys())))
     self.negativeEpisode = False
     if self.mode == 'train':
         self.negativeProbability = config.getf('negativeEpisodeProb')
         random.shuffle(self.imageList)
         #self.priorMemory = PriorMemory(config.get('allObjectsBoxes'), self.groundTruth, self.cnn)
     self.loadNextEpisode()
Пример #19
0
 def writeSolverFile(self):
     """Write solver.prototxt for the Q-learner's Caffe training run.

     All hyperparameters come from the global configuration. Uses a
     context manager so the file handle is closed even if a write fails
     (the original leaked the handle on error).
     """
     with open(self.directory + '/solver.prototxt', 'w') as out:
         out.write('train_net: "' + self.directory + 'train.prototxt"\n')
         out.write('base_lr: ' + config.get('learningRate') + '\n')
         out.write('lr_policy: "step"\n')
         out.write('gamma: ' + config.get('gamma') + '\n')
         out.write('stepsize: ' + config.get('stepSize') + '\n')
         out.write('display: 1\n')
         out.write('max_iter: ' + config.get('trainingIterationsPerBatch') +
                   '\n')
         out.write('momentum: ' + config.get('momentum') + '\n')
         out.write('weight_decay: ' + config.get('weightDecay') + '\n')
         out.write('snapshot: ' + config.get('trainingIterationsPerBatch') +
                   '\n')
         out.write('snapshot_prefix: "' + self.directory +
                   'multilayer_qlearner"\n')
Пример #20
0
 def loadNextEpisode(self):
     """Advance to the next episode image (or a negative sample).

     In test mode, flushes the previous episode's record to disk first.
     Prepares the ConvNet for the next image and builds a fresh search
     state. When the image list is exhausted: reshuffle and restart in
     train mode; stop in test mode.
     """
     self.episodeDone = False
     self.extraSteps = 5
     self.negativeEpisode = False
     # A negative-sample episode replaces the normal flow entirely.
     if self.selectNegativeSample(): return
     # Save actions performed during this episode
     if self.mode == 'test' and self.testRecord != None:
         with open(
                 config.get('testMemory') + self.imageList[self.idx] +
                 '.txt', 'w') as outfile:
             json.dump(self.testRecord, outfile)
     # Load a new episode
     self.idx += 1
     if self.idx < len(self.imageList):
         # Initialize state
         self.cnn.prepareImage(self.imageList[self.idx])
         # Training episodes start from a random box; test from full image.
         restartMode = {'train': 'Random', 'test': 'Full'}
         self.state = bs.BoxSearchState(self.imageList[self.idx],
                                        groundTruth=self.groundTruth,
                                        boxReset=restartMode[self.mode])
         print 'Environment::LoadNextEpisode => Image', self.idx, self.imageList[
             self.idx], '(' + str(
                 self.state.visibleImage.size[0]) + ',' + str(
                     self.state.visibleImage.size[1]) + ')'
     else:
         if self.mode == 'train':
             # Epoch finished: reshuffle and recurse to restart from 0.
             random.shuffle(self.imageList)
             self.idx = -1
             self.loadNextEpisode()
         else:
             print 'No more images available'
     # Restart record for new episode
     if self.mode == 'test':
         self.testRecord = {
             'boxes': [],
             'actions': [],
             'values': [],
             'rewards': [],
             'scores': []
         }
Пример #21
0
 def coverRegion(self, box, otherImg=None):
     """Cover the given box region in the currently prepared image.

     With otherImg, the region is covered via that image's file
     (presumably using its pixels — confirm in the caffenet bindings);
     otherwise a cross of two perpendicular stripes is drawn over the
     box. Increments the per-image cover id and returns True.
     """
     if otherImg is not None:
         boxes = [map(int, box)]
         self.net.caffenet.CoverRegions(
             boxes,
             config.get('imageDir') + otherImg + '.jpg', self.id)
     else:
         # Create two perpendicular boxes
         # (stripe thickness proportional to box size via MARK_WIDTH).
         w = box[2] - box[0]
         h = box[3] - box[1]
         b1 = map(int, [
             box[0] + w * 0.5 - w * MARK_WIDTH, box[1],
             box[0] + w * 0.5 + w * MARK_WIDTH, box[3]
         ])
         b2 = map(int, [
             box[0], box[1] + h * 0.5 - h * MARK_WIDTH, box[2],
             box[1] + h * 0.5 + h * MARK_WIDTH
         ])
         boxes = [b1, b2]
         self.net.caffenet.CoverRegions(boxes, '', self.id)
     self.id += 1
     return True
Пример #22
0
 def __init__(self, alpha=0.5):
   """Q-learning agent.

   alpha: learning rate. The discount factor and the Caffe network
   manager are taken from the global configuration.
   """
   ValueBasedLearner.__init__(self)
   self.gamma = config.getf('gammaDiscountReward')
   self.alpha = alpha
   self.netManager = CaffeMultiLayerPerceptronManagement(config.get('networkDir'))
Пример #23
0
 def __init__(self, alpha=0.5):
     """Q-learning agent.

     alpha: learning rate. The discount factor and the Caffe network
     manager are taken from the global configuration.
     """
     ValueBasedLearner.__init__(self)
     self.alpha = alpha
     self.gamma = config.getf('gammaDiscountReward')
     self.netManager = CaffeMultiLayerPerceptronManagement(
         config.get('networkDir'))
Пример #24
0
__author__ = "Juan C. Caicedo, [email protected]"

import os
import utils.utils as cu
import numpy as np

import caffe
from caffe import wrapperv0

import learn.rl.RLConfig as config

LAYER = config.get('convnetLayer')
MARK_WIDTH = config.getf('markWidth')

class ConvNet():
  """Wrapper around a Caffe ImageNet classifier used as feature extractor.

  NOTE(review): this snippet appears truncated — loadNetwork ends right
  after set_mode_gpu(), unlike fuller versions of the same method seen
  elsewhere; confirm against the original source.
  """

  def __init__(self):
    self.net = None   # Caffe classifier, built by loadNetwork
    self.image = ''   # path of the currently prepared image
    self.id = 0       # per-image cover-region counter
    self.loadNetwork()

  def loadNetwork(self):
    # Build the pretrained ImageNet ConvNet from configured paths.
    self.imgDim = config.geti('imageDim')
    self.cropSize = config.geti('cropSize')
    self.contextPad = config.geti('contextPad')
    #self.stateContextFactor = config.geti('stateContextFactor')
    modelFile = config.get('convnetDir') + config.get('convNetDef')
    networkFile = config.get('convnetDir') + config.get('trainedConvNet')
    self.net = wrapperv0.ImageNetClassifier(modelFile, networkFile, IMAGE_DIM=self.imgDim, CROPPED_DIM=self.cropSize, MEAN_IMAGE=config.get('meanImage'))
    self.net.caffenet.set_mode_gpu()
Пример #25
0
 def prepareImage(self, image):
     """Load a new image into the ConvNet, releasing any previous one."""
     if self.image:
         self.net.caffenet.ReleaseImageData()
     self.image = config.get('imageDir') + image + '.jpg'
     self.net.caffenet.InitializeImage(
         self.image, self.imgDim, self.imageMean, self.cropSize)
Пример #26
0
__author__ = "Juan C. Caicedo, [email protected]"

import os
import utils.utils as cu
import numpy as np

import caffe
from caffe import wrapperv0

import learn.rl.RLConfig as config

LAYER = config.get('convnetLayer')
MARK_WIDTH = config.getf('markWidth')


class ConvNet():
    """Wrapper around a Caffe ImageNet classifier used as feature extractor.

    NOTE(review): this snippet is truncated mid-statement — the
    ImageNetClassifier argument list in loadNetwork is cut off; confirm
    against the original source.
    """

    def __init__(self):
        self.net = None   # Caffe classifier, built by loadNetwork
        self.image = ''   # path of the currently prepared image
        self.id = 0       # per-image cover-region counter
        self.loadNetwork()

    def loadNetwork(self):
        # Build the pretrained ImageNet ConvNet from configured paths.
        self.imgDim = config.geti('imageDim')
        self.cropSize = config.geti('cropSize')
        self.contextPad = config.geti('contextPad')
        #self.stateContextFactor = config.geti('stateContextFactor')
        modelFile = config.get('convnetDir') + config.get('convNetDef')
        networkFile = config.get('convnetDir') + config.get('trainedConvNet')
        self.net = wrapperv0.ImageNetClassifier(
            modelFile,
Пример #27
0
 def prepareImage(self, image):
   """Load a new image into the ConvNet, releasing any previous one."""
   if self.image != '':
     self.net.caffenet.ReleaseImageData()
   self.image = config.get('imageDir') + image + '.jpg'
   self.net.caffenet.InitializeImage(self.image, self.imgDim, self.imageMean, self.cropSize)