Example #1
 def __init__(self, allObjectsFile, categoryObjects, convnet):
   self.allObjects  = cu.loadBoxIndexFile(allObjectsFile)
   self.categoryObjects = categoryObjects
   negativeSamples = reduce(lambda x,y:x+y, map(len,self.allObjects.values()))
   positiveSamples = reduce(lambda x,y:x+y, map(len,self.categoryObjects.values()))
   self.N = np.zeros( (negativeSamples, 4096), np.float32 )
   self.P = np.zeros( (positiveSamples, 4096), np.float32 )
   idx = 0
   # Populate negative examples
   print '# Processing',negativeSamples,'negative prior samples'
   for key in self.allObjects.keys():
     try:
       # image contains the target category: cover its objects so the
       # extracted features describe context without the object
       boxes = self.categoryObjects[key]
       cover = True
     except KeyError:
       boxes = self.allObjects[key]
       cover = False
     convnet.prepareImage(key)
     for box in boxes:
       if cover:
         convnet.coverRegion(box)
       activations = convnet.getActivations(box)
       self.N[idx,:] = activations[config.get('convnetLayer')]
       idx += 1
   # Populate positive examples
   print '# Processing',positiveSamples,'positive prior samples'
   idx = 0
   for key in self.categoryObjects.keys():
     convnet.prepareImage(key)
     for box in self.categoryObjects[key]:
       activations = convnet.getActivations(box)
       self.P[idx,:] = activations[config.get('convnetLayer')]
       idx += 1
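The two loops above fill N and P with 4096-dimensional convnet features for negative and positive priors. Note that images containing the target category contribute negatives only after coverRegion masks the object, so those features describe the surrounding context rather than the object itself.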
Example #2
class QNetwork(ActionValueInterface):

    networkFile = config.get('networkDir') + config.get(
        'snapshotPrefix') + '_iter_' + config.get(
            'trainingIterationsPerBatch') + '.caffemodel'

    def __init__(self):
        self.net = None
        print 'QNetwork::Init. Loading ', self.networkFile
        self.loadNetwork()
        self.sampler = defaultSampler

    def releaseNetwork(self):
        if self.net is not None:
            del self.net
            self.net = None

    def loadNetwork(self, definition='deploy.prototxt'):
        if os.path.isfile(self.networkFile):
            modelFile = config.get('networkDir') + definition
            self.net = caffe.Net(modelFile, self.networkFile)
            self.net.set_phase_test()
            self.net.set_mode_gpu()
            print 'QNetwork loaded'
        else:
            self.net = None
            print 'QNetwork not found'

    def getMaxAction(self, state):
        values = self.getActionValues(state)
        return np.argmax(values, 1)

    def getActionValues(self, state):
        if self.net is None or self.exploreOrExploit() == EXPLORE:
            return self.sampler()
        else:
            return self.getActivations(state)

    def getActivations(self, state):
        out = self.net.forward_all(
            **{
                self.net.inputs[0]:
                state.reshape((state.shape[0], state.shape[1], 1, 1))
            })
        return out['qvalues'].squeeze(axis=(2, 3))

    def setEpsilonGreedy(self, epsilon, sampler=None):
        if sampler is not None:
            self.sampler = sampler
        self.epsilon = epsilon

    def exploreOrExploit(self):
        if self.epsilon > 0:
            if random.random() < self.epsilon:
                return EXPLORE
        return EXPLOIT
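A minimal usage sketch for the class above (the epsilon value and state width are hypothetical; it assumes the .caffemodel snapshot referenced by networkFile exists and that states follow the 4096 convnet features plus action-history layout used by the agent):

qnet = QNetwork()
qnet.setEpsilonGreedy(0.1)               # explore 10% of the time
state = np.zeros((1, 4186), np.float32)  # assumed width: 4096 features + 90 history bits
action = qnet.getMaxAction(state)        # argmax of Q-values, or a random sample when exploring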
Example #3
 def __init__(self, workingDir):
   self.directory = workingDir
   self.writeSolverFile()
   self.solver = caffe.SGDSolver(self.directory + 'solver.prototxt')
   self.iter = 0
   self.itersPerEpisode = config.geti('trainingIterationsPerBatch')
   self.lr = config.getf('learningRate')
   self.stepSize = config.geti('stepSize')
   self.gamma = config.getf('gamma')
   print 'CAFFE SOLVER INITIALIZED'
Example #4
 def __init__(self, mode):
   self.mode = mode
   cu.mem('Reinforcement Learning Started')
   self.environment = BoxSearchEnvironment(config.get(mode+'Database'), mode, config.get(mode+'GroundTruth'))
   self.controller = QNetwork()
   cu.mem('QNetwork controller created')
   self.learner = None
   self.agent = BoxSearchAgent(self.controller, self.learner)
   self.task = BoxSearchTask(self.environment, config.get(mode+'GroundTruth'))
   self.experiment = Experiment(self.task, self.agent)
Example #5
 def __init__(self, workingDir):
     self.directory = workingDir
     self.writeSolverFile()
     self.solver = caffe.SGDSolver(self.directory + 'solver.prototxt')
     self.iter = 0
     self.itersPerEpisode = config.geti('trainingIterationsPerBatch')
     self.lr = config.getf('learningRate')
     self.stepSize = config.geti('stepSize')
     self.gamma = config.getf('gamma')
      print 'CAFFE SOLVER INITIALIZED'
Example #6
 def __init__(self, mode):
     self.mode = mode
     cu.mem('Reinforcement Learning Started')
     self.environment = BoxSearchEnvironment(
         config.get(mode + 'Database'), mode,
         config.get(mode + 'GroundTruth'))
     self.controller = QNetwork()
     cu.mem('QNetwork controller created')
     self.learner = None
     self.agent = BoxSearchAgent(self.controller, self.learner)
     self.task = BoxSearchTask(self.environment,
                               config.get(mode + 'GroundTruth'))
     self.experiment = Experiment(self.task, self.agent)
Example #7
 def __init__(self, imageName, randomStart=False, groundTruth=None):
   self.imageName = imageName
   self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
   if not randomStart:
     self.box = map(float, [0,0,self.visibleImage.size[0]-1,self.visibleImage.size[1]-1])
     self.boxW = self.box[2]+1.0
     self.boxH = self.box[3]+1.0
     self.aspectRatio = self.boxH/self.boxW
   else:
     wlimit = self.visibleImage.size[0]/4
     hlimit = self.visibleImage.size[1]/4
     a = random.randint(wlimit, self.visibleImage.size[0] - wlimit)
     b = random.randint(hlimit, self.visibleImage.size[1] - hlimit)
     c = random.randint(wlimit, min(self.visibleImage.size[0] - a, a) )
     d = random.randint(hlimit, min(self.visibleImage.size[1] - b, b) )
     self.box = map(float, [a-c, b-d, a+c, b+d] )
     self.boxW = 2.0*c
     self.boxH = 2.0*d
     self.aspectRatio = self.boxH/self.boxW
   self.splitsQueue = []
   self.actionChosen = 2
   self.actionValue = 0
   self.groundTruth = groundTruth
   if self.groundTruth is not None:
     self.task = bst.BoxSearchTask()
     self.task.groundTruth = self.groundTruth
     self.task.loadGroundTruth(self.imageName)
Example #8
 def __init__(self, imageName, boxReset='Full', groundTruth=None):
   self.imageName = imageName
   self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
   self.box = [0,0,0,0]
   self.resets = 1
   self.reset(boxReset)
   self.landmarkIndex = {}
   self.actionChosen = 2
   self.actionValue = 0
   self.groundTruth = groundTruth
   if self.groundTruth is not None:
     self.taskSimulator = bst.BoxSearchTask()
     self.taskSimulator.groundTruth = self.groundTruth
     self.taskSimulator.loadGroundTruth(self.imageName)
   self.stepsWithoutLandmark = 0
   self.actionHistory = [0 for i in range(NUM_ACTIONS*config.geti('actionHistoryLength'))]
Example #9
 def __init__(self, imageName, randomStart=False, groundTruth=None):
     self.imageName = imageName
     self.visibleImage = Image.open(
         config.get('imageDir') + '/' + self.imageName + '.jpg')
     if not randomStart:
         self.box = map(float, [
             0, 0, self.visibleImage.size[0] - 1,
             self.visibleImage.size[1] - 1
         ])
         self.boxW = self.box[2] + 1.0
         self.boxH = self.box[3] + 1.0
         self.aspectRatio = self.boxH / self.boxW
     else:
         wlimit = self.visibleImage.size[0] / 4
         hlimit = self.visibleImage.size[1] / 4
         a = random.randint(wlimit, self.visibleImage.size[0] - wlimit)
         b = random.randint(hlimit, self.visibleImage.size[1] - hlimit)
         c = random.randint(wlimit, min(self.visibleImage.size[0] - a, a))
         d = random.randint(hlimit, min(self.visibleImage.size[1] - b, b))
         self.box = map(float, [a - c, b - d, a + c, b + d])
         self.boxW = 2.0 * c
         self.boxH = 2.0 * d
         self.aspectRatio = self.boxH / self.boxW
     self.splitsQueue = []
     self.actionChosen = 2
     self.actionValue = 0
     self.groundTruth = groundTruth
     if self.groundTruth is not None:
         self.task = bst.BoxSearchTask()
         self.task.groundTruth = self.groundTruth
         self.task.loadGroundTruth(self.imageName)
Example #10
 def doNetworkTraining(self, samples, labels):
     self.solver.net.set_input_arrays(samples, labels)
     self.solver.solve()
     self.iter += config.geti('trainingIterationsPerBatch')
     if self.iter % self.stepSize == 0:
         newLR = self.lr * (self.gamma**int(self.iter / self.stepSize))
         print 'Changing LR to:', newLR
         self.solver.change_lr(newLR)
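A note on the schedule above: the learning rate is decayed by a factor of gamma once every stepSize accumulated iterations, mirroring the 'step' lr_policy that writeSolverFile puts into solver.prototxt; change_lr is a project-specific solver extension rather than stock pycaffe.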
Example #11
 def doNetworkTraining(self, samples, labels):
   self.solver.net.set_input_arrays(samples, labels)
   self.solver.solve()
   self.iter += config.geti('trainingIterationsPerBatch')
   if self.iter % self.stepSize == 0:
     newLR = self.lr * ( self.gamma** int(self.iter/self.stepSize) )
     print 'Changing LR to:',newLR
     self.solver.change_lr(newLR)
Example #12
 def __init__(self, imageList, mode, groundTruthFile=None):
     self.mode = mode
     self.cnn = cn.ConvNet()
     self.testRecord = None
     self.idx = -1
     self.imageList = [x.strip() for x in open(imageList)]
     self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
     #self.imageList = self.rankImages()
     #self.imageList = self.imageList[0:10]
     allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
     self.negativeSamples = list(
         allImgs.difference(set(self.groundTruth.keys())))
     self.negativeEpisode = False
     if self.mode == 'train':
         self.negativeProbability = config.getf('negativeEpisodeProb')
         random.shuffle(self.imageList)
         #self.priorMemory = PriorMemory(config.get('allObjectsBoxes'), self.groundTruth, self.cnn)
     self.loadNextEpisode()
Example #13
 def run(self):
   if self.mode == 'train':
     self.agent.persistMemory = True
     self.agent.startReplayMemory(len(self.environment.imageList), config.geti('trainInteractions'))
     #self.agent.assignPriorMemory(self.environment.priorMemory)
     self.train()
   elif self.mode == 'test':
     self.agent.persistMemory = False
     self.test()
Example #14
 def run(self):
     if self.mode == 'train':
         self.agent.persistMemory = True
         self.agent.startReplayMemory(len(self.environment.imageList),
                                      config.geti('trainInteractions'))
         #self.agent.assignPriorMemory(self.environment.priorMemory)
         self.train()
     elif self.mode == 'test':
         self.agent.persistMemory = False
         self.test()
Example #15
 def loadNetwork(self, definition='deploy.prototxt'):
     if os.path.isfile(self.networkFile):
         modelFile = config.get('networkDir') + definition
         self.net = caffe.Net(modelFile, self.networkFile)
         self.net.set_phase_test()
         self.net.set_mode_gpu()
         print 'QNetwork loaded'
     else:
         self.net = None
         print 'QNetwork not found'
Example #16
 def doValidation(self, epoch):
   if epoch % config.geti('validationEpochs') != 0:
     return
   auxRL = BoxSearchRunner('test')
   auxRL.run()
   indexType = config.get('evaluationIndexType')
   category = config.get('category')
   if indexType == 'pascal':
     categories, catIndex = bse.get20Categories()
   elif indexType == 'relations':
     categories, catIndex = bse.getCategories()
   elif indexType == 'finetunedRelations':
     categories, catIndex = bse.getRelationCategories()
   catI = categories.index(category)
   scoredDetections = bse.loadScores(config.get('testMemory'), catI)
   groundTruthFile = config.get('testGroundTruth')
   ps,rs = bse.evaluateCategory(scoredDetections, 'scores', groundTruthFile)
   pl,rl = bse.evaluateCategory(scoredDetections, 'landmarks', groundTruthFile)
   line = lambda x,y,z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y,z)
   print line('Validation Scores:',ps,rs)
   print line('Validation Landmarks:',pl,rl)
Example #17
 def loadNetwork(self):
   self.imgDim = config.geti('imageDim')
   self.cropSize = config.geti('cropSize')
   self.contextPad = config.geti('contextPad')
   #self.stateContextFactor = config.geti('stateContextFactor')
   modelFile = config.get('convnetDir') + config.get('convNetDef')
   networkFile = config.get('convnetDir') + config.get('trainedConvNet')
   self.net = wrapperv0.ImageNetClassifier(modelFile, networkFile, IMAGE_DIM=self.imgDim, CROPPED_DIM=self.cropSize, MEAN_IMAGE=config.get('meanImage'))
   self.net.caffenet.set_mode_gpu()
   self.net.caffenet.set_phase_test()
   self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
Example #18
 def doValidation(self, epoch):
     if epoch % config.geti('validationEpochs') != 0:
         return
     auxRL = BoxSearchRunner('test')
     auxRL.run()
     indexType = config.get('evaluationIndexType')
     category = config.get('category')
     if indexType == 'pascal':
         categories, catIndex = bse.get20Categories()
     elif indexType == 'relations':
         categories, catIndex = bse.getCategories()
     elif indexType == 'finetunedRelations':
         categories, catIndex = bse.getRelationCategories()
     catI = categories.index(category)
     scoredDetections = bse.loadScores(config.get('testMemory'), catI)
     groundTruthFile = config.get('testGroundTruth')
     ps, rs = bse.evaluateCategory(scoredDetections, 'scores',
                                   groundTruthFile)
     pl, rl = bse.evaluateCategory(scoredDetections, 'landmarks',
                                   groundTruthFile)
     line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
     print line('Validation Scores:', ps, rs)
     print line('Validation Landmarks:', pl, rl)
Example #19
 def coverRegion(self, box, otherImg=None):
   if otherImg is not None:
     boxes = [map(int,box)]
     self.net.caffenet.CoverRegions(boxes, config.get('imageDir') + otherImg + '.jpg', self.id)
   else:
     # Create two perpendicular boxes
     w = box[2]-box[0]
     h = box[3]-box[1]
     b1 = map(int, [box[0] + w*0.5 - w*MARK_WIDTH, box[1], box[0] + w*0.5 + w*MARK_WIDTH, box[3]])
     b2 = map(int, [box[0], box[1] + h*0.5 - h*MARK_WIDTH, box[2], box[1] + h*0.5 + h*MARK_WIDTH])
     boxes = [b1, b2]
     self.net.caffenet.CoverRegions(boxes, '', self.id)
   self.id += 1
   return True
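In the else branch above, b1 is a vertical stripe and b2 a horizontal stripe, both centered on the box, so together they mask a cross of relative stripe width 2*MARK_WIDTH, presumably marking regions the agent has already inspected.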
Example #20
    def getSensors(self):
        # Compute features of visible region (4096)
        activations = self.cnn.getActivations(self.state.box)
        # Action history (90)
        actions = np.ones((ACTION_HISTORY_SIZE)) * self.state.actionHistory

        # Concatenate all info in the state representation vector
        state = np.hstack((activations[config.get('convnetLayer')], actions))
        self.scores = activations['prob'][0:21].tolist()
        return {
            'image': self.imageList[self.idx],
            'state': state,
            'negEpisode': self.negativeEpisode
        }
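The state vector assembled above therefore has 4096 + ACTION_HISTORY_SIZE dimensions, and self.scores keeps the first 21 softmax outputs, which presumably correspond to the 20 PASCAL categories plus background.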
Example #21
 def loadNetwork(self):
     self.imgDim = config.geti('imageDim')
     self.cropSize = config.geti('cropSize')
     self.contextPad = config.geti('contextPad')
     #self.stateContextFactor = config.geti('stateContextFactor')
     modelFile = config.get('convnetDir') + config.get('convNetDef')
     networkFile = config.get('convnetDir') + config.get('trainedConvNet')
     self.net = wrapperv0.ImageNetClassifier(
         modelFile,
         networkFile,
         IMAGE_DIM=self.imgDim,
         CROPPED_DIM=self.cropSize,
         MEAN_IMAGE=config.get('meanImage'))
     self.net.caffenet.set_mode_gpu()
     self.net.caffenet.set_phase_test()
     self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(
         0, 1).astype('float32')
Example #22
 def train(self):
     networkFile = config.get('networkDir') + config.get(
         'snapshotPrefix') + '_iter_' + config.get(
             'trainingIterationsPerBatch') + '.caffemodel'
     interactions = config.geti('trainInteractions')
     minEpsilon = config.getf('minTrainingEpsilon')
     epochSize = len(self.environment.imageList) / 1
     epsilon = 1.0
     self.controller.setEpsilonGreedy(epsilon,
                                      self.environment.sampleAction)
     epoch = 1
     exEpochs = config.geti('explorationEpochs')
     while epoch <= exEpochs:
         s = cu.tic()
         print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
         self.runEpoch(interactions, len(self.environment.imageList))
         self.task.flushStats()
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     self.learner = QLearning()
     self.agent.learner = self.learner
     egEpochs = config.geti('epsilonGreedyEpochs')
     while epoch <= egEpochs + exEpochs:
         s = cu.tic()
         epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
         if epsilon < minEpsilon: epsilon = minEpsilon
         self.controller.setEpsilonGreedy(epsilon,
                                          self.environment.sampleAction)
         print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
     while epoch <= maxEpochs:
         s = cu.tic()
         print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(
             epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         shutil.copy(networkFile, networkFile + '.' + str(epoch))
         epoch += 1
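The three loops above implement a three-phase schedule: explorationEpochs of pure random exploration with epsilon fixed at 1.0, then epsilonGreedyEpochs during which epsilon anneals linearly down to minTrainingEpsilon, and finally exploitLearningEpochs at the final epsilon, with the Caffe snapshot copied aside after every exploitation epoch.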
Example #23
 def writeSolverFile(self):
   out = open(self.directory + '/solver.prototxt','w')
   out.write('train_net: "' + self.directory + 'train.prototxt"\n')
   out.write('base_lr: ' + config.get('learningRate') + '\n')
   out.write('lr_policy: "step"\n')
   out.write('gamma: ' + config.get('gamma') + '\n')
   out.write('stepsize: ' + config.get('stepSize') + '\n')
   out.write('display: 1\n')
   out.write('max_iter: ' + config.get('trainingIterationsPerBatch') + '\n')
   out.write('momentum: ' + config.get('momentum') + '\n')
   out.write('weight_decay: ' + config.get('weightDecay') + '\n')
   out.write('snapshot: ' + config.get('trainingIterationsPerBatch') + '\n')
   out.write('snapshot_prefix: "' + self.directory + 'multilayer_qlearner"\n')
   out.close()
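For reference, with hypothetical config values (learningRate=0.001, gamma=0.1, stepSize=10000, trainingIterationsPerBatch=100, momentum=0.9, weightDecay=0.0005), the method above writes a solver.prototxt like:

train_net: "<workingDir>train.prototxt"
base_lr: 0.001
lr_policy: "step"
gamma: 0.1
stepsize: 10000
display: 1
max_iter: 100
momentum: 0.9
weight_decay: 0.0005
snapshot: 100
snapshot_prefix: "<workingDir>multilayer_qlearner"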
Example #24
 def writeSolverFile(self):
     out = open(self.directory + '/solver.prototxt', 'w')
     out.write('train_net: "' + self.directory + 'train.prototxt"\n')
     out.write('base_lr: ' + config.get('learningRate') + '\n')
     out.write('lr_policy: "step"\n')
     out.write('gamma: ' + config.get('gamma') + '\n')
     out.write('stepsize: ' + config.get('stepSize') + '\n')
     out.write('display: 1\n')
     out.write('max_iter: ' + config.get('trainingIterationsPerBatch') +
               '\n')
     out.write('momentum: ' + config.get('momentum') + '\n')
     out.write('weight_decay: ' + config.get('weightDecay') + '\n')
     out.write('snapshot: ' + config.get('trainingIterationsPerBatch') +
               '\n')
     out.write('snapshot_prefix: "' + self.directory +
               'multilayer_qlearner"\n')
     out.close()
Example #25
 def loadNextEpisode(self):
     self.episodeDone = False
     self.extraSteps = 5
     self.negativeEpisode = False
     if self.selectNegativeSample(): return
     # Save actions performed during this episode
      if self.mode == 'test' and self.testRecord is not None:
         with open(
                 config.get('testMemory') + self.imageList[self.idx] +
                 '.txt', 'w') as outfile:
             json.dump(self.testRecord, outfile)
     # Load a new episode
     self.idx += 1
     if self.idx < len(self.imageList):
         # Initialize state
         self.cnn.prepareImage(self.imageList[self.idx])
         restartMode = {'train': 'Random', 'test': 'Full'}
         self.state = bs.BoxSearchState(self.imageList[self.idx],
                                        groundTruth=self.groundTruth,
                                        boxReset=restartMode[self.mode])
         print 'Environment::LoadNextEpisode => Image', self.idx, self.imageList[
             self.idx], '(' + str(
                 self.state.visibleImage.size[0]) + ',' + str(
                     self.state.visibleImage.size[1]) + ')'
     else:
         if self.mode == 'train':
             random.shuffle(self.imageList)
             self.idx = -1
             self.loadNextEpisode()
         else:
             print 'No more images available'
     # Restart record for new episode
     if self.mode == 'test':
         self.testRecord = {
             'boxes': [],
             'actions': [],
             'values': [],
             'rewards': [],
             'scores': []
         }
Example #26
 def coverRegion(self, box, otherImg=None):
     if otherImg is not None:
         boxes = [map(int, box)]
         self.net.caffenet.CoverRegions(
             boxes,
             config.get('imageDir') + otherImg + '.jpg', self.id)
     else:
         # Create two perpendicular boxes
         w = box[2] - box[0]
         h = box[3] - box[1]
         b1 = map(int, [
             box[0] + w * 0.5 - w * MARK_WIDTH, box[1],
             box[0] + w * 0.5 + w * MARK_WIDTH, box[3]
         ])
         b2 = map(int, [
             box[0], box[1] + h * 0.5 - h * MARK_WIDTH, box[2],
             box[1] + h * 0.5 + h * MARK_WIDTH
         ])
         boxes = [b1, b2]
         self.net.caffenet.CoverRegions(boxes, '', self.id)
     self.id += 1
     return True
Example #27
 def train(self):
   networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
   interactions = config.geti('trainInteractions')
   minEpsilon = config.getf('minTrainingEpsilon')
   epochSize = len(self.environment.imageList)/1
   epsilon = 1.0
   self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
   epoch = 1
   exEpochs = config.geti('explorationEpochs')
   while epoch <= exEpochs:
     s = cu.tic()
     print 'Epoch',epoch,': Exploration (epsilon=1.0)'
     self.runEpoch(interactions, len(self.environment.imageList))
     self.task.flushStats()
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   self.learner = QLearning()
   self.agent.learner = self.learner
   egEpochs = config.geti('epsilonGreedyEpochs')
   while epoch <= egEpochs + exEpochs:
     s = cu.tic()
     epsilon = epsilon - (1.0-minEpsilon)/float(egEpochs)
     if epsilon < minEpsilon: epsilon = minEpsilon
     self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
     print 'Epoch',epoch ,'(epsilon-greedy:{:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
   while epoch <= maxEpochs:
     s = cu.tic()
     print 'Epoch',epoch,'(exploitation mode: epsilon={:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     shutil.copy(networkFile, networkFile + '.' + str(epoch))
     epoch += 1
Example #28
import json
import random
import numpy as np
import utils.utils as cu
import utils.libDetection as det
import learn.rl.RLConfig as config
import BoxSearchState as bs
import ConvNet as cn  # assumed module name for the ConvNet wrapper (cn.ConvNet below)
from pybrain.rl.environments.environment import Environment  # import path assumed
from pybrain.utilities import Named  # import path assumed


def sigmoid(x, a=1.0, b=0.0):
    return 1.0 / (1.0 + np.exp(-a * x + b))


def tanh(x, a=5, b=0.5, c=2.0):
    return c * np.tanh(a * x + b)


TEST_TIME_OUT = config.geti('testTimeOut')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS * config.geti('actionHistoryLength')


class BoxSearchEnvironment(Environment, Named):
    def __init__(self, imageList, mode, groundTruthFile=None):
        self.mode = mode
        self.cnn = cn.ConvNet()
        self.testRecord = None
        self.idx = -1
        self.imageList = [x.strip() for x in open(imageList)]
        self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
        #self.imageList = self.rankImages()
        #self.imageList = self.imageList[0:10]
        allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
        self.negativeSamples = list(
            allImgs.difference(set(self.groundTruth.keys())))
Example #29
 def __init__(self, alpha=0.5):
   ValueBasedLearner.__init__(self)
   self.alpha = alpha
   self.gamma = config.getf('gammaDiscountReward')
   self.netManager = CaffeMultiLayerPerceptronManagement(config.get('networkDir'))
Example #30
 def prepareImage(self, image):
   if self.image != '':
     self.net.caffenet.ReleaseImageData()
   self.image = config.get('imageDir') + image + '.jpg'
   self.net.caffenet.InitializeImage(self.image, self.imgDim, self.imageMean, self.cropSize)
Example #31
__author__ = "Juan C. Caicedo, [email protected]"

import learn.rl.RLConfig as config

import numpy as np
import scipy.io
import utils.MemoryUsage

import BoxSearchState as bss
import PriorMemory as prm
import random

STATE_FEATURES = config.geti('stateFeatures')/config.geti('temporalWindow')
NUM_ACTIONS = config.geti('outputActions')
TEMPORAL_WINDOW = config.geti('temporalWindow')
HISTORY_FACTOR = config.geti('historyFactor')
NEGATIVE_PROBABILITY = config.getf('negativeEpisodeProb')

class BoxSearchAgent():

  image = None
  observation = None
  action = None
  reward = None
  timer = 0
  
  def __init__(self, qnet, learner=None):
    self.controller = qnet
    self.learner = learner
    self.avgReward = 0
    self.replayMemory = None
Example #32
__author__ = "Juan C. Caicedo, [email protected]"

import learn.rl.RLConfig as config

import numpy as np
import scipy.io
import utils.MemoryUsage

import BoxSearchState as bss
import PriorMemory as prm
import random

STATE_FEATURES = config.geti('stateFeatures') / config.geti('temporalWindow')
NUM_ACTIONS = config.geti('outputActions')
TEMPORAL_WINDOW = config.geti('temporalWindow')
HISTORY_FACTOR = config.geti('historyFactor')
NEGATIVE_PROBABILITY = config.getf('negativeEpisodeProb')


class BoxSearchAgent():

    image = None
    observation = None
    action = None
    reward = None
    timer = 0

    def __init__(self, qnet, learner=None):
        self.controller = qnet
        self.learner = learner
        self.avgReward = 0
Example #33
        ps, rs = bse.evaluateCategory(scoredDetections, 'scores',
                                      groundTruthFile)
        pl, rl = bse.evaluateCategory(scoredDetections, 'landmarks',
                                      groundTruthFile)
        line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
        print line('Validation Scores:', ps, rs)
        print line('Validation Landmarks:', pl, rl)


#def main():
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print 'Use: ReinforcementLearningRunner.py configFile'
        sys.exit()

    ## Load Global Configuration
    config.readConfiguration(sys.argv[1])

    from QNetwork import QNetwork
    from QLearning import QLearning
    from BoxSearchEnvironment import BoxSearchEnvironment
    from BoxSearchTask import BoxSearchTask
    from BoxSearchAgent import BoxSearchAgent
    import BoxSearchEvaluation as bse

    print 'Hello'

    if len(sys.argv) == 2:
        ## Run Training and Testing
        rl = BoxSearchRunner('train')
        rl.run()
        rl = BoxSearchRunner('test')
Example #34
__author__ = "Juan C. Caicedo, [email protected]"

from pybrain.rl.environments import Task
import BoxSearchState as bss

import utils.utils as cu
import utils.libDetection as det
import numpy as np

import learn.rl.RLConfig as config

MIN_ACCEPTABLE_IOU = config.getf('minAcceptableIoU')
DETECTION_REWARD = config.getf('detectionReward')

def center(box):
  return [ (box[2] + box[0])/2.0 , (box[3] + box[1])/2.0 ]

def euclideanDist(c1, c2):
  return (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2

class BoxSearchTask(Task):

  def __init__(self, environment=None, groundTruthFile=None):
    Task.__init__(self, environment)
    if groundTruthFile is not None:
      self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
    self.image = ''
    self.epochRecall = []
    self.epochMaxIoU = []
    self.epochLandmarks = []
Example #35
def defaultSampler():
    return np.random.random([1, config.geti('outputActions')])
Example #36
    groundTruthFile = config.get('testGroundTruth')
    ps,rs = bse.evaluateCategory(scoredDetections, 'scores', groundTruthFile)
    pl,rl = bse.evaluateCategory(scoredDetections, 'landmarks', groundTruthFile)
    line = lambda x,y,z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y,z)
    print line('Validation Scores:',ps,rs)
    print line('Validation Landmarks:',pl,rl)


#def main():
if __name__ == "__main__":
  if len(sys.argv) < 2:
    print 'Use: ReinforcementLearningRunner.py configFile'
    sys.exit()

  ## Load Global Configuration
  config.readConfiguration(sys.argv[1])

  from QNetwork import QNetwork
  from QLearning import QLearning
  from BoxSearchEnvironment import BoxSearchEnvironment
  from BoxSearchTask import BoxSearchTask
  from BoxSearchAgent import BoxSearchAgent
  import BoxSearchEvaluation as bse

  print 'Hello'

  if len(sys.argv) == 2:
    ## Run Training and Testing
    rl = BoxSearchRunner('train')
    rl.run()
    rl = BoxSearchRunner('test')
Example #37
X_COORD_UP         = 0
Y_COORD_UP         = 1
SCALE_UP           = 2
ASPECT_RATIO_UP    = 3
X_COORD_DOWN       = 4
Y_COORD_DOWN       = 5
SCALE_DOWN         = 6
ASPECT_RATIO_DOWN  = 7
PLACE_LANDMARK     = 8
SKIP_REGION        = 9

# BOX LIMITS
MIN_ASPECT_RATIO = 0.15
MAX_ASPECT_RATIO = 6.00
MIN_BOX_SIDE     = 10
STEP_FACTOR      = config.getf('boxResizeStep')
DELTA_SIZE       = config.getf('boxResizeStep')

# OTHER DEFINITIONS
NUM_ACTIONS = config.geti('outputActions')
RESET_BOX_FACTOR = 2
QUADRANT_SIZE = 0.7

def fingerprint(b):
  return '_'.join( map(str, map(int, b)) )
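A quick check of fingerprint above: coordinates are truncated to ints and joined, so fingerprint([10.7, 20.2, 100.9, 200.0]) returns '10_20_100_200', a stable dictionary key for a box.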

class BoxSearchState():

  def __init__(self, imageName, boxReset='Full', groundTruth=None):
    self.imageName = imageName
    self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
Example #38
 def test(self):
     interactions = config.geti('testInteractions')
     self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
     self.runEpoch(interactions, len(self.environment.imageList))
Example #39
__author__ = "Juan C. Caicedo, [email protected]"

import os
import utils.utils as cu
import numpy as np

import caffe
from caffe import wrapperv0

import learn.rl.RLConfig as config

LAYER = config.get('convnetLayer')
MARK_WIDTH = config.getf('markWidth')

class ConvNet():

  def __init__(self):
    self.net = None
    self.image = ''
    self.id = 0
    self.loadNetwork()

  def loadNetwork(self):
    self.imgDim = config.geti('imageDim')
    self.cropSize = config.geti('cropSize')
    self.contextPad = config.geti('contextPad')
    #self.stateContextFactor = config.geti('stateContextFactor')
    modelFile = config.get('convnetDir') + config.get('convNetDef')
    networkFile = config.get('convnetDir') + config.get('trainedConvNet')
    self.net = wrapperv0.ImageNetClassifier(modelFile, networkFile, IMAGE_DIM=self.imgDim, CROPPED_DIM=self.cropSize, MEAN_IMAGE=config.get('meanImage'))
    self.net.caffenet.set_mode_gpu()
Example #40
__author__ = "Juan C. Caicedo, [email protected]"

from pybrain.rl.environments import Task
import BoxSearchState as bss

import utils.utils as cu
import utils.libDetection as det
import numpy as np

import learn.rl.RLConfig as config

MIN_ACCEPTABLE_IOU = config.getf('minAcceptableIoU')
DETECTION_REWARD = config.getf('detectionReward')


def center(box):
    return [(box[2] + box[0]) / 2.0, (box[3] + box[1]) / 2.0]


def euclideanDist(c1, c2):
    return (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2


class BoxSearchTask(Task):
    def __init__(self, environment=None, groundTruthFile=None):
        Task.__init__(self, environment)
        if groundTruthFile is not None:
            self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
        self.image = ''
        self.epochRecall = []
        self.epochMaxIoU = []
Example #41
 def test(self):
   interactions = config.geti('testInteractions')
   self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
   self.runEpoch(interactions, len(self.environment.imageList))
Example #42
__author__ = "Juan C. Caicedo, [email protected]"

import os
import random
import numpy as np
import scipy.io
import caffe

import learn.rl.RLConfig as config
import BoxSearchState as bs
from pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner

DETECTION_REWARD = config.getf('detectionReward')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS*config.geti('actionHistoryLength')
ACTION_HISTORY_LENTH = config.geti('actionHistoryLength')
NETWORK_INPUTS = config.geti('stateFeatures')/config.geti('temporalWindow')
REPLAY_MEMORY_SIZE = config.geti('trainingIterationsPerBatch')*config.geti('trainingBatchSize')

def generateRandomActionHistory():
  actions = np.zeros((ACTION_HISTORY_SIZE))
  history = [i*bs.NUM_ACTIONS + np.random.randint(0,bs.PLACE_LANDMARK) for i in range(ACTION_HISTORY_LENTH)]
  actions[history] = 1
  return actions

class QLearning(ValueBasedLearner):

  offPolicy = True
  batchMode = True
  dataset = []

  trainingSamples = 0
Example #43
 def prepareImage(self, image):
     if self.image != '':
         self.net.caffenet.ReleaseImageData()
     self.image = config.get('imageDir') + image + '.jpg'
     self.net.caffenet.InitializeImage(self.image, self.imgDim,
                                       self.imageMean, self.cropSize)
Example #44
__author__ = "Juan C. Caicedo, [email protected]"

import os
import random
import numpy as np
import scipy.io
import caffe

import learn.rl.RLConfig as config
import BoxSearchState as bs
from pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner

DETECTION_REWARD = config.getf('detectionReward')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS * config.geti('actionHistoryLength')
ACTION_HISTORY_LENTH = config.geti('actionHistoryLength')
NETWORK_INPUTS = config.geti('stateFeatures') / config.geti('temporalWindow')
REPLAY_MEMORY_SIZE = config.geti('trainingIterationsPerBatch') * config.geti(
    'trainingBatchSize')


def generateRandomActionHistory():
    actions = np.zeros((ACTION_HISTORY_SIZE))
    history = [
        i * bs.NUM_ACTIONS + np.random.randint(0, bs.PLACE_LANDMARK)
        for i in range(ACTION_HISTORY_LENTH)
    ]
    actions[history] = 1
    return actions
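The helper above sets exactly one bit in each of the ACTION_HISTORY_LENTH consecutive blocks of NUM_ACTIONS positions; np.random.randint(0, bs.PLACE_LANDMARK) restricts each choice to the eight box-transformation actions, so a synthetic history never contains PLACE_LANDMARK or SKIP_REGION.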


class QLearning(ValueBasedLearner):
Example #45
__author__ = "Juan C. Caicedo, [email protected]"

import os
import utils.utils as cu
import numpy as np

import caffe
from caffe import wrapperv0

import learn.rl.RLConfig as config

LAYER = config.get('convnetLayer')
MARK_WIDTH = config.getf('markWidth')


class ConvNet():
    def __init__(self):
        self.net = None
        self.image = ''
        self.id = 0
        self.loadNetwork()

    def loadNetwork(self):
        self.imgDim = config.geti('imageDim')
        self.cropSize = config.geti('cropSize')
        self.contextPad = config.geti('contextPad')
        #self.stateContextFactor = config.geti('stateContextFactor')
        modelFile = config.get('convnetDir') + config.get('convNetDef')
        networkFile = config.get('convnetDir') + config.get('trainedConvNet')
        self.net = wrapperv0.ImageNetClassifier(
            modelFile,
            networkFile,
            IMAGE_DIM=self.imgDim,
            CROPPED_DIM=self.cropSize,
            MEAN_IMAGE=config.get('meanImage'))
Example #46
 def __init__(self, alpha=0.5):
     ValueBasedLearner.__init__(self)
     self.alpha = alpha
     self.gamma = config.getf('gammaDiscountReward')
     self.netManager = CaffeMultiLayerPerceptronManagement(
         config.get('networkDir'))