Example #1
 def __init__(self, workingDir):
     self.directory = workingDir
     self.writeSolverFile()
     self.solver = caffe.SGDSolver(self.directory + 'solver.prototxt')
     self.iter = 0
     self.itersPerEpisode = config.geti('trainingIterationsPerBatch')
     self.lr = config.getf('learningRate')
     self.stepSize = config.geti('stepSize')
     self.gamma = config.getf('gamma')
     print 'CAFFE SOLVER INITALIZED'
Example #2
 def __init__(self, workingDir):
   self.directory = workingDir
   self.writeSolverFile()
   self.solver = caffe.SGDSolver(self.directory + 'solver.prototxt')
   self.iter = 0
   self.itersPerEpisode = config.geti('trainingIterationsPerBatch')
   self.lr = config.getf('learningRate')
   self.stepSize = config.geti('stepSize')
   self.gamma = config.getf('gamma')
   print 'CAFFE SOLVER INITALIZED'
Example #3
 def loadNetwork(self):
   self.imgDim = config.geti('imageDim')
   self.cropSize = config.geti('cropSize')
   self.contextPad = config.geti('contextPad')
   #self.stateContextFactor = config.geti('stateContextFactor')
   modelFile = config.get('convnetDir') + config.get('convNetDef')
   networkFile = config.get('convnetDir') + config.get('trainedConvNet')
   self.net = wrapperv0.ImageNetClassifier(modelFile, networkFile, IMAGE_DIM=self.imgDim, CROPPED_DIM=self.cropSize, MEAN_IMAGE=config.get('meanImage'))
   self.net.caffenet.set_mode_gpu()
   self.net.caffenet.set_phase_test()
   self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
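The last line of Example #3 reorders the stored mean image from H x W x C into Caffe's C x H x W layout. A minimal check of that axis shuffle, assuming the mean is a 256x256x3 array (the shape here is an assumption, not read from the repository):

import numpy as np

mean_hwc = np.zeros((256, 256, 3), dtype='float32')  # assumed H x W x C mean image
mean_chw = mean_hwc.swapaxes(1, 2).swapaxes(0, 1)    # same swaps as in the example -> C x H x W
print(mean_chw.shape)  # (3, 256, 256)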
Example #4
 def loadNetwork(self):
     self.imgDim = config.geti('imageDim')
     self.cropSize = config.geti('cropSize')
     self.contextPad = config.geti('contextPad')
     #self.stateContextFactor = config.geti('stateContextFactor')
     modelFile = config.get('convnetDir') + config.get('convNetDef')
     networkFile = config.get('convnetDir') + config.get('trainedConvNet')
     self.net = wrapperv0.ImageNetClassifier(
         modelFile,
         networkFile,
         IMAGE_DIM=self.imgDim,
         CROPPED_DIM=self.cropSize,
         MEAN_IMAGE=config.get('meanImage'))
     self.net.caffenet.set_mode_gpu()
     self.net.caffenet.set_phase_test()
     self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(
         0, 1).astype('float32')
Example #5
 def doNetworkTraining(self, samples, labels):
     self.solver.net.set_input_arrays(samples, labels)
     self.solver.solve()
     self.iter += config.geti('trainingIterationsPerBatch')
     if self.iter % self.stepSize == 0:
         newLR = self.lr * (self.gamma**int(self.iter / self.stepSize))
         print 'Changing LR to:', newLR
         self.solver.change_lr(newLR)
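The learning-rate update in Example #5 is a plain step decay: newLR = lr * gamma^(iter / stepSize). A standalone sketch of the schedule with assumed values (lr=0.001, gamma=0.1 and stepSize=10000 are illustrative, not taken from the config):

lr, gamma, stepSize = 0.001, 0.1, 10000  # assumed values for illustration
for it in (10000, 20000, 30000):
    print(lr * (gamma ** int(it / stepSize)))  # 0.0001, then 1e-05, then 1e-06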
Example #6
 def doNetworkTraining(self, samples, labels):
   self.solver.net.set_input_arrays(samples, labels)
   self.solver.solve()
   self.iter += config.geti('trainingIterationsPerBatch')
   if self.iter % self.stepSize == 0:
     newLR = self.lr * ( self.gamma** int(self.iter/self.stepSize) )
     print 'Changing LR to:',newLR
     self.solver.change_lr(newLR)
Example #7
 def train(self):
     networkFile = config.get('networkDir') + config.get(
         'snapshotPrefix') + '_iter_' + config.get(
             'trainingIterationsPerBatch') + '.caffemodel'
     interactions = config.geti('trainInteractions')
     minEpsilon = config.getf('minTrainingEpsilon')
     epochSize = len(self.environment.imageList) / 1
     epsilon = 1.0
     self.controller.setEpsilonGreedy(epsilon,
                                      self.environment.sampleAction)
     epoch = 1
     exEpochs = config.geti('explorationEpochs')
     while epoch <= exEpochs:
         s = cu.tic()
         print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
         self.runEpoch(interactions, len(self.environment.imageList))
         self.task.flushStats()
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     self.learner = QLearning()
     self.agent.learner = self.learner
     egEpochs = config.geti('epsilonGreedyEpochs')
     while epoch <= egEpochs + exEpochs:
         s = cu.tic()
         epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
         if epsilon < minEpsilon: epsilon = minEpsilon
         self.controller.setEpsilonGreedy(epsilon,
                                          self.environment.sampleAction)
         print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
     while epoch <= maxEpochs:
         s = cu.tic()
         print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(
             epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         shutil.copy(networkFile, networkFile + '.' + str(epoch))
         epoch += 1
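The middle loop in Example #7 anneals epsilon linearly from 1.0 down to minTrainingEpsilon over the epsilon-greedy epochs, then holds it fixed for the exploitation epochs. A short sketch of just that schedule (minEpsilon=0.1 and egEpochs=10 are assumed values):

minEpsilon, egEpochs = 0.1, 10  # assumed config values
epsilon = 1.0
for epoch in range(1, egEpochs + 1):
    epsilon = max(minEpsilon, epsilon - (1.0 - minEpsilon) / float(egEpochs))
    print('epoch %d -> epsilon %.3f' % (epoch, epsilon))  # 0.910, 0.820, ... down to 0.100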
Example #8
 def run(self):
   if self.mode == 'train':
     self.agent.persistMemory = True
     self.agent.startReplayMemory(len(self.environment.imageList), config.geti('trainInteractions'))
     #self.agent.assignPriorMemory(self.environment.priorMemory)
     self.train()
   elif self.mode == 'test':
     self.agent.persistMemory = False
     self.test()
Example #9
 def run(self):
     if self.mode == 'train':
         self.agent.persistMemory = True
         self.agent.startReplayMemory(len(self.environment.imageList),
                                      config.geti('trainInteractions'))
         #self.agent.assignPriorMemory(self.environment.priorMemory)
         self.train()
     elif self.mode == 'test':
         self.agent.persistMemory = False
         self.test()
Example #10
 def train(self):
   networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
   interactions = config.geti('trainInteractions')
   minEpsilon = config.getf('minTrainingEpsilon')
   epochSize = len(self.environment.imageList)/1
   epsilon = 1.0
   self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
   epoch = 1
   exEpochs = config.geti('explorationEpochs')
   while epoch <= exEpochs:
     s = cu.tic()
     print 'Epoch',epoch,': Exploration (epsilon=1.0)'
     self.runEpoch(interactions, len(self.environment.imageList))
     self.task.flushStats()
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   self.learner = QLearning()
   self.agent.learner = self.learner
   egEpochs = config.geti('epsilonGreedyEpochs')
   while epoch <= egEpochs + exEpochs:
     s = cu.tic()
     epsilon = epsilon - (1.0-minEpsilon)/float(egEpochs)
     if epsilon < minEpsilon: epsilon = minEpsilon
     self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
     print 'Epoch',epoch ,'(epsilon-greedy:{:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
   while epoch <= maxEpochs:
     s = cu.tic()
     print 'Epoch',epoch,'(exploitation mode: epsilon={:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     shutil.copy(networkFile, networkFile + '.' + str(epoch))
     epoch += 1
Example #11
 def __init__(self, imageName, boxReset='Full', groundTruth=None):
   self.imageName = imageName
   self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
   self.box = [0,0,0,0]
   self.resets = 1
   self.reset(boxReset)
   self.landmarkIndex = {}
   self.actionChosen = 2
   self.actionValue = 0
   self.groundTruth = groundTruth
   if self.groundTruth is not None:
     self.taskSimulator = bst.BoxSearchTask()
     self.taskSimulator.groundTruth = self.groundTruth
     self.taskSimulator.loadGroundTruth(self.imageName)
   self.stepsWithoutLandmark = 0
   self.actionHistory = [0 for i in range(NUM_ACTIONS*config.geti('actionHistoryLength'))]
Example #12
 def doValidation(self, epoch):
   if epoch % config.geti('validationEpochs') != 0:
     return
   auxRL = BoxSearchRunner('test')
   auxRL.run()
   indexType = config.get('evaluationIndexType')
   category = config.get('category')
   if indexType == 'pascal':
     categories, catIndex = bse.get20Categories()
   elif indexType == 'relations':
     categories, catIndex = bse.getCategories()
   elif indexType == 'finetunedRelations':
     categories, catIndex = bse.getRelationCategories()
   catI = categories.index(category)
   scoredDetections = bse.loadScores(config.get('testMemory'), catI)
   groundTruthFile = config.get('testGroundTruth')
   ps,rs = bse.evaluateCategory(scoredDetections, 'scores', groundTruthFile)
   pl,rl = bse.evaluateCategory(scoredDetections, 'landmarks', groundTruthFile)
   line = lambda x,y,z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y,z)
   print line('Validation Scores:',ps,rs)
   print line('Validation Landmarks:',pl,rl)
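The line lambda in Example #12 only formats a label with a precision/recall pair; for instance (the numbers are made up):

line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
print(line('Validation Scores:', 0.512, 0.431))  # prints: Validation Scores:	0.512	0.431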
Example #13
 def doValidation(self, epoch):
     if epoch % config.geti('validationEpochs') != 0:
         return
     auxRL = BoxSearchRunner('test')
     auxRL.run()
     indexType = config.get('evaluationIndexType')
     category = config.get('category')
     if indexType == 'pascal':
         categories, catIndex = bse.get20Categories()
     elif indexType == 'relations':
         categories, catIndex = bse.getCategories()
     elif indexType == 'finetunedRelations':
         categories, catIndex = bse.getRelationCategories()
     catI = categories.index(category)
     scoredDetections = bse.loadScores(config.get('testMemory'), catI)
     groundTruthFile = config.get('testGroundTruth')
     ps, rs = bse.evaluateCategory(scoredDetections, 'scores',
                                   groundTruthFile)
     pl, rl = bse.evaluateCategory(scoredDetections, 'landmarks',
                                   groundTruthFile)
     line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
     print line('Validation Scores:', ps, rs)
     print line('Validation Landmarks:', pl, rl)
Example #14
 def test(self):
     interactions = config.geti('testInteractions')
     self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
     self.runEpoch(interactions, len(self.environment.imageList))
Example #15
__author__ = "Juan C. Caicedo, [email protected]"

import os
import random
import numpy as np
import scipy.io
import caffe

import learn.rl.RLConfig as config
import BoxSearchState as bs
from pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner

DETECTION_REWARD = config.getf('detectionReward')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS*config.geti('actionHistoryLength')
ACTION_HISTORY_LENTH = config.geti('actionHistoryLength')
NETWORK_INPUTS = config.geti('stateFeatures')/config.geti('temporalWindow')
REPLAY_MEMORY_SIZE = config.geti('trainingIterationsPerBatch')*config.geti('trainingBatchSize')

def generateRandomActionHistory():
  actions = np.zeros((ACTION_HISTORY_SIZE))
  history = [i*bs.NUM_ACTIONS + np.random.randint(0,bs.PLACE_LANDMARK) for i in range(ACTION_HISTORY_LENTH)]
  actions[history] = 1
  return actions

class QLearning(ValueBasedLearner):

  offPolicy = True
  batchMode = True
  dataset = []

  trainingSamples = 0
Example #16
def defaultSampler():
    return np.random.random([1, config.geti('outputActions')])
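defaultSampler draws one uniform random score per action (shape (1, outputActions)); taking the argmax over such a row yields a uniformly random action index. A self-contained sketch of what it returns (outputActions = 10 is an assumption for illustration):

import numpy as np

q = np.random.random([1, 10])  # what defaultSampler() returns, assuming outputActions = 10
action = int(np.argmax(q))     # argmax over random scores = uniformly random action
print(action)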
Example #17
X_COORD_DOWN       = 4
Y_COORD_DOWN       = 5
SCALE_DOWN         = 6
ASPECT_RATIO_DOWN  = 7
PLACE_LANDMARK     = 8
SKIP_REGION        = 9

# BOX LIMITS
MIN_ASPECT_RATIO = 0.15
MAX_ASPECT_RATIO = 6.00
MIN_BOX_SIDE     = 10
STEP_FACTOR      = config.getf('boxResizeStep')
DELTA_SIZE       = config.getf('boxResizeStep')

# OTHER DEFINITIONS
NUM_ACTIONS = config.geti('outputActions')
RESET_BOX_FACTOR = 2
QUADRANT_SIZE = 0.7

def fingerprint(b):
  return '_'.join( map(str, map(int, b)) )

class BoxSearchState():

  def __init__(self, imageName, boxReset='Full', groundTruth=None):
    self.imageName = imageName
    self.visibleImage = Image.open(config.get('imageDir') + '/' + self.imageName + '.jpg')
    self.box = [0,0,0,0]
    self.resets = 1
    self.reset(boxReset)
    self.landmarkIndex = {}
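The fingerprint helper in Example #17 keys a box by truncating each coordinate to an integer and joining the parts with underscores; a quick check:

def fingerprint(b):
  return '_'.join(map(str, map(int, b)))

print(fingerprint([10.0, 20.7, 110.5, 220.9]))  # 10_20_110_220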
Example #18
import json

import numpy as np  # np.exp / np.tanh are used by the helpers below
import utils.utils as cu
import utils.libDetection as det
import learn.rl.RLConfig as config
import BoxSearchState as bs  # bs.NUM_ACTIONS is used for ACTION_HISTORY_SIZE


def sigmoid(x, a=1.0, b=0.0):
    return 1.0 / (1.0 + np.exp(-a * x + b))


def tanh(x, a=5, b=0.5, c=2.0):
    return c * np.tanh(a * x + b)


TEST_TIME_OUT = config.geti('testTimeOut')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS * config.geti('actionHistoryLength')


class BoxSearchEnvironment(Environment, Named):
    def __init__(self, imageList, mode, groundTruthFile=None):
        self.mode = mode
        self.cnn = cn.ConvNet()
        self.testRecord = None
        self.idx = -1
        self.imageList = [x.strip() for x in open(imageList)]
        self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
        #self.imageList = self.rankImages()
        #self.imageList = self.imageList[0:10]
        allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
        self.negativeSamples = list(
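The sigmoid and tanh helpers at the top of Example #18 squash a real-valued input into a bounded range; with their default parameters they map 0 to 0.5 and to roughly 0.92 respectively (the values follow directly from the formulas shown):

import numpy as np

def sigmoid(x, a=1.0, b=0.0):
    return 1.0 / (1.0 + np.exp(-a * x + b))

def tanh(x, a=5, b=0.5, c=2.0):
    return c * np.tanh(a * x + b)

print(sigmoid(0.0))  # 0.5
print(tanh(0.0))     # ~0.924, i.e. 2 * tanh(0.5)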
Example #19
__author__ = "Juan C. Caicedo, [email protected]"

import learn.rl.RLConfig as config

import numpy as np
import scipy.io
import utils.MemoryUsage

import BoxSearchState as bss
import PriorMemory as prm
import random

STATE_FEATURES = config.geti('stateFeatures')/config.geti('temporalWindow')
NUM_ACTIONS = config.geti('outputActions')
TEMPORAL_WINDOW = config.geti('temporalWindow')
HISTORY_FACTOR = config.geti('historyFactor')
NEGATIVE_PROBABILITY = config.getf('negativeEpisodeProb')

class BoxSearchAgent():

  image = None
  observation = None
  action = None
  reward = None
  timer = 0
  
  def __init__(self, qnet, learner=None):
    self.controller = qnet
    self.learner = learner
    self.avgReward = 0
    self.replayMemory = None
Example #20
 def test(self):
   interactions = config.geti('testInteractions')
   self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
   self.runEpoch(interactions, len(self.environment.imageList))
Example #21
__author__ = "Juan C. Caicedo, [email protected]"

import learn.rl.RLConfig as config

import numpy as np
import scipy.io
import utils.MemoryUsage

import BoxSearchState as bss
import PriorMemory as prm
import random

STATE_FEATURES = config.geti('stateFeatures') / config.geti('temporalWindow')
NUM_ACTIONS = config.geti('outputActions')
TEMPORAL_WINDOW = config.geti('temporalWindow')
HISTORY_FACTOR = config.geti('historyFactor')
NEGATIVE_PROBABILITY = config.getf('negativeEpisodeProb')


class BoxSearchAgent():

    image = None
    observation = None
    action = None
    reward = None
    timer = 0

    def __init__(self, qnet, learner=None):
        self.controller = qnet
        self.learner = learner
        self.avgReward = 0
Example #22
__author__ = "Juan C. Caicedo, [email protected]"

import os
import random
import numpy as np
import scipy.io
import caffe

import learn.rl.RLConfig as config
import BoxSearchState as bs
from pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner

DETECTION_REWARD = config.getf('detectionReward')
ACTION_HISTORY_SIZE = bs.NUM_ACTIONS * config.geti('actionHistoryLength')
ACTION_HISTORY_LENTH = config.geti('actionHistoryLength')
NETWORK_INPUTS = config.geti('stateFeatures') / config.geti('temporalWindow')
REPLAY_MEMORY_SIZE = config.geti('trainingIterationsPerBatch') * config.geti(
    'trainingBatchSize')


def generateRandomActionHistory():
    actions = np.zeros((ACTION_HISTORY_SIZE))
    history = [
        i * bs.NUM_ACTIONS + np.random.randint(0, bs.PLACE_LANDMARK)
        for i in range(ACTION_HISTORY_LENTH)
    ]
    actions[history] = 1
    return actions


class QLearning(ValueBasedLearner):