def train(self):
    interactions = config.geti('trainInteractions')
    minEpsilon = config.getf('minTrainingEpsilon')
    epochSize = len(self.environment.db.images) / 2
    epsilon = 1.0
    self.controller.setEpsilonGreedy(epsilon)
    print 'Epoch 0: Exploration'
    self.runEpoch(interactions, len(self.environment.db.images))
    self.learner = QLearning()
    self.agent.learner = self.learner
    epoch = 1
    egEpochs = config.geti('epsilonGreedyEpochs')
    while epoch <= egEpochs:
        epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
        if epsilon < minEpsilon: epsilon = minEpsilon
        self.controller.setEpsilonGreedy(epsilon)
        print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
        self.runEpoch(interactions, epochSize)
        epoch += 1
    epoch = 1
    maxEpochs = config.geti('exploitLearningEpochs')
    while epoch <= maxEpochs:
        print 'Epoch', epoch + egEpochs, '(exploitation mode: epsilon={:5.3f})'.format(epsilon)
        self.runEpoch(interactions, epochSize)
        epoch += 1
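# Hedged illustration (added for clarity; the numeric values are assumed, not taken from the
# config files): the loop above anneals epsilon linearly from 1.0 down to minTrainingEpsilon
# over epsilonGreedyEpochs epochs and then keeps it fixed during the exploitation epochs.
def annealedEpsilon(epoch, egEpochs, minEpsilon):
    # epsilon after `epoch` epsilon-greedy epochs, clipped at the configured minimum
    return max(1.0 - epoch * (1.0 - minEpsilon) / float(egEpochs), minEpsilon)

# e.g. with minEpsilon=0.1 and egEpochs=9 the schedule is 0.9, 0.8, ..., 0.1, then constant.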
def learn(self, memory, controller):
    print '# Identify memory records stored by the agent', memory.O.shape, memory.A.shape, memory.R.shape
    totalMemorySize = memory.usableRecords
    replayMemorySize = config.geti('trainingIterationsPerBatch') * config.geti('trainingBatchSize')
    print '# Select a random sample of records'
    recordsToPull = [random.randint(0, totalMemorySize - 2) for i in range(replayMemorySize)]
    samples = np.zeros((replayMemorySize, memory.O.shape[1], 1, 1), np.float32)
    targets = np.zeros((replayMemorySize, 3, 1, 1), np.float32)
    nextStates = np.zeros((replayMemorySize, memory.O.shape[1], 1, 1), np.float32)
    trainingSet = []
    terminalStates = []
    for i in range(len(recordsToPull)):
        r = recordsToPull[i]
        # Make sure that next state belongs to the same image
        if memory.I[r] != memory.I[r + 1]:
            terminalStates.append(i)
        samples[i, :, 0, 0] = memory.O[r, :]
        nextStates[i, :, 0, 0] = memory.O[r + 1, :]
        action = memory.A[r, 0]
        reward = memory.R[r, 0]
        targets[i, :, 0, 0] = np.array([action, reward, 0.0], np.float32)
    if controller.net != None:
        controller.loadNetwork(definition='deploy.maxq.prototxt')
        discountedMaxNextQ = self.gamma * np.max(controller.getActivations(nextStates), axis=1)
        discountedMaxNextQ[terminalStates] = 0.0
        targets[:, 2, 0, 0] = discountedMaxNextQ
    print '# Update network'
    self.netManager.doNetworkTraining(samples, targets)
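# Hedged illustration (not part of the original code): each row of `targets` above packs
# (action, reward, gamma * max_a' Q(s', a')), with the third entry zeroed for terminal
# transitions. A loss layer consuming that packing would presumably regress the network's
# Q-value for the taken action towards the Bellman target r + gamma * max_a' Q(s', a'),
# roughly as sketched below; the function name and the exact loss used by the prototxt
# are assumptions.
import numpy as np

def qLearningLossSketch(predictedQ, targets):
    # predictedQ: (batch, numActions) array with Q(s, .) for each sample
    # targets:    (batch, 3) rows of (action, reward, discountedMaxNextQ)
    actions = targets[:, 0].astype(int)
    bellman = targets[:, 1] + targets[:, 2]
    taken = predictedQ[np.arange(predictedQ.shape[0]), actions]
    return 0.5 * np.mean((taken - bellman) ** 2)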
def __init__(self, workingDir):
    self.directory = workingDir
    self.writeSolverFile()
    self.solver = caffe.SGDSolver(self.directory + 'solver.prototxt')
    self.iter = 0
    self.itersPerEpisode = config.geti('trainingIterationsPerBatch')
    self.lr = config.getf('learningRate')
    self.stepSize = config.geti('stepSize')
    self.gamma = config.getf('gamma')
    print 'CAFFE SOLVER INITIALIZED'
def run(self):
    if self.mode == 'train':
        self.agent.persistMemory = True
        self.agent.startReplayMemory(len(self.environment.db.images), config.geti('trainInteractions'), config.geti('stateFeatures'))
        self.train()
    elif self.mode == 'test':
        self.agent.persistMemory = False
        self.test()
def loadNetwork(self):
    self.imgDim = config.geti('imageDim')
    self.cropSize = config.geti('cropSize')
    self.contextPad = config.geti('contextPad')
    modelFile = config.get('convnetDir') + config.get('convNetDef')
    networkFile = config.get('convnetDir') + config.get('trainedConvNet')
    self.net = wrapperv0.ImageNetClassifier(modelFile, networkFile, IMAGE_DIM=self.imgDim, CROPPED_DIM=self.cropSize, MEAN_IMAGE=config.get('meanImage'))
    self.net.caffenet.set_mode_gpu()
    self.net.caffenet.set_phase_test()
    self.imageMean = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
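# Descriptive note added for clarity: the two swapaxes calls above convert the mean image
# from (height, width, channels) to Caffe's (channels, height, width) layout, presumably so
# it can be subtracted directly from the network's input crops.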
def loadNetwork(self):
    modelFile = config.get('networkDir') + 'deploy.prototxt'
    meanImage = config.get('meanImagePickle')
    self.net = imagenet.ImageNetClassifier(modelFile, self.networkFile, IMAGE_DIM=config.geti('imageSize'), CROPPED_DIM=config.geti('cropSize'), MEAN_IMAGE=meanImage)
    self.net.caffenet.set_phase_test()
    self.net.caffenet.set_mode_gpu()
    self.meanImage = self.net._IMAGENET_MEAN.swapaxes(1, 2).swapaxes(0, 1).astype('float32')
def loadNextEpisode(self):
    self.getExample()
    if self.imgName == None:
        print 'All episodes done'
        return
    self.visibleImage = Image.open(self.imageDir + '/' + self.imgName + '.jpg')
    size = self.visibleImage.size
    self.state = [SingleObjectLocalizer(size, box[0:4], box[4]) for box in self.state]
    if config.geti('maxCandidatesPerImage') != 0 and self.mode == "Training":
        random.shuffle(self.state)
        self.state = self.state[0:config.geti('maxCandidatesPerImage')]
    print 'Episode done:', self.imgName, 'Boxes:', len(self.state), 'Terminals:', self.terminalCounts, 'Moves:', self.episodeMoves
def doValidation(self, epoch):
    if epoch % config.geti('validationEpochs') != 0:
        return
    auxRL = BoxSearchRunner('test')
    auxRL.run()
    indexType = config.get('evaluationIndexType')
    category = config.get('category')
    if indexType == 'pascal':
        categories, catIndex = bse.get20Categories()
    elif indexType == 'relations':
        categories, catIndex = bse.getCategories()
    elif indexType == 'finetunedRelations':
        categories, catIndex = bse.getRelationCategories()
    if category in categories:
        catI = categories.index(category)
    else:
        catI = -1
    scoredDetections = bse.loadScores(config.get('testMemory'), catI)
    groundTruthFile = config.get('testGroundTruth')
    #ps,rs = bse.evaluateCategory(scoredDetections, 'scores', groundTruthFile)
    pl, rl = bse.evaluateCategory(scoredDetections, 'landmarks', groundTruthFile)
    line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
    #print line('Validation Scores:',ps,rs)
    print line('Validation Landmarks:', pl, rl)
def train(self):
    networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
    interactions = config.geti('trainInteractions')
    minEpsilon = config.getf('minTrainingEpsilon')
    epochSize = len(self.environment.imageList) / 1
    epsilon = 1.0
    self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
    epoch = 1
    exEpochs = config.geti('explorationEpochs')
    while epoch <= exEpochs:
        s = cu.tic()
        print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
        self.runEpoch(interactions, len(self.environment.imageList))
        self.task.flushStats()
        self.doValidation(epoch)
        s = cu.toc('Epoch done in ', s)
        epoch += 1
    self.learner = QLearning()
    self.agent.learner = self.learner
    egEpochs = config.geti('epsilonGreedyEpochs')
    while epoch <= egEpochs + exEpochs:
        s = cu.tic()
        epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
        if epsilon < minEpsilon: epsilon = minEpsilon
        self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
        print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
        self.runEpoch(interactions, epochSize)
        self.task.flushStats()
        self.doValidation(epoch)
        s = cu.toc('Epoch done in ', s)
        epoch += 1
    maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
    while epoch <= maxEpochs:
        s = cu.tic()
        print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(epsilon)
        self.runEpoch(interactions, epochSize)
        self.task.flushStats()
        self.doValidation(epoch)
        s = cu.toc('Epoch done in ', s)
        shutil.copy(networkFile, networkFile + '.' + str(epoch))
        epoch += 1
def doNetworkTraining(self, samples, labels):
    self.solver.net.set_input_arrays(samples, labels)
    self.solver.solve()
    self.iter += config.geti('trainingIterationsPerBatch')
    if self.iter % self.stepSize == 0:
        newLR = self.lr * (self.gamma ** int(self.iter / self.stepSize))
        print 'Changing LR to:', newLR
        self.solver.change_lr(newLR)
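# Hedged example (added for clarity; the numbers are illustrative, not from the config):
# the update above follows a Caffe-style "step" schedule, lr = baseLR * gamma ** (iter / stepSize).
def steppedLearningRate(baseLR, gamma, stepSize, iteration):
    return baseLR * (gamma ** int(iteration / stepSize))

# e.g. steppedLearningRate(0.001, 0.1, 10000, 10000) -> 0.0001
#      steppedLearningRate(0.001, 0.1, 10000, 20000) -> 0.00001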
def dropRecords(self, rec, total, new):
    if total > config.geti('replayMemorySize'):
        drop = 0
        while drop < new:
            for k in rec.keys():
                rec[k].pop(0)
            drop += 1
    return rec
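# Hedged usage sketch (field names and sizes below are made up for illustration): when the
# total number of stored records exceeds config 'replayMemorySize', dropRecords evicts the
# `new` oldest entries from every field, keeping all fields aligned by index. For instance,
# assuming replayMemorySize were 3:
#   rec = {'observations': [o0, o1, o2, o3], 'actions': [0, 3, 1, 2]}
#   rec = self.dropRecords(rec, total=4, new=1)
#   -> index 0 is popped from every list, leaving 3 aligned records per field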
def run(self):
    if self.mode == 'train':
        self.agent.persistMemory = True
        self.agent.startReplayMemory(len(self.environment.imageList), config.geti('trainInteractions'))
        self.train()
    elif self.mode == 'test':
        self.agent.persistMemory = False
        self.test()
def getActivations(self, imagePath, boxes, state):
    n = len(boxes)
    activations = cu.emptyMatrix([n, config.geti('outputActions')])
    numBatches = (n + config.geti('deployBatchSize') - 1) / config.geti('deployBatchSize')
    boxes += [[0, 0, 0, 0] for x in range(numBatches * config.geti('deployBatchSize') - n)]
    stateFeatures = np.zeros((len(boxes), 20, 1, 1), dtype=np.float32)
    stateFeatures[0:n, :, :, :] = state
    dims = self.net.caffenet.InitializeImage(imagePath, config.geti('imageSize'), self.meanImage, config.geti('cropSize'))
    for k in range(numBatches):
        s, f = k * config.geti('deployBatchSize'), (k + 1) * config.geti('deployBatchSize')
        e = config.geti('deployBatchSize') if f <= n else n - s
        # Forward this batch
        #self.net.caffenet.ForwardRegions(boxes[s:f], config.geti('contextPad'))
        self.net.caffenet.ForwardRegionsAndState(boxes[s:f], config.geti('contextPad'), [stateFeatures[s:f, :, :, :]])
        outputs = self.net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs
        activations[s:f, :] = outputs['prob'].data[0:e, :, :, :].reshape([e, config.geti('outputActions')])
    # Release image data
    self.net.caffenet.ReleaseImageData()
    return activations
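# Descriptive note added for clarity (the numbers are illustrative only): the padding above
# rounds the region list up to a whole number of deploy batches. For example, with n=23
# boxes and deployBatchSize=10:
#   numBatches = (23 + 10 - 1) / 10 = 3   (integer division)
#   padding    = 3 * 10 - 23 = 7 dummy [0, 0, 0, 0] boxes
# Only the first n rows of `activations` are filled with real network outputs.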
def getActionValues(self, state):
    imgName = state[0]
    if self.net == None or self.exploreOrExploit() == EXPLORE:
        return np.random.random([len(state[1]), config.geti('outputActions')])
    else:
        boxes = []
        stateFeatures = np.zeros((len(state[1]), 20, 1, 1), dtype=np.float32)
        for i in range(len(state[1])):
            s = state[1][i]
            boxes.append(map(int, s.nextBox))
            stateFeatures[i, 0, 0, 0] = s.prevScore
            stateFeatures[i, 1:5, 0, 0] = np.asarray(s.normPrevBox())
            stateFeatures[i, 5, 0, 0] = s.currScore
            stateFeatures[i, 6:10, 0, 0] = np.asarray(s.normCurrBox())
            if s.prevAction() >= 0:
                stateFeatures[i, 10 + s.prevAction(), 0, 0] = 1.0
        return self.getActivations(config.get('imageDir') + '/' + imgName + '.jpg', boxes, stateFeatures)
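# Descriptive note added for clarity (inferred from the indexing above): each candidate box
# is encoded as a 20-dimensional feature vector laid out as
#   [0]      previous score
#   [1:5]    normalized previous box coordinates
#   [5]      current score
#   [6:10]   normalized current box coordinates
#   [10:20]  one-hot encoding of the previous action (when prevAction() >= 0)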
import random
import numpy as np
import json

import utils as cu
import libDetection as det
import RLConfig as config

def sigmoid(x, a=1.0, b=0.0):
    return 1.0 / (1.0 + np.exp(-a * x + b))

def tanh(x, a=5, b=0.5, c=2.0):
    return c * np.tanh(a * x + b)

TEST_TIME_OUT = config.geti('testTimeOut')

class BoxSearchEnvironment(Environment, Named):

    def __init__(self, imageList, mode, groundTruthFile=None):
        self.mode = mode
        self.cnn = cn.ConvNet()
        self.testRecord = None
        self.idx = -1
        self.imageList = [x.strip() for x in open(imageList)]
        self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
        #self.imageList = self.rankImages()
        #self.imageList = self.imageList[0:10]
        allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
        self.negativeSamples = list(allImgs.difference(set(self.groundTruth.keys())))
        self.negativeEpisode = False
def defaultSampler():
    return np.random.random([1, config.geti('outputActions')])
def test(self):
    interactions = config.geti('testInteractions')
    self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
    self.runEpoch(interactions, len(self.environment.imageList))
from ObjectLocalizerEnvironment import ObjectLocalizerEnvironment
from DeepQNetwork import DeepQNetwork
from DeepQLearning import DeepQLearning
from MDPObjectLocalizerTask import MDPObjectLocalizerTask
from ObjectLocalizationAgent import ObjectLocalizationAgent

print 'Starting Environment'
epsilon = 1.0
environment = ObjectLocalizerEnvironment(config.get('imageDir'), config.get('candidatesFile'), 'Training')
print 'Initializing DeepQNetwork'
controller = DeepQNetwork()
controller.setEpsilonGreedy(epsilon)
print 'Initializing Q Learner'
learner = DeepQLearning()
print 'Preparing Agent'
agent = ObjectLocalizationAgent(controller, learner)
print 'Configuring Task'
task = MDPObjectLocalizerTask(environment, config.get('groundTruth'))
print 'Setting up Experiment'
experiment = Experiment(task, agent)
i = 0
print 'Main Loop'
while i < config.geti('maximumEpochs'):
    print 'Epoch', i, '(epsilon:{:5.3f})'.format(epsilon)
    experiment.doInteractions(int(config.get('numInteractions')))
    agent.learn()
    agent.reset()
    i += 1
    epsilon = adjustEpsilon(config.geti('maximumEpochs'), i, epsilon)
    controller.setEpsilonGreedy(epsilon)
def updatePostReward(self):
    if len(self.state) == self.terminalCounts or self.episodeMoves >= config.geti('maxMovesAllowed'):
        self.loadNextEpisode()
def test(self):
    interactions = config.geti('testInteractions')
    self.controller.setEpsilonGreedy(config.getf('testEpsilon'))
    self.runEpoch(interactions, len(self.environment.db.images))
def __init__(self, workingDir):
    self.directory = workingDir
    self.step = config.geti('trainingIterationsPerBatch')
    self.readCheckpoint()
    self.writeSolverFile(self.step)
__author__ = "Juan C. Caicedo, [email protected]"

import RLConfig as config
import numpy as np
import scipy.io

import MemoryUsage

NUM_ACTIONS = config.geti('outputActions')

class RegionFilteringAgent():

    image = None
    observation = None
    action = None
    reward = None
    timer = 0

    def __init__(self, qnet, learner=None):
        self.controller = qnet
        self.learner = learner
        self.avgReward = 0
        self.replayMemory = None

    def startReplayMemory(self, memoryImages, recordsPerImage, recordSize):
        self.replayMemory = ReplayMemory(memoryImages, recordsPerImage, recordSize)

    def integrateObservation(self, obs):
        if obs['image'] != self.image:
for l in open(params['caffeLog']):
    if l.find('loss =') != -1:
        loss.append(float(l.split()[-1]))
i = np.argmax(loss)
loss[i] = np.average(loss)
ax[0, 0].plot(range(len(loss)), loss)
ax[0, 0].set_title('QNetwork Loss')

# Parse RL output
avgRewards = []
epochRewards = []
epochRecall = []
epochIoU = []
epochLandmarks = []
validationLandmarks = []
positives = dict([(i, 0) for i in range(config.geti('outputActions'))])
negatives = dict([(i, 0) for i in range(config.geti('outputActions'))])
for l in open(params['rlLog']):
    if l.find('Agent::MemoryRecord') != -1:
        parts = l.split()
        action = int(parts[7])
        reward = float(parts[9])
        if reward > 0:
            positives[action] += 1
        else:
            negatives[action] += 1
    elif l.find('reset') != -1:
        avgRewards.append(float(l.split()[-1]))
    elif l.find('Epoch Recall') != -1:
        epochRecall.append(float(l.split()[-1]))
        epochRewards.append(np.average(avgRewards))
__author__ = "Juan C. Caicedo, [email protected]"

import RLConfig as config
import numpy as np
import scipy.io

import MemoryUsage

import BoxSearchState as bss
import random

STATE_FEATURES = config.geti('stateFeatures') / config.geti('temporalWindow')
NUM_ACTIONS = config.geti('outputActions')
TEMPORAL_WINDOW = config.geti('temporalWindow')
HISTORY_FACTOR = config.geti('historyFactor')
NEGATIVE_PROBABILITY = config.getf('negativeEpisodeProb')

class BoxSearchAgent():

    image = None
    observation = None
    action = None
    reward = None
    timer = 0

    def __init__(self, qnet, learner=None):
        self.controller = qnet
        self.learner = learner
        self.avgReward = 0
        self.replayMemory = None