def computeGlobalImageMask():
  params = cu.loadParams('category imgsDir groundTruthsFile layer outputDir')
  arch = loadArchitecture('cnn.arch')
  A = projectCoordsToReceptiveField(arch, params['layer'])
  boxes = listBoxes(A)
  gt = loadBoxIndexFile(params['groundTruthsFile'])
  for imName in gt.keys():
    imgFile = params['imgsDir'] + '/' + imName + '.jpg'
    w, h = Image.open(imgFile).size
    R = intersectWithGroundTruth(A, rescaleAllBoxes(gt[imName], 227. / w, 227. / h))
    cu.saveMatrix(R, params['outputDir'] + '/' + imName + '.' + params['category'])
def multipleRegionMasks():
  params = cu.loadParams('category imgsDir groundTruthsFile layer featuresDir outputDir')
  arch = loadArchitecture('cnn.arch')
  A = projectCoordsToReceptiveField(arch, params['layer'])
  s = len(A)
  gt = loadBoxIndexFile(params['groundTruthsFile'])
  for imName in gt.keys():
    imgFile = params['imgsDir'] + '/' + imName + '.jpg'
    w, h = Image.open(imgFile).size
    idx = loadBoxIndexFile(params['featuresDir'] + '/' + imName + '.idx')
    M = np.zeros((len(idx[imName]), s, s))
    i = 0
    for box in idx[imName]:
      P = projectFeatureMapToImagePlane(box, A)
      M[i, :, :] = intersectWithGroundTruth(P, gt[imName])
      i += 1
    cu.saveMatrix(M, params['outputDir'] + '/' + imName + '.' + params['category'])
import os, sys
import utils as cu
import scipy.io

if __name__ == "__main__":
  params = cu.loadParams('matFilesDir outFile')
  out = open(params['outFile'], 'w')
  counter = 0
  for f in os.listdir(params['matFilesDir']):
    if not f.endswith('.mat') or f == 'gt_pos_layer_5_cache.mat':
      continue
    img = f.replace('.mat', '')
    counter += 1
    print counter, img
    mat = scipy.io.loadmat(params['matFilesDir'] + '/' + f)
    idx = mat['gt'] == 0
    mat['boxes'] = mat['boxes'][idx[:, 0], :]
    for i in range(mat['boxes'].shape[0]):
      box = mat['boxes'][i, :].tolist()
      out.write(img + ' ' + ' '.join(map(str, map(int, box))) + '\n')
  out.close()
import os, sys
import utils as cu
import numpy as np

params = cu.loadParams('matrix1 matrix2 output')
Ma, Ia = cu.loadMatrixAndIndex(params['matrix1'])
Mb, Ib = cu.loadMatrixAndIndex(params['matrix2'])
extension = params['matrix1'].split('.')[-1]
cu.saveMatrix(np.concatenate((Ma, Mb)), params['output'] + '.' + extension)
out = open(params['output'] + '.idx', 'w')
for r in Ia:
  out.write(' '.join(r) + '\n')
for r in Ib:
  out.write(' '.join(r) + '\n')
out.close()
    if t == maxTime - 1:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
    else:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'])
    performance.append([prec, recall])
  return performance

def saveTimeResults(categories, results, outputFile):
  out = open(outputFile, 'w')
  out.write(' '.join(categories) + '\n')
  for i in range(results.shape[0]):
    r = results[i, :].tolist()
    out.write(' '.join(map(str, r)) + '\n')

if __name__ == "__main__":
  params = cu.loadParams('testMemDir relationFeaturesDir groundTruthDir outputDir category')
  categories, categoryIndex = getCategories()
  scoredDetections, maxTime = loadScores(params['testMemDir'], params['relationFeaturesDir'], categoryIndex)
  P = np.zeros((maxTime, len(categories)))
  R = np.zeros((maxTime, len(categories)))
  if params['category'] == 'all':
    catIdx = range(len(categories))
  else:
    catIdx = [i for i in range(len(categories)) if categories[i] == params['category']]
  for i in catIdx:
    groundTruthFile = params['groundTruthDir'] + '/' + categories[i] + '_test_bboxes.txt'
    outputFile = params['outputDir'] + '/' + categories[i] + '.out'
    performance = evaluateCategory(scoredDetections, i, maxTime, groundTruthFile, outputFile)
import os, sys
import utils as cu

if __name__ == "__main__":
  params = cu.loadParams("detectionsFile outputDir")
  f = open(params['detectionsFile'])
  line = f.readline()
  img = ''
  imgOut = open(params['outputDir'] + '/tmp.region_rank', 'w')
  while line != '':
    parts = line.split()
    if parts[0] != img:
      imgOut.close()
      imgOut = open(params['outputDir'] + '/' + parts[0] + '.region_rank', 'w')
      img = parts[0]
    imgOut.write(line)
    line = f.readline()
  imgOut.close()
  f.close()
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
    else:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'])
    performance.append([prec, recall])
    T += 1
  return performance

def saveTimeResults(categories, results, outputFile):
  out = open(outputFile, 'w')
  out.write(' '.join(categories) + '\n')
  for i in range(results.shape[0]):
    r = results[i, :].tolist()
    out.write(' '.join(map(str, r)) + '\n')

if __name__ == "__main__":
  params = cu.loadParams('relationFeaturesDir imageList maxTime groundTruthDir outputDir category')
  images = [x.strip() for x in open(params['imageList'])]
  maxTime = int(params['maxTime'])
  step = 3
  categories, categoryIndex = getCategories()
  #ranking = CenterToEdgesDetector(maxTime, categoryIndex)
  #ranking = BigestToSmallestArea(maxTime, categoryIndex)
  ranking = Objectness(maxTime, categoryIndex)
  scoredDetections = loadDetections(images, params['relationFeaturesDir'], ranking)
  P = np.zeros((maxTime / step, len(categories)))
  R = np.zeros((maxTime / step, len(categories)))
  if params['category'] == 'all':
    catIdx = range(len(categories))
  else:
import os, sys
import utils as cu
import libDetection as det
import numpy as np

if __name__ == "__main__":
  params = cu.loadParams('boxesFile minSize outputFile')
  boxes = [x.split() for x in open(params['boxesFile'])]
  minA = float(params['minSize'])**2
  images = {}
  found = set()
  for box in boxes:
    a = det.area(map(int, box[1:]))
    if a >= minA:
      try:
        images[box[0]].append(box[1:])
      except:
        images[box[0]] = [box[1:]]
    found.add(box[0])
  # Add records for images that do not have enough area to comply with the filter
  missing = found.symmetric_difference(images.keys())
  missing = [b for b in boxes if b[0] in missing]
  allAreas = {}
  for m in missing:
    img = m[0]
    try:
      allAreas[img]['box'].append(m[1:])
      allAreas[img]['area'].append(det.area(map(int, m[1:])))
    except:
      allAreas[img] = {'box': [m[1:]], 'area': [det.area(map(int, m[1:]))]}
  for img in allAreas.keys():
########################################
def detectObjects(imageList, featuresDir, indexType, groundTruthDir, outputDir):
  maxOverlap = 0.3
  categories, catIndex = bse.categoryIndex(indexType)
  task = SoftmaxDetector(maxOverlap, catIndex)
  result = processData(imageList, featuresDir, 'prob', task)
  # Collect detection results after NMS
  detections = dict([(c, []) for c in catIndex])
  for res in result:
    for idx in catIndex:
      img, filteredBoxes, filteredScores = res[idx]
      for j in range(len(filteredBoxes)):
        detections[idx].append([img, filteredScores[j]] + filteredBoxes[j])
  # Evaluate results for each category independently
  for idx in catIndex:
    groundTruthFile = groundTruthDir + '/' + categories[idx] + '_test_bboxes.txt'
    output = outputDir + '/' + categories[idx] + '.out'
    detections[idx].sort(key=lambda x: x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections[idx], 0.5)
    prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)

if __name__ == "__main__":
  params = cu.loadParams('imageList scoresDir indexType groundTruthDir outputDir')
  imageList = [x.strip() for x in open(params['imageList'])]
  print 'Ready to process', len(imageList)
  detectObjects(imageList, params['scoresDir'], params['indexType'], params['groundTruthDir'], params['outputDir'])
      if truePos[0] == imgName:
        ov = det.IoU(box, map(float, truePos[1:5]))
        if ov > bestOverlap:
          bestOverlap = ov
          bestIdx = idx
      idx += 1
    pos[i, :] = allPos[bestIdx, :]
    posIdx.append(allPosIdx[bestIdx])
  print 'Positive Matrix with High Overlaping Detections (' + str(pos.shape[0]) + ' instances)'
  return (pos, posIdx, ari, osi)

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  params = cu.loadParams('detectionsFile logFile topKParameter trainingList featuresDir featuresExt modelOut cost maxNegOverlap iterations')
  trainingList = [x.replace('\n', '') for x in open(params['trainingList'])]
  cost = float(params['cost'])
  maxNegOverlap = float(params['maxNegOverlap'])
  topK = int(params['topKParameter'])
  if os.path.isfile(params['logFile']):
    # Positives with High Overlap can be found in log file
    logData = [x.split() for x in open(params['logFile'])]
    positives = buildDataSetWithHighOverlap(params['detectionsFile'], logData, topK, params['featuresDir'], params['featuresExt'])
  else:
    # Positives from top detections
    detectionsData = [x.split() for x in open(params['detectionsFile'])]
    positives = buildDataSetWithTopDetections(detectionsData, topK, params['featuresDir'], params['featuresExt'])
  # Run the training algorithm
  iterations = int(params['iterations']) + 1
  for i in range(iterations):
    F[:, 0:-1] = X
    return np.dot(F, self.M.T)

  def sigmoidValues(self, X):
    return 1 / (1 + np.exp(-self.decisionFunction(X)))

class CategoryScores():

  def __init__(self, integratedModel, outDir):
    self.model = integratedModel
    self.outDir = outDir

  def run(self, img, features, bboxes):
    print img
    scores = self.model.sigmoidValues(features)
    cu.saveMatrix(scores, self.outDir + '/' + img + '.sigmoid_scores')
    return

def extractFeatures(model, imageList, featuresDir, featuresExt):
  task = CategoryScores(model, featuresDir)
  result = processData(imageList, featuresDir, featuresExt, task)

if __name__ == "__main__":
  params = cu.loadParams('modelsDir imageList featuresDir featuresExt')
  im = IntegratedModel(params['modelsDir'])
  imageList = [x.replace('\n', '') for x in open(params['imageList'])]
  extractFeatures(im, imageList, params['featuresDir'], params['featuresExt'])
import os, sys
import utils as cu
import scipy.io
import h5py
import numpy as np
import libDetection as det

params = cu.loadParams('rcnnImdbFile MatlabScoresDir outputDir doNMS')
imdb = h5py.File(params['rcnnImdbFile'])
print 'Images:', imdb['imdb']['image_ids']
images = [u''.join(unichr(c) for c in imdb[o]) for o in imdb['imdb']['image_ids'][0]]
doNMS = params['doNMS'] != 'noNMS'
for f in os.listdir(params['MatlabScoresDir']):
  if f.endswith('__all.mat') and f.find('_boxes_') != -1:
    nameParts = f.split('_')
    out = open(params['outputDir'] + '/' + nameParts[0] + '_' + nameParts[1] + '.out', 'w')
    data = scipy.io.loadmat(params['MatlabScoresDir'] + '/' + f)
    print nameParts[0:2]
    for i in range(data['boxes'].shape[0]):
      detections = data['boxes'][i][0]
      img = str(images[i])
      boxes = [box[0:4].tolist() for box in detections]
      scores = [box[-1] for box in detections]
      if len(boxes) == 0:
        continue
      if doNMS:
        boxes, scores = det.nonMaximumSuppression(boxes, scores, 0.3)
      for j in range(len(boxes)):
        box = boxes[j]
    t = layouts[i].root
    candidates = []
    for j in range(len(inside)):
      n = inside[j]
      r = [det.IoU(n[1:], t[1:]), det.overlap(t[1:], n[1:])]
      if 0.8 >= r[0] and r[1] >= 0.8:
        s = np.exp(-dist(idealMatch, r))
        candidates.append([j, s, n[0], r])
    if len(candidates) > 0:
      candidates.sort(key=lambda x: x[1] + x[2], reverse=True)
      for k in range(min(MAX_NUMBER_OF_PARTS, len(candidates))):
        layouts[i].addPart(inside[candidates[k][0]], candidates[k][-1])
  layouts.sort(key=lambda x: x.getScore(), reverse=True)
  return layouts

params = cu.loadParams('bigDetections tightDetections insideDetections imageDir outputDir')
big = cu.loadBoxIndexFile(params['bigDetections'])
tight = cu.loadBoxIndexFile(params['tightDetections'])
inside = cu.loadBoxIndexFile(params['insideDetections'])
print 'Images:', len(big), len(tight), len(inside)
allLayouts = []
for k in big.keys():
  layouts = findBigMatches(k, big[k], tight[k])
  layouts = findInsideMatches(inside[k], layouts)
  allLayouts += [layouts[0]]
allLayouts.sort(key=lambda x: x.getScore(), reverse=True)
matchCounter = 0
def selectBestBoxes(detections, groundTruth, minOverlap):
  candidates = []
  for d in detections:
    try:
      boxes = groundTruth[d[0]]
    except:
      continue
    bestIoU = 0.0
    for gt in boxes:
      iou = det.IoU(d[2:6], gt)
      if iou > bestIoU:
        bestIoU = iou
    print bestIoU
    if bestIoU > minOverlap:
      candidates.append(d)
  return candidates

def saveCandidates(candidates, output):
  out = open(output, 'w')
  for k in candidates:
    out.write(k[0] + ' ' + ' '.join(map(str, map(int, k[2:6]))) + '\n')
  out.close()

if __name__ == "__main__":
  params = cu.loadParams("detectionsFile groundTruths output")
  detectionsData = [x.split() for x in open(params['detectionsFile'])]
  detections = eval.loadDetections(detectionsData)
  groundTruth = cu.loadBoxIndexFile(params['groundTruths'])
  candidates = selectBestBoxes(detections, groundTruth, 0.5)
  print 'Selected candidates:', len(candidates)
  saveCandidates(candidates, params['output'])
import sys, os
import utils as cu

params = cu.loadParams('fullList positivesList output')
full = [x for x in open(params['fullList'])]
positives = [x for x in open(params['positivesList'])]
out = open(params['output'], 'w')
for r in full:
  if r not in positives:
    out.write(r)
out.close()
import os, sys
import utils as cu
import libDetection as ldet
import numpy as np

params = cu.loadParams('scoresFile groundTruth relation output')
scores = [x.split() for x in open(params['scoresFile'])]
ground = cu.loadBoxIndexFile(params['groundTruth'])
scores.sort(key=lambda x: float(x[1]), reverse=True)
if params['relation'] == 'big':
  operator = lambda x, y: np.exp(-((1.0 - ldet.overlap(x, y))**2 + (0.25 - ldet.IoU(x, y))**2)) >= 0.7
if params['relation'] == 'inside':
  operator = lambda x, y: np.exp(-((1.0 - ldet.overlap(y, x))**2 + (0.25 - ldet.IoU(x, y))**2)) >= 0.7
if params['relation'] == 'tight':
  operator = lambda x, y: ldet.IoU(x, y) >= 0.5
out = open(params['output'], 'w')
for s in scores:
  box = map(float, s[2:7])
  img = s[0]
  try:
    gtBoxes = ground[img]
  except:
    gtBoxes = []
  match = '0'
  for gt in gtBoxes:
    if operator(box, gt):
      match = '1'
  out.write(' '.join(s) + ' ' + match + '\n')
out.close()
  result = processData(imageList, featuresDir, featuresExt, task)
  if outputFile != None:
    outf = open(outputFile, 'w')
    writeF = lambda x, y, b: outf.write(x + ' {:.8f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'.format(y, b[0], b[1], b[2], b[3], b[4]))
  else:
    writeF = lambda x, y, b: x
  detectionsList = []
  for data in result:
    img, filteredBoxes, filteredScores = data
    for i in range(len(filteredBoxes)):
      b = filteredBoxes[i]
      writeF(img, filteredScores[i], b)
      detectionsList.append([img, filteredScores[i], b[0], b[1], b[2], b[3], b[4]])
  if outputFile != None:
    outf.close()
  return detectionsList

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  ## Main Program Parameters
  params = cu.loadParams("modelType modelFile testImageList featuresDir featuresExt maxOverlap threshold outputFile")
  model = det.createDetector(params['modelType'])
  model.load(params['modelFile'])
  imageList = [x.replace('\n', '') for x in open(params['testImageList'])]
  maxOverlap = float(params['maxOverlap'])
  threshold = float(params['threshold'])
  detectObjects(model, imageList, params['featuresDir'], params['featuresExt'], maxOverlap, threshold, params['outputFile'])
import os, sys
import utils as cu
import libDetection as det
import numpy as np
import pickle

params = cu.loadParams('scoresFile outputDir category')
if os.path.isfile(params['scoresFile'] + '.p'):
  print 'Loading pickled data'
  imgs = pickle.load(open(params['scoresFile'] + '.p', 'rb'))
else:
  imgs = {}
  data = open(params['scoresFile'])
  l = data.readline()
  counter = 0
  while l != '':
    counter += 1
    d = l.split()
    rec = {'b': map(float, d[1:5]), 's': map(float, d[5:])}
    try:
      imgs[d[0]]['boxes'].append(rec['b'])
      imgs[d[0]]['scores'] = np.vstack((imgs[d[0]]['scores'], np.array(rec['s'])))
    except:
      imgs[d[0]] = {'boxes': [], 'scores': []}
      imgs[d[0]]['boxes'] = [rec['b']]
      imgs[d[0]]['scores'] = np.array(rec['s'])
    l = data.readline()
    if counter % 100000 == 0:
      print counter
  data.close()
        score = records[key]
      except:
        score = -10.0
      self.scores[img][i, fileIdx] = score

  def saveDB(self, outputDir):
    for img in self.imgBoxes.keys():
      data = {'boxes': self.imgBoxes[img], 'scores': self.scores[img]}
      scipy.io.savemat(outputDir + '/' + img + '.mat', data, do_compression=True)
    out = open(outputDir + '/categories.txt', 'w')
    for c in self.categories:
      out.write(c + '\n')
    out.close()

if __name__ == "__main__":
  params = cu.loadParams('scoresDirectory proposalsFile outputDir')
  cu.mem('Program started')
  lap = tic()
  builder = DBBuilder(params['scoresDirectory'], params['proposalsFile'])
  lap = toc('Proposals loaded', lap)
  cu.mem('DB initialized')
  builder.parseDir()
  lap = toc('Directory parsed', lap)
  cu.mem('All files read')
  builder.saveDB(params['outputDir'])
  lap = toc('Database saved', lap)
  cu.mem('Program ends')
import os, sys
import utils as cu
import libDetection as det
import cPickle as pickle
import scipy.io
import numpy as np

params = cu.loadParams('dbDir relationsFile outputDir')
archive = {}
T = pickle.load(open(params['dbDir'] + '/db.idx', 'rb'))
M = scipy.io.loadmat(params['dbDir'] + '/db.cache')
archive['images'] = T.keys()
index = np.zeros((len(T), 2), np.int)
for i in range(len(archive['images'])):
  idx = T[archive['images'][i]]
  index[i, 0] = idx['s'] + 1
  index[i, 1] = idx['e']
archive['index'] = index
data = [x.split() for x in open(params['relationsFile'])]
categories = set()
labels = {}
for d in data:
  r = [d[1]] + map(float, d[2:])
  try:
    labels[d[0]].append(r)
  except:
    labels[d[0]] = [r]
  categories.add(d[1])
    idx = range(len(data['boxes']))
    boxes = [data['boxes'][i] for i in idx if data[ranking][i] > float('-inf')]
    scores = [data[ranking][i] for i in idx if data[ranking][i] > float('-inf')]
    if len(boxes) > 0:
      fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
      for i in range(len(fBoxes)):
        detections.append([img, fScores[i]] + fBoxes[i])
  detections.sort(key=lambda x: x[1], reverse=True)
  gtBoxes = [x.split() for x in open(groundTruthFile)]
  numPositives = len(gtBoxes)
  groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
  results = eval.evaluateDetections(groundTruth, detections, 0.5)
  if output is not None:
    output = output + '.' + ranking
  prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
  return prec, recall

if __name__ == "__main__":
  params = cu.loadParams('testMemDir groundTruthFile outputDir')
  scoredDetections = loadScores(params['testMemDir'], -1)
  groundTruthFile = params['groundTruthFile']
  outputFile = params['outputDir'] + '/' + 'result.out'
  pl, rl = evaluateCategory(scoredDetections, 'landmarks', groundTruthFile, outputFile)
  line = lambda x, y, z: x + '\t{:5.3f}\t{:5.3f}\n'.format(y, z)
  out = open(params['outputDir'] + '/evaluation.txt', 'w')
  out.write('\tPrecision\tRecall\n')
  out.write(line('Landmarks', pl, rl))
  out.close()
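# A minimal sketch (assumption, not this repo's code) of the greedy IoU-based
# non-maximum suppression that det.nonMaximumSuppression(boxes, scores, 0.3) is
# used for above: keep boxes in decreasing score order and drop any box whose
# IoU with an already kept box exceeds the threshold.
def iouSketch(a, b):
  # a and b are [x1, y1, x2, y2] boxes.
  ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
  iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
  inter = float(ix * iy)
  union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
  return inter / union if union > 0 else 0.0

def nmsSketch(boxes, scores, maxOverlap):
  # Visit boxes from highest to lowest score, keeping only non-overlapping ones.
  order = sorted(range(len(boxes)), key=lambda i: scores[i], reverse=True)
  keptBoxes, keptScores = [], []
  for i in order:
    if all(iouSketch(boxes[i], kb) <= maxOverlap for kb in keptBoxes):
      keptBoxes.append(boxes[i])
      keptScores.append(scores[i])
  return keptBoxes, keptScores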
    images[k[0]] = [[k[1]] + map(float, k[2:])]
  return images

def getCategories():
  cat = 'aeroplane bicycle bird boat bottle bus car cat chair cow diningtable dog horse motorbike person pottedplant sheep sofa train tvmonitor'.split()
  categories = {}
  id = 0
  for c in cat:
    categories[c + '_big'] = id
    id += 1
    categories[c + '_inside'] = id
    id += 1
  return categories

if __name__ == "__main__":
  params = cu.loadParams('relationsAnnotations matFilesDir outDir')
  relations = loadBoxIndexFile(params['relationsAnnotations'])
  print 'Relations loaded'
  categories = getCategories()
  counter = 0
  for f in os.listdir(params['matFilesDir']):
    if not f.endswith('.mat') or f == 'gt_pos_layer_5_cache.mat':
      continue
    counter += 1
    if os.path.isfile(params['outDir'] + '/' + f):
      continue
    img = f.replace('.mat', '')
    print counter, img
    mat = scipy.io.loadmat(params['matFilesDir'] + '/' + f)
    idx = mat['gt'] == 0
    mat['feat'] = mat['feat'][idx[:, 0], :]
    mat['gt'] = mat['gt'][idx[:, 0], :]
    mat['boxes'] = mat['boxes'][idx[:, 0], :]
  # Expected Format: k:v!k:v!
  params = params.split('!')
  result = {}
  for p in params:
    if p != '':
      k, v = p.split(':')
      result[k] = v
  return result

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  params = cu.loadParams("modelType modelParams positivesFeatures trainingList featuresDir modelOut overlap iterations")
  featuresExt = params['positivesFeatures'].split('.')[-1]
  trainingList = [x.replace('\n', '') for x in open(params['trainingList'])]
  maxNegOverlap = float(params['overlap'])
  iterations = int(params['iterations']) + 1
  positives = readPositivesData(params['positivesFeatures'])
  args = parseModelParams(params['modelParams'])
  print " ++ LEARNING", params['modelType'], "MODEL WITH ARGS:", params['modelParams'], " ++ "
  for i in range(iterations):
    mainLoop(params['modelType'], args, positives, trainingList, params['featuresDir'], featuresExt, params['modelOut'], maxNegOverlap, i)
  os.system('rm ' + params['modelOut'] + '.hards')
def mapToGroundTruth(img, box, groundTruths):
  result = {}
  for cat in groundTruths.keys():
    try:
      annotations = groundTruths[cat][img]
    except:
      continue
    for gt in annotations:
      iou = det.IoU(box, gt)
      rel = findRelation(box, gt, iou)
      if rel != None:
        result[cat + '_' + rel] = iou
  #if len(result.keys()) > 1:
  #  print result
  return result

params = cu.loadParams('regionsFile groundTruthDir output')
regions, headers = loadRegionsFile(params['regionsFile'])
print 'Images:', len(regions)
groundTruths = loadAllGroundTruths(params['groundTruthDir'])
out = open(params['output'], 'w')
catNames = groundTruths.keys()
relNames = ['_tight', '_big', '_inside']
catNames.sort()
categories = []
for r in relNames:
  for c in catNames:
    categories.append(c + r)
labels = dict([(categories[i], i + 1) for i in range(len(categories))])
    elif t:
      result = {'tight': 'tp'}
    elif i:
      result = {'tight': 'fp', 'inside': 'fn'}
    else:
      result = {'tight': 'fp'}
  elif boxData['type'] == 'inside' and float(boxData['score']) >= threshold:
    if b:
      result = {'inside': 'fp', 'big': 'fn'}
    elif t:
      result = {'inside': 'fp', 'tight': 'fn'}
    elif i:
      result = {'inside': 'tp'}
    else:
      result = {'inside': 'fp'}
  else:
    result = {boxData['type']: 'tn'}
  return result

if __name__ == "__main__":
  params = cu.loadParams("bigFile tightFile insideFile groundTruthsFile threshold outputDir")
  big = readScoresFile(params['bigFile'])
  tight = readScoresFile(params['tightFile'])
  inside = readScoresFile(params['insideFile'])
  threshold = float(params['threshold'])
  results = mergeScores(big, tight, inside)
  groundTruths = cu.loadBoxIndexFile(params['groundTruthsFile'])
  counts = {'big': {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0},
            'tight': {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0},
            'inside': {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}}
  allBoxes = 0
  for img in results.keys():
    try:
      boxes = groundTruths[img]
      imageOK = True
  return result

def reformatGroundTruth(gt, category):
  result = []
  for img in gt.keys():
    for box in gt[img]:
      result.append([img, category + '_tight'] + box)
  return result

def saveResults(outputFile, results):
  outputFile = open(outputFile, 'w')
  for r in results:
    outputFile.write(r[0] + ' ' + r[1] + ' ' + ' '.join(map(str, map(int, r[2:]))) + '\n')
  outputFile.close()

if __name__ == "__main__":
  params = cu.loadParams("proposalsFile groundTruthDir outputFile")
  proposals = cu.loadBoxIndexFile(params['proposalsFile'])
  records = []
  files = os.listdir(params['groundTruthDir'])
  files.sort()
  for f in files:
    category = f.split('_')[0]
    print category
    groundTruth = cu.loadBoxIndexFile(params['groundTruthDir'] + '/' + f)
    records += selectRegions(proposals, groundTruth, category, big)
    records += selectRegions(proposals, groundTruth, category, inside)
    records += reformatGroundTruth(groundTruth, category)
  saveResults(params['outputFile'], records)
import os, sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import utils as cu
import RLConfig as config

params = cu.loadParams("config caffeLog rlLog outdir")
config.readConfiguration(params["config"])

fig, ax = plt.subplots(nrows=2, ncols=3)
fig.set_size_inches(18.5, 10.5)

# Parse Caffe Log
loss = []
for l in open(params['caffeLog']):
  if l.find('loss =') != -1:
    loss.append(float(l.split()[-1]))
i = np.argmax(loss)
loss[i] = np.average(loss)
ax[0, 0].plot(range(len(loss)), loss)
ax[0, 0].set_title('QNetwork Loss')

# Parse RL output
avgRewards = []
epochRewards = []
epochRecall = []
epochIoU = []
epochLandmarks = []
    a = map(int, areas[i])
    smap[a[1]:a[3], a[0]:a[2]] += scores[i]
  a = fig.add_subplot(1, 2, 2)
  smap[0, 0] = 20
  smap[ox - 1, oy - 1] = -20
  plt.imshow(smap)
  plt.savefig('/home/caicedo/data/rcnn/masksOut/' + image + '.png', bbox_inches='tight')

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  MIN_AREA = 99.0 * 99.0
  MAX_AREA = 227.0 * 227.0
  CONV_LAYER = 'conv3'
  ## Main Program Parameters
  params = cu.loadParams("modelFile testImageList proposalsFile featuresDir featuresExt threshold outputDir")
  model = det.createDetector('linear')
  model.load(params['modelFile'])
  imageList = [x.replace('\n', '') for x in open(params['testImageList'])]
  proposals = mk.loadBoxIndexFile(params['proposalsFile'])
  threshold = float(params['threshold'])
  ## Make detections and transfer scores
  projector = PredictionsToImagePlane(proposals, CONV_LAYER, MIN_AREA, MAX_AREA, 0.7)
  results = detectObjects(model, imageList, params['featuresDir'], params['featuresExt'], -10.0, projector)
  out = open(params['outputDir'], 'w')
  for r in results:
    out.write(' '.join(map(str, r)) + ' 0\n')
  out.close()
import os, sys
import utils as cu
import Image

params = cu.loadParams('positiveBoxes negativeBoxes imgDir output')
pos = [x.split() for x in open(params['positiveBoxes'])]
neg = [x.split() for x in open(params['negativeBoxes'])]
boxes = {}
for p in pos:
  try:
    boxes[p[0]].append(p[1:] + [0])
  except:
    boxes[p[0]] = [p[1:] + [0]]
for n in neg:
  try:
    boxes[n[0]].append(n[1:] + [1])
  except:
    boxes[n[0]] = [n[1:] + [1]]
flipLabel = lambda x: 0 if x == 1 else 1
counter = 0
out = open(params['output'], 'w')
for img in boxes.keys():
  out.write('# ' + str(counter) + '\n')
  imPath = params['imgDir'] + '/' + img + '.jpg'
  im = Image.open(imPath)
  w, h = im.size
  out.write(imPath + '\n3\n' + str(w) + '\n' + str(h) + '\n' + str(2 * len(boxes[img])) + '\n')
  for b in boxes[img]:
    out.write(str(b[-1]) + ' 1.0 0.0 ' + ' '.join(b[0:4]) + '\n')
    out.write(str(flipLabel(b[-1])) + ' -1.0 0.0 ' + ' '.join(b[0:4]) + '\n')
  counter += 1
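# Illustrative output record produced by the writes above for an image with a
# single positive box (the path and coordinates below are made-up values): the
# label 0 line is the positive window and the flipped label 1 line repeats the
# same window with the opposite label.
#   # 0
#   /path/to/images/000001.jpg
#   3
#   500
#   375
#   2
#   0 1.0 0.0 12 34 200 180
#   1 -1.0 0.0 12 34 200 180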
  print 'Positive Matrix loaded (' + str(pos.shape[0]) + ' instances)'
  return (pos, posIdx, ari, osi)

def parseModelParams(params):
  # Expected Format: k:v!k:v!
  params = params.split('!')
  result = {}
  for p in params:
    if p != '':
      k, v = p.split(':')
      result[k] = v
  return result

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  params = cu.loadParams("modelType modelParams positivesFeatures trueObjectBoxesFile trainingList featuresDir modelOut overlap iterations")
  featuresExt = params['positivesFeatures'].split('.')[-1]
  trainingList = [x.replace('\n', '') for x in open(params['trainingList'])]
  maxNegOverlap = float(params['overlap'])
  iterations = int(params['iterations']) + 1
  positives = readPositivesData(params['positivesFeatures'])
  args = parseModelParams(params['modelParams'])
  print " ++ LEARNING", params['modelType'], "MODEL WITH ARGS:", params['modelParams'], " ++ "
  trueObjectBoxes = [x.split() for x in open(params['trueObjectBoxesFile'])]
  for i in range(iterations):
    mainLoop(params['modelType'], args, positives, trueObjectBoxes, trainingList, params['featuresDir'], featuresExt, params['modelOut'], maxNegOverlap, i)
  os.system('rm ' + params['modelOut'] + '.hards')
    nBoxes += r[0].shape[0]
    nFeat = r[0].shape[1]
  featureMatrix = np.zeros((nBoxes, nFeat))
  i = 0
  outputFile = open(outputDir + '/' + category + '.idx', 'w')
  for r in result:
    featureMatrix[i:i + r[0].shape[0]] = r[0]
    for box in r[1]:
      outputFile.write(box[0] + ' ' + ' '.join(map(str, map(int, box[1:]))) + '\n')
    i += r[0].shape[0]
  outputFile.close()
  cu.saveMatrix(featureMatrix, outputDir + '/' + category + '.' + featExt)
  print 'Total of', nBoxes, 'positive examples collected for', category

if __name__ == "__main__":
  params = cu.loadParams("imageList featuresDir groundTruthFile outputDir featuresExt category operation")
  groundTruths = cu.loadBoxIndexFile(params['groundTruthFile'])
  imageList = [x.replace('\n', '') for x in open(params['imageList'])]
  operator = None
  if params['operation'] == 'big':
    operator = big
  elif params['operation'] == 'tight':
    operator = tight
  elif params['operation'] == 'inside':
    operator = inside
  elif params['operation'] == 'background':
    operator = background
  else:
    print 'Select a valid operation: [big | tight | inside | background]'
    sys.exit()
import numpy as np
import conf
import NN
from activation_function import Sigmoid
import utils

if __name__ == '__main__':
  print "Part 1: Loading Data\n"
  X, y = utils.loadData(conf.FILE_X, conf.FILE_Y)

  print "Part 2: Loading Parameters\n"
  W1, W2 = utils.loadParams(conf.FILE_W1, conf.FILE_W2)
  # Unroll parameters
  W = np.hstack((W1.flatten(0), W2.flatten(0)))
  W = W.reshape((len(W), 1))

  print "Part 3: Compute Cost(Feedforward)\n"
  LEARN_RATE = 0
  J, _ = NN.nnCostFunction(W, conf.INPUT_LAYER_SIZE, conf.HIDDEN_LAYER_SIZE,
                           conf.NUM_LABELS, X, y, LEARN_RATE)
  print ("Cost at parameters (loaded from w1.txt and w2.txt): %f"
         "\n(this value should be about 0.287629)\n") % J

  print "Part 4: Implement Regularization\n"
  LEARN_RATE = 1
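# For reference, a hedged statement of the cost this script checks (assuming
# NN.nnCostFunction follows the usual two-layer network with sigmoid output
# activations A3, one-hot labels Y, m examples, and the rate LEARN_RATE applied
# only to the non-bias weights):
#   J = (1/m) * sum( -Y*log(A3) - (1-Y)*log(1-A3) )
#       + (LEARN_RATE/(2*m)) * ( sum(Theta1[:,1:]**2) + sum(Theta2[:,1:]**2) )
# With LEARN_RATE = 0 this reduces to the unregularized cost verified in Part 3.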
import os, sys
from PIL import Image
import utils as cu
import numpy as np

def findNearestNeighbor(H, i):
  J = np.tile(H[i, :], (H.shape[0], 1))
  R = np.sum(np.abs(J - H), axis=1)
  R[i] = np.inf
  return np.argmin(R), np.min(R)

params = cu.loadParams('imageDir')
dir = params['imageDir']
allImages = os.listdir(dir)
H = np.zeros((len(allImages), 768))
print 'Scanning', len(allImages), 'images'
imgs = 0
for f in allImages:
  try:
    im = Image.open(dir + '/' + f)
    h = np.asarray(im.histogram())
    if len(h) == 256:
      H[imgs, :] = np.tile(np.asarray(h), (1, 3))
    else:
      H[imgs, :] = np.asarray(h)
    imgs += 1
  except:
    print 'Problems with', f
H = H[0:imgs, :]
for i in range(imgs):
def computePrecAt(tp, K):
  import numpy as np
  print 'Prec@K',
  for k in K:
    print '(', str(k), ':', np.sum(tp[0:k]) / float(k), ')',
  print ''

def bigOverlap(box, gt):
  if ldet.overlap(box, gt) > 0.5 and ldet.IoU(box, gt) < 0.5:
    return 1.0
  else:
    return 0.0

# Main Program
if __name__ == "__main__":
  params = cu.loadParams("overlap groundTruth detections output")
  indexData = [x.split() for x in open(params['groundTruth'])]
  detectionsData = [x.split() for x in open(params['detections'])]
  overlapLimit = 1.0
  if params['overlap'].startswith('big'):
    minOverlap = float(params['overlap'].replace('big', ''))
    overlapMeasure = lambda x, y: np.exp(-((1.0 - ldet.overlap(x, y))**2 + (0.25 - ldet.IoU(x, y))**2))
    #overlapMeasure = bigOverlap
  elif params['overlap'].startswith('tight'):
    minOverlap = float(params['overlap'].replace('tight', ''))
    overlapMeasure = ldet.IoU
  elif params['overlap'].startswith('inside'):
    minOverlap = float(params['overlap'].replace('inside', ''))
    overlapMeasure = lambda x, y: np.exp(-((1.0 - ldet.overlap(y, x))**2 + (0.25 - ldet.IoU(x, y))**2))
  elif params['overlap'].startswith('OV'):
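# Illustrative use of computePrecAt: with tp = [1, 1, 0, 1] (a 1 marks a correct
# detection in ranked order) and K = [2, 4], the function prints
#   Prec@K ( 2 : 1.0 ) ( 4 : 0.75 )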
      ]
      scaleBoxes.append(shiftBox(nb, w, h))
  else:
    x1, x2 = 0, adjustedSize
    parts = int(round(bh / adjustedSize + 0.3))
    step = (bh - adjustedSize) / max(parts - 1, 1)
    for i in range(parts):
      nb = [x1, gt[1] + i * step, x2, gt[1] + i * step + adjustedSize]
      scaleBoxes.append(shiftBox(nb, w, h))
  return scaleBoxes

if __name__ == "__main__":
  params = cu.loadParams("groundTruthBoxes imageDir outputDir cropSize")
  groundTruthBoxes = cu.loadBoxIndexFile(params['groundTruthBoxes'])
  cropSize = int(params['cropSize'])
  projections = {}
  overlaps = []
  ious = []
  for img in groundTruthBoxes.keys():
    print img
    name = img.split('/')[1]
    if not os.path.isfile(params['imageDir'] + '/' + name + '.JPEG'):
      continue
    im = Image.open(params['imageDir'] + '/' + name + '.JPEG')
    w, h = im.size
    try:
      p = projections[img]
  # Expected Format: k:v!k:v!
  params = params.split('!')
  result = {}
  for p in params:
    if p != '':
      k, v = p.split(':')
      result[k] = v
  return result

########################################
## MAIN PROGRAM
########################################
if __name__ == "__main__":
  params = cu.loadParams("modelType modelParams category positivesList trainingList masksDir featuresDir featuresExt modelOut overlap iterations")
  trainingList = [x.replace('\n', '') for x in open(params['trainingList'])]
  maxNegOverlap = float(params['overlap'])
  iterations = int(params['iterations']) + 1
  positives = readPositivesData(params['masksDir'], params['featuresDir'], params['featuresExt'], params['positivesList'], params['category'])
  args = parseModelParams(params['modelParams'])
  print " ++ LEARNING", params['modelType'], "MODEL WITH ARGS:", params['modelParams'], " ++ "
  for i in range(iterations):
    mainLoop(params['modelType'], args, positives, list(trainingList), params['featuresDir'], params['featuresExt'], params['modelOut'], maxNegOverlap, i)
  os.system('rm ' + params['modelOut'] + '.hards')