Example #1
	def calculateVocabulary(self):
		return utility.duration(
			clusters.calculateClusters,
			utility.duration(
				features.imagesFeatures,
				self.extractor,
				self.dataset.clusterImages(),
				task="Image features"
			),
			clusters.calculateClusterCount(self.dataset.size),
			task="Clustering"
		)
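Throughout these examples, `utility.duration` receives a callable plus its positional arguments, runs it, reports the elapsed time under the `task` label, and passes the return value through. The helper itself is not shown anywhere in the listing; a minimal sketch consistent with that usage could look like the following (the clock and the message format are assumptions):

import time

def duration(function, *arguments, task="Task"):
	# Sketch of the timing wrapper assumed by these examples: run the
	# callable, report the elapsed wall-clock time under the task label,
	# and return the callable's result unchanged.
	start = time.time()
	result = function(*arguments)
	print(task + " took {:.2f} seconds".format(time.time() - start))
	return result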
Example #2
	def initializeFeatures(self, extractor, output):
		outputPositions = 'positions/'+output+'.hdf5'
		outputDescriptors = 'descriptors/'+output+'.hdf5'
		# Regenerate the cache when either file is missing: the read-mode
		# open in the else branch would fail on a missing file.
		if not os.path.isfile(outputPositions) or not os.path.isfile(outputDescriptors):
			self.positions = h5py.File(outputPositions, 'w')
			self.descriptors = h5py.File(outputDescriptors, 'w')
			utility.duration(
				utility.iterate,
				self.images(),
				lambda image, i: self.initializeFeature(extractor, image),
				False,
				task="Initialize dataset "+self.name+" features"
			)
		else:
			self.positions = h5py.File(outputPositions, 'r')
			self.descriptors = h5py.File(outputDescriptors, 'r')
		self.featuresSize = numpy.sum([len(self.imageDescriptors(image)) for image in self.images()])
Example #3
def executeTest(sourceData, queryData, model, extractor, vectorNormalize,
                vectorDifference, *modelArguments):
    dataFile = '_'.join([sourceData, extractor])
    modelInstance = availableModels[model](availableDataSets[sourceData],
                                           availableExtractors[extractor],
                                           dataFile, *modelArguments)
    outputFile = 'results/' + '_'.join([
        utility.nameSourceQuery(sourceData, queryData), '-'.join(
            [model, *[str(argument) for argument in modelArguments]]),
        str(modelInstance.dataset.vocabularySize), extractor, vectorNormalize,
        vectorDifference
    ]) + '.hdf5'
    utility.duration(modelInstance.test,
                     availableDataSets[queryData],
                     availableVectorNormalizers[vectorNormalize],
                     availableVectorComparators[vectorDifference],
                     outputFile,
                     task="Full matrix comparison")
    print("Result written to " + outputFile)
Example #4
def test(sourceData, queryData, model, extractor, vectorNormalize,
         vectorDifference, *modelArguments):
    """Run the test for a model chosen by string arguments and write the
    corresponding HDF5 result file."""
    if sourceData not in availableDataSets:
        print("Unknown data set '" + sourceData + "', available data sets: " +
              ", ".join(availableDataSets.keys()))
        return
    if queryData not in availableDataSets:
        print("Unknown data set '" + queryData + "', available data sets: " +
              ", ".join(availableDataSets.keys()))
        return
    if model not in availableModels:
        print("Unknown model '" + model + "', available models: " +
              ", ".join(availableModels.keys()))
        return
    if extractor not in availableExtractors:
        print("Unknown feature extractor '" + extractor +
              "', available extractors: " +
              ", ".join(availableExtractors.keys()))
        return
    if vectorNormalize not in availableVectorNormalizers:
        print("Unknown function '" + vectorNormalize +
              "' for vector normalization, available functions: " +
              ", ".join(availableVectorNormalizers.keys()))
        return
    if vectorDifference not in availableVectorComparators:
        print("Unknown function '" + vectorDifference +
              "' for vector comparison, available functions: " +
              ", ".join(availableVectorComparators.keys()))
        return
    utility.duration(executeTest,
                     sourceData,
                     queryData,
                     model,
                     extractor,
                     vectorNormalize,
                     vectorDifference,
                     *modelArguments,
                     task="Test")
Example #5
	def initializeVocabulary(self, output):
		outputArguments = output.split('_')
		outputVocabulary = 'vocabulary/'+outputArguments[0]+'_'+str(self.vocabularySize)+'_'+'_'.join(outputArguments[1:])+'.pickle'
		if not os.path.isfile(outputVocabulary):
			self.vocabulary = utility.duration(
				clusters.calculateClusters,
				self.clusterDescriptors(),
				self.vocabularySize,
				task="Initialize dataset "+self.name+" vocabulary"
			)
			with open(outputVocabulary, 'wb') as file:
				pickle.dump(self.vocabulary, file)
		else:
			with open(outputVocabulary, 'rb') as file:
				self.vocabulary = pickle.load(file)
		self.vocabularyInertia = self.vocabulary.inertia_
Example #6
if deExp:
# important part: execution of DE for multiple iterations, for n number of times #####
 for cnt in xrange(iterations):
  print " ************ Iteration # {} *************** ".format(cnt)
  for deRunCount in deRunList:
    print "### getting baseline ###"
    baseline_fileNameToWriteP = "baseline_" + str(runCount) + "_" + str(deRunCount) + "_" + str(cnt)
    minB, maxB = getBaselineForModel(runCount, dirToWriteP, baseline_fileNameToWriteP, constFlag)
    #print "And the baseline is (min, max format) \n", minB, maxB

    print "Executing D.E (minimized version) ... for {} D.E. runs and {} model runs".format(deRunCount, runCount)
    print "========================================================================="
    #constraintFileNameParam = "all_0_1_equ.csv"
    constraintFileNameParam = "all_0_1_no_equ.csv"
    constraintFile = dirToWriteP + constraintFileNameParam
    with utility.duration():
      integrator.runDE(minB, maxB, IntegratedDefectModel, deRunCount, runCount, constraintFile)
  print "------------------------------------------ END ----------------------------------------------------------"
####  DE ZONE ENDS! #########
##### GALE ZONE STARTS ! #########
#if galeExp:
# for cnt in xrange(iterations):
#  galeRunCount = 10 
#  print "### getting baseline ###"
#  baseline_fileNameToWriteP = "baseline_" + str(runCount)
#  minB, maxB = getBaselineForModel(runCount, dirToWriteP, baseline_fileNameToWriteP, constFlag)
#  print "And the baseline is (min, max format) \n", minB, maxB
#
#  print "Executing GALE (minimized version) ... for {} GALE runs and {} model runs".format(galeRunCount, runCount)
#  print "========================================================================="   
#  with utility.duration():
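Note that Examples 6 and 7 are Python 2 (print statements, `xrange`) and use `utility.duration` as a context manager (`with utility.duration():`) rather than as the call wrapper seen in the other examples, so this is a different helper. A minimal sketch of such a context manager, with the clock and output format assumed, might be:

import time
from contextlib import contextmanager

@contextmanager
def duration():
    # Assumed context-manager variant: time the body of the with-block
    # and report the elapsed wall-clock time when it exits.
    start = time.time()
    try:
        yield
    finally:
        print("Took {:.2f} seconds".format(time.time() - start))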
Example #7
        for deRunCount in deRunList:
            print "### gettting baseline ###"
            baseline_fileNameToWriteP = "baseline_" + str(
                runCount) + "_" + str(deRunCount) + "_" + str("cnt")
            minB, maxB = getBaselineForModel(runCount, dirToWriteP,
                                             baseline_fileNameToWriteP,
                                             constFlag)
            #print "And the baseline is (min, max format) \n", minB, maxB

            print "Executing D.E (minimized version) ... for {} D.E. runs and {} model runs".format(
                deRunCount, runCount)
            print "========================================================================="
            #constraintFileNameParam = "all_0_1_equ.csv"
            constraintFileNameParam = "all_0_1_no_equ.csv"
            constraintFile = dirToWriteP + constraintFileNameParam
            with utility.duration():
                integrator.runDE(minB, maxB, IntegratedDefectModel, deRunCount,
                                 runCount, constraintFile)
        print "------------------------------------------ END ----------------------------------------------------------"
####  DE ZONE ENDS! #########
##### GALE ZONE STARTS ! #########
#if galeExp:
# for cnt in xrange(iterations):
#  galeRunCount = 10
#  print "### gettting baseline ###"
#  baseline_fileNameToWriteP = "baseline_" + str(runCount)
#  minB, maxB = getBaselineForModel(runCount, dirToWriteP, baseline_fileNameToWriteP, constFlag)
#  print "And the baseline is (min, max format) \n", minB, maxB
#
#  print "Executing GALE (minimized version) ... for {} GALE runs and {} model runs".format(galeRunCount, runCount)
#  print "========================================================================="
Example #8
def fileMetrics(result, sourceData, queryData, amounts):
	similarityMatrix = utility.duration(
		readSimilarities,
		sourceData.name,
		queryData.name,
		task="Read similarity matrix"
	)
	differenceFile, differenceMatrix = utility.duration(
		readDifferences,
		result,
		task="Read difference matrix"
	)
	sourcePoses = utility.duration(
		readPoses,
		sourceData.name,
		task="Read poses",
	)
	queryPoses = utility.duration(
		readPoses,
		queryData.name,
		task="Read poses",
	)

	totalMetrics = {
		'precision': {amount:numpy.empty(queryData.size, dtype=numpy.float64) for amount in amounts},
		'recall': {amount:numpy.empty(queryData.size, dtype=numpy.float64) for amount in amounts},
		'recallRate': {amount:numpy.empty(queryData.size, dtype=numpy.float64) for amount in amounts},
		'transform': numpy.empty(queryData.size, dtype=numpy.float64),
		'rotation': numpy.empty(queryData.size, dtype=numpy.float64)
	}

	utility.iterate(
		range(queryData.size),
		lambda _, i: registerMetrics(
			totalMetrics,
			imageMetrics(similarityMatrix, differenceMatrix, sourcePoses, queryPoses, i, amounts),
			i
		),
		False
	)

	differenceFile.close()

	return {
		'precision': {
			amount: {
				'mean': numpy.mean(totalMetrics['precision'][amount]),
				'variance': numpy.var(totalMetrics['precision'][amount]),
				'standardDeviation': numpy.std(totalMetrics['precision'][amount])
			} for amount in totalMetrics['precision']
		},
		'recall': {
			amount: {
				'mean': numpy.mean(totalMetrics['recall'][amount]),
				'variance': numpy.var(totalMetrics['recall'][amount]),
				'standardDeviation': numpy.std(totalMetrics['recall'][amount])
			} for amount in totalMetrics['recall']
		},
		'recallRate': {
			amount: {
				'mean': numpy.mean(totalMetrics['recallRate'][amount]),
				'variance': numpy.var(totalMetrics['recallRate'][amount]),
				'standardDeviation': numpy.std(totalMetrics['recallRate'][amount])
			} for amount in totalMetrics['recallRate']
		},
		'transform': {
			'mean': numpy.mean(totalMetrics['transform']),
			'variance': numpy.var(totalMetrics['transform']),
			'standardDeviation': numpy.std(totalMetrics['transform'])
		},
		'rotation': {
			'mean': numpy.mean(totalMetrics['rotation']),
			'variance': numpy.var(totalMetrics['rotation']),
			'standardDeviation': numpy.std(totalMetrics['rotation'])
		}
	}
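The dictionary returned by `fileMetrics` repeats the same mean/variance/standard-deviation triple for every metric. A hypothetical `summarize` helper (not in the original code) would remove that repetition:

import numpy

def summarize(values):
	# Hypothetical helper: build the mean/variance/standard-deviation
	# triple that fileMetrics constructs for each metric by hand.
	return {
		'mean': numpy.mean(values),
		'variance': numpy.var(values),
		'standardDeviation': numpy.std(values)
	}

With it, each entry becomes e.g. `'transform': summarize(totalMetrics['transform'])`.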
Example #9
	def calculateRepresentations(self):
		return utility.duration(
			self.representDataset,
			task="Dataset representations"
		)