Example #1
def __matchDict(self, dictionary, other):
    """
      Method to check the consistency of two dictionaries.
      Returns True if all the keys and values in other
      match keys and values in dictionary.
      Note that it does not check that all the keys in dictionary
      are also present in other.
      @ In, dictionary, dict, first dictionary to check
      @ In, other, dict, second dictionary to check
      @ Out, returnBool, bool, True if all the keys and values in other match keys and values in dictionary
    """
    returnBool = True
    for key in other:
        if key in dictionary:
            if not compare(dictionary[key], other[key]):
                print("Mismatch ", key, repr(dictionary[key]),
                      repr(other[key]))
                returnBool = False
        else:
            # the key may be stored in its bytes form; retry with the encoded key
            binKey = toBytes(key)
            if binKey in dictionary:
                if not compare(dictionary[binKey], other[key]):
                    print("Mismatch_b ", key, dictionary[binKey],
                          other[key])
                    returnBool = False
            else:
                print("No_key ", key, other[key])
                returnBool = False
    return returnBool
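
The helpers compare and toBytes used above come from RAVEN's utils module. A minimal sketch of what this snippet assumes they do (the tolerance value and the fallback to plain equality are assumptions, not RAVEN's exact implementation):

import numpy as np

def compare(a, b, relTolerance=1e-10):
    # hypothetical sketch: numbers match within a relative tolerance,
    # anything non-numeric falls back to plain equality
    try:
        return bool(np.isclose(float(a), float(b), rtol=relTolerance))
    except (TypeError, ValueError):
        return a == b

def toBytes(key):
    # hypothetical sketch: encode str keys so they can match bytes keys
    return key.encode('utf-8') if isinstance(key, str) else key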
Example #2
File: Code.py Project: dylanjm/raven
  def createExportDictionary(self, evaluation):
    """
      Method that is aimed to create a dictionary with the sampled and output variables that can be collected by the different
      output objects.
      @ In, evaluation, tuple, (dict of sampled variables, dict of code outputs)
      @ Out, outputEval, dict, dictionary containing the output/input values: {'varName':value}
    """
    sampledVars, outputDict = evaluation

    # the code output may itself be a tuple; its first entry holds the output dict
    if isinstance(outputDict, tuple):
      outputEval = outputDict[0]
    else:
      outputEval = outputDict

    for key, value in outputEval.items():
      outputEval[key] = np.atleast_1d(value)

    for key, value in sampledVars.items():
      if key in outputEval:
        if not utils.compare(value, np.atleast_1d(outputEval[key])[-1], relTolerance=1e-8):
          self.raiseAWarning('The model ' + self.type + ' reported a different value (%f) for %s '
                             'than RAVEN\'s suggested sample (%f). Using the value suggested by RAVEN (%f).'
                             % (outputEval[key][-1], key, value, value))
      outputEval[key] = np.atleast_1d(value)

    self._replaceVariablesNamesWithAliasSystem(outputEval, 'input',True)

    return outputEval
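
The relTolerance=1e-8 check above tolerates floating-point round-off between RAVEN's sampled value and the value the code reports back. The same kind of relative comparison in isolation (a standalone sketch; the name approx_equal and the zero-scale handling are not from RAVEN):

def approx_equal(a, b, relTolerance=1e-8):
    # relative difference, treating two exact zeros as equal
    scale = max(abs(a), abs(b))
    if scale == 0.0:
        return True
    return abs(a - b) / scale <= relTolerance

assert approx_equal(1.0, 1.0 + 1e-10)
assert not approx_equal(1.0, 1.01)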
Example #3
def compare_two_img():
    data = request.get_json()
    # strip the data-URL prefix so only the base64 payload remains
    if data["img_1"].startswith('data:image/jpeg;base64,'):
        img_1 = data["img_1"].replace('data:image/jpeg;base64,', '')
    else:
        img_1 = data["img_1"].replace('data:image/png;base64,', '')
    if data["img_2"].startswith('data:image/jpeg;base64,'):
        img_2 = data["img_2"].replace('data:image/jpeg;base64,', '')
    else:
        img_2 = data["img_2"].replace('data:image/png;base64,', '')

    im = Image.open(BytesIO(base64.b64decode(img_1)))
    im2 = Image.open(BytesIO(base64.b64decode(img_2)))
    im_arr1 = np.array(im)
    im_arr2 = np.array(im2)
    # normalize both images to 3-channel RGB arrays
    if len(im_arr1.shape) == 2:
        im_arr1 = np.stack([im_arr1] * 3, 2)
        im = Image.fromarray(im_arr1)
    elif im_arr1.shape[2] == 4:
        im_arr1 = im_arr1[:, :, :3]
        im = Image.fromarray(im_arr1)

    if len(im_arr2.shape) == 2:
        im_arr2 = np.stack([im_arr2] * 3, 2)
        im2 = Image.fromarray(im_arr2)
    elif im_arr2.shape[2] == 4:
        im_arr2 = im_arr2[:, :, :3]
        im2 = Image.fromarray(im_arr2)
    features_1, faces_1 = face_recognize.feature_img(im)
    features_2, faces_2 = face_recognize.feature_img(im2)

    if len(faces_1) != 1:
        result = {
            "results": "image 1 should contain exactly one face!"
        }
    elif len(faces_2) != 1:
        result = {
            "results": "image 2 should contain exactly one face!"
        }
    else:
        probs, rel = compare(features_1, features_2)
        result = {
            "results": "{0:.2f}".format(probs[0, 0] * 100)
        }
    return json.dumps(result)
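
The two startswith branches above only handle JPEG and PNG payloads. A single regex can strip any data-URL prefix, whatever the MIME subtype (a sketch; strip_data_url is a hypothetical helper, not part of this app):

import re

def strip_data_url(payload):
    # drop an optional 'data:image/<subtype>;base64,' prefix
    return re.sub(r'^data:image/[A-Za-z0-9.+-]+;base64,', '', payload)

assert strip_data_url('data:image/webp;base64,AAAA') == 'AAAA'
assert strip_data_url('AAAA') == 'AAAA'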
Example #4
def _initializeLSpp(self, runInfo, inputs, initDict):
  """
    Method to initialize the LS post processor (create grid, etc.)
    @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc.)
    @ In, inputs, list, list of inputs
    @ In, initDict, dict, dictionary with initialization options
    @ Out, None
  """
  PostProcessor.initialize(self, runInfo, inputs, initDict)
  self.gridEntity = GridEntities.returnInstance("MultiGridEntity", self, self.messageHandler)
  self.externalFunction = self.assemblerDict['Function'][0][3]
  if 'ROM' not in self.assemblerDict:
    # no ROM provided: fall back to a 1-nearest-neighbor classifier
    self.ROM = LearningGate.returnInstance('SupervisedGate', 'SciKitLearn', self,
                                           **{'SKLtype': 'neighbors|KNeighborsClassifier',
                                              'n_neighbors': 1,
                                              'Features': ','.join(list(self.parameters['targets'])),
                                              'Target': [self.externalFunction.name]})
  else:
    self.ROM = self.assemblerDict['ROM'][0][3]
  self.ROM.reset()
  self.indexes = -1
  for index, inp in enumerate(self.inputs):
    if mathUtils.isAString(inp) or isinstance(inp, bytes):
      self.raiseAnError(IOError, 'LimitSurface PostProcessor only accepts Data(s) as inputs. Got string type!')
    if inp.type == 'PointSet':
      self.indexes = index
  if self.indexes == -1:
    self.raiseAnError(IOError, 'LimitSurface PostProcessor needs a PointSet as input!')
  if self.bounds is None:
    # infer bounds from the data; widen the upper bound of any variable that is numerically constant
    dataSet = self.inputs[self.indexes].asDataset()
    self.bounds = {"lowerBounds": {}, "upperBounds": {}}
    for key in self.parameters['targets']:
      self.bounds["lowerBounds"][key], self.bounds["upperBounds"][key] = min(dataSet[key].values), max(dataSet[key].values)
      if utils.compare(round(self.bounds["lowerBounds"][key], 14), round(self.bounds["upperBounds"][key], 14)):
        self.bounds["upperBounds"][key] += abs(self.bounds["upperBounds"][key] / 1.e7)
  self.gridEntity.initialize(initDictionary={"rootName": self.name, 'constructTensor': True,
                                             "computeCells": initDict.get('computeCells', False),
                                             "dimensionNames": self.parameters['targets'],
                                             "lowerBounds": self.bounds["lowerBounds"],
                                             "upperBounds": self.bounds["upperBounds"],
                                             "volumetricRatio": self.tolerance,
                                             "transformationMethods": self.transfMethods})
  self.nVar = len(self.parameters['targets'])  # total number of variables
  self.axisName = self.gridEntity.returnParameter("dimensionNames", self.name)  # implicit mapping of variable names onto the grid axis ordering: self.axisName[i] = name of the i-th coordinate
  self.testMatrix[self.name] = np.zeros(self.gridEntity.returnParameter("gridShape", self.name))  # grid where the goal function values are stored
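
The bounds block above widens the upper bound whenever a variable is numerically constant, so no grid axis ends up with zero width. The same guard in isolation (a sketch; the 14-digit rounding mirrors the code above, and the upper == 0 branch is an extra safeguard the original does not have):

def widen_if_degenerate(lower, upper):
    # if lower == upper to 14 digits, inflate the upper bound by a relative epsilon
    if round(lower, 14) == round(upper, 14):
        upper += abs(upper / 1.e7) if upper != 0.0 else 1.e-7
    return lower, upper

print(widen_if_degenerate(2.0, 2.0))  # (2.0, 2.0000002)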
Example #5
    def infer_numpy(self, faces, target_embs):
        '''
        faces : list of PIL Images
        target_embs : [n, 512] computed embeddings of the faces in the facebank
        tta : test-time augmentation (horizontal flip only), read from self.tta
        returns : best-match index per face (-1 if below self.threshold),
                  the best similarity per face, and the source embeddings
        '''
        embs = []
        for img in faces:
            if self.tta:
                with torch.no_grad():
                    mirror = trans.functional.hflip(img)
                    emb = self.model(
                        self.test_transform(img).to(
                            self.conf.device).unsqueeze(0))
                    emb_mirror = self.model(
                        self.test_transform(mirror).to(
                            self.conf.device).unsqueeze(0))
                    embs.append(
                        l2_norm(emb + emb_mirror).data.cpu().numpy()[0])
            else:
                with torch.no_grad():
                    embs.append(
                        self.model(
                            self.test_transform(img).to(
                                self.conf.device).unsqueeze(
                                    0)).data.cpu().numpy()[0])
        source_embs = np.array(embs)
        probs, _ = compare(source_embs, target_embs)
        maximum = np.amax(probs, axis=1)  # best similarity per query face

        max_idx = np.argmax(probs, axis=1)
        max_idx[maximum < self.threshold] = -1  # if no match, set idx to -1
        return max_idx, maximum, source_embs
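
Here compare takes an [m, 512] matrix of query embeddings and an [n, 512] facebank matrix and returns a similarity matrix that np.amax/np.argmax are applied to. A minimal sketch consistent with that usage, assuming the embeddings are L2-normalized (the dot-product formula and the second return value are assumptions about this codebase's compare):

import numpy as np

def compare(source_embs, target_embs):
    # cosine similarity of L2-normalized embeddings reduces to a dot product;
    # returns an [m, n] similarity matrix plus the best target index per source row
    probs = source_embs @ target_embs.T
    return probs, np.argmax(probs, axis=1)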
Example #6
    def _checkClosestBranch(self):
        """
      Function that checks the closest branch already evaluated
      @ In, None
      @ Out, returnTuple, tuple, closest branch info:
        - if self.hybridDETstrategy and branch found         -> returnTuple = (valBranch,cdfValues,treer)
        - if self.hybridDETstrategy and branch not found     -> returnTuple = (None,cdfValues,treer)
        - if not self.hybridDETstrategy and branch found     -> returnTuple = (valBranch,cdfValues)
        - if not self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues)
    """
        from sklearn import neighbors

        # compute cdf of sampled vars
        lowerCdfValues = {}
        cdfValues = {}
        self.raiseADebug("Check for closest branch:")
        self.raiseADebug("_" * 50)
        for key, value in self.values.items():
            self.raiseADebug("Variable name   : " + str(key))
            self.raiseADebug("Distrbution name: " + str(self.toBeSampled[key]))
            if key not in self.epistemicVariables.keys():
                cdfValues[key] = self.distDict[key].cdf(value)
                try:
                    index = utils.first(
                        np.atleast_1d(
                            np.asarray(self.branchProbabilities[key]) <=
                            cdfValues[key]).nonzero())[-1]
                    val = self.branchProbabilities[key][index]
                except (ValueError, IndexError):
                    val = None
                lowerCdfValues[key] = val
                self.raiseADebug("CDF value       : " + str(cdfValues[key]))
                self.raiseADebug("Lower CDF found : " +
                                 str(lowerCdfValues[key]))
            self.raiseADebug("_" * 50)
        # if hybrid DET, we need to find the tree that matches the values of the epistemic variables
        if self.hybridDETstrategy is not None:
            self.foundEpistemicTree, treer, compareDict = False, None, dict.fromkeys(
                self.epistemicVariables.keys(), False)
            for tree in self.TreeInfo.values():
                epistemicVars = tree.getrootnode().get(
                    "hybridsamplerCoordinate")[0]['SampledVars']
                for key in self.epistemicVariables.keys():
                    compareDict[key] = utils.compare(epistemicVars[key],
                                                     self.values[key])
                if all(compareDict.values()):
                    # we found the right epistemic tree
                    self.foundEpistemicTree, treer = True, tree
                    break
        else:
            treer = utils.first(self.TreeInfo.values())

        # check whether the point is among the adaptive points already explored (if not, push it into the grid)
        if not self.insertAdaptBPb:
            candidatesBranch = []
            # check if adaptive point is better choice -> TODO: improve efficiency
            for invPoint in self.investigatedPoints:
                pbth = [
                    invPoint[self.toBeSampled[key]]
                    for key in cdfValues.keys()
                ]
                if all(i <= pbth[cnt]
                       for cnt, i in enumerate(cdfValues.values())):
                    candidatesBranch.append(invPoint)
            if len(candidatesBranch) > 0:
                if None in lowerCdfValues.values():
                    lowerCdfValues = candidatesBranch[0]
                for invPoint in candidatesBranch:
                    pbth = [
                        invPoint[self.toBeSampled[key]]
                        for key in cdfValues.keys()
                    ]
                    if all(i >= pbth[cnt]
                           for cnt, i in enumerate(lowerCdfValues.values())):
                        lowerCdfValues = invPoint
        # Check if the requested adaptive point is outside the grid explored so far; in that case return None.
        # In addition, for Adaptive Hybrid DET, if treer is None we did not find any tree
        # in the epistemic space => we need to create another one
        if None in lowerCdfValues.values() or treer is None:
            if self.hybridDETstrategy is not None:
                returnTuple = None, cdfValues, treer
            else:
                returnTuple = None, cdfValues
            return returnTuple

        nntrain, mapping = None, {}
        for ending in treer.iterProvidedFunction(self._checkEnded):
            # already-ended branches: build the training set for the nearest-neighbor search (take coordinates <= cdfValues) -> TODO: improve efficiency
            pbth = [
                ending.get('SampledVarsPb')[key]
                for key in lowerCdfValues.keys()
            ]
            if all(pbth[cnt] <= i
                   for cnt, i in enumerate(lowerCdfValues.values())):
                if nntrain is None:
                    nntrain = np.zeros((1, len(cdfValues.keys())))
                    nntrain[0, :] = np.array(copy.copy(pbth))
                else:
                    nntrain = np.concatenate(
                        (nntrain, np.atleast_2d(np.array(copy.copy(pbth)))),
                        axis=0)
                mapping[nntrain.shape[0]] = ending
        if nntrain is not None:
            neigh = neighbors.NearestNeighbors(n_neighbors=len(mapping.keys()))
            neigh.fit(nntrain)
            valBranch = self._checkValidityOfBranch(
                neigh.kneighbors([list(lowerCdfValues.values())]), mapping)
            if self.hybridDETstrategy is not None:
                returnTuple = valBranch, cdfValues, treer
            else:
                returnTuple = valBranch, cdfValues
            return returnTuple
        else:
            returnTuple = (None, cdfValues,
                           treer) if self.hybridDETstrategy is not None else (
                               None, cdfValues)
            return returnTuple
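
The final step fits a NearestNeighbors model on the CDF coordinates of the ended branches and queries it with the lower CDF values. The core of that lookup in isolation (a sketch with made-up coordinates; note the snippet's mapping dict is keyed 1-based, so its entries sit at index + 1):

import numpy as np
from sklearn import neighbors

nntrain = np.array([[0.2, 0.3], [0.4, 0.1], [0.25, 0.35]])  # ended-branch CDF coordinates
query = [[0.3, 0.3]]                                        # lower CDF values of the new point

neigh = neighbors.NearestNeighbors(n_neighbors=len(nntrain))
neigh.fit(nntrain)
distances, indices = neigh.kneighbors(query)
# kneighbors returns 0-based indices ordered from closest to farthest;
# a lookup in the snippet's mapping would be mapping[indices[0][k] + 1]
print(indices[0])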