Example 1
def dense_solve_prep(model, dataBasics, string, prevSpan, prevNumSpan, span,
                     numSpan, cp, input, wv, bv, activations):

    # filters can be seen as the output of a convolutional layer
    nfilters = basics.numberOfFilters(wv)
    # features can be seen as the inputs for a convolutional layer
    nfeatures = basics.numberOfFeatures(wv)

    # space holders for computation values
    biasCollection = {}
    filterCollection = {}

    for ((p1, c1), (p, c), w) in wv:
        if c1 - 1 in range(nfeatures) and c - 1 in range(nfilters):
            filterCollection[c1 - 1, c - 1] = w
    for (p, c, w) in bv:
        if c - 1 in range(nfilters):
            for l in range(nfeatures):
                biasCollection[l, c - 1] = w

    (bl1, newInput) = dense_safety_solve(nfeatures, nfilters, filterCollection,
                                         biasCollection, input, activations,
                                         prevSpan, prevNumSpan, span, numSpan,
                                         cp)

    basics.nprint("completed a round of processing ")
    return (bl1, newInput)
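
The loops above assume that wv is a list of ((p1, c1), (p, c), w) weight tuples and bv a list of (p, c, w) bias tuples, with 1-based feature and filter indices. A minimal, self-contained sketch of how the two collections get populated (the toy values are illustrative only, not taken from the original code):

# Hypothetical toy data showing the assumed tuple layouts.
wv = [((0, 1), (0, 1), 0.5), ((0, 2), (0, 1), -0.3)]  # ((p1, c1), (p, c), w)
bv = [(0, 1, 0.1)]                                     # (p, c, w)
nfeatures, nfilters = 2, 1

filterCollection = {}
biasCollection = {}
for ((p1, c1), (p, c), w) in wv:
    if c1 - 1 in range(nfeatures) and c - 1 in range(nfilters):
        filterCollection[c1 - 1, c - 1] = w
for (p, c, w) in bv:
    if c - 1 in range(nfilters):
        for l in range(nfeatures):
            biasCollection[l, c - 1] = w

print(filterCollection)  # {(0, 0): 0.5, (1, 0): -0.3}
print(biasCollection)    # {(0, 0): 0.1, (1, 0): 0.1}
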
Example 2
 def bestChild(self, index):
     allValues = {}
     for childIndex in self.children[index]:
         allValues[childIndex] = self.cost[childIndex] / float(
             self.numberOfVisited[childIndex])
     basics.nprint("finding best children from %s" % (allValues))
     return max(allValues.iteritems(), key=operator.itemgetter(1))[0]
Example 3
 def sampling(self, index, availableActions):
     basics.nprint("start sampling node %s" % (index))
     availableActions2 = copy.deepcopy(availableActions)
     #print(availableActions,self.keypoint[index],self.indexToActionID[index])
     availableActions2[self.keypoint[index]].pop(
         self.indexToActionID[index], None)
     sampleValues = []
     i = 0
     for i in range(cfg.MCTS_multi_samples):
         self.spansPath = self.spans[index]
         self.numSpansPath = self.numSpans[index]
         self.depth = 0
         self.availableActionIDs = {}
         for k in self.keypoints.keys():
             self.availableActionIDs[k] = availableActions2[k].keys()
         self.usedActionIDs = {}
         for k in self.keypoints.keys():
             self.usedActionIDs[k] = []
         self.accDims = []
         self.d = 2
         (childTerminated, val) = self.sampleNext(self.keypoint[index])
         sampleValues.append(val)
         #if childTerminated == True: break
         i += 1
     return (childTerminated, max(sampleValues))
Example 4
 def initialiseExplorationNode(self, index, availableActions):
     basics.nprint("expanding %s" % (index))
     for (actionId, (span, numSpan, _)) in availableActions.iteritems():
         self.indexToNow += 1
         self.indexToActionID[self.indexToNow] = actionId
         self.initialiseLeafNode(self.indexToNow, index, span, numSpan)
         self.children[index].append(self.indexToNow)
     self.fullyExpanded[index] = True
     self.usedActionsID = []
     return self.children[index]
Example 5
 def backPropagation(self, index, value):
     self.cost[index] += value
     self.numberOfVisited[index] += 1
     if self.parent[index] in self.parent:
         basics.nprint(
             "start backPropagating the value %s from node %s, whose parent node is %s"
             % (value, index, self.parent[index]))
         self.backPropagation(self.parent[index], value)
     else:
         basics.nprint("backPropagating ends on node %s" % (index))
Example 6
 def initialiseLeafNode(self, index, parentIndex, newSpans, newNumSpans):
     basics.nprint("initialising a leaf node %s from the node %s" %
                   (index, parentIndex))
     self.spans[index] = basics.mergeTwoDicts(self.spans[parentIndex],
                                              newSpans)
     self.numSpans[index] = basics.mergeTwoDicts(self.numSpans[parentIndex],
                                                 newNumSpans)
     self.cost[index] = 0
     self.parent[index] = parentIndex
     self.children[index] = []
     self.fullyExpanded[index] = False
     self.numberOfVisited[index] = 0
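
mergeTwoDicts is not shown in these examples; the calls above only rely on it returning a new dictionary that combines the parent's entries with the new ones, with the new ones taking precedence. A minimal sketch under that assumption:

def mergeTwoDicts(x, y):
    # Assumed behaviour: copy x, overlay y, leave both inputs untouched.
    z = x.copy()
    z.update(y)
    return z
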
Example 7
 def terminatedByControlledSearch(self, index):
     activations1 = applyManipulation(self.activations, self.spans[index],
                                      self.numSpans[index])
     (distMethod, distVal) = cfg.controlledSearch
     if distMethod == "euclidean":
         dist = basics.euclideanDistance(activations1, self.activations)
     elif distMethod == "L1":
         dist = basics.l1Distance(activations1, self.activations)
     elif distMethod == "Percentage":
         dist = basics.diffPercent(activations1, self.activations)
     elif distMethod == "NumDiffs":
         dist = basics.diffPercent(activations1, self.activations)
     basics.nprint("terminated by controlled search")
     return dist > distVal
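
The distance helpers from basics are not included in these examples. Plausible NumPy definitions consistent with how they are used above (these are assumptions, not the original implementations):

import numpy as np

def euclideanDistance(a, b):
    return np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float))

def l1Distance(a, b):
    return np.sum(np.abs(np.asarray(a, dtype=float) - np.asarray(b, dtype=float)))

def diffPercent(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return np.count_nonzero(a != b) / float(a.size)
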
Example 8
def conv_solve_prep(model, dataBasics, string, originalLayer2Consider,
                    layer2Consider, prevSpan, prevNumSpan, span, numSpan, cp,
                    input, wv, bv, activations):

    # filters can be seen as the output of a convolutional layer
    nfilters = basics.numberOfFilters(wv)
    # features can be seen as the inputs for a convolutional layer
    nfeatures = basics.numberOfFeatures(wv)

    # space holders for computation values
    biasCollection = {}
    filterCollection = {}

    for l in range(nfeatures):
        for k in range(nfilters):
            filter = [
                w for ((p1, c1), (p, c), w) in wv if c1 == l + 1 and c == k + 1
            ]
            bias = [w for (p, c, w) in bv if c == k + 1]
            if len(filter) == 0 or len(bias) == 0:
                print "error: bias =" + str(bias) + "\n filter = " + str(
                    filter)
                # skip this (feature, filter) pair instead of flipping an empty filter
                continue
            filter = filter[0]
            bias = bias[0]

            # flip the filter for convolution
            flippedFilter = np.fliplr(np.flipud(filter))
            biasCollection[l, k] = bias
            filterCollection[l, k] = flippedFilter
            #print filter.shape

    input2 = copy.deepcopy(input)

    if originalLayer2Consider > layer2Consider:
        (bl1, newInput) = conv_safety_solve(layer2Consider, nfeatures,
                                            nfilters, filterCollection,
                                            biasCollection, input, activations,
                                            prevSpan, prevNumSpan, span,
                                            numSpan, cp)
    else:
        (bl1, newInput) = conv_safety_solve(layer2Consider, nfeatures,
                                            nfilters, filterCollection,
                                            biasCollection, input, activations,
                                            prevSpan, prevNumSpan, span,
                                            numSpan, cp)

    basics.nprint("completed a round of processing of the entire image ")
    return (bl1, newInput)
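
The flip applied to each filter above is the usual 180-degree rotation that relates a stored cross-correlation kernel to a convolution kernel. A quick illustration of what np.fliplr(np.flipud(...)) does to a 2x2 kernel:

import numpy as np

f = np.array([[1, 2],
              [3, 4]])
print(np.fliplr(np.flipud(f)))
# [[4 3]
#  [2 1]]
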
Example 9
 def initialiseLeafNode(self, index, parentIndex, newSpans, newNumSpans):
     basics.nprint("initialising a leaf node %s from the node %s" %
                   (index, parentIndex))
     self.spans[index] = basics.mergeTwoDicts(self.spans[parentIndex],
                                              newSpans)
     self.numSpans[index] = basics.mergeTwoDicts(self.numSpans[parentIndex],
                                                 newNumSpans)
     self.cost[index] = 0
     self.parent[index] = parentIndex
     self.children[index] = []
     self.fullyExpanded[index] = False
     self.numberOfVisited[index] = 0
     activations1 = applyManipulation(self.activations, self.spans[index],
                                      self.numSpans[index])
     self.re_training.addDatum(activations1, self.originalClass,
                               self.originalClass)
Example 10
    def treeTraversal(self, index):
        if self.fullyExpanded[index] == True:
            basics.nprint("tree traversal on node %s" % (index))
            allValues = {}
            for childIndex in self.children[index]:
                allValues[childIndex] = (
                    self.cost[childIndex] /
                    float(self.numberOfVisited[childIndex])
                ) + explorationRate * math.sqrt(
                    math.log(self.numberOfVisited[index]) /
                    float(self.numberOfVisited[childIndex]))
            #nextIndex = max(allValues.iteritems(), key=operator.itemgetter(1))[0]
            if self.player_mode == "adversary" and self.keypoint[index] == 0:
                allValues2 = {}
                for k, v in allValues.iteritems():
                    allValues2[k] = 1 / float(allValues[k])
                nextIndex = np.random.choice(allValues.keys(),
                                             1,
                                             p=[
                                                 x / sum(allValues2.values())
                                                 for x in allValues2.values()
                                             ])[0]
            else:
                nextIndex = np.random.choice(allValues.keys(),
                                             1,
                                             p=[
                                                 x / sum(allValues.values())
                                                 for x in allValues.values()
                                             ])[0]

            if self.keypoint[index] in self.usedActionsID.keys(
            ) and self.keypoint[index] != 0:
                self.usedActionsID[self.keypoint[index]].append(
                    self.indexToActionID[index])
            elif self.keypoint[index] != 0:
                self.usedActionsID[self.keypoint[index]] = [
                    self.indexToActionID[index]
                ]
            return self.treeTraversal(nextIndex)
        else:
            basics.nprint("tree traversal terminated on node %s" % (index))
            availableActions = copy.deepcopy(self.actions)
            for k in self.usedActionsID.keys():
                for i in self.usedActionsID[k]:
                    availableActions[k].pop(i, None)
            return (index, availableActions)
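
In the adversary case above, the next child is sampled with probability inversely proportional to its score rather than greedily. A small self-contained sketch of that weighting with np.random.choice (toy numbers only):

import numpy as np

allValues = {3: 2.0, 4: 0.5, 5: 1.0}  # childIndex -> UCT-style score
inverted = {k: 1.0 / v for k, v in allValues.items()}
total = sum(inverted.values())
keys = list(inverted.keys())
probs = [inverted[k] / total for k in keys]
nextIndex = np.random.choice(keys, 1, p=probs)[0]
print(nextIndex)  # child 4 is drawn most often, since it has the lowest score
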
Example 11
 def treeTraversal(self, index):
     if self.fullyExpanded[index] == True:
         basics.nprint("tree traversal on node %s" % (index))
         allValues = {}
         for childIndex in self.children[index]:
             allValues[childIndex] = (self.cost[childIndex] / float(
                 self.numberOfVisited[childIndex])) + cp * math.sqrt(
                     math.log(self.numberOfVisited[index]) /
                     float(self.numberOfVisited[childIndex]))
         nextIndex = max(allValues.iteritems(),
                         key=operator.itemgetter(1))[0]
         self.usedActionsID.append(self.indexToActionID[nextIndex])
         return self.treeTraversal(nextIndex)
     else:
         basics.nprint("tree traversal terminated on node %s" % (index))
         availableActions = copy.deepcopy(self.actions)
         for i in self.usedActionsID:
             availableActions.pop(i, None)
         return (index, availableActions)
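
The per-child score in both treeTraversal variants is the standard UCT (UCB1) rule: the child's average reward plus an exploration bonus that shrinks as the child is visited more often relative to its parent. A self-contained sketch of that computation with plain dictionaries, where cp is the exploration constant as in the code above:

import math

def ucb1(cost, visits, parentVisits, cp):
    # mean reward of the child plus the exploration term
    return cost / float(visits) + cp * math.sqrt(
        math.log(parentVisits) / float(visits))

# toy numbers only, to show the greedy selection
children = {1: (3.0, 4), 2: (1.0, 1)}  # childIndex -> (accumulated cost, visits)
parentVisits = 5
scores = {c: ucb1(cost, n, parentVisits, cp=1.0)
          for c, (cost, n) in children.items()}
print(max(scores, key=scores.get))  # -> 2 (rarely visited, so it gets a large bonus)
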
Example 12
def diffImage(image1, image2):
    i = 0
    if len(image1.shape) == 1:
        for x in range(len(image1)):
            if image1[x] != image2[x]:
                i += 1
                basics.nprint("dimension %s is changed from %s to %s" %
                              (x, image1[x], image2[x]))
    elif len(image1.shape) == 2:
        for x in range(len(image1)):
            for y in range(len(image1[0])):
                if image1[x][y] != image2[x][y]:
                    i += 1
                    basics.nprint(
                        "dimension (%s,%s) is changed from %s to %s" %
                        (x, y, image1[x][y], image2[x][y]))
    elif len(image1.shape) == 3:
        for x in range(len(image1)):
            for y in range(len(image1[0])):
                for z in range(len(image1[0][0])):
                    if image1[x][y][z] != image2[x][y][z]:
                        i += 1
                        basics.nprint(
                            "dimension (%s,%s,%s) is changed from %s to %s" %
                            (x, y, z, image1[x][y][z], image2[x][y][z]))
    print("%s elements have been changed!" % i)
Example 13
 def initialiseExplorationNode(self, index, availableActions):
     basics.nprint("expanding %s" % (index))
     if self.keypoint[index] != 0:
         for (
                 actionId, (span, numSpan, _)
         ) in availableActions[self.keypoint[index]].iteritems(
         ):  #initialisePixelSets(self.model,self.image,list(set(self.spans[index].keys() + self.usefulPixels))):
             self.indexToNow += 1
             self.keypoint[self.indexToNow] = 0
             self.indexToActionID[self.indexToNow] = actionId
             self.initialiseLeafNode(self.indexToNow, index, span, numSpan)
             self.children[index].append(self.indexToNow)
     else:
         for kp in self.keypoints.keys(
         ):  #initialisePixelSets(self.model,self.image,list(set(self.spans[index].keys() + self.usefulPixels))):
             self.indexToNow += 1
             self.keypoint[self.indexToNow] = kp
             self.indexToActionID[self.indexToNow] = 0
             self.initialiseLeafNode(self.indexToNow, index, {}, {})
             self.children[index].append(self.indexToNow)
     self.fullyExpanded[index] = True
     self.usedActionsID = {}
     return self.children[index]
Example 14
    def initialiseActions(self):
        # initialise actions according to the type of manipulations
        if self.manipulationType == "sift_twoPlayer":
            actions = initialiseSiftKeypointsTwoPlayer(self.autoencoder,
                                                       self.activations, [])
            self.keypoints[0] = 0
            i = 1
            for k in actions[0]:
                self.keypoints[i] = k
                i += 1
            #print self.keypoints
        else:
            print("unknown manipulation type")
            exit()

        #print("actions=%s"%(actions.keys()))
        for i in range(len(actions)):
            ast = {}
            for j in range(len(actions[i])):
                ast[j] = actions[i][j]
            self.actions[i] = ast
        basics.nprint("%s actions have been initialised. " %
                      (len(self.actions)))
Example 15
def handleOne(model, dataset, dc, imgIdx):
    print(imgIdx)

    # get an image to interpolate
    image = dataset.getTestImage(imgIdx)
    print("the shape of the input is " + str(image.shape))

    if cfg.dataset == "twoDcurve": image = np.array([3.58747339, 1.11101673])

    dc.initialiseIndex(imgIdx)
    originalImage = copy.deepcopy(image)

    if cfg.checkingMode == "stepwise":
        k = cfg.startLayer
    elif cfg.checkingMode == "specificLayer":
        k = cfg.maxLayer

    while k <= cfg.maxLayer:

        layerType = model.getLayerType(k)
        re = False
        start_time = time.time()

        # only these layers need to be checked
        if layerType in ["Convolution2D", "Conv2D", "Dense", "InputLayer"
                         ] and k >= 0:

            dc.initialiseLayer(k)

            st = SearchTree(image, k)
            st.addImages(model, [image])

            print(
                "\n================================================================"
            )
            print "\nstart checking the safety of layer " + str(k)

            (originalClass, originalConfident) = model.predict(image)
            origClassStr = dataset.labels(int(originalClass))

            path0 = "%s/%s_original_as_%s_with_confidence_%s.png" % (
                cfg.directory_pic_string, imgIdx, origClassStr,
                originalConfident)
            dataset.save(-1, originalImage, path0)

            # for every feature
            f = 0
            while f < cfg.numOfFeatures:

                f += 1
                print(
                    "\n================================================================"
                )
                print("Round %s of layer %s for image %s" % (f, k, imgIdx))
                index = st.getOneUnexplored()
                imageIndex = copy.deepcopy(index)

                # for every image
                # start from the first hidden layer
                t = 0
                re = False
                while index != (-1, -1):

                    # pick the first element of the queue
                    print "(1) get a manipulated input ..."
                    (image0, span, numSpan, numDimsToMani,
                     _) = st.getInfo(index)

                    print "current layer: %s." % (t)
                    print "current index: %s." % (str(index))

                    path2 = cfg.directory_pic_string + "/temp.png"
                    print "current operated image is saved into %s" % (path2)
                    dataset.save(index[0], image0, path2)

                    print "(2) synthesise region from %s..." % (span.keys())
                    # ne: next region, i.e., e_{k+1}
                    #print "manipulated: %s"%(st.manipulated[t])
                    (nextSpan, nextNumSpan,
                     numDimsToMani) = regionSynth(model, cfg.dataset, image0,
                                                  st.manipulated[t], t, span,
                                                  numSpan, numDimsToMani)
                    st.addManipulated(t, nextSpan.keys())

                    (nextSpan, nextNumSpan,
                     npre) = precisionSynth(model, image0, t, span, numSpan,
                                            nextSpan, nextNumSpan)

                    print "dimensions to be considered: %s" % (nextSpan)
                    print "spans for the dimensions: %s" % (nextNumSpan)

                    if t == k:

                        # only after reaching the k layer, it is counted as a pass
                        print "(3) safety analysis ..."
                        # wk for the set of counterexamples
                        # rk for the set of images that need to be considered in the next precision
                        # rs remembers how many input images have been processed in the last round
                        # nextSpan and nextNumSpan are revised by considering the precision npre
                        (nextSpan, nextNumSpan, rs, wk,
                         rk) = safety_analysis(model, dataset, t, imgIdx, st,
                                               index, nextSpan, nextNumSpan,
                                               npre)
                        if len(rk) > 0:
                            rk = (zip(*rk))[0]

                            print "(4) add new images ..."
                            random.seed(time.time())
                            if len(rk) > numOfPointsAfterEachFeature:
                                rk = random.sample(
                                    rk, numOfPointsAfterEachFeature)
                            diffs = basics.diffImage(image0, rk[0])
                            print(
                                "the dimensions of the images that are changed in this round: %s"
                                % diffs)
                            if len(diffs) == 0:
                                st.clearManipulated(k)
                                return

                            st.addImages(model, rk)
                            st.removeProcessed(imageIndex)
                            (re, percent, eudist, l1dist,
                             l0dist) = reportInfo(image, wk)
                            print "euclidean distance %s" % (
                                basics.euclideanDistance(image, rk[0]))
                            print "L1 distance %s" % (basics.l1Distance(
                                image, rk[0]))
                            print "L0 distance %s" % (basics.l0Distance(
                                image, rk[0]))
                            print "manipulated percentage distance %s\n" % (
                                basics.diffPercent(image, rk[0]))
                            break
                        else:
                            st.removeProcessed(imageIndex)
                            break
                    else:
                        print "(3) add new intermediate node ..."
                        index = st.addIntermediateNode(image0, nextSpan,
                                                       nextNumSpan, npre,
                                                       numDimsToMani, index)
                        re = False
                        t += 1
                if re == True:
                    dc.addManipulationPercentage(percent)
                    print "euclidean distance %s" % (eudist)
                    print "L1 distance %s" % (l1dist)
                    print "L0 distance %s" % (l0dist)
                    print "manipulated percentage distance %s\n" % (percent)
                    dc.addEuclideanDistance(eudist)
                    dc.addl1Distance(l1dist)
                    dc.addl0Distance(l0dist)
                    (ocl, ocf) = model.predict(wk[0])
                    dc.addConfidence(ocf)
                    break

            if f == cfg.numOfFeatures:
                print "(6) no adversarial example is found in this layer within the distance restriction."
            st.destructor()

        elif layerType in ["Input"
                           ] and k < 0 and mcts_mode == "sift_twoPlayer":

            print "directly handling the image ... "

            dc.initialiseLayer(k)

            (originalClass, originalConfident) = model.predict(image)
            origClassStr = dataset.labels(int(originalClass))
            path0 = "%s/%s_original_as_%s_with_confidence_%s.png" % (
                cfg.directory_pic_string, imgIdx, origClassStr,
                originalConfident)
            dataset.save(-1, originalImage, path0)

            # initialise a search tree
            st = MCTSTwoPlayer(model, model, image, image, -1, "cooperator",
                               dataset)
            st.initialiseActions()

            st.setManipulationType("sift_twoPlayer")

            start_time_all = time.time()
            runningTime_all = 0
            numberOfMoves = 0
            while st.terminalNode(
                    st.rootIndex
            ) == False and st.terminatedByControlledSearch(
                    st.rootIndex
            ) == False and runningTime_all <= cfg.MCTS_all_maximal_time:
                print("the number of moves we have made up to now: %s" %
                      (numberOfMoves))
                eudist = st.euclideanDist(st.rootIndex)
                l1dist = st.l1Dist(st.rootIndex)
                l0dist = st.l0Dist(st.rootIndex)
                percent = st.diffPercent(st.rootIndex)
                diffs = st.diffImage(st.rootIndex)
                print("euclidean distance %s" % (eudist))
                print("L1 distance %s" % (l1dist))
                print("L0 distance %s" % (l0dist))
                print("manipulated percentage distance %s" % (percent))
                print("manipulated dimensions %s" % (diffs))

                start_time_level = time.time()
                runningTime_level = 0
                childTerminated = False
                while runningTime_level <= cfg.MCTS_level_maximal_time:
                    (leafNode,
                     availableActions) = st.treeTraversal(st.rootIndex)
                    newNodes = st.initialiseExplorationNode(
                        leafNode, availableActions)
                    for node in newNodes:
                        (childTerminated,
                         value) = st.sampling(node, availableActions)
                        #if childTerminated == True: break
                        st.backPropagation(node, value)
                    #if childTerminated == True: break
                    runningTime_level = time.time() - start_time_level
                    basics.nprint("best possible one is %s" %
                                  (str(st.bestCase)))
                bestChild = st.bestChild(st.rootIndex)
                #st.collectUselessPixels(st.rootIndex)
                st.makeOneMove(bestChild)

                image1 = st.applyManipulationToGetImage(
                    st.spans[st.rootIndex], st.numSpans[st.rootIndex])
                diffs = st.diffImage(st.rootIndex)
                path0 = "%s/%s_temp_%s.png" % (cfg.directory_pic_string,
                                               imgIdx, len(diffs))
                dataset.save(-1, image1, path0)
                (newClass, newConfident) = model.predict(image1)
                print("confidence: %s" % (newConfident))

                if childTerminated == True: break

                # store the current best
                (_, bestSpans, bestNumSpans) = st.bestCase
                image1 = st.applyManipulationToGetImage(
                    bestSpans, bestNumSpans)
                path0 = "%s/%s_currentBest.png" % (cfg.directory_pic_string,
                                                   imgIdx)
                dataset.save(-1, image1, path0)

                numberOfMoves += 1
                runningTime_all = time.time() - start_time_all

            (_, bestSpans, bestNumSpans) = st.bestCase
            #image1 = applyManipulation(st.image,st.spans[st.rootIndex],st.numSpans[st.rootIndex])
            image1 = st.applyManipulationToGetImage(bestSpans, bestNumSpans)
            (newClass, newConfident) = model.predict(image1)
            newClassStr = dataset.labels(int(newClass))
            re = newClass != originalClass

            if re == True:
                path0 = "%s/%s_%s_%s_modified_into_%s_with_confidence_%s.png" % (
                    cfg.directory_pic_string, imgIdx, "sift_twoPlayer",
                    origClassStr, newClassStr, newConfident)
                dataset.save(-1, image1, path0)
                path0 = "%s/%s_diff.png" % (cfg.directory_pic_string, imgIdx)
                dataset.save(-1, np.subtract(image, image1), path0)
                print(
                    "\nfound an adversarial example within the prespecified computational budget. The following is its information: "
                )
                print("difference between images: %s" %
                      (basics.diffImage(image, image1)))

                print("number of adversarial examples found: %s" % (st.numAdv))

                eudist = basics.euclideanDistance(st.image, image1)
                l1dist = basics.l1Distance(st.image, image1)
                l0dist = basics.l0Distance(st.image, image1)
                percent = basics.diffPercent(st.image, image1)
                print("euclidean distance %s" % (eudist))
                print("L1 distance %s" % (l1dist))
                print("L0 distance %s" % (l0dist))
                print("manipulated percentage distance %s" % (percent))
                print("class is changed into %s with confidence %s\n" %
                      (newClassStr, newConfident))
                dc.addRunningTime(time.time() - start_time_all)
                dc.addConfidence(newConfident)
                dc.addManipulationPercentage(percent)
                dc.addEuclideanDistance(eudist)
                dc.addl1Distance(l1dist)
                dc.addl0Distance(l0dist)

                #path0="%s/%s_original_as_%s_heatmap.png"%(directory_pic_string,imgIdx,origClassStr)
                #plt.imshow(GMM(image),interpolation='none')
                #plt.savefig(path0)
                #path1="%s/%s_%s_%s_modified_into_%s_heatmap.png"%(directory_pic_string,imgIdx,"sift_twoPlayer", origClassStr,newClassStr)
                #plt.imshow(GMM(image1),interpolation='none')
                #plt.savefig(path1)
            else:
                print(
                    "\nfailed to find an adversarial example within the prespecified computational budget. "
                )

        elif layerType in ["Input"] and k < 0 and mcts_mode == "singlePlayer":

            print "directly handling the image ... "

            dc.initialiseLayer(k)

            (originalClass, originalConfident) = model.predict(image)
            origClassStr = dataset.labels(int(originalClass))
            path0 = "%s/%s_original_as_%s_with_confidence_%s.png" % (
                cfg.directory_pic_string, imgIdx, origClassStr,
                originalConfident)
            dataset.save(-1, originalImage, path0)

            # initialise a search tree
            st = SearchMCTS(model, image, k)
            st.initialiseActions()

            start_time_all = time.time()
            runningTime_all = 0
            numberOfMoves = 0
            while st.terminalNode(
                    st.rootIndex
            ) == False and st.terminatedByControlledSearch(
                    st.rootIndex
            ) == False and runningTime_all <= cfg.MCTS_all_maximal_time:
                print("the number of moves we have made up to now: %s" %
                      (numberOfMoves))
                eudist = st.euclideanDist(st.rootIndex)
                l1dist = st.l1Dist(st.rootIndex)
                l0dist = st.l0Dist(st.rootIndex)
                percent = st.diffPercent(st.rootIndex)
                diffs = st.diffImage(st.rootIndex)
                print "euclidean distance %s" % (eudist)
                print "L1 distance %s" % (l1dist)
                print "L0 distance %s" % (l0dist)
                print "manipulated percentage distance %s" % (percent)
                print "manipulated dimensions %s" % (diffs)

                start_time_level = time.time()
                runningTime_level = 0
                childTerminated = False
                while runningTime_level <= cfg.MCTS_level_maximal_time:
                    (leafNode,
                     availableActions) = st.treeTraversal(st.rootIndex)
                    newNodes = st.initialiseExplorationNode(
                        leafNode, availableActions)
                    for node in newNodes:
                        (childTerminated,
                         value) = st.sampling(node, availableActions)
                        if childTerminated == True: break
                        st.backPropagation(node, value)
                    if childTerminated == True: break
                    runningTime_level = time.time() - start_time_level
                    print("best possible one is %s" % (st.showBestCase()))
                bestChild = st.bestChild(st.rootIndex)
                #st.collectUselessPixels(st.rootIndex)
                st.makeOneMove(bestChild)

                image1 = applyManipulation(st.image, st.spans[st.rootIndex],
                                           st.numSpans[st.rootIndex])
                diffs = st.diffImage(st.rootIndex)
                path0 = "%s/%s_temp_%s.png" % (cfg.directory_pic_string,
                                               imgIdx, len(diffs))
                dataset.save(-1, image1, path0)
                (newClass, newConfident) = model.predict(image1)
                print "confidence: %s" % (newConfident)

                if childTerminated == True: break

                # store the current best
                (_, bestSpans, bestNumSpans) = st.bestCase
                image1 = applyManipulation(st.image, bestSpans, bestNumSpans)
                path0 = "%s/%s_currentBest.png" % (cfg.directory_pic_string,
                                                   imgIdx)
                dataset.save(-1, image1, path0)

                runningTime_all = time.time() - start_time_all
                numberOfMoves += 1

            (_, bestSpans, bestNumSpans) = st.bestCase
            #image1 = applyManipulation(st.image,st.spans[st.rootIndex],st.numSpans[st.rootIndex])
            image1 = applyManipulation(st.image, bestSpans, bestNumSpans)
            (newClass, newConfident) = model.predict(image1)
            newClassStr = dataset.labels(int(newClass))
            re = newClass != originalClass
            path0 = "%s/%s_%s_modified_into_%s_with_confidence_%s.png" % (
                cfg.directory_pic_string, imgIdx, origClassStr, newClassStr,
                newConfident)
            dataset.save(-1, image1, path0)
            #print np.max(image1), np.min(image1)
            print("difference between images: %s" %
                  (basics.diffImage(image, image1)))
            #plt.imshow(image1 * 255, cmap=mpl.cm.Greys)
            #plt.show()

            if re == True:
                eudist = basics.euclideanDistance(st.image, image1)
                l1dist = basics.l1Distance(st.image, image1)
                l0dist = basics.l0Distance(st.image, image1)
                percent = basics.diffPercent(st.image, image1)
                print "euclidean distance %s" % (eudist)
                print "L1 distance %s" % (l1dist)
                print "L0 distance %s" % (l0dist)
                print "manipulated percentage distance %s" % (percent)
                print "class is changed into %s with confidence %s\n" % (
                    newClassStr, newConfident)
                dc.addEuclideanDistance(eudist)
                dc.addl1Distance(l1dist)
                dc.addl0Distance(l0dist)
                dc.addManipulationPercentage(percent)

            st.destructor()

        else:
            print("layer %s is of type %s, skipping" % (k, layerType))
            #return

        runningTime = time.time() - start_time
        dc.addRunningTime(runningTime)
        if re == True and cfg.exitWhen == "foundFirst":
            break
        k += 1

    print("Please refer to the file %s for statistics." % (dc.fileName))
    return re
Example 16
    def sampleNext(self, k):
        #print("k=%s"%k)
        #for j in self.keypoints:
        #    print(len(self.availableActionIDs[j]))
        #print("oooooooo")

        activations1 = applyManipulation(self.activations, self.spansPath,
                                         self.numSpansPath)
        (newClass, newConfident) = self.predictWithActivations(activations1)
        (distMethod, distVal) = cfg.controlledSearch
        if distMethod == "euclidean":
            dist = basics.euclideanDistance(activations1, self.activations)
            termValue = 0.0
            termByDist = dist > distVal
        elif distMethod == "L1":
            dist = basics.l1Distance(activations1, self.activations)
            termValue = 0.0
            termByDist = dist > distVal
        elif distMethod == "Percentage":
            dist = basics.diffPercent(activations1, self.activations)
            termValue = 0.0
            termByDist = dist > distVal
        elif distMethod == "NumDiffs":
            dist = basics.diffPercent(activations1,
                                      self.activations) * self.activations.size
            termValue = 0.0
            termByDist = dist > distVal

        #if termByDist == False and newConfident < 0.5 and self.depth <= 3:
        #    termByDist = True

        #self.re_training.addDatum(activations1,self.originalClass,newClass)

        if newClass != self.originalClass and newConfident > effectiveConfidenceWhenChanging:
            # and newClass == dataBasics.next_index(self.originalClass,self.originalClass):
            basics.nprint(
                "sampling a path ends in a terminal node with self.depth %s... "
                % self.depth)

            #print("L1 distance: %s"%(l1Distance(self.activations,activations1)))
            #print(self.activations.shape)
            #print(activations1.shape)
            #print("L1 distance with KL: %s"%(withKL(l1Distance(self.activations,activations1),self.activations,activations1)))

            (self.spansPath,
             self.numSpansPath) = self.scrutinizePath(self.spansPath,
                                                      self.numSpansPath,
                                                      newClass)

            #self.decisionTree.addOnePath(dist,self.spansPath,self.numSpansPath)
            self.numAdv += 1
            #self.analyseAdv.addAdv(activations1)
            self.getUsefulPixels(self.accDims, self.d)

            self.re_training.addDatum(activations1, self.originalClass,
                                      newClass)
            if self.bestCase[0] < dist:
                self.numConverge += 1
                self.bestCase = (dist, self.spansPath, self.numSpansPath)
                path0 = "%s/%s_currentBest_%s_as_%s_with_confidence_%s.png" % (
                    cfg.directory_pic_string, cfg.startIndexOfImage,
                    self.numConverge, self.dataset.labels(
                        int(newClass)), newConfident)
                self.dataset.save(-1, activations1, path0)

            return (self.depth == 0, dist)
        elif termByDist == True:
            basics.nprint(
                "sampling a path ends by controlled search with self.depth %s ... "
                % self.depth)
            self.re_training.addDatum(activations1, self.originalClass,
                                      newClass)
            return (self.depth == 0, termValue)
        elif list(
                set(self.availableActionIDs[k]) -
                set(self.usedActionIDs[k])) == []:
            basics.nprint(
                "sampling a path ends with self.depth %s because no more actions can be taken ... "
                % self.depth)
            return (self.depth == 0, termValue)
        else:
            #print("continue sampling node ... ")
            #allChildren = initialisePixelSets(self.model,self.activations,self.spansPath.keys())
            randomActionIndex = random.choice(
                list(
                    set(self.availableActionIDs[k]) -
                    set(self.usedActionIDs[k]))
            )  #random.randint(0, len(allChildren)-1)
            if k == 0:
                span = {}
                numSpan = {}
            else:
                (span, numSpan, _) = self.actions[k][randomActionIndex]
                self.availableActionIDs[k].remove(randomActionIndex)
                self.usedActionIDs[k].append(randomActionIndex)
            newSpanPath = self.mergeSpan(self.spansPath, span)
            newNumSpanPath = self.mergeNumSpan(self.numSpansPath, numSpan)
            activations2 = applyManipulation(self.activations, newSpanPath,
                                             newNumSpanPath)
            (newClass2,
             newConfident2) = self.predictWithActivations(activations2)
            confGap2 = newConfident - newConfident2
            if newClass2 == newClass:
                self.accDims.append((randomActionIndex, confGap2))
            else:
                self.accDims.append((randomActionIndex, 1.0))

            self.spansPath = newSpanPath
            self.numSpansPath = newNumSpanPath
            self.depth = self.depth + 1
            if k == 0:
                return self.sampleNext(randomActionIndex)
            else:
                return self.sampleNext(0)
Example 17
def conv_safety_solve(layer2Consider,nfeatures,nfilters,filters,bias,input,activations,prevSpan,prevNumSpan,span,numSpan,pk):  

    random.seed(time.time())
        
    # number of clauses
    c = 0
    # number of variables 
    d = 0

    # variables to be used for z3
    variable={}

    if nfeatures == 1: images = np.expand_dims(input, axis=0)
    else: images = input
    
    if len(activations.shape) == 3: 
        avg = np.sum(activations)/float(len(activations)*len(activations[0])*len(activations[0][0]))
    elif len(activations.shape) == 2:
        avg = np.sum(activations)/float(len(activations)*len(activations[0]))
    else: avg = 0

    s = Tactic('qflra').solver()
    s.reset()
    
    #print("%s\n%s\n%s\n%s"%(prevSpan,prevNumSpan,span,numSpan))
    
    toBeChanged = []
    if inverseFunction == "point": 
        if nfeatures == 1: 
            #print("%s\n%s"%(nfeatures,prevSpan.keys()))
            ks = [ (0,x,y) for (x,y) in prevSpan.keys() ]
        else:
            ks = copy.deepcopy(prevSpan.keys())
        toBeChanged = toBeChanged + ks
    elif inverseFunction == "area": 
        for (k,x,y) in span.keys():
             toBeChanged = toBeChanged + [(l,x1,y1) for l in range(nfeatures) for x1 in range(x,x+cfg.filterSize) for y1 in range(y,y+cfg.filterSize) if x1 >= 0 and y1 >= 0 and x1 < images.shape[1] and y1 < images.shape[2]]
        toBeChanged = list(set(toBeChanged))

    for (l,x,y) in toBeChanged:
        variable[1,0,l+1,x,y] = Real('1_x_%s_%s_%s' % (l+1,x,y))
        d += 1    
        if not(cfg.boundOfPixelValue == [0,0]) and (layer2Consider == 0):
            pstr = eval("variable[1,0,%s,%s,%s] <= %s"%(l+1,x,y,cfg.boundOfPixelValue[1]))
            pstr = And(eval("variable[1,0,%s,%s,%s] >= %s"%(l+1,x,y,cfg.boundOfPixelValue[0])), pstr)
            pstr = And(eval("variable[1,0,%s,%s,%s] != %s"%(l+1,x,y,images[l][x][y])), pstr)
            s.add(pstr)
            c += 1       
            
    maxterms = ""
    for (k,x,y) in span.keys():
        variable[1,1,k+1,x,y] = Real('1_y_%s_%s_%s' % (k+1,x,y))
        d += 1
        string = "variable[1,1,%s,%s,%s] == "%(k+1,x,y)
        for l in range(nfeatures): 
            for x1 in range(cfg.filterSize):
                for y1 in range(cfg.filterSize):
                    if (l,x+x1,y+y1) in toBeChanged: 
                        newstr1 = " variable[1,0,%s,%s,%s] * %s + "%(l+1,x+x1,y+y1,filters[l,k][x1][y1])
                    elif x+x1 < images.shape[1] and y+y1 < images.shape[2] : 
                        newstr1 = " %s + "%(images[l][x+x1][y+y1] * filters[l,k][x1][y1])
                    string += newstr1
        string += str(bias[l,k])
        s.add(eval(string))
        c += 1
                    
        if cfg.enumerationMethod == "line":
            pstr = eval("variable[1,1,%s,%s,%s] < %s" %(k+1,x,y,activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] + cfg.epsilon))
            pstr = And(eval("variable[1,1,%s,%s,%s] > %s "%(k+1,x,y,activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] - cfg.epsilon)), pstr)
        elif cfg.enumerationMethod == "convex" or cfg.enumerationMethod == "point":
            if activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] >= 0: 
                upper = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] + pk
                lower = -1 * (activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)]) - pk
            else: 
                upper = -1 * (activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)]) + pk
                lower = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] - pk
                
            #if span[(k,x,y)] > 0 : 
            #    upper = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] + epsilon
            #    lower = activations[k][x][y] - span[(k,x,y)] * numSpan[(k,x,y)] - epsilon
            #else: 
            #    upper = activations[k][x][y] - span[(k,x,y)] * numSpan[(k,x,y)] + epsilon
            #    lower = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] - epsilon 
                
            #if span[(k,x,y)] > 0 : 
            #    upper = activations[k][x][y] + span[(k,x,y)]  + epsilon
            #    lower = activations[k][x][y] - epsilon - span[(k,x,y)]
            #else: 
            #    upper = activations[k][x][y] + epsilon - span[(k,x,y)]
            #    lower = activations[k][x][y] + span[(k,x,y)] - epsilon 
            pstr = eval("variable[1,1,%s,%s,%s] < %s"%(k+1,x,y,upper))
            pstr = And(eval("variable[1,1,%s,%s,%s] > %s"%(k+1,x,y,lower)), pstr)
            pstr = And(eval("variable[1,1,%s,%s,%s] != %s"%(k+1,x,y,activations[k][x][y])), pstr)
        s.add(pstr)
        c += 1        
        
        if activations[k][x][y] > 0 : 
            maxterm = "- variable[1,1,%s,%s,%s]"%(k+1,x,y)
        else: 
            maxterm = "variable[1,1,%s,%s,%s]"%(k+1,x,y)
        
        if maxterms == "": 
            maxterms = "(%s)"%maxterm
        else: 
            maxterms = " %s + (%s) "%(maxterms, maxterm)

    nprint("Number of variables: " + str(d))
    nprint("Number of clauses: " + str(c))

    p = multiprocessing.Process(target=s.check)
    p.start()

    # Wait for timeout seconds or until process finishes
    p.join(cfg.timeout)

    # If thread is still active
    if p.is_alive():
        print "Solver running more than timeout seconds (default="+str(cfg.timeout)+"s)! Skip it"
        p.terminate()
        p.join()
    else:
        s_return = s.check()

    if 's_return' in locals():
        if s_return == sat:            
            inputVars = [ (l,x,y,eval("variable[1,0,"+ str(l+1) +"," + str(x) +"," + str(y)+ "]")) for (l,x,y) in toBeChanged ]
            cex = copy.deepcopy(images)
            for (l,x,y,v) in inputVars:
                #if cex[l][x][y] != v: print("different dimension spotted ... ")
                cex[l][x][y] = getDecimalValue(s.model()[v])
                #print("%s    %s"%(images[l][x][y],cex[l][x][y]))
            cex = np.squeeze(cex)
            
            #cex2 = copy.deepcopy(activations)
            #nextVars = [ (k,x,y,eval("variable[1,1,"+ str(k+1) +"," + str(x) +"," + str(y)+ "]")) for (k,x,y) in span.keys() ]
            #for (k,x,y,v) in nextVars:
                #if cex[l][x][y] != v: print("different dimension spotted ... ")
            #    cex2[k][x][y] = getDecimalValue(s.model()[v])
            #    print("%s       %s"%(activations[k][x][y],cex2[k][x][y]))
                
            nprint("satisfiable!")
            return (True, cex)
        else:
            nprint("unsatisfiable!")
            return (False, input)
    else:
        print "timeout! "
        return (False, input)
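
getDecimalValue is used above to turn a z3 model value into a Python number but is not shown in these examples. A minimal sketch of one common way to do this for rational and integer numerals (an assumption about the helper, not its original code):

from z3 import IntNumRef, RatNumRef

def getDecimalValue(v):
    # z3 models return numeral references; convert them to Python floats
    if isinstance(v, RatNumRef):
        return float(v.numerator_as_long()) / float(v.denominator_as_long())
    if isinstance(v, IntNumRef):
        return float(v.as_long())
    raise TypeError("unexpected z3 numeral: %r" % v)
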
Example 18
def dense_safety_solve(nfeatures, nfilters, filters, bias, input, activations,
                       pcl, pgl, span, numSpan, pk):

    random.seed(time.time())
    rn = random.random()

    # number of clauses
    c = 0
    # number of variables
    d = 0

    # variables to be used for z3
    variable = {}

    #print(filters)
    #print(bias)

    s = z3.Tactic('qflra').solver()
    s.reset()
    for l in pcl.keys():
        variable[1, 0, l + 1] = z3.Real('1_x_%s' % (l + 1))
        d += 1

    for k in span.keys():
        variable[1, 1, k + 1] = z3.Real('1_y_%s' % (k + 1))
        d += 1
        string = "variable[1,1,%s] ==  " % (k + 1)
        for l in range(nfeatures):
            if l in pcl.keys():
                newstr1 = " variable[1,0,%s] * %s + " % (l + 1, filters[l, k])
            else:
                newstr1 = " %s + " % (input[l] * filters[l, k])
            string += newstr1
        string += str(bias[l, k])
        s.add(eval(string))
        #print(eval(string))
        c += 1

        pStr1 = "variable[1,1,%s] == %s" % (k + 1, activations[k])

        s.add(eval(pStr1))
        c += 1

    basics.nprint("Number of variables: " + str(d))
    basics.nprint("Number of clauses: " + str(c))

    p = multiprocessing.Process(target=s.check)
    p.start()

    # Wait for timeout seconds or until process finishes
    p.join(cfg.timeout)

    # If thread is still active
    if p.is_alive():
        print "Solver running more than timeout seconds (default=" + str(
            cfg.timeout) + "s)! Skip it"
        p.terminate()
        p.join()
    else:
        s_return = s.check()

    if 's_return' in locals():
        if s_return == z3.sat:
            inputVars = [(l, eval("variable[1,0,%s]" % (l + 1)))
                         for l in pcl.keys()]
            cex = copy.deepcopy(input)
            for (l, x) in inputVars:
                cex[l] = getDecimalValue(s.model()[x])

            basics.nprint("satisfiable!")
            return (True, cex)
        else:
            basics.nprint("unsatisfiable!")
            return (False, input)
    else:
        print "unsatisfiable!"
        return (False, input)
Example 19
def safety_analysis(model, dataset, layer2Consider, imageIndex, st, index, cl2,
                    gl2, cp):

    originalIndex = copy.deepcopy(index)
    (originalImage, prevSpan, prevNumSpan, numDimsToMani,
     stepsUpToNow) = st.getInfo(index)

    config = model.getConfig()

    # get weights and bias of the entire trained neural network
    (wv, bv) = model.getWeightVector(layer2Consider)

    # save the starting layer
    originalLayer2Consider = copy.deepcopy(layer2Consider)

    # predict with neural network
    (originalSpanass, originalConfident) = model.predict(originalImage)

    classstr = "the right class is " + (str(
        dataset.labels(int(originalSpanass))))
    print classstr
    classstr = "the confidence is " + (str(originalConfident))
    print classstr

    print "safety analysis for layer %s ... " % (layer2Consider)

    # wk is the set of inputs whose classes differ from the original one
    wk = []
    # rk is the set of inputs that need to be further considered
    rk = [(originalImage, originalConfident)]
    rkupdated = False
    # rs remembers how many points have been tested in this round
    rs = 0

    # decide new span,numSpan according to precision cp
    (span, numSpan) = (cl2, gl2)
    #(span,numSpan) = decideNewP(cl2,gl2,cp)
    #print "the numbers of spans are updated into %s ... "%(numSpan)

    originalSpan = copy.deepcopy(span)
    originalNumSpan = copy.deepcopy(numSpan)
    if cfg.enumerationMethod == "convex":
        allRounds = reduce(mul, map(lambda x: 2, numSpan.values()), 1)
    elif cfg.enumerationMethod == "line":
        allRounds = reduce(mul, map(lambda x: 2 * x + 1, numSpan.values()), 1)
    elif cfg.enumerationMethod == "point":
        allRounds = 1
    print "%s regions need to be checked. " % (allRounds)

    # counter_numSpan tracks the current working point
    # InitialisedNumSpan remembers the initial counter values used when the counter wraps around
    (counter_numSpan, InitialisedNumSpan) = initialiseCounter(numSpan)
    #print("%s\n%s\n%s\n%s"%(counter_numSpan,InitialisedNumSpan,span,numSpan))
    counter_numSpans = {}
    counter_numSpans[originalLayer2Consider] = counter_numSpan
    round = 0

    cond = False
    rn = 0

    # a recursive procedure that runs until it finds an interpolated image which is
    # classified differently from the original image
    # note: there are other exit mechanisms in the loop body
    while (not cond) or layer2Consider > 0:

        if layer2Consider == originalLayer2Consider:
            activations = model.getActivationValue(originalLayer2Consider,
                                                   originalImage)
            activations1 = imageFromGL(activations, counter_numSpan, span)
            cond = equalCounters(counter_numSpan, numSpan)

        basics.nprint("\nin round: %s / %s" % (round, allRounds))
        basics.nprint("layer: " + str(layer2Consider))
        basics.nprint("counter_numSpan %s" % (counter_numSpan))
        basics.nprint("numSpan %s" % (numSpan))
        #print "maximal point %s"%(numSpan)
        #print "activations1=%s"%(activations1)

        # get the type of the current layer
        layerType = model.getLayerType(layer2Consider)
        #[ lt for (l,lt) in config if layer2Consider == l ]
        #if len(layerType) > 0: layerType = layerType[0]
        #else: print "cannot find the layerType"

        # get the weights and bias for the current layer
        wv2Consider, bv2Consider = basics.getWeight(wv, bv, layer2Consider)

        # call different solving approaches according to
        # the type of the layer and the type of the algorithm
        # FIXME: need to expand this to work with other cases, e.g., MaxPooling2D, Convolution3D
        if layerType == "Convolution2D" or layerType == "Conv2D":
            basics.nprint("convolutional layer, back-propagating ...")
            if layer2Consider == 0:
                activations0 = copy.deepcopy(originalImage)
            else:
                activations0 = model.getActivationValue(
                    layer2Consider - 1, originalImage)
            string = cfg.directory_pic_string + "/" + str(
                imageIndex) + "_original_as_" + str(originalSpanass)
            (bl, newInput) = conv_solve_prep(
                model, cfg.dataBasics, string, originalLayer2Consider,
                layer2Consider, prevSpan, prevNumSpan, counter_numSpan,
                numSpan, cp, activations0, wv2Consider, bv2Consider,
                activations1)

        elif layerType == "Dense":
            basics.nprint("dense layer, back propogation ... ")
            if layer2Consider == 0:
                activations0 = copy.deepcopy(originalImage)
            else:
                activations0 = model.getActivationValue(
                    layer2Consider - 1, originalImage)
            string = cfg.directory_pic_string + "/" + str(
                imageIndex) + "_original_as_" + str(originalSpanass)
            (bl, newInput) = dense_solve_prep(model, cfg.dataBasics, string,
                                              prevSpan, prevNumSpan,
                                              counter_numSpan, numSpan, cp,
                                              activations0, wv2Consider,
                                              bv2Consider, activations1)

        elif layerType == "InputLayer":
            basics.nprint("inputLayer layer, back-propagating ... ")
            (bl, newInput) = (True, copy.deepcopy(activations1))

        elif layerType == "relu":
            basics.nprint("relu layer, back-propagating ...")
            (bl, newInput) = (True, copy.deepcopy(activations1))

        elif layerType == "MaxPooling2D":
            basics.nprint("MaxPooling2D layer, solving ... ")
            if layer2Consider == 0:
                activations0 = copy.deepcopy(originalImage)
            else:
                activations0 = model.getActivationValue(
                    layer2Consider - 1, originalImage)
            image1 = maxpooling_safety_solve(activations0, activations1)
            (bl, newInput) = (True, image1)

        elif layerType == "Flatten":
            basics.nprint("Flatten layer, solving ... ")
            if layer2Consider == 0:
                activations0 = copy.deepcopy(originalImage)
            else:
                activations0 = model.getActivationValue(
                    layer2Consider - 1, originalImage)
            image1 = flatten_safety_solve(activations0, activations1)
            (bl, newInput) = (True, image1)

        # decide the next step according to the results from the solving
        if bl == False:
            # if back-propagation fails
            basics.nprint("back-propagation or solving fails ... ")

            if rkupdated == False:
                #rk.append((newInput,originalConfident))
                #print originalConfident, confident, rk[0][1]
                if counter_numSpan == numSpan:
                    rkupdated = True

            layer2Consider = copy.deepcopy(originalLayer2Consider)
            index = copy.deepcopy(originalIndex)
            (image, prevSpan, prevNumSpan, numDimsToMani,
             stepsUpToNow) = st.getInfo(originalIndex)
            counter_numSpan = counter_numSpans[originalLayer2Consider]
            span = copy.deepcopy(originalSpan)
            numSpan = copy.deepcopy(originalNumSpan)
            (_, InitialisedNumSpan) = initialiseCounter(numSpan)
            counter_numSpan = counterPlusOne(counter_numSpan, numSpan,
                                             InitialisedNumSpan)
            counter_numSpans[originalLayer2Consider] = copy.deepcopy(
                counter_numSpan)
            round += 1

        elif layer2Consider > 0:
            # still not yet reach the input layer
            # continue back-propagating
            layer2Consider -= 1
            activations1 = copy.deepcopy(newInput)
            index = st.parentIndexForIntermediateNode(index, layer2Consider)
            basics.nprint("backtrack to index %s in layer %s" %
                          (index, layer2Consider))
            activations = model.getActivationValue(layer2Consider,
                                                   originalImage)
            counter_numSpan = getCounter(activations, newInput, prevSpan,
                                         prevNumSpan)
            span = copy.deepcopy(prevSpan)
            numSpan = copy.deepcopy(prevNumSpan)
            (image, prevSpan, prevNumSpan, numDimsToMani,
             stepsUpToNow) = st.getInfo(index)
            counter_numSpans[layer2Consider] = copy.deepcopy(counter_numSpan)

        elif withinRegion(newInput, st) == True:
            # reached the input layer
            # and has to be within the region
            # check to see if the new input is classified wrongly.
            rs += 1
            #print "reach input layer"

            if dataset.name == "imageNet":
                newInput = normalise(newInput)

            basics.nprint("counter: %s" %
                          counter_numSpans[originalLayer2Consider])
            basics.nprint("input: %s" % newInput)

            (newClass, confident) = model.predict(newInput)
            if dataset.name == "twoDcurve":
                plt.plot([newInput[0]], [newInput[1]], 'g.')

            basics.nprint("confident level: " + str(confident))
            # Great! we found an image which has different class with the original image
            if newClass != originalSpanass:
                newClassStr = dataset.labels(int(newClass))
                origClassStr = dataset.labels(int(originalSpanass))
                classstr = "Class changed! from " + str(
                    origClassStr) + " into " + str(newClassStr)
                print classstr
                rk.append((newInput, confident))

                path1 = "%s/%s_%s_modified_into_%s_with_confidence_%s.png" % (
                    cfg.directory_pic_string, imageIndex, origClassStr,
                    newClassStr, confident)
                dataset.save(index[0], newInput, path1)

                # add a point whose class is wrong
                wk.append(newInput)
                if cfg.exitWhen == "foundFirst": break

            else:
                #oldconf = rk[0][1]
                #if (rk[0][1] == originalConfident):
                #    rk = [(newInput,confident)]
                #    diffImage(originalImage,newInput)
                #elif confident < oldconf:
                #    rk = rk + [(newInput,confident)]
                #    diffImage(originalImage,newInput)
                if rkupdated == False:
                    rk.append((newInput, confident))
                    #print originalConfident, confident, rk[0][1]
                    if counter_numSpan == numSpan:
                        rkupdated = True

            layer2Consider = copy.deepcopy(originalLayer2Consider)
            index = copy.deepcopy(originalIndex)
            (image, prevSpan, prevNumSpan, numDimsToMani,
             stepsUpToNow) = st.getInfo(originalIndex)
            counter_numSpan = counter_numSpans[originalLayer2Consider]
            span = copy.deepcopy(originalSpan)
            numSpan = copy.deepcopy(originalNumSpan)
            (_, InitialisedNumSpan) = initialiseCounter(numSpan)
            counter_numSpan = counterPlusOne(counter_numSpan, numSpan,
                                             InitialisedNumSpan)
            counter_numSpans[originalLayer2Consider] = copy.deepcopy(
                counter_numSpan)
            round += 1
            rn += 1

            #path2 = directory_pic_string+"/temp%s_(round=%s).png"%(rn,round)
            #path2 = directory_pic_string+"/temp.png"
            #dataBasics.save(index[0],newInput, path2)

        else:
            rs += 1

            if rkupdated == False:
                #rk.append((newInput,originalConfident))
                #print originalConfident, confident, rk[0][1]
                if counter_numSpan == numSpan:
                    rkupdated = True

            layer2Consider = copy.deepcopy(originalLayer2Consider)
            index = copy.deepcopy(originalIndex)
            (image, prevSpan, prevNumSpan, numDimsToMani,
             stepsUpToNow) = st.getInfo(originalIndex)
            counter_numSpan = counter_numSpans[originalLayer2Consider]
            span = copy.deepcopy(originalSpan)
            numSpan = copy.deepcopy(originalNumSpan)
            (_, InitialisedNumSpan) = initialiseCounter(numSpan)
            counter_numSpan = counterPlusOne(counter_numSpan, numSpan,
                                             InitialisedNumSpan)
            counter_numSpans[originalLayer2Consider] = copy.deepcopy(
                counter_numSpan)
            round += 1

            #print("2%s---%s"%(rkupdated,rk))

    print("ran througn the neural network for %s times." % (rn))
    #path2 = directory_pic_string+"/temp%s_%s.png"%(howfar,originalLayer2Consider)
    #dataBasics.save(newInput, path2)

    rk = rk[1:]
    if rk != []:
        rk.sort(key=lambda x: -1 * x[1])

    return (span, numSpan, rs, wk, rk)