def precSynthFull(model, image, layer2Consider, span, numSpan, nextSpan, nextNumSpan):
    """Synthesise the manipulation precision for layer layer2Consider.

    Starting from the smallest span of the previous layer (pk), delegates
    per-layer-type precision synthesis, then rescales the caller-supplied
    nextSpan/nextNumSpan dictionaries IN PLACE to the new precision npk.

    Returns (nextSpan, nextNumSpan, npk).
    """
    # the finest (smallest) span of the previous layer seeds the precision
    pk = min(span.values())
    config = cfg.NN.getConfig(model)  # NOTE(review): unused — candidate for removal

    # get weights and bias of the entire trained neural network
    (wv, bv) = cfg.NN.getWeightVector(model, layer2Consider)

    # get the activations of the previous and the current layer
    # (layer 0 has no predecessor, so the raw image stands in for it)
    if layer2Consider == 0:
        activations0 = image
    else:
        activations0 = cfg.NN.getActivationValue(model, layer2Consider - 1, image)
    activations1 = cfg.NN.getActivationValue(model, layer2Consider, image)

    # get the type of the current layer
    layerType = model.getLayerType(layer2Consider)
    #[ lt for (l,lt) in config if layer2Consider == l ]
    #if len(layerType) > 0: layerType = layerType[0]
    #else: print "cannot find the layerType"

    wv2Consider, bv2Consider = basics.getWeight(wv, bv, layer2Consider)

    if layerType == "Convolution2D" or layerType == "Conv2D":
        print "convolutional layer, synthesising precision ..."
        # filters can be seen as the output of a convolutional layer
        nfilters = basics.numberOfFilters(wv2Consider)
        # features can be seen as the inputs for a convolutional layer
        nfeatures = basics.numberOfFeatures(wv2Consider)
        # NOTE(review): this passes 13 arguments but the conv_solve_prep defined
        # in this file takes 14, and returns a (bl, newInput) tuple rather than a
        # scalar npk — confirm whether a different conv_solve_prep is intended.
        npk = conv_solve_prep(model, cfg.dataBasics, nfeatures, nfilters,
                              wv2Consider, bv2Consider, activations0, activations1,
                              span, numSpan, nextSpan, nextNumSpan, pk)
    elif layerType == "Dense":
        print "dense layer, synthesising precision ..."
        # filters can be seen as the output of a convolutional layer
        nfilters = basics.numberOfFilters(wv2Consider)
        # features can be seen as the inputs for a convolutional layer
        nfeatures = basics.numberOfFeatures(wv2Consider)
        # NOTE(review): this passes 13 arguments but the dense_solve_prep defined
        # in this file takes 12 and returns a tuple — confirm intended callee.
        npk = dense_solve_prep(model, cfg.dataBasics, nfeatures, nfilters,
                               wv2Consider, bv2Consider, activations0, activations1,
                               span, numSpan, nextSpan, nextNumSpan, pk)
    elif layerType == "InputLayer":
        print "inputLayer layer, synthesising precision ..."
        # the input layer keeps the previous precision unchanged
        npk = copy.copy(pk)
    else:
        # unknown layer types also keep the previous precision
        npk = copy.copy(pk)

    # rescale every dimension of the next layer's region to the new precision:
    # keep the overall covered length (span * numSpan) constant while the
    # per-step span becomes npk. Mutates the caller's dictionaries.
    for k in nextSpan.keys():
        length = nextSpan[k] * nextNumSpan[k]
        nextNumSpan[k] = math.ceil(length / float(npk))
        nextSpan[k] = npk

    return (nextSpan, nextNumSpan, npk)
def dense_solve_prep(model, dataBasics, string, prevSpan, prevNumSpan, span, numSpan, cp, input, wv, bv, activations):
    """Assemble per-(feature, filter) weight and bias tables for a dense layer
    and hand them to dense_safety_solve.

    wv holds entries ((p1, c1), (p, c), w): a weight w connecting input channel
    c1 to output channel c (1-based). bv holds (p, c, w): the bias w of output
    channel c. Returns whatever dense_safety_solve returns: (bl1, newInput).
    """
    # outputs of the layer ("filters") and its inputs ("features")
    nfilters = basics.numberOfFilters(wv)
    nfeatures = basics.numberOfFeatures(wv)

    # weight table keyed by 0-based (feature, filter); entries whose 1-based
    # channel indices fall outside the layer are skipped
    filterCollection = {}
    for ((p1, c1), (p, c), w) in wv:
        if 0 < c1 <= nfeatures and 0 < c <= nfilters:
            filterCollection[c1 - 1, c - 1] = w

    # each filter has a single bias; replicate it across every feature so the
    # table has the same (feature, filter) keying as filterCollection
    biasCollection = {}
    for (p, c, w) in bv:
        if not (0 < c <= nfilters):
            continue
        for featureIdx in range(nfeatures):
            biasCollection[featureIdx, c - 1] = w

    bl1, newInput = dense_safety_solve(nfeatures, nfilters, filterCollection,
                                       biasCollection, input, activations,
                                       prevSpan, prevNumSpan, span, numSpan, cp)

    basics.nprint("completed a round of processing ")
    return (bl1, newInput)
def conv_solve_prep(model, dataBasics, string, originalLayer2Consider, layer2Consider, prevSpan, prevNumSpan, span, numSpan, cp, input, wv, bv, activations): # filters can be seen as the output of a convolutional layer nfilters = basics.numberOfFilters(wv) # features can be seen as the inputs for a convolutional layer nfeatures = basics.numberOfFeatures(wv) # space holders for computation values biasCollection = {} filterCollection = {} for l in range(nfeatures): for k in range(nfilters): filter = [ w for ((p1, c1), (p, c), w) in wv if c1 == l + 1 and c == k + 1 ] bias = [w for (p, c, w) in bv if c == k + 1] if len(filter) == 0 or len(bias) == 0: print "error: bias =" + str(bias) + "\n filter = " + str( filter) else: filter = filter[0] bias = bias[0] # flip the filter for convolve flipedFilter = np.fliplr(np.flipud(filter)) biasCollection[l, k] = bias filterCollection[l, k] = flipedFilter #print filter.shape input2 = copy.deepcopy(input) if originalLayer2Consider > layer2Consider: (bl1, newInput) = conv_safety_solve(layer2Consider, nfeatures, nfilters, filterCollection, biasCollection, input, activations, prevSpan, prevNumSpan, span, numSpan, cp) else: (bl1, newInput) = conv_safety_solve(layer2Consider, nfeatures, nfilters, filterCollection, biasCollection, input, activations, prevSpan, prevNumSpan, span, numSpan, cp) basics.nprint("completed a round of processing of the entire image ") return (bl1, newInput)
def conv_bp_prep(model, input, wv, bv, activations): nfilters = basics.numberOfFilters(wv) nfeatures = basics.numberOfFeatures(wv) print "number of filters: " + str(nfilters) print "number of features in the previous layer: " + str(nfeatures) (_, sizex, sizey) = activations.shape sizex += 2 # space holders for computation values biasCollection = {} filterCollection = {} for l in range(nfeatures): for k in range(nfilters): filter = [ w for ((p1, c1), (p, c), w) in wv if c1 == l + 1 and c == k + 1 ] bias = [w for (p, c, w) in bv if c == k + 1] if len(filter) == 0 or len(bias) == 0: print "error: bias =" + str(bias) + "\n filter = " + str( filter) else: filter = filter[0] bias = bias[0] flipedFilter = np.fliplr(np.flipud(filter)) biasCollection[l, k] = bias filterCollection[l, k] = flipedFilter #print filter.shape (bl, newInput) = conv_bp.bp(nfeatures, nfilters, filterCollection, biasCollection, input, activations) return (bl, newInput)
def regionSynth(model,dataset,image,manipulated,layer2Consider,span,numSpan,numDimsToMani): # get weights and bias of the entire trained neural network (wv,bv) = model.getWeightVector(layer2Consider) # get the type of the current layer layerType = model.getLayerType(layer2Consider) wv2Consider, bv2Consider = basics.getWeight(wv,bv,layer2Consider) # get the activations of the previous and the current layer if layer2Consider == 0: activations0 = image else: activations0 = model.getActivationValue(layer2Consider-1, image) activations1 = model.getActivationValue(layer2Consider,image) if layerType == "Convolution2D" or layerType == "Conv2D": print "convolutional layer, synthesising region ..." if len(activations1.shape) == 3: inds = getTop3D(model,image,activations1,manipulated,span.keys(),numDimsToMani,layer2Consider) elif len(activations1.shape) ==2: inds = getTop2D(model,image,activations1,manipulated,span.keys(),numDimsToMani,layer2Consider) # filters can be seen as the output of a convolutional layer nfilters = basics.numberOfFilters(wv2Consider) # features can be seen as the inputs for a convolutional layer nfeatures = basics.numberOfFeatures(wv2Consider) (nextSpan,nextNumSpan) = conv_region_prep(model,cfg.dataBasics,nfeatures,nfilters,wv2Consider,bv2Consider,activations0,activations1,span,numSpan,inds,numDimsToMani) elif layerType == "Dense": print "dense layer, synthesising region ..." inds = getTop(model,image,activations1,manipulated,numDimsToMani,layer2Consider) # filters can be seen as the output of a convolutional layer nfilters = basics.numberOfFilters(wv2Consider) # features can be seen as the inputs for a convolutional layer nfeatures = basics.numberOfFeatures(wv2Consider) (nextSpan,nextNumSpan) = dense_solve_prep(model,cfg.dataBasics,nfeatures,nfilters,wv2Consider,bv2Consider,activations0,activations1,span,numSpan,inds) elif layerType == "InputLayer": print "inputLayer layer, synthesising region ..." 
nextSpan = copy.deepcopy(span) nextNumSpan = copy.deepcopy(numSpan) elif layerType == "MaxPooling2D": print "MaxPooling2D layer, synthesising region ..." nextSpan = {} nextNumSpan = {} for key in span.keys(): if len(key) == 3: (k,i,j) = key i2 = i/2 j2 = j/2 nextSpan[k,i2,j2] = span[k,i,j] nextNumSpan[k,i2,j2] = numSpan[k,i,j] else: print("error: ") elif layerType == "Flatten": print "Flatten layer, synthesising region ..." nextSpan = copy.deepcopy(span) nextNumSpan = copy.deepcopy(numSpan) nextSpan = {} nextNumSpan = {} for key,value in span.iteritems(): if len(key) == 3: (k,i,j) = key il = len(activations0[0]) jl = len(activations0[0][0]) ind = k * il * jl + i * jl + jl nextSpan[ind] = span[key] nextNumSpan[ind] = numSpan[key] else: print "Unknown layer type %s... "%(str(layerType)) nextSpan = copy.deepcopy(span) nextNumSpan = copy.deepcopy(numSpan) return (nextSpan,nextNumSpan,numDimsToMani)