Example #1
import numpy as np

import fun  # 'fun' is the project's helper module providing act() and softmax() (assumed importable under this name)


def IterativeGradSign_h1(x, epsilon, grad_th, sbst):
    W1 = sbst['W1']
    b1 = sbst['b1']
    W2 = sbst['W2']
    b2 = sbst['b2']
    actfun = sbst['act']

    nInp = len(x)
    nHid, nCls = np.shape(W2)
    X = np.array(x)
    W1 = np.array(W1)
    W2 = np.array(W2)

    Z = np.dot(X, W1) + b1  # weighted sum of layer 1
    Y = fun.act(Z, actfun)  # sigmoid output of the hidden layer
    S = np.dot(Y, W2) + b2  # weighted sum of layer 2
    L = fun.softmax(S)  # softmax output: likelihood of every class

    # Jacobian of the logits S w.r.t. the input x: A[i, c] = dS_c / dx_i
    # (sigmoid derivative Y*(1-Y) applied to W1, then propagated through W2)
    A = np.dot(np.multiply(np.multiply(Y, 1 - Y), W1), W2)

    out = np.zeros((nCls, nInp))

    for c in range(nCls):  # Craft samples for all target classes
        T = np.zeros(nCls)  # one-hot encoding of the target class c
        T[c] = 1

        G = (1 - T) / (1 - L) - T / L  # derivative of the cross-entropy loss w.r.t. the softmax output L

        # Derivative of the softmax outputs w.r.t. the logit of the target class c
        D = np.zeros(nCls)
        for k in range(nCls):
            if k == c:
                D[k] = L[k] * (1 - L[c])
            else:
                D[k] = -L[k] * L[c]
        D = np.repeat(np.array([D]), nCls, axis=0)
        # Chain rule: gradient of the loss w.r.t. the input, G . D . A^T
        Gradient = np.dot(G, np.dot(D, np.transpose(A)))

        # Zero out gradient components whose magnitude is below the threshold grad_th
        Gradient[np.abs(Gradient) <= grad_th] = 0
        out[c] = x + epsilon * np.sign(Gradient)  # fast gradient sign step of size epsilon

    out = np.clip(out, 0, 255)  # keep crafted samples in the valid pixel range
    return out
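For context, a minimal usage sketch follows. It assumes the substitute model is stored as a dict with the keys read above ('W1', 'b1', 'W2', 'b2', 'act'), that the 'act' entry names a sigmoid activation understood by fun.act, and that raw pixel inputs lie in [0, 255]; the network sizes and the epsilon/grad_th values are illustrative only, not taken from the original code.

import numpy as np

# Hypothetical substitute network: 784 inputs, 64 hidden units, 10 classes
rng = np.random.default_rng(0)
sbst = {
    'W1': rng.normal(scale=0.1, size=(784, 64)),
    'b1': np.zeros(64),
    'W2': rng.normal(scale=0.1, size=(64, 10)),
    'b2': np.zeros(10),
    'act': 'sigmoid',  # assumed label understood by fun.act
}

x = rng.uniform(0, 255, size=784)  # one flattened input image (illustrative)
adv = IterativeGradSign_h1(x, epsilon=8.0, grad_th=1e-6, sbst=sbst)
print(adv.shape)  # (10, 784): one crafted sample per target class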
Example #2
import numpy as np
from scipy.optimize import minimize

import fun  # 'fun' is the project's helper module providing act() and softmax() (assumed importable under this name)


def Opt_L_BFGS_h1(x, sbst, coeff):
    W1 = sbst['W1']
    b1 = sbst['b1']
    W2 = sbst['W2']
    b2 = sbst['b2']
    actfun = sbst['act']

    nInp = len(x)
    nHid, nCls = np.shape(W2)
    W1 = np.array(W1)
    W2 = np.array(W2)

    out = np.zeros((nCls, nInp))

    for c in range(nCls):  # Craft samples for all target classes
        def objfun(x_adv):
            # Forward pass through the one-hidden-layer substitute network
            hidden = fun.act(np.dot(x_adv, W1) + b1, actfun)
            probs = fun.softmax(np.dot(hidden, W2) + b2)
            # Distortion penalty plus negative log-likelihood of the target class c
            return coeff * np.linalg.norm(x_adv - x) - np.log(probs[c])

        # Box constraints: every input feature must stay in [0, 1]
        bnds = np.concatenate((np.zeros((nInp, 1)), np.ones((nInp, 1))), axis=1)
        optRes = minimize(objfun, x, method='L-BFGS-B', bounds=bnds)
        out[c] = optRes.x
    return out
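A similar usage sketch for the L-BFGS variant, under the same assumptions about the sbst dict and the fun module. The box constraints in the function force every feature of the crafted sample into [0, 1], so the input is assumed to be scaled to that range; the coeff value is illustrative, with smaller values weighting the misclassification term more heavily and allowing larger distortions.

import numpy as np

# Hypothetical substitute network with inputs scaled to [0, 1]
rng = np.random.default_rng(1)
sbst = {
    'W1': rng.normal(scale=0.1, size=(784, 64)),
    'b1': np.zeros(64),
    'W2': rng.normal(scale=0.1, size=(64, 10)),
    'b2': np.zeros(10),
    'act': 'sigmoid',  # assumed label understood by fun.act
}

x = rng.uniform(0, 1, size=784)  # one flattened, normalised input image (illustrative)
adv = Opt_L_BFGS_h1(x, sbst, coeff=0.1)
print(adv.shape)  # (10, 784): one optimised sample per target class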