Example #1
    def __init__(self, Nin, Nh_l, Nout):
        # Nin: input size, Nh_l: hidden-layer sizes, Nout: output-layer size
        self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))  # input placeholder
        self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))  # label placeholder

        # Modeling
        H = Dense(Nh_l[0], activation='relu')(self.X_ph)  # hidden layer 1: takes the input placeholder, outputs Nh_l[0] units
        H = Dropout(0.5)(H)  # dropout
        H = Dense(Nh_l[1], activation='relu')(H)  # hidden layer 2: takes hidden layer 1's output, outputs Nh_l[1] units
        H = Dropout(0.25)(H)  # dropout
        self.Y_tf = Dense(Nout, activation='softmax')(H)  # output layer with softmax

        # Operation
        self.Loss_tf = tf.reduce_mean(
            categorical_crossentropy(self.L_ph, self.Y_tf))  # loss: cross-entropy between labels and outputs
        self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)  # optimizer: Adam
        self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)  # accuracy via the Keras metric
        self.Init_tf = tf.global_variables_initializer()  # TensorFlow global variables initializer
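A minimal usage sketch for the class above, assuming a wrapping class named `DNN` and pre-batched NumPy arrays `X_batch` / `L_batch` (all hypothetical, not part of the original snippet). Because the Keras layers run inside a plain TF session, `K.learning_phase()` must be fed to toggle Dropout:

import tensorflow as tf
from keras import backend as K

model = DNN(Nin=784, Nh_l=[100, 50], Nout=10)  # hypothetical class name
with tf.Session() as sess:
    K.set_session(sess)
    sess.run(model.Init_tf)
    _, loss = sess.run([model.Train_tf, model.Loss_tf],
                       feed_dict={model.X_ph: X_batch,   # hypothetical batch
                                  model.L_ph: L_batch,   # hypothetical labels
                                  K.learning_phase(): 1})  # 1 = training mode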
Example #2
 def call(self, inputs):
     y = inputs[0]
     y_pred = inputs[1]
     # classifier_loss = metrics.binary_crossentropy(y, y_pred)
     classifier_loss = metrics.categorical_crossentropy(y, y_pred)
     self.add_loss(gamma * K.mean(classifier_loss), inputs=inputs)
     return classifier_loss
Example #3
def fast_gradient(model, x, eps=0.25):
    """
    Generates an adversarial example for the model using the fast gradient method

    :param      model   : The model from which to generate an adversarial example
    :param      x       : The original image from which to generate the adversarial example
    :param      eps     : The epsilon parameter that scales the sign of the gradient

    :return:    A tuple containing the adversarial example and the filter (sign of gradient) used to generate it
    """
    signo = np.zeros(np.shape(x))
    xadv = np.zeros(np.shape(x))
    for i, xi in enumerate(x):
        # Predicted result in normal case
        y = model.predict(xi).argmax()

        # Make predicted variable into categorical variable (0s or 1s) of 1000 classes (ImageNet)
        y_categorical = to_categorical(y, 1000)

        # Make the predicted class the target
        esperado = K.variable(y_categorical)

        # Set the loss function as Cross Entropy (for Classification problem)
        costo = metrics.categorical_crossentropy(model.output, esperado)

        # Get the gradient of the function
        gradiente = K.gradients(costo, model.input)
        val_gradiente = K.function([model.input], gradiente)

        # Remember that the adversarial examples are x + eps*sign(gradient)
        signoi = np.sign(val_gradiente([xi])[0])
        xadv[i] = xi + eps * signoi
        signo[i] = signoi

    return xadv, signo
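A hedged usage sketch; `model` and the image batch `x` are assumptions, not part of the original:

x_adv, signs = fast_gradient(model, x, eps=0.25)  # model and x assumed to exist
for xi, xi_adv in zip(x, x_adv):
    # compare the clean and adversarial predictions
    print(model.predict(xi).argmax(), '->', model.predict(xi_adv).argmax())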
Example #4
 def call(self, inputs):
     y = inputs[0]
     y_pred = inputs[1]
     # adversarial_loss = metrics.binary_crossentropy(y, y_pred)
     adversarial_loss = metrics.categorical_crossentropy(y, y_pred)
     self.add_loss(alpha * K.mean(adversarial_loss), inputs=inputs)
     return adversarial_loss
Example #5
    def train(self, mnist, save=False, filepath='./cvae.ckpt'):
        x_ph = tf.placeholder(tf.float32, [None, 784])
        y_ph = tf.placeholder(tf.float32, [None, 10])

        q_mean, q_log_var2 = self._encode(x_ph, y_ph)

        noise = tf.random_normal([self.batch_size, self.z_dim])
        z = tf.add(q_mean, tf.multiply(tf.sqrt(tf.exp(q_log_var2)), noise))

        p_mean = self._decode(z, y_ph)

        log_p_given_z = self.log_likelihood(x_ph, p_mean)
        D_KL = self.kl_divergence(q_mean, q_log_var2)

        low_bound = tf.reduce_mean(log_p_given_z + D_KL)
        train_vae = tf.train.AdamOptimizer(0.0003).minimize(-low_bound)

        # cnn
        pred = self.cnn(x_ph)
        loss = tf.reduce_mean(categorical_crossentropy(y_ph, pred))
        train_cnn = tf.train.AdamOptimizer(1e-4).minimize(loss)
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y_ph, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        self.trained_flg = save
        self.filepath = filepath

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            for i in range(self.epochs):
                ave_loss = []
                ave_cnn = []
                for j in range(mnist.train.images.shape[0] // self.batch_size):
                    batch_xs, batch_ys = mnist.train.next_batch(
                        self.batch_size)
                    _, vae_loss, _, cnn_loss = sess.run(
                        [train_vae, low_bound, train_cnn, loss],
                        feed_dict={
                            x_ph: batch_xs,
                            y_ph: batch_ys,
                            K.learning_phase(): 1
                        })
                    ave_loss.append(vae_loss)
                    ave_cnn.append(cnn_loss)
                result = np.mean(ave_loss)
                cnn_result = np.mean(ave_cnn)
                print(i + 1, result, cnn_result)
                print(
                    "test accuracy %g" % sess.run(accuracy,
                                                  feed_dict={
                                                      x_ph: mnist.test.images,
                                                      y_ph: mnist.test.labels,
                                                      K.learning_phase(): 0
                                                  }))
            if self.trained_flg:
                saver = tf.train.Saver()
                saver.save(sess, filepath)
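A hedged usage sketch for the method above, assuming a wrapping class named `CVAE` and TF 1.x's bundled MNIST reader (both assumptions based on the placeholders in the code):

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)  # one-hot labels, as y_ph expects
cvae = CVAE()  # hypothetical constructor
cvae.train(mnist, save=True, filepath='./cvae.ckpt')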
Example #6
    def label_vae_loss(self, x_word, decoder_word_mean, _z_mean, _z_log_var,
                       x_label, classify_output):

        self.word_loss = K.mean(self.entity_sequence_length *
                                metrics.sparse_categorical_crossentropy(
                                    x_word, decoder_word_mean))

        self.kl_loss = K.mean(-0.5 * K.sum(
            1 + _z_log_var - K.square(_z_mean) - K.exp(_z_log_var), axis=-1))

        self.cls_loss = K.mean(self.alpha * metrics.categorical_crossentropy(
            x_label, classify_output))

        return self.word_loss + self.kl_loss + self.cls_loss
Example #7
def get_gradient_signs(model, original_array):
    target_idx = model.predict(original_array).argmax()
    target = to_categorical(target_idx, 7)
    target_variable = K.variable(target)
    loss = metrics.categorical_crossentropy(model.output, target_variable)
    gradients = K.gradients(loss, model.input)
    get_grad_values = K.function([model.input], gradients)
    grad_values = get_grad_values([original_array])[0]
    grad_signs = np.sign(grad_values)

    return grad_signs
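The returned sign matrix is the FGSM building block; a minimal sketch of the usual composition (`eps` and the arrays are assumptions):

eps = 0.01  # assumed perturbation budget
adversarial_array = original_array + eps * get_gradient_signs(model, original_array)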
Example #8
    def customloss(y_true, y_pred):
        xent_loss = metrics.categorical_crossentropy(y_true, y_pred)

        klosslist = []
        for l in my_model_variational.layers:
            if isinstance(l, VariationalDropoutLayer):
                a = l.alpha
                asq = a * a
                acu = asq * a
                klosslist.append(
                    K.mean((.5 * K.log(a) + c1 * a + c2 * asq + c3 * acu)))

        return K.mean(xent_loss - magicrescale * K.sum(klosslist))
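The names c1, c2, c3 and magicrescale are not defined in this snippet. A plausible reading, stated as an assumption, is the cubic polynomial approximation of the negative KL term for Gaussian dropout from Kingma, Salimans & Welling (2015):

# Assumed values: coefficients of the cubic approximation to the negative
# KL divergence for variational (Gaussian) dropout; the snippet itself
# never defines them.
c1, c2, c3 = 1.16145124, -1.50204118, 0.58629921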
Example #9
def evaluate_ensemble(Best=True):
    '''
    Loads and evaluates an ensemble built from the models in the weights folder.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_test = X_test.reshape(10000, 784)
    X_test = X_test.astype('float32')
    X_test /= 255
    Y_test = np_utils.to_categorical(y_test, 10)

    model_dirs = []
    for i in os.listdir('weights'):
        if '.h5' in i:
            if not Best:
                model_dirs.append(i)
            else:
                if 'Best' in i:
                    model_dirs.append(i)

    preds = []
    model = create_model()
    for mfile in model_dirs:
        print(os.path.join('weights', mfile))
        model.load_weights(os.path.join('weights', mfile))
        yPreds = model.predict(X_test, batch_size=128, verbose=0)
        preds.append(yPreds)

    weighted_predictions = np.zeros((X_test.shape[0], 10), dtype='float64')
    weight = 1. / len(preds)
    for prediction in preds:
        weighted_predictions += weight * prediction
    y_pred = weighted_predictions

    print(type(Y_test))
    print(type(y_pred))
    Y_test = tf.convert_to_tensor(Y_test)
    y_pred = tf.convert_to_tensor(y_pred)
    print(type(Y_test))
    print(type(y_pred))

    loss = metrics.categorical_crossentropy(Y_test, y_pred)
    acc = metrics.categorical_accuracy(Y_test, y_pred)
    sess = tf.Session()
    print('--------------------------------------')
    print('ensemble')
    print('Test loss:', loss.eval(session=sess))
    print('error:', str((1. - acc.eval(session=sess)) * 100) + '%')
    print('--------------------------------------')
Example #10
    def graph_vae_loss_function(self, x, x_decoded_mean):
        x = K.reshape(x, shape=(self.num_nodes * self.num_nodes, ))
        x_decoded_mean = K.reshape(x_decoded_mean, [-1])

        norm = self.adj_matrix.shape[0] * self.adj_matrix.shape[0] / \
               float((self.adj_matrix.shape[0] * self.adj_matrix.shape[0] -
                      self.adj_matrix.sum()) * 2)

        recon_loss = norm * metrics.categorical_crossentropy(
            y_true=x, y_pred=x_decoded_mean)

        kl_loss = -0.5 * K.sum(
            1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var),
            axis=-1)

        return K.mean(recon_loss + kl_loss)
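The `norm` factor rescales the reconstruction term for sparse adjacency matrices: with N nodes and S nonzero entries it equals N*N / (2 * (N*N - S)). A toy check (the 3-node adjacency is an assumption):

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
N, S = adj.shape[0], adj.sum()           # N = 3, S = 4
print(N * N / float(2 * (N * N - S)))    # 9 / 10 = 0.9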
Example #11
    def get_gradient_signs(self, original_array, target_idx=None):
        if target_idx is None:
            model = self.models[0]
            target_idx = model.predict(
                preprocess_input(np.copy(original_array))).argmax(axis=-1)
        else:
            model = np.random.choice(self.models)

        target = to_categorical(target_idx, 1000)
        target_variable = K.variable(target)
        loss = categorical_crossentropy(model.output, target_variable)
        gradients = K.gradients(loss, model.input)
        get_grad_values = K.function([model.input], gradients)
        grad_values = get_grad_values([preprocess_input(original_array)])[0]
        grad_signs = np.sign(grad_values)
        return grad_signs
Example #12
    def train_network(self, network, x_train, y_train, xtest, ytest,
                      batch_size, epochs):
        # TODO: add mean squared error
        network.compile(loss='mean_squared_error',
                        optimizer=keras.optimizers.SGD(lr=0.01),
                        metrics=['accuracy'])

        preds = network.predict(x_train)
        #network.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(xtest, ytest))
        loss = tf.reduce_mean(categorical_crossentropy(y_train, preds))

        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

        score = network.evaluate(xtest, ytest)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
Example #13
 def __init__(self, Nin, Nh_l, Nout):
     self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
     self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
     
     # Modeling
     H = Dense(Nh_l[0], activation='relu')(self.X_ph)
     H = Dropout(0.5)(H)
     H = Dense(Nh_l[1], activation='relu')(H) 
     H = Dropout(0.25)(H)
     self.Y_tf = Dense(Nout, activation='softmax')(H)
     
     # Operation
     self.Loss_tf = tf.reduce_mean(
         categorical_crossentropy(self.L_ph, self.Y_tf))
     self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
     self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
     self.Init_tf = tf.global_variables_initializer()
Example #14
    def get_gradient_signs(self, raw, target_idx):
        metamodel = np.random.choice((self.model1, self.model2))

        raw = adjust_size(raw, (224, 224, 3)) if metamodel.need_resize else raw

        original_array = metamodel.preprocess(raw.astype('float32'))
        model = metamodel.model
        target = to_categorical(target_idx, 1000)
        target_variable = K.variable(target)
        loss = categorical_crossentropy(model.output, target_variable)
        gradients = K.gradients(loss, model.input)
        get_grad_values = K.function([model.input], gradients)
        grad_values = get_grad_values([original_array])[0]
        grad_values = adjust_size(grad_values, (299, 299, 3))

        grad_signs = np.sign(grad_values)
        return grad_signs
Example #15
def generate_example(model, eps, original_array):
    target_idx = model.predict(original_array).argmax()
    target = to_categorical(target_idx, 10)
    target_variable = K.variable(target)

    loss = metrics.categorical_crossentropy(model.output, target_variable)

    gradients = K.gradients(loss, model.input)
    get_grad_values = K.function([model.input], gradients)
    grad_values = get_grad_values([original_array])[0]
    grad_signs = np.sign(grad_values)

    perturbation = grad_signs * eps
    print(perturbation[0])
    # perturbation = perturbation.astype(np.uint8)
    modified_array = original_array + perturbation
    # modified_array = modified_array.astype(np.uint8)
    return perturbation, modified_array
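A hedged usage sketch (`model`, `original_array`, and the epsilon value are assumptions):

perturbation, x_adv = generate_example(model, eps=0.1, original_array=original_array)
print(model.predict(original_array).argmax(), '->', model.predict(x_adv).argmax())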
Example #16
def get_gradient_signs(model, original_array, targid):
    target_idx = targid  #model.predict(original_array).argmin()
    target = to_categorical(target_idx, 4)
    target_variable = K.variable(target)
    num_samples = 30
    grad_values = 0
    for i in range(num_samples):
        print(i)
        shift_ind = randint(0, 3000)
        ecg_shift = np.roll(original_array, -shift_ind)
        layer_name = 'dense_{}'.format(1)
        layer_dict = dict([(layer.name, layer) for layer in model.layers])
        layer_output = layer_dict[layer_name].output
        loss = metrics.categorical_crossentropy(layer_output, target_variable)
        #average_loss = average_loss + loss / num_samples
        gradients = K.gradients(loss, model.input)
        get_grad_values = K.function([model.input], gradients)
        grad_values = grad_values + get_grad_values([ecg_shift])[0] / num_samples
    #grad_signs = np.sign(grad_values)
    grad_value = grad_values / np.amax(grad_values)
    return grad_value
Example #17
def fast_gradient_batch_generation(model, x, eps=0.25):
    """
    Generates a batch of adversarial examples for the model using the fast gradient method

    :param      model   : The model from which to generate an adversarial example
    :param      x       : An array of images from which to generate the adversarial examples
    :param      eps     : The epsilon parameter that scales the sign of the gradient

    :return:    A tuple containing the list of adversarial examples generated and the
                list of filters used to generate them
    """
    xadv = []
    filter = []
    for x_image in x:
        x_image = image.img_to_array(x_image)
        x_image = np.asarray([x_image])
        # Predicted result in normal case
        y = model.predict(x_image).argmax()

        # Make predicted variable into categorical variable (0s or 1s) of 1000 classes (ImageNet)
        y_categorical = to_categorical(y, 1000)

        # Make the predicted class the target
        esperado = K.variable(y_categorical)

        # Set the loss function as Cross Entropy (for Classification problem)
        costo = metrics.categorical_crossentropy(model.output, esperado)

        # Get the gradient of the function
        gradiente = K.gradients(costo, model.input)
        val_gradiente = K.function([model.input], gradiente)

        # Remember that the adversarial examples are x + eps*sign(gradient)
        signo = np.sign(val_gradiente([x_image])[0])[0]
        xadv.append(np.array(np.squeeze(x_image + eps * signo), dtype=int))
        filter.append(signo)

    return xadv, filter
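A hedged usage sketch; the file name and the 224x224 ImageNet model are assumptions:

from keras.preprocessing import image

pil_images = [image.load_img('cat.jpg', target_size=(224, 224))]  # hypothetical file
adv_images, sign_filters = fast_gradient_batch_generation(model, pil_images, eps=0.25)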
Example #18
    def build_model(self):
        x = Input(shape=(self.input_dim, ))
        # Compute the mean and variance of p(Z|X)
        z_mean = Dense(self.latent_dim)(x)
        z_log_var = Dense(self.latent_dim)(x)
        # Reparameterization layer: equivalent to adding noise to the input
        z = Lambda(self._sampling,
                   output_shape=(self.latent_dim, ))([z_mean, z_log_var])
        # Decoder layer, i.e. the generator part
        x_decoded_mean = Dense(self.input_dim, activation='sigmoid')(z)

        # Build the models
        self.vae = Model(x, x_decoded_mean)
        self.encoder = Model(x, z)

        #
        z_input = Input(shape=(self.latent_dim, ))
        generated = self.vae.layers[-1](z_input)

        self.decoder = Model(z_input, generated)

        # xent_loss is the reconstruction loss; kl_loss is the KL loss
        # xent_loss = self.input_dim * metrics.binary_crossentropy(x, x_decoded_mean)
        xent_loss = self.input_dim * metrics.categorical_crossentropy(
            x, x_decoded_mean)
        kl_loss = -0.5 * K.sum(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        vae_loss = K.mean(xent_loss + kl_loss)

        # add_loss is a newer method for adding losses more flexibly
        self.vae.add_loss(vae_loss)
        self.vae.compile(optimizer=optimizers.RMSprop(lr=config.learning_rate),
                         loss=None)  # default learning rate 0.001
        # self.vae.compile(optimizer=optimizers.Adadelta(lr=1.0), loss=None)  # default learning rate 1.0

        self.z_mean_model = Model(x, z_mean)
        self.z_log_var_model = Model(x, z_log_var)
Example #19
elif args.network == "ResNet50":
    net = getattr(keras_helpers, args.network)()
    data_shape = [224, 224, 3]
elif args.network == "GoogLeNet":
    net = getattr(keras_helpers, args.network)()
    data_shape = [224, 224, 3]
else:
    sys.exit("Unknown Network")

fake_data = np.random.rand(args.train_batch, data_shape[0], data_shape[1], data_shape[2])
tmp_fake_labels = np.random.randint(0, high=1000, size=args.train_batch)
fake_labels = np.zeros([args.train_batch, 1000])
for i in range(args.train_batch):
    fake_labels[i, tmp_fake_labels[i]] = 1

loss = categorical_crossentropy(net.y_, net.y)
top1 = categorical_accuracy(net.y_, net.y)
top5 = top_k_categorical_accuracy(net.y_, net.y, 5)

base_lr = 0.02
step = tf.Variable(0, trainable=False, name="Step")
learning_rate = tf.train.exponential_decay(base_lr, step, 1, 0.999964)

weight_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "W:0"]
bias_list = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name[-3:] == "b:0"]

optimizer1 = tf.train.MomentumOptimizer(learning_rate, 0.9)
optimizer2 = tf.train.MomentumOptimizer(tf.scalar_mul(2.0, learning_rate), 0.9)
grads = optimizer1.compute_gradients(loss, var_list=weight_list+bias_list)
w_grads = grads[:len(weight_list)]
b_grads = grads[len(weight_list):]
Example #20
original_pic = Image.open(filename).resize((32, 32))
original_array = np.expand_dims(np.array(original_pic), 0)

plt.imshow(original_array[0])
original_array = np.swapaxes(original_array, 2, 3)
original_array = np.swapaxes(original_array, 1, 2)

pred = model.predict(original_array)
preds = [(class_names[i], pred[0][i]) for i in range(10)]

print(preds)

target_idx = model.predict(original_array).argmax()
target = to_categorical(target_idx, 10)
target_variable = K.variable(target)
loss = metrics.categorical_crossentropy(model.output, target_variable)
gradients = K.gradients(loss, model.input)
get_grad_values = K.function([model.input], gradients)
grad_values = get_grad_values([original_array])[0]
grad_signs = np.sign(grad_values)

epsilon = 10000
perturbation = grad_signs * epsilon
modified_array = original_array + perturbation
modified_array = modified_array.astype(np.uint8)
modified_pred = model.predict(modified_array)
modified_preds = [(class_names[i], modified_pred[0][i]) for i in range(10)]
print(preds)
print(modified_preds)

original_array = np.swapaxes(original_array, 1, 2)
Example #21
def categorical_crossentropy_abs(y_true, y_pred):
    y_true = y_true[:, 2:]
    return categorical_crossentropy(y_true, y_pred)
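A minimal sketch of the packed-label convention this loss assumes: the first two columns of y_true carry per-sample weights (see the tests in Example #23 below) and are stripped before the cross-entropy. The array values are illustrative assumptions:

import numpy as np

# [w, w, one-hot label...]; categorical_crossentropy_abs only sees [0, 1, 0]
y_true_w = np.array([[1., 1., 0., 1., 0.]])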
Example #22
x = Input(shape=(28, 28, 1))
y_ = tf.placeholder(tf.float32, [None, 10])
y = Convolution2D(20, 5, 5, W_regularizer=l2(args.decay_coefficient), b_regularizer=l2(args.decay_coefficient),
                  border_mode='same', activation='relu')(x)
y = MaxPooling2D(strides=(2, 2), border_mode='same')(y)
y = Convolution2D(50, 5, 5, W_regularizer=l2(args.decay_coefficient), b_regularizer=l2(args.decay_coefficient),
                  border_mode='same', activation='relu')(y)
y = MaxPooling2D(strides=(2, 2), border_mode='same')(y)
y = Flatten()(y)
y = Dense(500, activation='relu', W_regularizer=l2(args.decay_coefficient), b_regularizer=l2(args.decay_coefficient))(y)
y = Dense(10, activation='softmax', W_regularizer=l2(args.decay_coefficient),
          b_regularizer=l2(args.decay_coefficient))(y)
correct = tf.nn.in_top_k(y_, tf.argmax(y, 1), 1)
accuracy = tf.reduce_sum(tf.cast(correct, tf.int32))

cross_entropy = categorical_crossentropy(y_, y)

step = tf.Variable(0, trainable=False, name="Step")
learning_rate = 0.01
learning_rate = tf.train.inverse_time_decay(learning_rate, step, 1, 0.0001)
opt1 = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = opt1.minimize(cross_entropy, global_step=step)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

total_it = 0
global_start = time.time()
while total_it < args.iterations:
    epoch_start = time.time()
Example #23
def tests():
    
    import numpy as np
    from keras.activations import softmax
    from keras.objectives import categorical_crossentropy
    
    # 1. weights = 1
    y_true = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]])
    # y_true_w includes the weights; here all weights are set to 1
    y_true_w = np.array([[1,1, 0, 1, 0], [1,1, 1, 0, 0], [1,1, 0, 0, 1], [1,1, 1, 0, 0]])

    # the second prediction is incorrect
    y_pred = np.array([[0, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
    # softmax version to feed in categorical_crossentropy
    y_pred = K.variable(y_pred)
    y_pred_s = softmax(y_pred)
    y_pred_s_eval = y_pred_s.eval(session=K.get_session())
    
    loss_abs1 = categorical_crossentropy(y_true,y_pred_s).eval(session=K.get_session())
    loss_abs2 = categorical_crossentropy_abs(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_abs1,loss_abs2)
    
    loss_abs3 = categorical_focal_loss(gamma=0, alpha=1)(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_abs1,loss_abs3)
    
    artist_weights_matrix = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    loss_artits_matr = w_categorical_crossentropy_matrix(artist_weights_matrix)
    loss_w1a = loss_artits_matr(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_abs1,loss_w1a)

#    loss_artist_wb = lambda y_true, y_pred: w_categorical_crossentropy(y_true, y_pred, weights=artist_weights_matrix)
#    loss_w1b = loss_artist_wb(y_true_w,y_pred_s).eval(session=K.get_session())

    loss_w1b = w_categorical_crossentropy(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_abs1,loss_w1b)

    loss_w1c = w_categorical_focal_loss(gamma=0, alpha=1)(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_w1b,loss_w1c)
    
    
    # 2. weights != 1 ==> weight 2 for the 1st artist
    # change ground truth and weights
    # 1 error at the second line + 1 at the last line
    y_true_w = np.array([[1,1, 0, 1, 0], [2,2, 1, 0, 0], [1,1, 0, 0, 1], [1,1, 0, 1, 0]])
    artist_weights_matrix_2 = np.array([[1, 2, 2], [1, 1, 1], [1, 1, 1]])
    loss_artits_w_2 = categorical_crossentropy_w_wrap(artist_weights_matrix_2)
    loss_w2a = loss_artits_w_2(y_true_w,y_pred_s).eval(session=K.get_session())
    loss_abs2 = categorical_crossentropy_abs(y_true_w,y_pred_s).eval(session=K.get_session())
    
    loss_artits_wb = w_categorical_crossentropy_matrix(artist_weights_matrix_2)
    #loss_artits_wb = lambda y_true, y_pred: w_categorical_crossentropy(y_true, y_pred, weights=artist_weights_matrix_2)
    loss_w2b = loss_artits_wb(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_w2a,loss_w2b)
    
    loss_w2c = w_categorical_crossentropy(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_w2b,loss_w2c)

    loss_w2d = w_categorical_focal_loss(gamma=0, alpha=1)(y_true_w,y_pred_s).eval(session=K.get_session())
    np.testing.assert_almost_equal(loss_w2b,loss_w2d)

    # non-weighted focal loss with default param
    loss_w2e = categorical_focal_loss(gamma=2, alpha=.25)(y_true_w,y_pred_s).eval(session=K.get_session())

    # weighted focal loss with default param
    loss_w2f = w_categorical_focal_loss(gamma=2, alpha=.25)(y_true_w,y_pred_s).eval(session=K.get_session())
    print('done')
    
#tests()
Example #24
def loss2_1(y_true, y_pred):  # the values get very large; why?
    y_true = K.flatten(y_true)  # K.reshape would also work
    y_pred = K.flatten(y_pred)
    #y_pred = K.softmax(y_pred)
    xent_loss = metrics.categorical_crossentropy(y_true, y_pred)
    return xent_loss
Example #25
 def categorical_log_likelihood(self, x, y):
     # $\log p(y)$
     return -categorical_crossentropy(y, x)
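For a one-hot y, the negative cross-entropy reduces to the log-probability assigned to the true class, which is exactly the categorical log-likelihood; a quick numeric check (values are assumptions):

import numpy as np

y = np.array([0., 1., 0.])      # one-hot label
x = np.array([0.2, 0.7, 0.1])   # predicted probabilities
print(np.sum(y * np.log(x)))    # log(0.7) = -0.357: the log-likelihood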
Example #26
    net = getattr(keras_nets, args.network)()
    data_shape = [224, 224, 3]
elif args.network == "GoogLeNet":
    net = getattr(keras_nets, args.network)()
    data_shape = [224, 224, 3]
else:
    sys.exit("Unknown Network")

fake_data = np.random.rand(args.train_batch, data_shape[0], data_shape[1],
                           data_shape[2])
tmp_fake_labels = np.random.randint(0, high=1000, size=args.train_batch)
fake_labels = np.zeros([args.train_batch, 1000])
for i in range(args.train_batch):
    fake_labels[i, tmp_fake_labels[i]] = 1

loss = categorical_crossentropy(net.y_, net.y)
top1 = categorical_accuracy(net.y_, net.y)
top5 = top_k_categorical_accuracy(net.y_, net.y, 5)

base_lr = 0.02
step = tf.Variable(0, trainable=False, name="Step")
learning_rate = tf.train.exponential_decay(base_lr, step, 1, 0.999964)

weight_list = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if v.name[-3:] == "W:0"
]
bias_list = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if v.name[-3:] == "b:0"
]
Example #27
                  border_mode='same',
                  activation='relu')(y)
y = MaxPooling2D(strides=(2, 2), border_mode='same')(y)
y = Flatten()(y)
y = Dense(500,
          activation='relu',
          W_regularizer=l2(args.decay_coefficient),
          b_regularizer=l2(args.decay_coefficient))(y)
y = Dense(10,
          activation='softmax',
          W_regularizer=l2(args.decay_coefficient),
          b_regularizer=l2(args.decay_coefficient))(y)
correct = tf.nn.in_top_k(y_, tf.argmax(y, 1), 1)
accuracy = tf.reduce_sum(tf.cast(correct, tf.int32))

cross_entropy = categorical_crossentropy(y_, y)

step = tf.Variable(0, trainable=False, name="Step")
learning_rate = 0.01
learning_rate = tf.train.inverse_time_decay(learning_rate, step, 1, 0.0001)
opt1 = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = opt1.minimize(cross_entropy, global_step=step)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

total_it = 0
global_start = time.time()
while total_it < args.iterations:
    epoch_start = time.time()
Example #28
def generate_adversarial_examples(model,
                                  img_path,
                                  epsilon=5,
                                  source_idx=None,
                                  target_idx=None,
                                  size=299):

    K.set_learning_phase(0)

    x = image.load_img(img_path, target_size=(size, size))
    x = image.img_to_array(x)
    x = np.expand_dims(x, axis=0)
    preprocessed_array = preprocess_input(x)
    preprocessed_array[0, 210:260, 210:260, :] = preprocess_input(
        np.random.uniform(0, 255, (1, 50, 50, 3)))
    current_perturbation = np.zeros((1, 50, 50, 3))

    target = to_categorical(target_idx, 1000)
    target_variable = K.variable(target, dtype=tf.float32)
    source = to_categorical(source_idx, 1000)
    source_variable = tf.Variable(source, dtype=tf.float32)

    init_new_vars_op = tf.variables_initializer(
        [target_variable, source_variable])
    sess.run(init_new_vars_op)

    class_variable_t = target_variable
    loss_func_t = metrics.categorical_crossentropy(model.output.op.inputs[0],
                                                   class_variable_t)
    get_grad_values_t = K.function([model.input],
                                   K.gradients(loss_func_t, model.input))

    class_variable_s = source_variable
    loss_func_s = metrics.categorical_crossentropy(model.output.op.inputs[0],
                                                   class_variable_s)
    get_grad_values_s = K.function([model.input],
                                   K.gradients(loss_func_s, model.input))

    cnt = 0

    # 300 --> the max number of iterations, change it if necessary

    while cnt < 300:

        start = time.time()

        # Prints relevant info every iteration
        print(preprocessed_array[0, 2, 2, 0],
              predict_array(model, preprocessed_array, source_idx, target_idx),
              cnt)

        grad_values_t, grad_values_s = get_grad_values_t(
            [preprocessed_array]), get_grad_values_s([preprocessed_array])

        diff = grad_values_t[0] - grad_values_s[0]

        perturbation_update = -diff * epsilon

        current_perturbation += perturbation_update[:, 210:260, 210:260, :]

        preprocessed_array[0, 210:260, 210:260, :] = current_perturbation

        end = time.time()
        # Prints time it costs each iteration
        print(end - start)

        cnt += 1

    return preprocessed_array
Example #29
def categorical_crossentropy_weighted(y_true, y_pred):
    weights = y_true[:, 0]
    y_true_val = y_true[:, 1:]
    return weights * categorical_crossentropy(y_true_val, y_pred)
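Here the convention differs from Example #21: only the first column of y_true is a weight, and the remaining columns are the one-hot label. A minimal sketch (values are assumptions):

import numpy as np

# weight 2.0 in column 0; one-hot label (class 1) in the rest
y_true = np.array([[2., 0., 1., 0.]])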
Example #30
 def my_loss(self, probabilities, labels):
     # Keras losses expect (y_true, y_pred) ordering
     loss = metrics.categorical_crossentropy(labels, probabilities)
     return K.mean(loss)