Example 1
def dsc_int(y_true, y_pred, smooth=0.00001):
    y_pred_int = 0.5 * (K.sign(y_pred - 0.5) + 1)
    y_true_int = 0.5 * (K.sign(y_true - 0.5) + 1)
    num = 2.0 * K.sum(K.abs(y_true_int * y_pred_int), axis=(1, 2))
    den = K.sum(K.abs(y_true_int), axis=(1, 2)) + K.sum(K.abs(y_pred_int),
                                                        axis=(1, 2)) + smooth
    return 1.0 - (num / den)
Example 2

    def adv_loss(self, eps):
        outputs = [K.stop_gradient(x) for x in _to_list(self.outputs)]
        r_adv = [K.epsilon()*K.sign(K.random_normal(K.shape(x))) for x in self.inputs]

        # compute the gradient of the (negative KL-divergence) loss w.r.t. r_adv
        new_inputs = [x + r for (x, r) in zip(self.inputs, r_adv)]
        new_outputs = _to_list(self.call(new_inputs))

        losses = [
            loss_weight * K.mean(loss_function(y_true, y_pred))
            for (y_true, y_pred, loss_weight, loss_function)
            in zip(outputs,
                   new_outputs,
                   self.loss_weights,
                   self.loss_functions)
        ]
        loss = reduce(lambda t, x: t + x, losses, 0)

        grads = [ K.stop_gradient(r) for r in K.gradients(loss, r_adv)]

        new_inputs = [x + eps * K.sign(g) for (x, g) in zip(self.inputs, grads)]
        new_outputs = _to_list(self.call(new_inputs))
        losses = [
            loss_weight * K.mean(loss_function(y_true, y_pred))
            for (y_true, y_pred, loss_weight, loss_function)
            in zip(outputs,
                   new_outputs,
                   self.loss_weights,
                   self.loss_functions)
        ]

        loss = reduce(lambda t, x: t + x, losses, 0)
        return loss
Example 3
def weighted_mse_returns(yTrue, yPred):
    """
    Custom loss for the LSTM model: a weighted root mean squared error that
    gives more importance to the first *lags*.

    Parameters:
    - yTrue -- ground-truth values the predictions are compared against
    - yPred -- predictions used to compute the error

    Returns:
    - [value] -- weighted root mean squared error
    """
    from keras import backend as K

    ones = K.ones_like(yTrue[0, :])  # a simple vector of ones shaped as (n_series,)
    idx = K.cumsum(ones)  # similar to 'range(1, n_series + 1)'

    std = K.std(yTrue)
    mean = K.mean(K.abs(yTrue))

    # weighted rmse
    l1 = K.abs(K.sqrt(K.mean((1 / idx) * K.square(yTrue - yPred))) - mean) / mean
    # difference of standard deviation, for fluctuation (prevents under-fitting)
    l2 = K.abs(std - K.std(yPred)) / std
    # direction accuracy (computed but not included in the returned loss)
    l3 = K.mean(K.cast(K.not_equal(K.sign(yTrue), K.sign(yPred)), dtype='float32'))

    return l1 + l2
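A minimal usage sketch for the loss above (the model architecture, `timesteps`, `n_series` and the training data are placeholders, not from the original source): a custom loss like this is passed directly to `compile()`.

# Hypothetical usage sketch: wire weighted_mse_returns into a small LSTM model.
from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential([
    LSTM(32, input_shape=(timesteps, n_series)),  # timesteps, n_series assumed defined
    Dense(n_series),
])
model.compile(optimizer='adam', loss=weighted_mse_returns)
# model.fit(X_train, y_train, epochs=10)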
Example 4
 def call(self, x):
     input_x, center, size = x[0], x[1], x[2]
     input_x = K.expand_dims(input_x, axis=2)
     center = K.expand_dims(center, axis=1)
     size = K.expand_dims(size, axis=1)
     dist = K.abs(input_x - center)
     yorn = 1 - K.sign(K.mean(K.sign(dist - size), axis=3) + 1)
     return yorn
Example 5
def sign_ae(x, y):
    """
    Checks the sign of x and y and returns a function of the difference
    """
    sign_x = K.sign(x)
    sign_y = K.sign(y)
    delta = x - y
    return sign_x * sign_y * K.abs(delta)
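A small sanity check of the behaviour described in the docstring (illustrative values only, assuming a TensorFlow-backed Keras session):

# Illustrative check: positive where signs agree, negative where they differ.
import numpy as np
from keras import backend as K

x = K.constant(np.array([2.0, -3.0, 1.0]))
y = K.constant(np.array([0.5, -1.0, -2.0]))
print(K.eval(sign_ae(x, y)))  # -> [ 1.5  2.0  -3.0 ]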
Example 6
def tf_auc_hl(y_true, y_pred):
    y_true = K.sign(K.sign(y_true) + 1.0)  # map {-1, +1} labels to {0, 1}
    y_pred = (y_pred + 1.0) * 0.5  # rescale [-1, 1] scores to [0, 1]
    score, up_opt = tf.metrics.auc(y_true, y_pred)
    K.get_session().run(tf.local_variables_initializer())
    with tf.control_dependencies([up_opt]):
        score = tf.identity(score)
    return score
Example 7

def lossFuncNewS(y, yout):
    # wY1 is assumed to be defined at module scope (a sign-dependent weight)
    yW = (K.sign(-y - 0.1) + 1) * 100 * (K.sign(yout - 0.35) + 1) + 1
    y = (K.sign(y + 0.1) + 1) * y / 2
    y0 = 0.13
    return -K.mean(
        (y * K.log(yout + 1e-9) / y0 + (1 - y) * (K.log(1 - yout + 1e-9)) /
         (1 - y0)) * (y * 0 + 1) * (1 + K.sign(y) * wY1) * yW,
        axis=[0, 1, 2, 3])
Example 8
  def loss_function(self, y_real, y_pred):
    # L = (mse scores) + D * (mse diff) + correct_winner_regularization
    D = 0.5
    mse = K.mean(K.square(y_pred - y_real), axis=-1) + \
          D * K.mean(K.square(K.abs(y_pred[1] - y_pred[0]) -
                              K.abs(y_real[1] - y_real[0])), axis=-1)

    # Penalize predictions that pick the wrong winner. The comparison has to be
    # symbolic: Python's `==` on Keras tensors compares graph nodes, not values.
    same_winner = K.cast(K.equal(K.sign(y_pred[0] - y_pred[1]),
                                 K.sign(y_real[0] - y_real[1])), K.floatx())
    correct_winner_regularization = (1. - same_winner) * C.WRONG_WINNER_PENALTY
    loss = mse + correct_winner_regularization

    return loss
Example 9
def custom_acc(y_true, y_pred):
    # count = 0
    # for i in range (y_true.shape[0]):
    #     if y_pred[i] >= 0 and y_true[i] >= 0:
    #         count = count + 1
    #     if y_pred[i] < 0 and y_true[i] < 0:
    #         count = count + 1
    # return count * 1.0 / y_true.shape[0]
    return K.mean(K.equal(K.sign(y_true), K.sign(y_pred)))
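A minimal sketch of using the metric during training (the commented-out loop above describes the same sign-agreement count); the model, optimizer and data names are placeholders:

# Hypothetical usage: report sign-agreement accuracy alongside a regression loss.
model.compile(optimizer='adam', loss='mse', metrics=[custom_acc])
# model.fit(X_train, y_train, validation_data=(X_val, y_val))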
Example 10
    def loss_function(self, y_real, y_pred):
        mse = K.mean(K.square(y_pred - y_real), axis=-1)

        # Penalize a wrongly predicted winner; compare the signs symbolically
        # (Python's `==` on Keras tensors compares graph nodes, not values).
        same_winner = K.cast(K.equal(K.sign(y_pred[0] - y_pred[1]),
                                     K.sign(y_real[0] - y_real[1])), K.floatx())
        correct_winner_regularization = (1. - same_winner) * C.WRONG_WINNER_PENALTY
        loss = mse + correct_winner_regularization

        return loss
Example 11
def build_model(img_x, img_y):
    input_shape = Input(shape=(img_x, img_y, 3))

    conv_0 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1),
                    activation='relu')(input_shape)
    max_p0 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv_0)
    conv_1 = Conv2D(32, (3, 3), strides=(1, 1), activation='relu')(max_p0)
    max_p1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv_1)
    conv_2 = Conv2D(32, (3, 3), strides=(1, 1), activation='relu')(max_p1)
    max_p2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv_2)
    conv_3 = Conv2D(32, (3, 3), strides=(1, 1), activation='relu')(max_p2)
    max_p3 = MaxPooling2D(pool_size=(1, 1), strides=(1, 1))(conv_3)
    flatten = Flatten()(max_p3)
    dense1 = Dense(4024, activation='relu')(flatten)
    dense2 = Dense(512, activation='sigmoid')(dense1)
    merged_fc = concatenate([dense1, dense2])
    #binary output
    hash_fc = Dense(
        50,
        activation=Lambda(lambda z: tf.divide(
            tf.add(
                K.sign(tf.subtract(keras.layers.activations.sigmoid(x=z), 0.5)
                       ),
                K.abs(
                    K.sign(
                        tf.subtract(keras.layers.activations.sigmoid(x=z), 0.5)
                    ))), 2)),
        kernel_initializer="lecun_normal")(merged_fc)

    anchor = Input(shape=(60, 160, 3))
    positive = Input(shape=(60, 160, 3))
    a_negative = Input(shape=(60, 160, 3))
    p_negative = Input(shape=(60, 160, 3))

    reid_model = Model(inputs=[input_shape], outputs=[hash_fc])

    anchor_embed = reid_model(anchor)
    positive_embed = reid_model(positive)
    a_negative_embed = reid_model(a_negative)
    p_negative_embed = reid_model(p_negative)

    merged_output = concatenate(
        [anchor_embed, positive_embed, a_negative_embed, p_negative_embed])
    #loss = Lambda(structured_triplet_loss, (1,))(merged_output)

    #model = Model(inputs=[anchor, positive, a_negative, p_negative], outputs=loss)
    #model.compile(optimizer='Adam', loss='mse',
    #              metrics=["mae"])
    model = Model(inputs=[anchor, positive, a_negative, p_negative],
                  outputs=[merged_output])
    model.compile(optimizer='Adam',
                  loss=structured_triplet_loss,
                  metrics=[structured_triplet_loss])
    return model
Example 12
def dsc_int(y_true, y_pred, smooth=0.00001):
    if settings.options.D3:
        myaxis = (1, 2, 3)
    else:
        myaxis = (1, 2)
    y_pred_int = 0.5 * (K.sign(y_pred - 0.5) + 1)
    y_true_int = 0.5 * (K.sign(y_true - 0.5) + 1)
    num = 2.0 * K.sum(K.abs(y_true_int * y_pred_int), axis=myaxis)
    den = K.sum(K.abs(y_true_int), axis=myaxis) + K.sum(K.abs(y_pred_int),
                                                        axis=myaxis) + smooth
    return 1.0 - (num / den)
Example 13
def shrink_weights_fn_creator(weight_list, alpha=0.01):
    # build updates
    updates = []
    L1_metric = 0
    for w in weight_list:
        # clamp the step magnitude to |w| so a weight never overshoots past zero
        step = K.sign(w) * K.minimum(K.abs(w), K.abs(alpha * K.sign(w)))
        updates.append(K.update_add(w, -step))
        L1_metric += K.sum(K.abs(w))

    # create a function that returns the L1_metric and shrinks weights via
    # updates.
    return K.function([], [L1_metric], updates=updates)
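A minimal usage sketch (assumes a graph-mode Keras session and an already-built model; `model` and `num_epochs` are placeholders): calling the returned `K.function` applies the shrink updates and reports the current L1 norm.

# Hypothetical usage: L1-shrink all trainable weights once per epoch.
shrink_fn = shrink_weights_fn_creator(model.trainable_weights, alpha=0.001)
for epoch in range(num_epochs):
    # model.fit(X_train, y_train, epochs=1)
    l1_value = shrink_fn([])[0]  # applies the weight updates, returns the L1 metric
    print('epoch %d, L1 = %.4f' % (epoch, l1_value))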
Example 14
    def pair_loss2(self, y_true, y_pred):
        sigma = 0.1

        var1 = y_true
        var2 = K.transpose(var1)
        var3 = var1 - var2
        s1 = y_pred
        s2 = K.transpose(s1)
        s3 = s1 - s2

        losses = K.sum(-K.sign(var3) * sigma /
                       (1 + K.exp(sigma * K.sign(var3) * s3)),
                       axis=0)
        losses = K.sum(losses, axis=0)
        return losses
Example 15
def stock_loss(y_true, y_pred):
    alpha = 100.
    loss = K.switch(K.less(y_true * y_pred, 0), \
        alpha*y_pred**2 - K.sign(y_true)*y_pred + K.abs(y_true), \
        K.abs(y_true - y_pred)
        )
    return K.mean(loss, axis=-1)
Example 16
    def build():
        states = Input(shape=(height * base, width * base))
        error = build_error(states, height, width, base)
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        # a, h, w, panel

        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))

        panel_coverage = K.sum(matches, axis=(1, 2))
        # ideally, this should be [[1,1,1,1,1,1,1,1,1], ...]
        coverage_ok = K.all(K.less_equal(panel_coverage, 1), 1)
        coverage_ng = K.any(K.greater(panel_coverage, 1), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)

        if verbose:
            return Model(states, [
                wrap(states, x) for x in [
                    panels_ok, panels_ng, panels_nomatch, panels_ambiguous,
                    coverage_ok, coverage_ng, validity
                ]
            ])
        else:
            return Model(states, wrap(states, validity))
Example 17
    def call(self, inputs):
        #inputs = ops.convert_to_tensor(inputs)
        #outputs = gen_math_ops.mat_mul(inputs, self.kernel)
        #inputs: None * 2048
        inputs += 1e-7
        

        expanded_inputs = K.repeat(inputs, self.units) # None * 10 * 2048
        inputs_sign = K.sign(expanded_inputs) # None * 10 * 2048
        inputs_abs = K.abs(expanded_inputs) # None * 10 * 2048
        inputs_max = K.max(inputs_abs, axis=0) # 10 * 2048
        inputs_norm = inputs_abs / inputs_max # None * 10 * 2048
        inputs_exp = K.pow(inputs_norm, self.e) # None * 10 * 2048
        inputs_unnorm = inputs_exp * inputs_max # None * 10 * 2048
        exp_input = inputs_unnorm * inputs_sign # None * 10 * 2048
        
        # expanded_inputs = K.repeat(inputs, self.units) # None * 10 * 2048
        # inputs_sign = K.sign(expanded_inputs) # None * 10 * 2048
        # inputs_abs = K.abs(expanded_inputs) # None * 10 * 2048
        # inputs_exp = K.pow(inputs_abs, self.e) # None * 10 * 2048
        # exp_input = inputs_exp * inputs_sign # None * 10 * 2048


        exp_mult_input = exp_input * self.kernel # None * 10 * 2048
        outputs = K.sum(exp_mult_input, axis=2) # None * 10
        final_outputs = K.bias_add(outputs, self.bias) # None * 10
        return self.activation(final_outputs)
Example 18
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vhats = [K.zeros(1) for _ in params]
        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            g2 = K.square(g)
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = v - (1. - self.beta_2) * K.sign(v - g2) * g2
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
Example 19
 def get_gradients(self, loss, params):
     grads = super(E2EFS_SGD, self).get_gradients(loss, params)
     if not (hasattr(self.e2efs_layer, 'regularization_loss')):
         return grads
     e2efs_grad = grads[0]
     e2efs_regularizer_grad = K.gradients(
         self.e2efs_layer.regularization_loss, [self.e2efs_layer.kernel])[0]
     # norm_e2efs_grad_clipped = K.maximum(0.1, tf.norm(e2efs_grad) + K.epsilon())
     # e2efs_regularizer_grad_corrected = e2efs_regularizer_grad / K.max(K.abs(e2efs_regularizer_grad) + K.epsilon())
     # e2efs_grad_corrected = e2efs_grad / K.max(K.abs(e2efs_grad) + K.epsilon())
     e2efs_regularizer_grad_corrected = e2efs_regularizer_grad / (
         K.tf.norm(e2efs_regularizer_grad) + K.epsilon())
     e2efs_grad_corrected = e2efs_grad / (K.tf.norm(e2efs_grad) +
                                          K.epsilon())
     # e2efs_regularizer_grad_corrected = norm_e2efs_grad_clipped * e2efs_regularizer_grad / (tf.norm(e2efs_regularizer_grad) + K.epsilon())
     combined_e2efs_grad = (1. - self.e2efs_layer.moving_factor) * e2efs_grad_corrected + \
                           self.e2efs_layer.moving_factor * e2efs_regularizer_grad_corrected
     # combined_e2efs_grad_norm = tf.norm(combined_e2efs_grad) + K.epsilon()
     # combined_e2efs_grad = optimizers.clip_norm(combined_e2efs_grad, self.e2efs_norm_max, combined_e2efs_grad_norm)
     # combined_e2efs_grad = K.maximum(combined_e2efs_grad_norm, self.e2efs_norm_min) / combined_e2efs_grad_norm * combined_e2efs_grad
     combined_e2efs_grad = K.sign(
         self.e2efs_layer.moving_factor) * K.minimum(
             self.th, K.max(
                 K.abs(combined_e2efs_grad))) * combined_e2efs_grad / K.max(
                     K.abs(combined_e2efs_grad) + K.epsilon())
     # combined_e2efs_grad = K.tf.Print(combined_e2efs_grad, [K.max(combined_e2efs_grad), K.min(combined_e2efs_grad)])
     grads[0] = combined_e2efs_grad
     return grads
Example 20
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    y_pred_01 = (K.sign(y_pred) + 1) / 2.
    y_true_01 = (y_true + 1) / 2.
    precision = precision(y_true_01, y_pred_01)
    recall = recall(y_true_01, y_pred_01)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
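A minimal sketch of using the metric (the sign-based thresholding above implies labels in {-1, +1} and raw real-valued predictions; model and data names are placeholders):

# Hypothetical usage: batch-wise F1 for a {-1, +1} labelled problem.
model.compile(optimizer='adam', loss='hinge', metrics=[f1])
# model.fit(X_train, y_train)  # y_train entries expected to be -1 or +1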
Example 22
def main(model_name, adv_model_names, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_flags(32)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')
    flags.DEFINE_integer('type', args.type, 'model type')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = load_data()

    data_gen = data_flow(X_train)

    x = K.placeholder(shape=(None,
                             FLAGS.NUM_CHANNELS,
                             FLAGS.IMAGE_ROWS,
                             FLAGS.IMAGE_COLS))

    y = K.placeholder(shape=(FLAGS.BATCH_SIZE, FLAGS.NUM_CLASSES))

    eps = args.eps

    # if src_models is not None, we train on adversarial examples that come
    # from multiple models
    adv_models = [None] * len(adv_model_names)
    for i in range(len(adv_model_names)):
        adv_models[i] = load_model(adv_model_names[i])

    model = model_select(type=model_type)

    x_advs = [None] * (len(adv_models) + 1)
    

    for i, m in enumerate(adv_models + [model]):
        x_noise = x + tf.random_uniform(shape=[FLAGS.BATCH_SIZE, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS], minval= -args.eps, maxval=args.eps)
        x_noise = tf.clip_by_value(x_noise, 0., 1.)
        for _ in range(args.k):
            logits = m(x_noise)
            grad = gen_grad(x_noise, logits, y, loss='logloss')
            x_noise =  K.stop_gradient(x_noise + args.eps / 4.0 * K.sign(grad))
            x_noise = tf.clip_by_value(x_noise, x - args.eps, x + args.eps)
            x_noise = tf.clip_by_value(x_noise, 0., 1.)
        x_advs[i] = x_noise

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen, model_name, x_advs=x_advs)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    with open(model_name + '_log.txt', 'a') as log:
        log.write('Test error: %.1f%%' % test_error)
    print('Test error: %.1f%%' % test_error)
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name+'.json', 'w') as f:
        f.write(json_string)
Example 23
def recon_loss_hinge(y_true, y_pred):
    mask_value = 0
    mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
    y_true = K.sign(y_true * mask)
    y_pred = y_pred * mask

    return squared_hinge(y_true, y_pred)
Example 24
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.int_shape(p) for p in params]
        alphas = [K.variable(K.ones(shape) * self.init_alpha) for shape in shapes]
        old_grads = [K.zeros(shape) for shape in shapes]
        self.weights = alphas + old_grads
        self.updates = []

        for p, grad, old_grad, alpha in zip(params, grads, old_grads, alphas):
            grad = K.sign(grad)
            new_alpha = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(alpha * self.scale_up, self.max_alpha),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(alpha * self.scale_down, self.min_alpha),
                         alpha)
            )

            grad = K.switch(K.less(grad * old_grad, 0), K.zeros_like(grad), grad)
            new_p = p - grad * new_alpha

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
            self.updates.append(K.update(alpha, new_alpha))
            self.updates.append(K.update(old_grad, grad))

        return self.updates
Example 25
 def scoring_rule_adv(self, y_true, y_pred):
     """ Fast Gradient Sign Method (FSGM) to implement Adversarial Training
     """
     # Compute loss 
     error = self.mean_log_Gaussian_likelihood(y_true, y_pred)
 
     # Craft adversarial examples using Fast Gradient Sign Method (FGSM)
     # Define gradient of loss wrt input
     grad_error = K.gradients(error,self.model.input) #Minus is on error function
     # Take the sign of the gradient, multiply by the constant epsilon, and
     # add the perturbation to the original example to obtain the adversarial
     # example. K.sign over the gradients list adds a leading dimension,
     # which the [0] below removes.
 
     epsilon = 0.08
 
     adversarial_X = K.stop_gradient(self.model.input + epsilon * K.sign(grad_error)[0])
 
     # Note: If you want to test the variation of adversarial training 
     #  proposed by XX, eliminate the following comment character 
     #  and comment the previous one.
 
     ##adversarial_X = self.model.input + epsilon * K.sign(grad_error)[0]
 
     adv_output = self.model(adversarial_X)
 
     adv_error = self.mean_log_Gaussian_likelihood(y_true, adv_output)
     return 0.3 * error + 0.7 * adv_error
Example 26

def fgsm(reshaped_image, model, shape=(28, 28, 1), num_classes=10):

    x = reshaped_image.reshape((-1, ) + shape).astype('float32')
    # x = reshaped_image

    preds = model.predict(x)
    initial_class = np.argmax(preds)

    print('initial class: {}'.format(initial_class), end='')

    sess = K.get_session()
    x_adv = x
    x_noise = np.zeros_like(x)

    for i in range(EPOCHS):
        target = K.one_hot(initial_class, num_classes)

        loss = K.categorical_crossentropy(target, model.output)
        grads = K.gradients(loss, model.input)

        delta = K.sign(grads[0])
        x_noise = x_noise + delta

        x_adv = x_adv + EPSILON * delta

        x_adv = sess.run(x_adv, feed_dict={model.input: x})
        preds = model.predict(x_adv)
        # print('epoch: %d, preds: %f, class: %d' % (i, preds[0][initial_class], np.argmax(preds)))

    print(' class: %d' % (np.argmax(preds)))
    return x_adv
Example 27
def binarize(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182)
    '''
    clipped = K.clip(x,-1,1)
    rounded = K.sign(clipped)
    return clipped + K.stop_gradient(rounded - clipped)
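A minimal sketch of the straight-through trick in use (the layer sizes and model are hypothetical, not from the original source): the forward pass emits the sign values while gradients flow through the clipped input.

# Hypothetical usage of binarize() as a straight-through activation.
from keras.models import Sequential
from keras.layers import Dense, Lambda

model = Sequential([
    Dense(128, input_shape=(784,)),
    Lambda(binarize),  # forward: K.sign values; backward: gradient of K.clip
    Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')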
Example 28

def all_loss(y_true, y_pred):
    mask = K.sign(y_true[..., 2 * category_n + 2])
    N = K.sum(mask)
    alpha = 2.
    beta = 4.

    heatmap_true_rate = K.flatten(y_true[..., :category_n])
    heatmap_true = K.flatten(y_true[..., category_n:(2 * category_n)])
    heatmap_pred = K.flatten(y_pred[..., :category_n])
    heatloss = -K.sum(heatmap_true * (
        (1 - heatmap_pred)**alpha) * K.log(heatmap_pred + 1e-6) +
                      (1 - heatmap_true) * ((1 - heatmap_true_rate)**beta) *
                      (heatmap_pred**alpha) * K.log(1 - heatmap_pred + 1e-6))
    offsetloss = K.sum(
        K.abs(y_true[..., 2 * category_n] - y_pred[..., category_n] * mask) +
        K.abs(y_true[..., 2 * category_n + 1] -
              y_pred[..., category_n + 1] * mask))
    sizeloss = K.sum(
        K.abs(y_true[..., 2 * category_n + 2] -
              y_pred[..., category_n + 2] * mask) +
        K.abs(y_true[..., 2 * category_n + 3] -
              y_pred[..., category_n + 3] * mask))

    all_loss = (heatloss + 1.0 * offsetloss + 5.0 * sizeloss) / N
    return all_loss
Example 29

    def call(self, x):
        # sample from noise distribution
        e_i = K.random_normal((self.input_dim, self.units))
        e_j = K.random_normal((self.units,))

        # We use the factorized Gaussian noise variant from Section 3 of Fortunato et al.
        eW = K.sign(e_i) * (K.sqrt(K.abs(e_i))) * K.sign(e_j) * (K.sqrt(K.abs(e_j)))
        eB = K.sign(e_j) * (K.abs(e_j) ** (1 / 2))

        noise_injected_weights = K.dot(x, self.mu_weight + (self.sigma_weight * eW))
        noise_injected_bias = self.mu_bias + (self.sigma_bias * eB)

        output = K.bias_add(noise_injected_weights, noise_injected_bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
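An illustrative check of the factorized-noise construction used above (shapes are arbitrary): the eW expression equals f(e_i) * f(e_j) with f(x) = sign(x) * sqrt(|x|), combined by broadcasting.

# Illustrative sketch of the factorized Gaussian noise term (arbitrary shapes).
from keras import backend as K

input_dim, units = 4, 3
e_i = K.random_normal((input_dim, units))
e_j = K.random_normal((units,))
f = lambda e: K.sign(e) * K.sqrt(K.abs(e))
eW = f(e_i) * f(e_j)     # broadcasts to (input_dim, units), same form as eW above
print(K.eval(eW).shape)  # -> (4, 3)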
Example 30
def so(model, x, y, steps, eps, alpha=0, norm="l2", sd=0.0):
    adv_x = x
    for i in range(steps):
        total_grad = 0
        for j in range(10):
            temp_adv_x = adv_x + tf.random_normal(
                stddev=sd, shape=tf.shape(x), seed=42)
            logits = model(temp_adv_x)
            if norm == "linf":
                grad = gen_grad(temp_adv_x, logits, y, loss='logloss')
            elif norm == "l2":
                grad = gen_grad(temp_adv_x, logits, y, loss='cw')
            total_grad += grad

        if norm == "linf":
            normed_grad = K.sign(total_grad)
            adv_x += alpha * normed_grad
            adv_x = tf.clip_by_value(adv_x, x - eps, x + eps)
        if norm == "l2":
            grad_norm = tf.clip_by_value(l2_norm(total_grad), 1e-8, np.inf)
            adv_x += 2.5 * eps / steps * total_grad / grad_norm
            dx = adv_x - x
            dx_norm = tf.clip_by_value(l2_norm(dx), 1e-8, np.inf)
            dx_final_norm = tf.clip_by_value(dx_norm, 0, eps)
            adv_x = x + dx_final_norm * dx / dx_norm
    adv_x = tf.clip_by_value(adv_x, 0, 1)
    return adv_x
Example 31

    def adv_loss(self, eps):

        losses = [
            loss_weight*K.mean(loss_function(y_true,y_pred))
            for (y_true,y_pred,loss_weight,loss_function)
            in zip(_to_list(self.targets),
                   _to_list(self.outputs),
                   self.loss_weights,
                   self.loss_functions)
        ]

        loss = reduce(lambda t, x: t + x, losses, 0)
        r_adv = [eps*K.sign(K.stop_gradient(g)) for g in K.gradients(loss, self.inputs)]

        new_inputs = [x+r for (x, r) in zip(self.inputs, r_adv)]
        new_outputs = _to_list(self.call(new_inputs))
        new_loss = [
            loss_weight*K.mean(loss_function(y_true,y_pred))
            for (y_true,y_pred,loss_weight,loss_function)
            in zip(_to_list(self.targets),
                   new_outputs,
                   self.loss_weights,
                   self.loss_functions)
        ]

        loss = reduce(lambda t, x: t + x, new_loss, 0)
        return loss
Example 32
    def __init__(self, model):

        self.model = model

        # Initialize FGSM Attack
        # Get the loss and gradient of the loss wrt the inputs
        self.attack_type = "fgsmr_left"
        self.activate = False
        self.epsilon = 1
        self.loss = K.mean(-self.model.output, axis=-1)
        self.grads = K.gradients(self.loss, self.model.input)

        # Get the sign of the gradient
        self.delta = K.sign(self.grads[0])

        self.sess = tf.compat.v1.keras.backend.get_session()

        self.perturb = 0
        self.perturbs = []
        self.perturb_percent = 0
        self.perturb_percents = []
        self.n_attack = 1

        self.unir_no_left = np.zeros((1, 160, 320, 3))
        self.unir_no_right = np.zeros((1, 160, 320, 3))

        self.result = {}