def test_binary_accuracy_with_threshold_(self):
        y_true = Input((2,))
        y_pred = Input((2,))
        threshold = K.placeholder((2,))
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        self.assertEqual(K.ndim(acc), 0)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[0, 1], [1, 0]]), np.array([[0.2, 0.6], [0.3, 0.1]]), np.array([0.25, 0.4])])[0]
        self.assertEqual(round(acc_val, 2), 1.00, "acc_val")

        # works on a single threshold
        threshold = K.placeholder(ndim=0)
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[0, 1], [1, 0]]), np.array([[0.2, 0.6], [0.3, 0.1]]), 0.5])[0]
        self.assertEqual(round(acc_val, 2), 0.75, "acc_val")

        # works on 3-dimensional inputs
        y_true = Input((None, 2))
        y_pred = Input((None, 2))
        threshold = K.placeholder((2,))
        acc = binary_accuracy_with_threshold(y_true, y_pred, threshold)
        self.assertEqual(K.ndim(acc), 0)
        binary_accuracy_with_threshold_func = K.function(inputs=[y_true, y_pred, threshold], outputs=[acc])
        acc_val = binary_accuracy_with_threshold_func(
            [np.array([[[0, 1]], [[1, 0]]]), np.array([[[0.2, 0.6]], [[0.3, 0.1]]]), np.array([0.25, 0.4])])[0]
        self.assertEqual(round(acc_val, 2), 1.00, "acc_val")
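
The helper under test is not shown on this page. A minimal sketch that is consistent with both assertions above (the broadcasting behavior over the last axis is an assumption, not taken from the original source):

from keras import backend as K

def binary_accuracy_with_threshold(y_true, y_pred, threshold):
    # Binarize predictions against a per-feature (or scalar) threshold,
    # then average the element-wise matches down to a single scalar.
    y_pred_bin = K.cast(K.greater(y_pred, threshold), K.floatx())
    return K.mean(K.cast(K.equal(y_true, y_pred_bin), K.floatx()))

With threshold (0.25, 0.4) the predictions binarize to [[0, 1], [1, 0]], matching y_true exactly (accuracy 1.0); with the scalar 0.5 they binarize to [[0, 1], [0, 0]], giving 3/4 = 0.75, as asserted.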
Example #2
    def call(self):
        E = K.variable(np.random.random((1000,100)), name="entity_embeddings")
        R = K.variable(np.random.random((10,10000)), name="relation_embeddings")
        x = K.placeholder(shape=(1,3), name="spo")
        y = K.placeholder(ndim=0, name="y")
        batch_placeholder = K.cast(x, 'int32')[0]
        # print(batch_placeholder.eval())
        s, o, p = [batch_placeholder[i] for i in range(3)]

        s2v = K.gather(E, s)
        o2v = K.gather(E, o)
        r2v = K.gather(R, p)

        def ccorr(a, b):
            return T.outer(a,b).flatten()
            # return T.arctan(s2v) + T.arctan(o2v)
            # return (s2v.dimshuffle('x', 'x', 0, 'x') + o2v.dimshuffle('x', 'x', 0, 'x')).flatten()
            # return T.nnet.conv2d(a.dimshuffle('x', 'x', 0, 'x'), b.dimshuffle('x', 'x', 0, 'x'), None,
            #                None,
            #                filter_flip=True, border_mode='half')
            # return self.ccorr1d_sc(a, b, border_mode='half')
        eta = K.dot(r2v, ccorr(s2v, o2v))
        # py = 1/(1+K.exp(-eta))
        # l = -K.log(py)
        # from theano import pp, function, printing
        # grad = T.grad(eta, E)
        # print(pp(grad))
        # func = function([x], grad)
        func = K.function([x, y], K.gradients(eta, [s2v, o2v, r2v, E, R]))

        # for i in func.maker.fgraph.outputs:
            # print(pp(i))
        # print (T.grad(py, s2v))
        print(func([[[1, 2, 3]], -1]))
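
The commented-out bodies of ccorr suggest the author was prototyping circular correlation (the composition operator of holographic embeddings) and left an outer-product stand-in active. For reference, circular correlation itself can be computed with FFTs; a numpy sketch of what the name implies (an assumption about intent, not what the stand-in computes):

import numpy as np

def ccorr_np(a, b):
    # ccorr(a, b)[k] = sum_i a[i] * b[(i + k) % n],
    # computed in O(n log n) as ifft(conj(fft(a)) * fft(b)).
    return np.real(np.fft.ifft(np.conj(np.fft.fft(a)) * np.fft.fft(b)))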
Example #3
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 3, 4))
            indices = K.placeholder(shape=(5, 6), dtype='int32')
            y = K.gather(x, indices)
            assert y._keras_shape == (5, 6, 3, 4)
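
gather indexes the first axis of its input, so it agrees with numpy fancy indexing. A standalone equivalence check (using the single-backend K alias from the shape-inference block above):

import numpy as np
from keras import backend as K

ref = np.arange(60).reshape((10, 2, 3))
inds = [1, 3, 7, 9]
out = K.eval(K.gather(K.variable(ref), K.variable(inds, dtype='int32')))
assert np.allclose(out, ref[inds])  # gather == indexing along axis 0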
Example #4
    def __init__(
            self, x_dim, u_dim, r_dim, model_path=None, model=None
    ):
        print("Init ForwardDynamicsAndRewardDNN")
        super(ForwardDynamicsAndRewardDNN, self).__init__(
            x_dim, u_dim, r_dim
        )

        if model_path is not None:
            self.mdl = load_model(
                model_path,
                custom_objects={'atan2_loss': atan2_loss, 'cos': KK.cos}
            )

        if model is not None:
            self.mdl = model

        x0 = KK.placeholder(shape=(None, self.x_dim), name='x0')
        u = KK.placeholder(shape=(None, self.u_dim), name='u')
        x1, cost = self.mdl([x0, u])
        samp_symb = KK.placeholder(
            shape=(1, self.x_dim),
            name='samp_syb'
        )
        loss = KK.expand_dims(mse(samp_symb, x1), axis=1)
        u_grads = KK.gradients([loss], [u])

        self.meas_fn = KK.function(
            [x0, u, samp_symb],
            [x1, loss] + u_grads
        )

        self.zero_control = None
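
A hypothetical call pattern for meas_fn (the instance name dyn and the batch size of 4 are made up; the shapes follow the placeholders above):

import numpy as np

x0_batch = np.zeros((4, dyn.x_dim), dtype='float32')
u_batch = np.zeros((4, dyn.u_dim), dtype='float32')
sample = np.zeros((1, dyn.x_dim), dtype='float32')
# Returns the predicted next state, the per-sample loss, and d(loss)/du.
x1_val, loss_val, u_grad = dyn.meas_fn([x0_batch, u_batch, sample])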
Example #5
def test_model_custom_target_tensors():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    y = K.placeholder([10, 4], name='y')
    y1 = K.placeholder([10, 3], name='y1')
    y2 = K.placeholder([7, 5], name='y2')
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    # test list of target tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                      sample_weight_mode=None, target_tensors=[y, y1, y2])
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None, target_tensors=[y, y1])
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})
    # test dictionary of target_tensors
    with pytest.raises(ValueError):
        model.compile(optimizer, loss,
                      metrics=[],
                      loss_weights=loss_weights,
                      sample_weight_mode=None,
                      target_tensors={'does_not_exist': y2})
    # test dictionary of target_tensors
    model.compile(optimizer, loss,
                  metrics=[],
                  loss_weights=loss_weights,
                  sample_weight_mode=None,
                  target_tensors={'dense_1': y, 'dropout': y1})
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               {y: np.random.random((10, 4)),
                                y1: np.random.random((10, 3))})

    if K.backend() == 'tensorflow':
        import tensorflow as tf
        # test with custom TF placeholder as target
        pl_target_a = tf.placeholder('float32', shape=(None, 4))
        model.compile(optimizer='rmsprop', loss='mse',
                      target_tensors={'dense_1': pl_target_a})
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])
Example #6
 def get_input(self, train=False):
     res = []
     # question
     q = K.placeholder(shape=(
         self.input_shape[1][0], 1, self.q_nb_words))
     res.append(q)
     # facts
     f = K.placeholder(shape=(
         self.input_shape[0][0], self.input_length, self.f_nb_words))
     res.append(f)
     return res
Example #7
  def build(self):
    # Create target inputs
    self.label_placeholder = Input(tensor=K.placeholder(
      shape=(None, self.n_tasks), name="label_placeholder", dtype='bool'))
    self.weight_placeholder = Input(tensor=K.placeholder(
      shape=(None, self.n_tasks), name="weight_placeholder", dtype='float32'))

    # Create final dense layer from keras 
    feat = self.model.return_outputs()
    output = model_ops.multitask_logits(
        feat, self.n_tasks)
    return output
Example #8
  def __init__(self, n_feat, name='topology', max_deg=6,
               min_deg=0):
    """
    Note that batch size is not specified in a GraphTopology object. To
    handle batches, a batch of molecules must be combined into a single
    disconnected graph and fed to the topology directly.

    Parameters
    ----------
    n_feat: int
      Number of features per atom.
    name: str, optional
      Name of this manager.
    max_deg: int, optional
      Maximum #bonds for atoms in molecules.
    min_deg: int, optional
      Minimum #bonds for atoms in molecules.
    """
    
    #self.n_atoms = n_atoms
    self.n_feat = n_feat

    self.name = name
    self.max_deg = max_deg
    self.min_deg = min_deg

    self.atom_features_placeholder = Input(
        tensor=K.placeholder(
            shape=(None, self.n_feat), dtype='float32',
            name=self.name+'_atom_features'))
        #tensor=K.placeholder(
        #    shape=(self.n_atoms, self.n_feat), dtype='float32',
        #    name=self.name+'_atom_features'))
    self.deg_adj_lists_placeholders = [
        Input(tensor=K.placeholder(
          shape=(None, deg), dtype='int32', name=self.name+'_deg_adj'+str(deg)))
        for deg in range(1, self.max_deg+1)]
    self.deg_slice_placeholder = Input(
        tensor=K.placeholder(
            shape=(self.max_deg-self.min_deg+1,2),
            name="deg_slice", dtype='int32'),
        name=self.name+'_deg_slice')
    self.membership_placeholder = Input(
          tensor=K.placeholder(shape=(None,), dtype='int32', name="membership"),
          name=self.name+'_membership')

    # Define the list of tensors to be used as topology
    self.topology = [self.deg_slice_placeholder, self.membership_placeholder]
    self.topology += self.deg_adj_lists_placeholders

    self.inputs = [self.atom_features_placeholder]
    self.inputs += self.topology
Example #9
  def add_placeholders(self):
    """Adds placeholders to graph."""
    self.test_label_placeholder = Input(
        tensor=K.placeholder(shape=(self.test_batch_size,), dtype='float32',
        name="label_placeholder"))
    self.test_weight_placeholder = Input(
        tensor=K.placeholder(shape=(self.test_batch_size,), dtype='float32',
        name="weight_placeholder"))

    # TODO(rbharath): Should weights for the support be used?
    # Support labels
    self.support_label_placeholder = Input(
        tensor=K.placeholder(shape=[self.support_batch_size], dtype='float32',
        name="support_label_placeholder"))
Example #10
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_z = KTH.repeat_elements(arr_th, reps, axis=rep_axis)
                th_rep = KTH.eval(th_z)
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
                if hasattr(th_z, '_keras_shape'):
                    assert th_z._keras_shape == th_rep.shape

                # test theano shape inference when
                # input shape has None entries
                if K.backend() == 'theano':
                    shape = list(shape)
                    shape[rep_axis] = None
                    x = K.placeholder(shape=shape)
                    y = K.repeat_elements(x, reps, axis=rep_axis)
                    assert y._keras_shape == tuple(shape)
Example #11
    def __init__(self, output_dim,
                 init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 input_dim=None, input_length1=None, input_length2=None, **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        self.input_length1 = input_length1
        self.input_length2 = input_length2
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
        self.input = K.placeholder(ndim=4)
        super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
Example #12
def test_sequential_call():
    """Test keras.models.Sequential.__call__"""
    nb_samples, input_dim, output_dim = 3, 10, 5
    model = Sequential()
    model.add(Dense(output_dim=output_dim, input_dim=input_dim))
    model.compile('sgd', 'mse')

    # test flat model
    X = K.placeholder(ndim=2)
    Y = model(X)
    f = K.function([X], [Y])

    x = np.ones((nb_samples, input_dim)).astype(K.floatx())
    y1 = f([x])[0].astype(K.floatx())
    y2 = model.predict(x)
    # results of __call__ should match model.predict
    assert_allclose(y1, y2)

    # test nested model
    model2 = Sequential()
    model2.add(model)
    model2.compile('sgd', 'mse')

    Y2 = model2(X)
    f = K.function([X], [Y2])

    y1 = f([x])[0].astype(K.floatx())
    y2 = model2.predict(x)
    # results of __call__ should match model.predict
    assert_allclose(y1, y2)
Example #13
    def build(self):
        if K._BACKEND == 'theano':
            batch_size = None
        else:
            batch_size = None  # self.batch_size
        input_dim = self.input_shape
        bm = self.border_mode
        reshape_dim = self.reshape_dim
        hidden_dim = self.output_dim

        nb_filter, nb_rows, nb_cols = self.filter_dim
        self.input = K.placeholder(shape=(batch_size, input_dim[1], input_dim[2]))

        # self.b_h = K.zeros((nb_filter,))
        self.conv_h = Convolution2D(nb_filter, nb_rows, nb_cols, border_mode=bm, input_shape=hidden_dim)
        self.conv_x = Convolution2D(nb_filter, nb_rows, nb_cols, border_mode=bm, input_shape=reshape_dim)

        # hidden to hidden connections
        self.conv_h.build()
        # input to hidden connections
        self.conv_x.build()

        self.max_pool = MaxPooling2D(pool_size=self.subsample, input_shape=hidden_dim)
        self.max_pool.build()

        self.trainable_weights = self.conv_h.trainable_weights + self.conv_x.trainable_weights

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Example #14
 def __init__(
     self,
     encoder,
     decoder,
     code_dim,
     batch_size,
     beta=0.5,
     subsample=2,
     regularizer_scale=0.5,
     init="glorot_uniform",
     activation="linear",
     weights=None,
     input_dim=None,
     **kwargs
 ):
     self.regularizer_scale = regularizer_scale
     self.beta = beta
     self.max_pool = MaxPooling1D(subsample)
     self.encoder = encoder
     self.decoder = decoder
     self.variational = VariationalDense(
         code_dim, batch_size, input_dim=self.encoder.output_shape[1], regularizer_scale=regularizer_scale
     )
     self.batch_size = batch_size
     self.init = initializations.get(init)
     self.activation = activations.get(activation)
     self.code_dim = code_dim
     self.initial_weights = weights
     self.input_dim = input_dim
     if self.input_dim:
         kwargs["input_shape"] = (self.input_dim,)
     self.input = K.placeholder(ndim=4)
     super(SlowSiamese, self).__init__(**kwargs)
Example #15
 def build(self):
     self.input = K.placeholder(shape=(self.input_shape[0],
                                       self.input_length),
                                dtype='int32')
     self.W = K.variable(self.initial_weights[0])
     self.trainable_weights = []
     self.regularizers = []
Example #16
 def __init__(
         self,
         nb_motifs,
         motif_len, 
         use_three_base_encoding=True,
         init='glorot_uniform', 
         **kwargs):
     self.nb_motifs = nb_motifs
     self.motif_len = motif_len
     self.input = K.placeholder(ndim=4)
     self.use_three_base_encoding = use_three_base_encoding
     self.kwargs = kwargs
     
     self.W = None
     self.b = None
     
     #if isinstance(init, ConvolutionalDNABindingModel):
     #    self.init = lambda x: (
     #        K.variable(-init.ddg_array[None,None,:,:]), 
     #        K.variable(np.array([-init.ref_energy,])[:,None]) 
     #    )
     #else:
     #    self.init = lambda x: (
     #        initializations.get(init)(x), 
     #        K.zeros((self.nb_motifs,)) 
     #    )
     self.init = initializations.get(init)
     super(ConvolutionDNASequenceBinding, self).__init__(**kwargs)
Example #17
def EvaluateJacobian(model):
    # theano.function([model.layers[0].input],
    #                 T.jacobian(model.layers[-1].output.flatten(),
    #                            model.layers[0].input))

    X = K.placeholder(shape=(15, 15))  # specify the right placeholder
    Y = K.sum(K.square(X))  # loss function
    fn = K.function([X], K.gradients(Y, [X]))  # function to compute the gradient
    return fn
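
Since Y = sum(X**2), the returned function evaluates dY/dX = 2*X. A quick check (the (15, 15) shape is fixed by the placeholder; the model argument is unused here):

import numpy as np

x_val = np.ones((15, 15), dtype='float32')
grad = EvaluateJacobian(None)([x_val])[0]
assert np.allclose(grad, 2 * x_val)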
Example #18
def build_train_fn(model):
    # cost
    lr = T.scalar()
    labels = K.placeholder(ndim=2, dtype='int32')
    ob_input = model.inputs[0]
    raw_softmax_outputs = model.outputs[0]

    softmax_outputs = raw_softmax_outputs.dimshuffle((2,0,1))
    softmax_outputs = softmax_outputs.reshape((softmax_outputs.shape[0], softmax_outputs.shape[1]*softmax_outputs.shape[2]))
    softmax_outputs = softmax_outputs.dimshuffle((1,0))

    cost = categorical_crossentropy(softmax_outputs, labels).mean()

    # gradients
    trainable_vars = model.trainable_weights
    grads = K.gradients(cost, trainable_vars)
    grads = lasagne.updates.total_norm_constraint(grads, 100)
    updates = lasagne.updates.nesterov_momentum(grads, trainable_vars, lr, 0.99)

    for key, val in model.updates:
        updates[key] = val

    # train_fn
    train_fn = K.function([ob_input, labels, K.learning_phase(), lr],
                          [softmax_outputs, cost],
                          updates=updates)

    return train_fn
Example #19
    def __init__(self, output_dim, init='glorot_uniform', activation='linear',
                 reconstruction_activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 output_reconstruction=False,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.reconstruction_activation = activations.get(reconstruction_activation)
        self.output_reconstruction = output_reconstruction
        self.output_dim = output_dim
        self.pretrain = True

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        self.input = K.placeholder(ndim=2)
        super(SymmetricAutoencoder, self).__init__(**kwargs)
Example #20
def mogan(self, gan: GAN, loss_fn, d_optimizer, name="mogan",
          gan_objective=binary_crossentropy, gan_regulizer=None,
          cond_true_ndim=4):
    assert len(gan.conditionals) >= 1

    g_dummy_opt = SGD()
    d_optimizer = d_optimizer
    v = gan.build(g_dummy_opt, d_optimizer, gan_objective)
    del v['g_updates']

    cond_true = K.placeholder(ndim=cond_true_ndim)
    inputs = copy(gan.graph.inputs)
    inputs['cond_true'] = cond_true

    cond_loss = loss_fn(cond_true, v.g_outmap)

    metrics = {
        "cond_loss": cond_loss.mean(),
        "d_loss": v.d_loss,
        "g_loss": v.g_loss,
    }

    params = flatten([n.trainable_weights
                      for n in gan.get_generator_nodes().values()])

    return MultipleObjectives(
        name, inputs, metrics=metrics, params=params,
        objectives={'g_loss': v['g_loss'], 'cond_loss': cond_loss},
        additional_updates=v['d_updates'] + gan.updates)
Example #21
def _simulate_layer_call(layer, input_data, train=True):
  np.random.seed(0)
  if not hasattr(layer, 'input'):
    if not hasattr(input_data, 'ndim'): input_data = np.array(input_data)
    layer.input = K.placeholder(ndim=input_data.ndim)
    layer.set_input_shape(input_data.shape)
  return _call_f(layer.input, layer.get_output(train=train), input_data)
Example #22
def main(weights_path, base_path, base_file, style_path, style_file,
         combo_path, img_width, img_height, iterations):
    result_prefix = base_file[:-4] + '_' + style_file[:-4]
    base_img_path = base_path + base_file
    style_img_path = style_path + style_file
    # get tensor representations of images
    base_img = K.variable(preprocess_image(base_img_path,
                                           img_width,
                                           img_height))
    style_img = K.variable(preprocess_image(style_img_path,
                                            img_width,
                                            img_height))
    combo_img = K.placeholder((1, 3, img_width, img_height))
    # combine the 3 images into a single Keras tensor
    input_tensor = K.concatenate([base_img, style_img, combo_img],
                                 axis=0)

    print('Creating painting of {} in the style of {}'.format(base_file[:-4],
                                                              style_file[:-4]))
    print('Loading model with VGG16 network weights...')
    nn = model(weights_path, input_tensor, img_width, img_height)
    loss, grads = calc_loss_grad(nn, combo_img, img_width, img_height)
    evaluate = Evaluator(loss, grads, combo_img, img_width, img_height)
    return optimizer(evaluate, img_width, img_height, combo_path,
                     result_prefix, iterations=iterations)
Example #23
def test_relu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

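    # relu is the identity on non-negative inputs, so (assuming the standard
    # values returned below are non-negative) the output should equal the input.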
    test_values = get_standard_values()
    result = f([test_values])[0]
    assert_allclose(result, test_values, rtol=1e-05)
Example #24
 def predict(self):
     weights_name = '{}/phase_2_best.h5'.format(self.root_dir)
     self.model_body.load_weights(weights_name)
     yolo_outputs = yolo_head(self.model_body.output, self.anchors, len(self.class_names))
     input_image_shape = K.placeholder(shape=(2,))
     boxes, scores, classes = yolo_eval(yolo_outputs, input_image_shape, score_threshold=0.5, iou_threshold=0)
     sess = K.get_session()
     results = []
     for i_path in self.images:
         im = Image.open(i_path)
         image_data = np.array(im.resize((416, 416), Image.BICUBIC), dtype=np.float) / 255.
         if len(image_data.shape) >= 3:
             image_data = np.expand_dims(image_data, 0)
             feed_dict = {self.model_body.input: image_data,
                          input_image_shape: [im.size[1], im.size[0]],
                          K.learning_phase(): 0}
             out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes], feed_dict=feed_dict)
             for i, c in list(enumerate(out_classes)):
                 box_class = self.class_names[c]
                 box = out_boxes[i]
                 score = out_scores[i]
                 label = '{}'.format(box_class)
                 top, left, bottom, right = box
                 top = max(0, np.floor(top + 0.5).astype('int32'))
                 left = max(0, np.floor(left + 0.5).astype('int32'))
                 bottom = min(im.size[1], np.floor(bottom + 0.5).astype('int32'))
                 right = min(im.size[0], np.floor(right + 0.5).astype('int32'))
                 results.append((i_path,box_class,score,top, left, bottom, right))
         else:
             logging.warning("skipping {} contains less than 3 channels".format(i_path))
     return results
Example #25
    def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
        input_dim = np.prod(input_shape)
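        # Each row of `samples` packs one transition:
        # [S (input_dim), a, r, S' (input_dim), game_over],
        # hence the width of input_dim * 2 + 3 below.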
        samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))

        S = samples[:, 0 : input_dim]
        a = samples[:, input_dim]
        a = K.cast(a, 'int32')  # action indices must be integers for one_hot
        r = samples[:, input_dim + 1]
        S_prime = samples[:, input_dim + 2 : 2 * input_dim + 2]
        game_over = samples[:, 2 * input_dim + 2 : 2 * input_dim + 3]

        r = K.reshape(r, (batch_size, 1))
        r = K.repeat(r, nb_actions)
        r = K.reshape(r, (batch_size, nb_actions))

        game_over = K.repeat(game_over, nb_actions)
        game_over = K.reshape(game_over, (batch_size, nb_actions))

        S = K.reshape(S, (batch_size, ) + input_shape)
        S_prime = K.reshape(S_prime, (batch_size, ) + input_shape)

        X = K.concatenate([S, S_prime], axis=0)
        Y = model(X)

        Qsa = K.max(Y[batch_size:], axis=1)
        Qsa = K.reshape(Qsa, (batch_size, 1))
        Qsa = K.repeat(Qsa, nb_actions)
        Qsa = K.reshape(Qsa, (batch_size, nb_actions))

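        # delta one-hot-selects the action actually taken; the Bellman target
        # r + gamma * max_a' Q(S', a') replaces only that column, while the
        # other columns keep the network's current predictions.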
        delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
        targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)

        self.batch_function = K.function(inputs=[samples], outputs=[S, targets])
Example #26
 def __init__(self, input_shape, **kwargs):
     #super(TimeDistributedPassThrough, self).__init__()
     #self.input_shape = input_shape
     #self.output_shape = input_shape
     self.input = K.placeholder(ndim=3)
     kwargs['input_shape'] = input_shape
     super(TimeDistributedPassThrough, self).__init__(**kwargs)
Example #27
    def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

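        # REINFORCE: weight the log-probability of each taken action by its
        # discounted return; minimizing -sum(...) performs gradient ascent
        # on the expected return.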
        # Calculate cross entropy error function
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)

        # create training function
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train
Example #28
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    # cntk can't rebind the input shape, so create the model again to test different batch size
    if (K.backend() == 'cntk'):
        x2 = K.placeholder(ndim=2)
        f = K.function([x2], [activations.elu(x2, 0.5)])
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
Example #29
def test_softmax_invalid():
    """Test for the expected exception behaviour on invalid input
    """

    x = K.placeholder(ndim=1)

    # One dimensional arrays are supposed to raise a value error
    with pytest.raises(ValueError):
        f = K.function([x], [activations.softmax(x)])
Example #30
def check_layer_output_shape(layer, input_data):
    ndim = len(input_data.shape)
    layer.input = K.placeholder(ndim=ndim)
    layer.set_input_shape(input_data.shape)
    expected_output_shape = layer.output_shape[1:]

    function = K.function([layer.input], [layer.get_output()])
    output = function([input_data])[0]
    assert output.shape[1:] == expected_output_shape
Example #31
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    #video url
    videoUrl = "C:\\Users\\Jason\\Scripts\\YOLO\\YAD2K-master\\images\\20200102   Surf Cam Surf spot Twin Lion beach in Toucheng Yilan County Taiwan ROC.mp4"  #"https://thbcctv01.thb.gov.tw/T1A-9K+700"
    cap = cv2.VideoCapture(videoUrl)

    while (True):
        try:
            ret, frame = cap.read()
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        except:
            cap = cv2.VideoCapture(videoUrl)
            import time
            time.sleep(0.5)
            continue

        if cv2.waitKey(1) & 0xFF == ord('q'):
            out = cv2.imwrite('capture.jpg', frame)
            break
        frame = cv2.resize(frame, tuple(reversed(model_image_size)),
                           interpolation=cv2.INTER_CUBIC)  # cv2 flag, not PIL's Image.BICUBIC
        image_data = np.array(frame, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [frame.shape[0], frame.shape[1]],  # (height, width)
                K.learning_phase(): 0
            })
        # print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * frame.shape[1] +
                                                0.5).astype('int32'))
        thickness = (frame.shape[0] + frame.shape[1]) // 300

        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(img)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(frame.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(frame.shape[1], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
        resized = cv2.resize(cv_img, (640, 480), interpolation=cv2.INTER_AREA)
        cv2.imshow('frame', resized)
        # image.save(os.path.join(output_path, image_file), quality=90)
    sess.close()
    cap.release()
    cv2.destroyAllWindows()
Example #32
    def generate(self):
        '''Build the output tensors for filtered bounding boxes.

        Returns
        -------
            boxes: coordinates of the detected boxes
            scores: confidence score of each box
            classes: class index of each box
        '''
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith(
            '.h5'), 'Keras model or weights must be a .h5 file.'

        # Number of anchors and classes
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)

        # Load the model. If the saved file already contains the full model
        # (architecture included), load it directly; otherwise build the
        # network first and then load the weights.
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            self.yolo_model = yolo_body(Input(shape=(None, None, 3)),
                                        num_anchors // 3, num_classes)
            self.yolo_model.load_weights(self.model_path)
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Assign a different color to each class for drawing boxes.
        # colors: array, shape=(20, 3), e.g. [(0,178,255), (255,153,0), ..., (255,0,0)]
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))

        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))

        # Shuffle the colors (fixed seed for consistent colors across runs)
        np.random.seed(10101)
        np.random.shuffle(self.colors)
        np.random.seed(None)

        # Filter boxes according to the detection parameters
        # <tf.Tensor 'Placeholder_366:0' shape=(2,) dtype=float32>
        self.input_image_shape = K.placeholder(shape=(2, ))
        # Run prediction on the image
        boxes, scores, classes = yolo_eval(self.yolo_model.output,
                                           self.anchors,
                                           num_classes,
                                           self.input_image_shape,
                                           score_threshold=self.score,
                                           iou_threshold=self.iou)
        return boxes, scores, classes
Example #33
    def get_loss(self, model, target, output):
        """ Returns the loss function that can be used by the implementation-
			specific model.
		"""
        backend = model.get_backend()

        if backend.get_name() == 'keras':

            import keras.backend as K

            if self.variant is None:

                # Just use the built-in Keras CTC loss function.
                logger.debug(
                    'Attaching built-in Keras CTC loss function to '
                    'model output "%s".', target)

            elif self.variant == 'warp':

                # Use the Warp-CTC loss function.
                logger.info(
                    'Attaching Warp-CTC loss function to model '
                    'output "%s".', target)

                if backend.get_toolchain() != 'theano':
                    logger.error('If you want to use warp-ctc, you need to '
                                 'use the Theano backend to Keras.')
                    raise ValueError('Warp-CTC is currently only supported '
                                     'with the Theano backend to Keras.')

            else:
                raise ValueError('Unsupported variant "{}" on loss function '
                                 '"{}" for backend "{}".'.format(
                                     self.variant, self.get_name(),
                                     backend.get_name()))

            ctc_scaled = 'ctc_scaled_{}'.format(self.input_length)
            flattened_labels = 'ctc_flattened_labels_{}'.format(target)

            transcript_length = K.placeholder(ndim=2,
                                              dtype='int32',
                                              name=self.output_length)
            transcript = K.placeholder(
             ndim=2,
             dtype='int32',
             name=self.output if self.variant is None \
              else flattened_labels
            )
            utterance_length = K.placeholder(
             ndim=2,
             dtype='int32',
             name=self.input_length if self.relative_to is None \
              else ctc_scaled
            )

            if self.relative_to is not None:
                model.add_data_source(
                    ctc_scaled,
                    ScaledSource(model,
                                 relative_to=self.relative_to,
                                 to_this=target,
                                 scale_this=self.input_length))

            if self.variant == 'warp':
                model.add_data_source(
                    flattened_labels,
                    FlattenSource(self.output, self.output_length))

            if self.variant is None:
                out = K.ctc_batch_cost(transcript, output, utterance_length,
                                       transcript_length)
            else:
                try:
                    import ctc  # pylint: disable=import-error
                except ImportError:
                    logger.error(
                        'The warp-CTC loss function was requested, '
                        'but we cannot find the "ctc" library. See our '
                        'troubleshooting page for helpful tips.')
                    raise ImportError(
                        'Cannot find the "ctc" library, which '
                        'is needed when using the "warp" variant of the CTC '
                        'loss function.')

                out = ctc.cpu_ctc_th(output.dimshuffle((1, 0, 2)),
                                     K.squeeze(utterance_length, -1),
                                     transcript[0] + 1,
                                     K.squeeze(transcript_length, -1))

            return (
             (
              (self.output_length, transcript_length),
              (self.output if self.variant is None \
               else flattened_labels, transcript),
              (self.input_length if self.relative_to is None \
               else ctc_scaled, utterance_length)
             ),
             out
            )

        elif backend.get_name() == 'pytorch':

            if self.variant != 'warp':
                logger.error(
                    'PyTorch does not include a native CTC loss '
                    'function yet. However, PyTorch bindings to Warp CTC are '
                    'available (SeanNaren/warp-ctc). Try installing that, and '
                    'then settings variant=warp.')
                raise ValueError('Only Warp CTC is supported for PyTorch '
                                 'right now.')

            ctc_scaled = 'ctc_scaled_{}'.format(self.input_length)
            flattened_labels = 'ctc_flattened_labels_{}'.format(target)
            transcript_length = model.data.placeholder(self.output_length,
                                                       location='cpu',
                                                       data_type='int')
            transcript = model.data.placeholder(flattened_labels,
                                                location='cpu',
                                                data_type='int')
            utterance_length = model.data.placeholder(
                self.input_length if self.relative_to is None else ctc_scaled,
                location='cpu',
                data_type='int')

            if self.relative_to is not None:
                model.add_data_source(
                    ctc_scaled,
                    ScaledSource(model,
                                 relative_to=self.relative_to,
                                 to_this=target,
                                 scale_this=self.input_length))

            if self.variant == 'warp':
                model.add_data_source(
                    flattened_labels,
                    FlattenSource(self.output, self.output_length))

            try:
                from warpctc_pytorch import CTCLoss  # pylint: disable=import-error
            except ImportError:
                logger.error(
                    'The warp-CTC loss function was requested, '
                    'but we cannot find the "warpctc_pytorch" library. See '
                    'our troubleshooting page for helpful tips.')
                raise ImportError(
                    'Cannot find the "warpctc_pytorch" library, '
                    'which is needed when using the "warp" variant of the CTC '
                    'loss function.')

            loss = model.data.move(CTCLoss())

            def get_ctc_loss(inputs, output):
                """ Computes CTC loss.
				"""
                return loss(
                    output.transpose(1, 0).contiguous(),
                    inputs[0][0] + 1,  # transcript[0]+1
                    inputs[1].squeeze(1),  # K.squeeze(utterance_length, -1),
                    inputs[2].squeeze(1)  # K.squeeze(transcript_length, -1)
                ) / output.size(0)

            return [
             [
              (self.output if self.variant is None \
               else flattened_labels, transcript),
              (self.input_length if self.relative_to is None \
               else ctc_scaled, utterance_length),
              (self.output_length, transcript_length)
             ],
             get_ctc_loss
            ]

        else:
            raise ValueError(
                'Unsupported backend "{}" for loss function "{}"'.format(
                    backend.get_name(), self.get_name()))
Example #34
        (img_nrows, img_ncols))
    style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
                          axis=0)
    target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
                           axis=0)
    return np.expand_dims(style_mask, axis=0), np.expand_dims(target_mask,
                                                              axis=0)


# create tensor variables for images
if K.image_dim_ordering() == 'th':
    shape = (1, nb_colors, img_nrows, img_ncols)
else:
    shape = (1, img_nrows, img_ncols, nb_colors)
style_image = K.variable(preprocess_image(style_img_path))
target_image = K.placeholder(shape=shape)
if use_content_img:
    content_image = K.variable(preprocess_image(content_img_path))
else:
    content_image = K.zeros(shape=shape)
images = K.concatenate([style_image, target_image, content_image], axis=0)

# create tensor variables for masks
raw_style_mask, raw_target_mask = load_semantic_map()
style_mask = K.variable(raw_style_mask.astype("float32"))
target_mask = K.variable(raw_target_mask.astype("float32"))
masks = K.concatenate([style_mask, target_mask], axis=0)

# index constants for images and tasks variables
STYLE, TARGET, CONTENT = 0, 1, 2
Example #35
img_b = np.expand_dims(img_b, axis=0)
img_s = np.expand_dims(img_s, axis=0)

img_b[:, :, :, 0] = np.subtract(img_b[:, :, :, 0], 103.939, out=img_b[:, :, :, 0], casting="unsafe")
img_b[:, :, :, 1] = np.subtract(img_b[:, :, :, 1], 116.779, out=img_b[:, :, :, 1], casting="unsafe")
img_b[:, :, :, 2] = np.subtract(img_b[:, :, :, 2], 123.68, out=img_b[:, :, :, 2], casting="unsafe")
img_b = img_b[:, :, :, ::-1]

img_s[:, :, :, 0] = np.subtract(img_s[:, :, :, 0], 103.939, out=img_s[:, :, :, 0], casting="unsafe")
img_s[:, :, :, 1] = np.subtract(img_s[:, :, :, 1], 116.779, out=img_s[:, :, :, 1], casting="unsafe")
img_s[:, :, :, 2] = np.subtract(img_s[:, :, :, 2], 123.68, out=img_s[:, :, :, 2], casting="unsafe")
img_s = img_s[:, :, :, ::-1]

img_b = K.variable(img_b)
img_s = K.variable(img_s)
combination_image = K.placeholder((1, 512, 512, 3))

input_tensor = K.concatenate([img_b, img_s, combination_image], axis=0)
model = VGG16(input_tensor=input_tensor, weights='imagenet', include_top=False)

layers = dict([(layer.name, layer.output) for layer in model.layers])
content_weight = 0.025
style_weight = 5.0
total_variation_weight = 1.0
loss = K.variable(0.)
height = 512
width = 512

def content_loss(content, combination):
    return K.sum(K.square(combination - content))
Example #36
# Tensorflow as Backend.
content_image = K.variable(content_array)
style_image = K.variable(style_array)

# Create a tensor (x) which represents the final output image.
# It is a combination of the content and style images.

# Keep the contents of "x" close to the original image (content_image),
# i.e. (x - content_image) approx = 0, so Loss(content, x) -> 0.

# Keep "x" close to the style image to transfer the style,
# i.e. (x - style_image) approx = 0, so Loss(style, x) -> 0.
# Dimensions are the same as the content and style images.
combination_image = K.placeholder((1, ht, wd, 3))

# Concatenate the content, style, and combination tensors into one tensor
# and feed the final tensor into VGG16.
# Format: tf.concat(values, axis, name='concat')
# concatenates tensors along one axis.
input_tensor = K.concatenate([content_image, style_image, combination_image],
                             axis=0)

# Why use VGG?
# Because it is a pre-trained image-classification network that already
# knows how to encode perceptual and semantic information about images.

# Using inbuilt VGG16 from Keras and removing the fully connected layers
model = VGG16(input_tensor=input_tensor, weights='imagenet', include_top=False)
Example #37
def style(base_img=None,
          style=None,
          size=512,
          fun_evals=100,
          weights=None,
          pooling='max',
          tv_weight=1e-3,
          style_weight=1e4,
          content_weight=5e0):
    base_image_path = base_img
    style_reference_image_path = style

    # these are the weights of the different loss components
    total_variation_weight = tv_weight
    style_weight = style_weight
    content_weight = content_weight

    # dimensions of the generated picture.
    if isinstance(base_image_path, np.ndarray):
        width, height = (base_image_path.shape[0], base_image_path.shape[1])
    else:
        width, height = load_img(base_image_path).size

    img_nrows = size
    img_ncols = int(width * img_nrows / height)

    # util function to open, resize and format pictures into appropriate tensors
    def preprocess_image(image_path):
        if isinstance(image_path, np.ndarray):
            img = cv2.resize(image_path, (img_ncols, img_nrows))
        else:
            img = load_img(image_path, target_size=(img_nrows, img_ncols))
            img = img_to_array(img)

        img = np.expand_dims(img, axis=0)
        img = vgg19.preprocess_input(img)

        return img

    # util function to convert a tensor into a valid image
    def deprocess_image(x):
        if K.image_data_format() == 'channels_first':
            x = x.reshape((3, img_nrows, img_ncols))
            x = x.transpose((1, 2, 0))
        else:
            x = x.reshape((img_nrows, img_ncols, 3))
        # Remove zero-center by mean pixel
        x[:, :, 0] += 103.939
        x[:, :, 1] += 116.779
        x[:, :, 2] += 123.68
        # 'BGR'->'RGB'
        x = x[:, :, ::-1]
        x = np.clip(x, 0, 255).astype('uint8')
        return x

    # get tensor representations of our images
    base_image = K.variable(preprocess_image(base_image_path))
    style_reference_image = K.variable(
        preprocess_image(style_reference_image_path))

    # this will contain our generated image
    if K.image_data_format() == 'channels_first':
        combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
    else:
        combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

    # combine the 3 images into a single Keras tensor
    input_tensor = K.concatenate(
        [base_image, style_reference_image, combination_image], axis=0)

    # build the VGG19 network with our 3 images as input
    # the model will be loaded with pre-trained ImageNet weights
    if pooling == 'max':
        if weights is not None:
            model = vgg19.VGG19(input_tensor=input_tensor,
                                weights=weights,
                                include_top=False)
        else:
            model = vgg19.VGG19(input_tensor=input_tensor,
                                weights='imagenet',
                                include_top=False)
    elif pooling == 'avg':
        if weights is not None:
            model = build_vgg(input_tensor, weights)
        else:
            model = build_vgg(input_tensor)
    else:
        raise ValueError('Only max and average pooling are supported')

    print('Model loaded.')

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

    # compute the neural style loss
    # first we need to define 4 util functions

    # the gram matrix of an image tensor (feature-wise outer product)
    def gram_matrix(x):
        assert K.ndim(x) == 3
        if K.image_data_format() == 'channels_first':
            features = K.batch_flatten(x)
        else:
            features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        gram = K.dot(features, K.transpose(features))
        return gram

    # the "style loss" is designed to maintain
    # the style of the reference image in the generated image.
    # It is based on the gram matrices (which capture style) of
    # feature maps from the style reference image
    # and from the generated image
    def style_loss(style, combination):
        assert K.ndim(style) == 3
        assert K.ndim(combination) == 3
        S = gram_matrix(style)
        C = gram_matrix(combination)
        channels = 3
        size = img_nrows * img_ncols
        return K.sum(K.square(S - C)) / (4.0 * (channels**2) * (size**2))

    # an auxiliary loss function
    # designed to maintain the "content" of the
    # base image in the generated image
    def content_loss(base, combination):
        return K.sum(K.square(combination - base))

    # the 3rd loss function, total variation loss,
    # designed to keep the generated image locally coherent
    def total_variation_loss(x):
        assert K.ndim(x) == 4
        if K.image_data_format() == 'channels_first':
            a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                         x[:, :, 1:, :img_ncols - 1])
            b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                         x[:, :, :img_nrows - 1, 1:])
        else:
            a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                         x[:, 1:, :img_ncols - 1, :])
            b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                         x[:, :img_nrows - 1, 1:, :])
        return K.sum(K.pow(a + b, 1.25))

    # combine these loss functions into a single scalar
    loss = K.variable(0.0)
    layer_features = outputs_dict['block5_conv1']
    base_image_features = layer_features[0, :, :, :]
    combination_features = layer_features[2, :, :, :]
    loss += content_weight * content_loss(base_image_features,
                                          combination_features)

    feature_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]
    for layer_name in feature_layers:
        layer_features = outputs_dict[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = style_loss(style_reference_features, combination_features)
        loss += (style_weight / len(feature_layers)) * sl
    loss += total_variation_weight * total_variation_loss(combination_image)

    # get the gradients of the generated image wrt the loss
    grads = K.gradients(loss, combination_image)

    outputs = [loss]
    if isinstance(grads, (list, tuple)):
        outputs += grads
    else:
        outputs.append(grads)

    f_outputs = K.function([combination_image], outputs)

    def eval_loss_and_grads(x):
        if K.image_data_format() == 'channels_first':
            x = x.reshape((1, 3, img_nrows, img_ncols))
        else:
            x = x.reshape((1, img_nrows, img_ncols, 3))
        outs = f_outputs([x])
        loss_value = outs[0]
        if len(outs[1:]) == 1:
            grad_values = outs[1].flatten().astype('float64')
        else:
            grad_values = np.array(outs[1:]).flatten().astype('float64')
        return loss_value, grad_values

    # this Evaluator class makes it possible
    # to compute loss and gradients in one pass
    # while retrieving them via two separate functions,
    # "loss" and "grads". This is done because scipy.optimize
    # requires separate functions for loss and gradients,
    # but computing them separately would be inefficient.
    class Evaluator(object):
        def __init__(self):
            self.loss_value = None
            self.grads_values = None

        def loss(self, x):
            assert self.loss_value is None
            loss_value, grad_values = eval_loss_and_grads(x)
            self.loss_value = loss_value
            self.grad_values = grad_values
            return self.loss_value

        def grads(self, x):
            assert self.loss_value is not None
            grad_values = np.copy(self.grad_values)
            self.loss_value = None
            self.grad_values = None
            return grad_values

    evaluator = Evaluator()

    def style(base_image_path):
        # run scipy-based optimization (L-BFGS) over the pixels of the generated image
        # so as to minimize the neural style loss
        x = preprocess_image(base_image_path)

        for i in range(1):
            print('Start of iteration', i)
            start_time = time.time()
            x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                             x.flatten(),
                                             fprime=evaluator.grads,
                                             maxfun=fun_evals)
            print('Current loss value:', min_val)
            end_time = time.time()
            print('Iteration %d completed in %ds' % (i, end_time - start_time))

        return deprocess_image(x)

    return style
Exemple #38
y_train = df_y[0:len(x_train)]
y_test = df_y[len(x_train):]
inputX = np.array(x_train)
inputY = np.array(y_train)
outputX = np.array(x_test)
outputY = np.array(y_test)
numFeatures = inputX.shape[1]
numEpochs = 1000
n_classes = 2
x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_train = x_train.reshape(x_train.shape[0], 1, 1, 50)
x_test = x_test.reshape(x_test.shape[0], 1, 1, 50)
x = K.placeholder(shape=(5, 5, numFeatures))
y = K.placeholder(shape=(None, n_classes))  # label placeholder
input_shape = (1, 1, 50)
print(input_shape)
keep_rate = 0.8
keep_prob = K.placeholder(ndim=0)  # scalar keep probability for dropout
print(x_test.shape)
model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(1, 1),
           activation='relu',
           input_shape=(1, 1, numFeatures)))
model.add(Conv2D(64, (1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(1, 1)))
model.add(Dropout(0.25))
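
# The example stops after the convolutional stack. A hypothetical completion;
# the classifier head, the compile step, and the Flatten/Dense imports are
# assumptions, not part of the original:
from keras.layers import Flatten, Dense

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])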
Exemple #39
def draw(model_body,
         class_names,
         anchors,
         image_data,
         image_set='val',
         weights_name='trained_stage_3_best.h5',
         out_path="output_images",
         save_all=True):
    '''
    Draw bounding boxes on image data
    '''
    if image_set == 'train':
        image_data = np.array([
            np.expand_dims(image, axis=0)
            for image in image_data[:int(len(image_data) * .9)]
        ])
    elif image_set == 'val':
        image_data = np.array([
            np.expand_dims(image, axis=0)
            for image in image_data[int(len(image_data) * .9):]
        ])
    elif image_set == 'all':
        image_data = np.array(
            [np.expand_dims(image, axis=0) for image in image_data])
    else:
        raise ValueError(
            "draw argument image_set must be 'train', 'val', or 'all'")
    # model.load_weights(weights_name)
    print(image_data.shape)
    model_body.load_weights(weights_name)

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=0.07,
                                       iou_threshold=0.)

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for i in range(10):
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                model_body.input: image_data[i],
                input_image_shape: [image_data.shape[2], image_data.shape[3]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for image.'.format(len(out_boxes)))
        print(out_boxes)

        # Plot image with predicted boxes.
        image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
                                      class_names, out_scores)
        # Save the image:
        if save_all or (len(out_boxes) > 0):
            image = PIL.Image.fromarray(image_with_boxes)
            image.save(os.path.join(out_path, str(i) + '.png'))

        # To display (pauses the program):
        plt.imshow(image_with_boxes, interpolation='nearest')
        plt.show()
Exemple #40
os.mkdir(npz_path)
#bounds = True
epsilon = 0.01

#mlflow
mlflow.set_experiment(model_type+'_'+run)
mlflow.start_run(run_name=datetime.datetime.fromtimestamp(time.time()).strftime('%c')) #start the mlflow run for logging
mlflow.log_param("eps", epsilon)

try:

	"""
	PREPARE MODEL
	"""
	if K.image_data_format() == 'channels_first':
		combination_image = K.placeholder((1, 1, img_nrows, img_ncols))
	else:
		combination_image = K.placeholder((1, img_nrows, img_ncols, 1)) #the generated image, i.e. the adversarial example
	input_tensor = combination_image

	#load the model
	if model_type == 'drop':
		model_raw = UNetDropout(model_path).model
	elif model_type == 'prob':
		model_raw = UNetProboutFindAdv(model_path).model
	
	# model_raw is the raw prediction model for which we try to find the
	# adversarial example; we need some modifications before we can run the
	# optimization that finds the adversarial perturbation in the input
	model_raw.layers.pop(0)

	img_input = Input(tensor=input_tensor, shape=(img_nrows, img_ncols, n_channels))
	output_tensor = model_raw(img_input)
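
	# The snippet is cut off here. A hedged sketch of the usual continuation:
	# build a scalar objective from output_tensor, take its gradient with
	# respect to the input image, and compile both into one backend function.
	# The specific loss below is illustrative only, not the project's code.
	loss = K.sum(K.square(output_tensor))
	grads = K.gradients(loss, combination_image)[0]
	fetch_loss_and_grads = K.function([combination_image], [loss, grads])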
Exemple #41
    size = img_height * img_width
    return K.sum(K.square(S - C)) / (4. * (channels**2) * (size**2))


def total_variation_loss(x):
    a = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                 x[:, 1:, :img_width - 1, :])
    b = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                 x[:, :img_height - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
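

# The style loss above appears only from its closing lines; the Gram-matrix
# helper it relies on is not part of the snippet. A minimal sketch of the
# usual Keras-backend version (an assumption, consistent with the S and C
# terms used in the style loss):
def gram_matrix(x):
    # Flatten each channel to a vector, then take the outer product of the
    # channel vectors: entry (i, j) measures how strongly feature maps i
    # and j co-activate.
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram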


target_image = K.constant(preprocess_image(target_image_path))
style_reference_image = K.constant(
    preprocess_image(style_reference_image_path))
combination_image = K.placeholder((1, img_height, img_width, 3))

input_tensor = K.concatenate(
    [target_image, style_reference_image, combination_image], axis=0)

model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)

print('Model loaded.')

outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2'
style_layers = [
    'block1_conv1',
    'block2_conv1',
Exemple #42
def run_inference(model_path,
                  anchors,
                  classes_path,
                  test_path,
                  output_path,
                  input_mode,
                  score_threshold,
                  iou_threshold,
                  map_iou_threshold,
                  class_index=0,
                  num_saved_images=0):

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.

    input_image_shape = K.placeholder(shape=(2, ))

    boxes, scores, classes = yolo_post_process(yolo_model.output, anchors,
                                               len(class_names),
                                               input_image_shape,
                                               score_threshold, iou_threshold)

    total_num_bboxes = 0
    total_num_images = 0

    # "Dir" mode
    if input_mode == 0:
        for image_file in os.listdir(test_path):
            try:
                image_type = imghdr.what(os.path.join(test_path, image_file))
                if not image_type:
                    continue
            except IsADirectoryError:
                continue

            image = Image.open(os.path.join(test_path, image_file))
            if is_fixed_size:  # TODO: When resizing we can use minibatch input.
                resized_image = image.resize(tuple(reversed(model_image_size)),
                                             Image.BILINEAR)
                image_data = np.array(resized_image, dtype='float32')
            else:
                # Due to skip connection + max pooling in YOLO_v2, inputs must have
                # width and height as multiples of 32.
                new_image_size = (image.width - (image.width % 32),
                                  image.height - (image.height % 32))
                resized_image = image.resize(new_image_size, Image.BILINEAR)
                image_data = np.array(resized_image, dtype='float32')

            image_data /= 255.
            image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    input_image_shape: [image.size[1], image.size[0]],
                    K.learning_phase(): 0
                })

            total_num_bboxes += len(out_boxes)
            total_num_images += 1

            # Rank boxes, scores and classes by score

            font = ImageFont.truetype(font='../data/font/FiraMono-Medium.otf', \
                                      size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
            thickness = (image.size[0] + image.size[1]) // 300

            for i, c in reversed(list(enumerate(out_classes))):
                predicted_class = class_names[c]
                box = out_boxes[i]
                score = out_scores[i]
                top, left, bottom, right = box
                print(predicted_class)

                label = '{} {:.2f}'.format(predicted_class, score)
                draw = ImageDraw.Draw(image)
                label_size = draw.textsize(label, font)

                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.size[1],
                             np.floor(bottom + 0.5).astype('int32'))
                right = min(image.size[0],
                            np.floor(right + 0.5).astype('int32'))
                print(label, (left, top), (right, bottom))

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle([left + i, top + i, right - i, bottom - i],
                                   outline=colors[c])
                draw.rectangle(
                    [tuple(text_origin),
                     tuple(text_origin + label_size)],
                    fill=colors[c])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

            image.save(os.path.join(output_path, image_file), quality=90)

        # This image dir doesn't have ground truth so we can't calculate mAP
        return (None, None, None)

    # "NPZ" mode
    elif input_mode == 1:
        # 2 modes: 1) "dir" inference from dir 2) "npz" inference and mAP from bounding boxes
        data = np.load(test_path,
                       encoding='bytes')  # custom data saved as a numpy file.
        input_images = data['images']
        input_boxes = data['boxes']

        output_boxes = []
        output_scores = []
        output_classes = []

        for i, (image, gt_boxes) in enumerate(zip(input_images, input_boxes)):

            height, width = image.shape[:2]

            if is_fixed_size:
                width_scale = model_image_size[1] / float(width)
                height_scale = model_image_size[0] / float(height)
                resized_image = cv2.resize(image,
                                           tuple(reversed(model_image_size)),
                                           interpolation=cv2.INTER_LINEAR)
            else:
                # Due to skip connection + max pooling in YOLO_v2, inputs must have
                # width and height as multiples of 32. `image` is a numpy array
                # in NPZ mode, so resize with cv2 rather than PIL.
                new_image_size = (width - (width % 32),
                                  height - (height % 32))
                width_scale = new_image_size[0] / float(width)
                height_scale = new_image_size[1] / float(height)
                resized_image = cv2.resize(image, new_image_size,
                                           interpolation=cv2.INTER_LINEAR)

            image_data = np.array(resized_image, dtype='float32')

            image_data /= 255.
            image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

            image_data_orig = np.array(image, dtype='float32')

            image_data_orig /= 255.
            image_data_orig = np.expand_dims(image_data_orig,
                                             0)  # Add batch dimension.

            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    input_image_shape: [image.shape[0], image.shape[1]],
                    K.learning_phase(): 0
                })

            total_num_bboxes += len(out_boxes)
            total_num_images += 1

            # Transform out_box ordering to NPZ box ordering
            transformed_out_boxes = []
            for out_box in out_boxes:
                new_box = [0, 0, 0, 0]
                xmin = out_box[1]
                xmax = out_box[3]
                ymin = out_box[0]
                ymax = out_box[2]

                new_box[0] = xmin
                new_box[1] = ymin
                new_box[2] = xmax
                new_box[3] = ymax

                transformed_out_boxes.append(new_box)

            transformed_gt_boxes = []
            gt_classes = []
            gt_scores = [1] * len(gt_boxes)
            for gt in gt_boxes:
                new_box = [0, 0, 0, 0]
                xmin = gt[1]
                ymin = gt[2]
                xmax = gt[3]
                ymax = gt[4]
                c = int(gt[0])

                new_box[0] = ymin
                new_box[1] = xmin
                new_box[2] = ymax
                new_box[3] = xmax

                transformed_gt_boxes.append(new_box)
                gt_classes.append(c)

            output_boxes.append(transformed_out_boxes)
            output_scores.append(out_scores)
            output_classes.append(out_classes)

            if total_num_images < num_saved_images:
                # Draw TP, FP, FNs on image
                image_with_boxes = draw_boxes_advanced(
                    image_data_orig[0],
                    transformed_gt_boxes,
                    gt_classes,
                    out_boxes,
                    out_classes,
                    out_scores,
                    class_names,
                    score_threshold=0.3,
                    iou_threshold=map_iou_threshold)
                #image_with_boxes = draw_boxes(image_data_orig[0],
                #                              transformed_gt_boxes,
                #                              gt_classes,
                #                              class_names,
                #                              scores=None)

                full_image = PIL.Image.fromarray(image_with_boxes)
                full_image.save(os.path.join(output_path, \
                                             str(map_iou_threshold)+"-"+str(i)+'.png'))

        # Hack for running YOLO as a binary classifier
        # Map label indices in NPZ to label indices of trained model
        transformed_input_boxes = []
        for image_boxes in input_boxes:  # don't shadow the `boxes` tensor above
            boxes_for_image = []
            for box in image_boxes:
                new_box = list(box)
                new_box[0] = class_index
                boxes_for_image.append(new_box)
            transformed_input_boxes.append(boxes_for_image)

        prs_by_threshold = ml_utils.get_pr_curve(
            transformed_input_boxes,
            output_boxes,
            output_scores,
            output_classes,
            iou_threshold=map_iou_threshold)
    mAP, precisions, recalls = ml_utils.get_mAP(prs_by_threshold)
    return (mAP, precisions, recalls)
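

# ml_utils.get_pr_curve and ml_utils.get_mAP are project helpers that are not
# shown. A rough sketch of the mAP step, under the assumption that
# prs_by_threshold maps each score threshold to a (precision, recall) pair:
def get_mAP_sketch(prs_by_threshold):
    # Sort the operating points by recall, then integrate precision over
    # recall, i.e. approximate the area under the PR curve.
    points = sorted(prs_by_threshold.values(), key=lambda pr: pr[1])
    precisions = [p for p, r in points]
    recalls = [r for p, r in points]
    ap, prev_recall = 0.0, 0.0
    for p, r in zip(precisions, recalls):
        ap += p * (r - prev_recall)
        prev_recall = r
    return ap, precisions, recalls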
Exemple #43
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# this will contain our generated image
input_img = K.placeholder((1, 3, img_width, img_height))

# build the VGG16 network with our input_img as input
# (note: assigning first_layer.input directly like this only worked in very
# old Keras releases; newer versions wire inputs in through Input tensors)
first_layer = ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height))
first_layer.input = input_img

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
Exemple #44
def main(attack, src_model_name, target_model_names, data_train_dir,
         data_test_dir):
    np.random.seed(0)
    tf.set_random_seed(0)
    set_gtsrb_flags()

    # Get GTSRB test data
    _, _, _, _, X_test, Y_test = load_data(data_train_dir, data_test_dir)

    # display_leg_sample(X_test)

    # One-hot encode image labels
    label_binarizer = LabelBinarizer()
    Y_test = label_binarizer.fit_transform(Y_test)

    x = K.placeholder(
        (None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))

    y = K.placeholder((None, FLAGS.NUM_CLASSES))

    # one_hot_y = tf.one_hot(y, 43)

    # source model for crafting adversarial examples
    src_model = load_model(src_model_name)

    # model(s) to target
    target_models = [None] * len(target_model_names)
    for i in range(len(target_model_names)):
        target_models[i] = load_model(target_model_names[i])

    # simply compute test error
    if attack == "test":
        err = tf_test_error_rate(src_model, x, X_test, Y_test)
        print('{}: {:.3f}'.format(basename(src_model_name), err))

        for (name, target_model) in zip(target_model_names, target_models):
            err = tf_test_error_rate(target_model, x, X_test, Y_test)
            print('{}: {:.3f}'.format(basename(name), err))
        return

    eps = args.eps

    # take the random step in the RAND+FGSM
    if attack == "rand_fgs":
        X_test = np.clip(
            X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)), 0.0,
            1.0)
        eps -= args.alpha

    logits = src_model(x)
    grad = gen_grad(x, logits, y)

    # FGSM and RAND+FGSM one-shot attack
    if attack in ["fgs", "rand_fgs"]:
        adv_x = symbolic_fgs(x, grad, eps=eps)

    # iterative FGSM
    if attack == "ifgs":
        adv_x = iter_fgs(src_model,
                         x,
                         y,
                         steps=args.steps,
                         eps=args.eps / args.steps)

    # Carlini & Wagner attack
    if attack == "CW":
        X_test = X_test[0:200]
        Y_test = Y_test[0:200]

        cli = CarliniLi(K.get_session(),
                        src_model,
                        targeted=False,
                        confidence=args.kappa,
                        eps=args.eps)

        X_adv = cli.attack(X_test, Y_test)

        r = np.clip(X_adv - X_test, -args.eps, args.eps)
        X_adv = X_test + r
        np.save('Train_Carlini_200.npy', X_adv)
        np.save('Label_Carlini_200.npy', Y_test)

        err = tf_test_error_rate(src_model, x, X_adv, Y_test)
        print('{}->{}: {:.3f}'.format(basename(src_model_name),
                                      basename(src_model_name), err))

        for (name, target_model) in zip(target_model_names, target_models):
            err = tf_test_error_rate(target_model, x, X_adv, Y_test)
            print('{}->{}: {:.3f}'.format(basename(src_model_name),
                                          basename(name), err))
        display_leg_adv_sample(X_test, X_adv)
        return

    if attack == "cascade_ensemble":
        # X_test = np.clip(
        #     X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)),
        #     0.0, 1.0)
        # eps -= args.alpha

        sub_model_ens = (sub_model_2, sub_model_3)
        sub_models = [None] * len(sub_model_ens)
        for i in range(len(sub_model_ens)):
            sub_models[i] = load_model(sub_model_ens[i])

        adv_x = x
        for j in range(args.steps):
            for i, m in enumerate(sub_models + [src_model]):
                logits = m(adv_x)
                gradient = gen_grad(adv_x, logits, y)
                adv_x = symbolic_fgs(adv_x,
                                     gradient,
                                     eps=args.eps / args.steps,
                                     clipping=True)

    if attack == "Iter_Casc":
        # X_test = np.clip(
        #     X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)),
        #     0.0, 1.0)
        # args.eps = args.eps - args.alpha

        sub_model_ens = (sub_model_1, sub_model_2, sub_model_3)
        sub_models = [None] * len(sub_model_ens)
        for i in range(len(sub_model_ens)):
            sub_models[i] = load_model(sub_model_ens[i])

        x_advs = [None] * len(sub_models)
        errs = [None] * len(sub_models)
        adv_x = x
        eps_all = []

        # Per-step epsilon budget: each step takes a 1/len(sub_models) share
        # of the epsilon that is still unspent.
        for i in range(args.steps):
            if i == 0:
                eps_all.append((1.0 / len(sub_models)) * args.eps)
            else:
                pre_sum = sum(eps_all[:i])
                eps_all.append((args.eps - pre_sum) * (1.0 / len(sub_models)))

        for j in range(args.steps):
            print('iterative step is :', j)
            if j == 0:
                for i, m in enumerate(sub_models):
                    logits = m(adv_x)
                    gradient = gen_grad(adv_x, logits, y)
                    adv_x_ = symbolic_fgs(adv_x,
                                          gradient,
                                          eps=eps_all[j],
                                          clipping=True)
                    x_advs[i] = adv_x_

                    X_adv = batch_eval([x, y], [adv_x_], [X_test, Y_test])[0]

                    err = tf_test_error_rate(m, x, X_adv, Y_test)
                    errs[i] = err
                adv_x = x_advs[errs.index(min(errs))]
            else:
                t = errs.index(min(errs))
                print('index of min value of errs:', t)
                logits = sub_models[t](adv_x)
                gradient = gen_grad(adv_x, logits, y)
                adv_x = symbolic_fgs(adv_x,
                                     gradient,
                                     eps=eps_all[j],
                                     clipping=True)

                for i, m in enumerate(sub_models):
                    X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
                    err = tf_test_error_rate(m, x, X_adv, Y_test)
                    errs[i] = err
            print('error rate of each substitute models_oldest: ', errs)
            print('\t')
            if min(errs) >= 99:
                success_rate = sum(errs) / len(sub_models)
                print('success rate is: {:.3f}'.format(success_rate))
                break

        success_rate = sum(errs) / len(sub_models)
        print('success rate is: {:.3f}'.format(success_rate))

        X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
        np.save('results/iter_casc_0.2_leg_adv/X_adv_Iter_Casc_0.2.npy', X_adv)

        for (name, target_model) in zip(target_model_names, target_models):
            err = tf_test_error_rate(target_model, x, X_adv, Y_test)
            print('{}->{}: {:.3f}'.format(basename(src_model_name),
                                          basename(name), err))

        save_leg_adv_sample('results/iter_casc_0.2_leg_adv/', X_test, X_adv)

        # save adversarial example specified by user
        save_leg_adv_specified_by_user(
            'results/iter_casc_0.2_leg_adv_label_4/', X_test, X_adv, Y_test)
        return

    if attack == "stack_paral":
        # X_test = np.clip(
        #     X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)),
        #     0.0, 1.0)
        # eps -= args.alpha

        sub_model_ens = (sub_model_1, sub_model_2, sub_model_3)
        sub_models = [None] * len(sub_model_ens)

        for i in range(len(sub_model_ens)):
            sub_models[i] = load_model(sub_model_ens[i])

        errs = [None] * (len(sub_models) + 1)
        x_advs = [None] * len(sub_models)
        # print x_advs

        for i, m in enumerate(sub_models):
            # x = x + args.alpha * np.sign(np.random.randn(*x[0].shape))
            logits = m(x)
            gradient = gen_grad(x, logits, y)
            adv_x = symbolic_fgs(x, gradient, eps=args.eps / 2, clipping=True)
            x_advs[i] = adv_x

        # print x_advs
        adv_x_sum = x_advs[0]
        for i in range(len(sub_models)):
            if i == 0: continue
            adv_x_sum = adv_x_sum + x_advs[i]
        adv_x_mean = adv_x_sum / (len(sub_models))
        preds = src_model(adv_x_mean)
        grads = gen_grad(adv_x_mean, preds, y)
        adv_x = symbolic_fgs(adv_x_mean, grads, eps=args.eps, clipping=True)

        # compute the adversarial examples and evaluate
        for i, m in enumerate(sub_models + [src_model]):
            X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
            err = tf_test_error_rate(m, x, X_adv, Y_test)
            errs[i] = err

        # compute success rate
        success_rate = sum(errs) / (len(sub_models) + 1)
        print('success rate is: {:.3f}'.format(success_rate))

        # compute transfer rate
        for (name, target_model) in zip(target_model_names, target_models):
            err = tf_test_error_rate(target_model, x, X_adv, Y_test)
            print('{}->{}: {:.3f}'.format(basename(src_model_name),
                                          basename(name), err))

        # save adversarial examples
        np.save('results/stack_paral_0.2_leg_adv/X_adv_stack_paral_0.2.npy',
                X_adv)
        # save_leg_adv_sample(X_test, X_adv)
        save_leg_adv_sample('results/stack_paral_0.2_leg_adv/', X_test, X_adv)

        # save adversarial example specified by user
        save_leg_adv_specified_by_user(
            'results/stack_paral_0.2_leg_adv_label_4/', X_test, X_adv, Y_test)

        return

    if attack == "cascade_ensemble_2":
        X_test = np.clip(
            X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)), 0.0,
            1.0)
        eps -= args.alpha

        sub_model_ens = (sub_model_1, sub_model_2)
        sub_models = [None] * len(sub_model_ens)

        for i in range(len(sub_model_ens)):
            sub_models[i] = load_model(sub_model_ens[i])

        x_advs = [([None] * len(sub_models)) for i in range(args.steps)]
        # print x_advs

        x_adv = x
        for j in range(args.steps):
            for i, m in enumerate(sub_models):
                logits = m(x_adv)
                gradient = gen_grad(x_adv, logits, y)
                x_adv = symbolic_fgs(x_adv,
                                     gradient,
                                     eps=args.eps / args.steps,
                                     clipping=True)
                x_advs[j][i] = x_adv

        # print x_advs
        adv_x_sum = x_advs[0][0]
        for j in range(args.steps):
            for i in range(len(sub_models)):
                if j == 0 and i == 0: continue
                adv_x_sum = adv_x_sum + x_advs[j][i]
        adv_x_mean = adv_x_sum / (args.steps * len(sub_models))
        preds = src_model(adv_x_mean)
        grads = gen_grad(adv_x_mean, preds, y)
        adv_x = symbolic_fgs(adv_x_mean,
                             grads,
                             eps=args.eps / args.steps,
                             clipping=True)

    # compute the adversarial examples and evaluate
    X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]

    # white-box attack
    err = tf_test_error_rate(src_model, x, X_adv, Y_test)
    print('{}->{}: {:.3f}'.format(basename(src_model_name),
                                  basename(src_model_name), err))

    # black-box attack
    for (name, target_model) in zip(target_model_names, target_models):
        err = tf_test_error_rate(target_model, x, X_adv, Y_test)
        print('{}->{}: {:.3f}'.format(basename(src_model_name), basename(name),
                                      err))
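

# symbolic_fgs is a helper from the surrounding project. A minimal sketch of
# the Fast Gradient Sign step it presumably performs (an assumption; the real
# helper may differ, e.g. in its clipping bounds):
def symbolic_fgs_sketch(x, grad, eps=0.3, clipping=True):
    # Move every input element by eps in the direction that increases the
    # loss, and stop gradients so the result is treated as a constant.
    adv_x = K.stop_gradient(x + eps * K.sign(grad))
    if clipping:
        adv_x = K.clip(adv_x, 0., 1.)
    return adv_x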
Exemple #45
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
              for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
    map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
        colors))
random.seed(10101)  # Fixed seed for consistent colors across runs.
random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
random.seed(None)  # Reset seed to default.

# Generate output tensor targets for filtered bounding boxes.
# TODO: Wrap these backend operations with Keras layers.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
    yolo_outputs,
    input_image_shape,
    score_threshold=args.score_threshold,
    iou_threshold=args.iou_threshold)

# Save the output into a compact JSON file.
outfile = open('output/game_data.json', 'w')
# This will be appended with an object for every frame.
data_to_write = []


class CocoBox:
    def __init__(self, name, xmin, ymin, xmax, ymax, score=0.0):
        self.name = name
Exemple #46
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x


#Preprocess the content and style images to tensors
content_image = K.variable(preprocess_image(content_image_path))
style_image = K.variable(preprocess_image(style_image_path))

#create and store random generated image
if K.image_data_format() == 'channels_first':
    generated_image = K.placeholder((1, 3, img_nrows, img_ncols))
else:
    generated_image = K.placeholder((1, img_nrows, img_ncols, 3))

#Combine the Content,Style,Generated image to push into the VGG19 model at one go
input_tensor = K.concatenate([content_image, style_image, generated_image],
                             axis=0)

#Load the VGG19 model
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)

#Store the activations of the layers in a dictionary to utilize them at later time
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])


def softmax(x):
    return K.exp(x) / K.sum(K.exp(x), axis=-1)


y_old_label = y_test
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

batch_size = 16
epoch = 10
learning_rate = 0.1
nb_classes = 10
rng = np.random
print(x_train.shape[1])
print(y_train.shape[1])

x = K.placeholder((batch_size, x_train.shape[1]))
y = K.placeholder((batch_size, y_train.shape[1]))

#init weights
w = K.variable(rng.randn(nb_classes, x_train.shape[1]))
b = K.variable(rng.randn(nb_classes))

pred = softmax(-K.dot(x, K.transpose(w)) - b)
# pred = K.softmax(-K.dot(x, K.transpose(w)) - b)
pred_classes = K.argmax(pred, axis=-1)

xent = K.sum(-K.log(pred) * y, axis=-1)

# weight penalty: squared L2 norm of the weights
l2_penalty = 0.01 * K.sum(K.square(w))

cost = K.mean(xent) + l2_penalty
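
# A hedged sketch of the training step this sets up: plain gradient descent on
# the cost, wired through the updates argument of K.function (an assumption
# about where the snippet is heading):
grads_w, grads_b = K.gradients(cost, [w, b])
updates = [(w, w - learning_rate * grads_w),
           (b, b - learning_rate * grads_b)]
train_step = K.function(inputs=[x, y], outputs=[cost], updates=updates)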


def deprocess_image(x):
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path))
style_reference_image = K.variable(preprocess_image(style_reference_image_path))

# this will contain our generated image
if K.image_data_format() == 'channels_first':
    combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
else:
    combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)

# build the VGG16 network with our 3 images as input
# the model will be loaded with pre-trained ImageNet weights
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet', include_top=False)
print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
content_image = load_img(content_image_path, target_size=target_size)
content_image_array = img_to_array(content_image)
content_image_array = K.variable(preprocess_input(
    np.expand_dims(content_image_array, 0)),
                                 dtype='float32')

style_image = load_img(style_image_path, target_size=target_size)
style_image_array = img_to_array(style_image)
style_image_array = K.variable(preprocess_input(
    np.expand_dims(style_image_array, 0)),
                               dtype='float32')

generate_image = np.random.randint(256, size=(target_width, target_height,
                                              3)).astype('float64')
generate_image = preprocess_input(np.expand_dims(generate_image, 0))
generate_image_placeholder = K.placeholder(shape=(1, target_width,
                                                  target_height, 3))


def get_feature_represent(x, layer_names, model):
    '''Feature map representation of an image

    Parameters
    ----------------------------------------------
    x : input
        not actually used here; it can be regarded as a tag for the input
    layer_names : list
        names of the CNN layers
    model : the CNN model

    Returns
    ----------------------------------------------
Exemple #50
    def __init__(
        self,
        input_shape=None,
        batch_size=None,
        dtype=None,
        input_tensor=None,
        sparse=None,
        name=None,
        ragged=None,
        type_spec=None,
        **kwargs,
    ):
        self._init_input_shape = input_shape
        self._init_batch_size = batch_size
        self._init_dtype = dtype
        self._init_sparse = sparse
        self._init_ragged = ragged
        self._init_type_spec = type_spec

        strategy = tf.distribute.get_strategy()
        if (strategy and batch_size is not None
                and distributed_training_utils.global_batch_size_supported(
                    strategy)):
            if batch_size % strategy.num_replicas_in_sync != 0:
                raise ValueError(
                    "The `batch_size` argument ({}) must be divisible by "
                    "the number of replicas ({})".format(
                        batch_size, strategy.num_replicas_in_sync))
            batch_size = batch_size // strategy.num_replicas_in_sync

        if "batch_input_shape" in kwargs:
            batch_input_shape = kwargs.pop("batch_input_shape")
            if input_shape and batch_input_shape:
                raise ValueError("Only provide the input_shape OR "
                                 "batch_input_shape argument to "
                                 "InputLayer, not both at the same time.")
            # Set the input shape and batch size from the batch_input_shape.
            # Note that batch_input_shape can be None (unknown rank) or []
            # (scalar), in which case the batch size must be None.
            if batch_input_shape:
                batch_size = batch_input_shape[0]
                input_shape = batch_input_shape[1:]
        if kwargs:
            raise ValueError(
                f"Unrecognized keyword arguments: {list(kwargs.keys())}")

        if sparse and ragged:
            raise ValueError(
                "Cannot set both sparse and ragged to True in a Keras input.")

        if not name:
            prefix = "input"
            name = prefix + "_" + str(backend.get_uid(prefix))

        if not dtype:
            if input_tensor is None:
                dtype = backend.floatx()
            else:
                dtype = backend.dtype(input_tensor)
        elif input_tensor is not None and input_tensor.dtype != dtype:
            raise ValueError(
                "`input_tensor.dtype` differs from `dtype`. Received: "
                f"input_tensor.dtype={input_tensor.dtype} "
                f"but expected dtype={dtype}")
        super().__init__(dtype=dtype, name=name)
        self.built = True
        self.sparse = True if sparse else False
        self.ragged = True if ragged else False
        self.batch_size = batch_size
        self.supports_masking = True

        if isinstance(input_shape, tf.TensorShape):
            input_shape = tuple(input_shape.as_list())
        elif isinstance(input_shape, int):
            input_shape = (input_shape, )

        if type_spec is not None:
            args_that_must_be_none = [
                ("(input_)shape", self._init_input_shape),
                ("batch_size", self._init_batch_size),
                ("dtype", self._init_dtype),
                ("input_tensor", input_tensor),
                ("sparse", self._init_sparse),
                ("ragged", self._init_ragged),
            ]
            for arg_name, arg in args_that_must_be_none:
                _assert_other_arg_none(arg_name, arg)
            if not tf.compat.v1.executing_eagerly_outside_functions():
                raise ValueError(
                    "Creating Keras inputs from a type_spec is only "
                    "supported when eager execution is enabled.")
            input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)
            if isinstance(input_tensor, keras_tensor.SparseKerasTensor):
                self.sparse = True
            if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):
                self.ragged = True
            self.is_placeholder = True
            try:
                self._batch_input_shape = tuple(input_tensor.shape.as_list())
            except ValueError:
                # If the shape cannot be represented as a tuple (e.g. unknown
                # rank)
                self._batch_input_shape = None
        elif input_tensor is None:
            if input_shape is not None:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
            else:
                batch_input_shape = None
            graph = backend.get_graph()
            with graph.as_default():
                input_tensor = backend.placeholder(
                    shape=batch_input_shape,
                    dtype=dtype,
                    name=self.name,
                    sparse=sparse,
                    ragged=ragged,
                )

            self.is_placeholder = True
            self._batch_input_shape = batch_input_shape
        else:
            if tf.compat.v1.executing_eagerly_outside_functions():
                if not isinstance(input_tensor, keras_tensor.KerasTensor):
                    input_tensor = keras_tensor.keras_tensor_from_tensor(
                        input_tensor)
            else:
                if not tf_utils.is_symbolic_tensor(input_tensor):
                    raise ValueError(
                        "You should not pass an EagerTensor to `Input`. "
                        "For example, instead of creating an "
                        "`InputLayer`, you should instantiate your model "
                        "and directly call it on your input.")
            self.is_placeholder = False
            try:
                self._batch_input_shape = tuple(input_tensor.shape.as_list())
            except ValueError:
                # If the shape cannot be represented as a tuple (e.g. unknown
                # rank)
                self._batch_input_shape = None
        # Create an input node.
        input_tensor._keras_mask = None
        node_module.Node(layer=self, outputs=input_tensor)

        # Store type spec
        if isinstance(input_tensor, keras_tensor.KerasTensor) or (
                tf_utils.is_extension_type(input_tensor)):
            self._type_spec = input_tensor._type_spec
        else:
            self._type_spec = tf.TensorSpec(
                shape=input_tensor.shape,
                dtype=input_tensor.dtype,
                name=self.name,
            )


def main(attack, src_model_names, target_model_name):
    np.random.seed(0)
    tf.set_random_seed(0)

    X_train, Y_train, X_test, Y_test = data_mnist(args.dataset)

    if args.two_class:
        NUM_CLASSES = 2
        class_1 = 3
        class_2 = 7
        X_train, Y_train, X_test, Y_test = two_class_convert(
            X_train, Y_train, X_test, Y_test, class_1, class_2, args.dataset)
        # X_test = X_train
        # Y_test = Y_train

    dim = IMAGE_ROWS * IMAGE_COLS * NUM_CHANNELS

    x = K.placeholder((None, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))

    y = K.placeholder((None, NUM_CLASSES))

    # source model for crafting adversarial examples
    src_models = [None] * len(src_model_names)
    for i in range(len(src_model_names)):
        src_models[i] = load_model(src_model_names[i])

    src_model_name_joint = ''
    for i in range(len(src_models)):
        src_model_name_joint += basename(src_model_names[i])

    # model(s) to target
    if target_model_name is not None:
        target_model = load_model(target_model_name)

    # simply compute test error
    if attack == "test":
        for (name, src_model) in zip(src_model_names, src_models):
            _, _, err = tf_test_error_rate(src_model, x, X_test, Y_test)
            print('{}: {:.1f}'.format(basename(name), err))
        if target_model_name is not None:
            _, _, err = tf_test_error_rate(target_model, x, X_test, Y_test)
            print('{}: {:.1f}'.format(basename(target_model_name), err))

        return

    if args.targeted_flag == 1:
        pickle_name = attack + '_' + src_model_name_joint + '_' + '_' + args.loss_type + '_targets.p'
        if os.path.exists(pickle_name):
            targets = pickle.load(open(pickle_name, 'rb'))
        else:
            targets = []
            allowed_targets = list(range(NUM_CLASSES))
            for i in range(len(Y_test)):
                allowed_targets.remove(Y_test_uncat[i])
                targets.append(np.random.choice(allowed_targets))
                allowed_targets = list(range(NUM_CLASSES))
            # targets = np.random.randint(10, size = BATCH_SIZE*BATCH_EVAL_NUM)
            targets = np.array(targets)
            print(targets)
            targets_cat = np_utils.to_categorical(targets, NUM_CLASSES).astype(
                np.float32)
            Y_test = targets_cat
            if SAVE_FLAG:
                pickle.dump(Y_test, open(pickle_name, 'wb'))

    # take the random step in the RAND+FGSM
    if attack == "rand_fgs":
        X_test = np.clip(
            X_test + args.alpha * np.sign(np.random.randn(*X_test.shape)), 0.0,
            1.0)
        eps -= args.alpha

    logits = [None] * len(src_model_names)
    for i in range(len(src_model_names)):
        curr_model = src_models[i]
        logits[i] = curr_model(x)

    if args.loss_type == 'xent':
        loss, grad = gen_grad(x, logits, y)
    elif args.loss_type == 'cw':
        grad = gen_grad_cw(x, logits, y)
    if args.targeted_flag == 1:
        grad = -1.0 * grad

    for eps in eps_list:
        # FGSM and RAND+FGSM one-shot attack
        if attack in ["fgs", "rand_fgs"] and args.norm == 'linf':
            adv_x = symbolic_fgs(x, grad, eps=eps)
        elif attack in ["fgs", "rand_fgs"] and args.norm == 'l2':
            adv_x = symbolic_fg(x, grad, eps=eps)

        # iterative FGSM
        if attack == "ifgs":
            l = 500
            X_test = X_test[0:l]
            Y_test = Y_test[0:l]

            adv_x = x
            # iteratively apply the FGSM with small step size
            for i in range(args.num_iter):
                adv_logits = [None] * len(src_model_names)
                for k in range(len(src_model_names)):
                    curr_model = src_models[k]
                    adv_logits[k] = curr_model(adv_x)

                if args.loss_type == 'xent':
                    loss, grad = gen_grad_ens(adv_x, adv_logits, y)
                elif args.loss_type == 'cw':
                    grad = gen_grad_cw(adv_x, adv_logits, y)
                if args.targeted_flag == 1:
                    grad = -1.0 * grad

                adv_x = symbolic_fgs(adv_x, grad, args.delta, True)
                r = adv_x - x
                r = K.clip(r, -eps, eps)
                adv_x = x + r

            adv_x = K.clip(adv_x, 0, 1)
        if attack == "ifg":
            l = 2000
            X_test = X_test[0:l]
            Y_test = Y_test[0:l]
            if args.noise:
                noise_delta = np.random.normal(size=(l, 784))
                norm_vec = np.linalg.norm(noise_delta, axis=1)
                noise_delta /= np.expand_dims(norm_vec, 1)
                noise_delta = noise_delta.reshape((l, 28, 28, 1))
                X_test = X_test + args.eps * noise_delta
                X_test = np.clip(X_test, 0, 1)  # ensure valid pixel range
            adv_x = iter_fg(src_models[0], x, y, args.num_iter, args.delta,
                            eps, args.targeted_flag)

        pickle_name = attack + '_' + src_model_name_joint + '_' + args.loss_type + '_' + str(
            eps) + '_adv.p'
        if args.targeted_flag == 1:
            pickle_name = attack + '_' + src_model_name_joint + '_' + args.loss_type + '_' + str(
                eps) + '_adv_t.p'

        if os.path.exists(pickle_name):
            print('Loading adversarial samples')
            X_adv = pickle.load(open(pickle_name, 'rb'))
        else:
            print('Generating adversarial samples')
            X_adv = batch_eval([x, y], [adv_x], [X_test, Y_test])[0]
            if SAVE_FLAG:
                pickle.dump(X_adv, open(pickle_name, 'wb'))

        avg_l2_perturb = np.mean(
            np.linalg.norm((X_adv - X_test).reshape(len(X_test), dim), axis=1))

        # white-box attack
        l = len(X_adv)
        # print ('Carrying out white-box attack')
        for (name, src_model) in zip(src_model_names, src_models):
            preds_adv, orig, err = tf_test_error_rate(src_model, x, X_adv,
                                                      Y_test[0:l])
            if args.targeted_flag == 1:
                err = 100.0 - err
            print('{}->{}: {:.1f}'.format(basename(name), basename(name), err))

        # black-box attack
        if target_model_name is not None:
            print('Carrying out black-box attack')
            preds, _, err = tf_test_error_rate(target_model, x, X_adv, Y_test)
            if args.targeted_flag == 1:
                err = 100.0 - err
            print('{}->{}: {:.1f}, {}, {} {}'.format(
                src_model_name_joint, basename(target_model_name), err,
                avg_l2_perturb, eps, attack))
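

# gen_grad, gen_grad_ens and gen_grad_cw are project helpers that are not
# shown. A hedged sketch of the single-model cross-entropy variant, an
# assumption based on how its return values are used above:
def gen_grad_sketch(x, logits, y):
    # Mean categorical cross-entropy over the batch, computed from logits,
    # together with its gradient with respect to the input.
    loss = K.mean(K.categorical_crossentropy(y, logits, from_logits=True))
    grad = K.gradients(loss, [x])[0]
    return loss, grad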
Exemple #52
def _main(args):
    model_path = args.model_path
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = args.anchors_path
    classes_path = args.classes_path
    test_path = args.test_path
    output_path = args.output_path
    weight_path = args.weight_path

    #if not os.path.exists(output_path):
    #print 'Creating output path {}'.format(output_path)
    #    os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    #classes file should one class one line
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    #anchors should be separated by ,
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)
    if weight_path is not None:
        yolo_model.load_weights(weight_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / float(len(class_names)), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = convert_result(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = draw_helper(yolo_outputs,
                                         input_image_shape,
                                         to_threshold=args.score_threshold,
                                         iou_threshold=args.iou_threshold)

    video_in = cv2.VideoCapture(test_path)
    width, height, FPS = int(video_in.get(3)), int(
        video_in.get(4)), video_in.get(5)
    video_out = cv2.VideoWriter()
    video_out.open(
        output_path,  # filename
        cv2.VideoWriter_fourcc(*'DIVX'),  # codec (passing -1 instead pops up a manual codec picker on some platforms)
        FPS,  # frame rate, taken from the input video
        (width, height),  # frame size, taken from the input video
    )
    #begin from here
    while video_in.isOpened():
        ret, data = video_in.read()
        if not ret:
            break
        array = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(array, mode='RGB')
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        #print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # Box coordinates use a top-left image origin.
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for t in range(thickness):
                draw.rectangle([left + t, top + t, right - t, bottom - t],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        #image.save(os.path.join(output_path, image_file), quality=90)
        video_out.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
    print("Done")
    sess.close()
    video_in.release()
    video_out.release()
#Ex 1

# a = K.placeholder(shape=(5,))
# b = K.placeholder(shape=(5,))
# c = K.placeholder(shape=(5,))
#
# sqr_tensor = a * a + b * b + 2 * b * c
#
# SQR_function = K.function(inputs=[a, b, c], outputs=[sqr_tensor])
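#
# Usage sketch (the values below are illustrative, not from the original):
# feed one NumPy array per placeholder, in the order given to K.function.
# out = SQR_function([np.ones(5), np.full(5, 2.), np.full(5, 3.)])[0]
# out == 1 + 4 + 12 == 17 elementwise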

#Ex 2

# x = K.placeholder(shape=())
#
# tanh_tensor = (K.exp(x) - K.exp(-x)) / (K.exp(x) + K.exp(-x))
# grad_tanh = K.gradients(loss=tanh_tensor, variables=[x])
#
# tanh_function = K.function(inputs=[x], outputs=[tanh_tensor, grad_tanh[0]])
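#
# Usage sketch (input value assumed): the second output should match the
# analytic derivative 1 - tanh(x)**2.
# val, grad_val = tanh_function([np.array(0.5, dtype='float32')])
# val ~ 0.4621, grad_val ~ 0.7864 (= 1 - 0.4621**2)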

#Ex 3
w = K.placeholder(shape=(2, ))
x = K.placeholder(shape=(2, ))
b = K.placeholder(shape=(1, ))

w_tensor = w[0] * x[0] + w[1] * x[1] + b
f_tensor = 1 / (1 + K.exp(-w_tensor))  # logistic sigmoid of w.x + b
grad = K.gradients(loss=f_tensor, variables=[w])

# w, x, and b are all placeholders, so all three must be fed at call time.
f_function = K.function(inputs=[w, x, b], outputs=[f_tensor] + grad)
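
# Usage sketch (values are illustrative, not from the original):
# f_val, grad_val = f_function([np.array([1., -1.]),
#                               np.array([0.5, 0.25]),
#                               np.array([0.1])])
# w.x + b = 0.5 - 0.25 + 0.1 = 0.35, so f_val ~ [0.5866]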
Example #54
lrD = 1e-4  # discriminator (critic) learning rate
lrG = 1e-4  # generator learning rate

netD = DCGAN_D(imageSize, nz, nc, ndf, n_extra_layers)
netD.summary()

netG = DCGAN_G(imageSize, nz, nc, ngf, n_extra_layers)
netG.summary()

from keras.optimizers import RMSprop, SGD, Adam

netD_real_input = Input(shape=(nc, imageSize, imageSize))
noisev = Input(shape=(nz, ))
netD_fake_input = netG(noisev)

Epsilon_input = K.placeholder(shape=(None, nc, imageSize, imageSize))
netD_mixed_input = Input(shape=(nc, imageSize, imageSize),
                         tensor=netD_real_input + Epsilon_input)

loss_real = K.mean(netD(netD_real_input))
loss_fake = K.mean(netD(netD_fake_input))

grad_mixed = K.gradients(netD(netD_mixed_input), [netD_mixed_input])[0]
norm_grad_mixed = K.sqrt(K.sum(K.square(grad_mixed), axis=[1, 2, 3]))
grad_penalty = K.mean(K.square(norm_grad_mixed - 1))

Lambda = 10  # gradient-penalty weight; 10 is assumed (the WGAN-GP paper's default)
loss = loss_fake - loss_real + Lambda * grad_penalty
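
# A hedged sketch of wiring this critic loss into a training step, in the
# style of Keras-1-era WGAN-GP code (the optimizer settings and the
# get_updates signature are assumptions, not part of the original):
# training_updates = Adam(lr=lrD).get_updates(netD.trainable_weights, [], loss)
# netD_train = K.function([netD_real_input, noisev, Epsilon_input],
#                         [loss_real, loss_fake],
#                         training_updates)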

import os
import urllib
# from urllib.request import urlretrieve
Example #55
import numpy as np

import tensorflow as tf
from keras import backend
from keras.models import Model

from preprocess import *

content_weight = 0.025
style_weight = 5.0
total_variation_weight = 1.0

combined_image = backend.placeholder((1, 512, 512, 3))
layers = process_images('Images/content.jpg', 'Images/style.jpg',
                        combined_image)

loss = tf.Variable(0.)


def content_loss(loss):

    # Content and combined-image features from an intermediate VGG layer
    content_features = layers['block2_conv2'][0, :, :, :]
    combined_features = layers['block2_conv2'][2, :, :, :]

    # Scaled Euclidean distance between the two feature representations
    c_loss = backend.sum(backend.square(combined_features - content_features))
    loss += content_weight * c_loss

    return loss
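
# A companion style loss is typically built from Gram matrices of the style
# and combined features. A minimal sketch under the same `layers` dict; the
# layer choice, the index 1 for the style image, and the scaling constants
# are assumptions, not part of the original:
def gram_matrix(features):
    # Correlation matrix of channel activations, with spatial dims flattened.
    flat = backend.batch_flatten(backend.permute_dimensions(features, (2, 0, 1)))
    return backend.dot(flat, backend.transpose(flat))


def style_loss_sketch(loss):
    style_features = layers['block1_conv2'][1, :, :, :]
    combined_features = layers['block1_conv2'][2, :, :, :]
    channels, size = 3, 512 * 512
    s_loss = backend.sum(backend.square(
        gram_matrix(style_features) - gram_matrix(combined_features)))
    return loss + style_weight * s_loss / (4. * (channels ** 2) * (size ** 2))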
Example #56
X_train = X_train / 127.5 - 1  # scale pixel values to [-1, 1]
X_train = np.expand_dims(X_train, axis=3)  # add a channel axis
X_train = X_train.reshape(
    X_train.shape[0], X_train.shape[1], X_train.shape[2], X_train.shape[3])


# -------------------------
# compute gradient penalty
# -------------------------

dis_real = Input(shape=(image_size, image_size, channels))
noisev = Input(shape=(z_dim,))
dis_fake = gen(noisev)
valid = dis(dis_fake)

delta_input = K.placeholder(shape=(None, image_size, image_size, channels))
# alpha = K.random_uniform(
#     shape=[batch_size, 1, 1, 1],
#     minval=0.,
#     maxval=1.
# )

dis_mixed = Input(shape=(image_size, image_size, channels),
                  tensor=dis_real + delta_input)

loss_real = K.sum(K.softplus(-dis(dis_real))) / batch_size
loss_fake = K.sum(K.softplus(valid)) / batch_size

# dis_mixed_real = alpha * dis_real + ((1 - alpha) * dis_mixed)

grad_mixed = K.gradients(dis(dis_mixed), [dis_mixed])[0]
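
# A hedged completion of the gradient penalty, following the WGAN-GP
# formulation (`gp_lambda` is an assumed name; 10 is the paper's default):
norm_grad_mixed = K.sqrt(K.sum(K.square(grad_mixed), axis=[1, 2, 3]))
gp_lambda = 10.0
grad_penalty = gp_lambda * K.mean(K.square(norm_grad_mixed - 1))
loss_dis = loss_real + loss_fake + grad_penalty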
Example #57
# defining the batch size and epochs
batch_size = 64
num_epochs = 20

# defining the learning rate
lr = 0.1

# defining the number of layers and number of hidden units in each layer
num_layers = 2
num_units = [100, 100]

# building the model

# defining the placeholders to feed the input and target data
input_tensor = K.placeholder(shape=(batch_size, input_dim), dtype='float32')
target_tensor = K.placeholder(shape=(batch_size, num_classes), dtype='float32')

# defining the lists to store the weight and bias variables across the layers
weight_variables, bias_variables = [], []

# defining the weight and bias variable for the first layer
weight_variable = K.random_uniform_variable(shape=(input_dim, num_units[0]),
                                            low=-1.,
                                            high=1.,
                                            dtype='float32')
bias_variable = K.zeros(shape=(num_units[0], ), dtype='float32')

weight_variables.append(weight_variable)
bias_variables.append(bias_variable)
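
# A hedged sketch of the remaining construction: the hidden-layer loop, an
# output layer, and the forward pass. ReLU and softmax are assumptions here;
# the original does not specify the activations.
for layer in range(1, num_layers):
    weight_variables.append(
        K.random_uniform_variable(shape=(num_units[layer - 1], num_units[layer]),
                                  low=-1., high=1., dtype='float32'))
    bias_variables.append(K.zeros(shape=(num_units[layer],), dtype='float32'))

# Output layer mapping the last hidden layer onto the class scores.
output_weight = K.random_uniform_variable(shape=(num_units[-1], num_classes),
                                          low=-1., high=1., dtype='float32')
output_bias = K.zeros(shape=(num_classes,), dtype='float32')

# Forward pass through the hidden layers, then a softmax readout.
hidden = input_tensor
for W, b in zip(weight_variables, bias_variables):
    hidden = K.relu(K.dot(hidden, W) + b)
output_tensor = K.softmax(K.dot(hidden, output_weight) + output_bias)
loss = K.mean(K.categorical_crossentropy(target_tensor, output_tensor))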
Example #58
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    print(model_image_size)
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / float(len(class_names)), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)
    print(
        "======================================================================"
    )
    print(yolo_model.summary())
    yolo_model.save('coco/coco-model.h5')
    # NOTE: returning here exports the model and skips the detection loop below.
    return
    for image_file in os.listdir(test_path):
        try:
            image_type = imghdr.what(os.path.join(test_path, image_file))
            if not image_type:
                continue
        except IsADirectoryError:
            continue

        image = Image.open(os.path.join(test_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for t in range(thickness):
                draw.rectangle([left + t, top + t, right - t, bottom - t],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        image.save(os.path.join(output_path, image_file), quality=90)
    sess.close()
Example #59
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides the location of each traffic light in 3D map
        space and serves as an accurate ground-truth source for the traffic light
        classifier by publishing the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available,
        so you'll need to rely on the light's position and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.safe_load(config_string)

        self.is_site = self.config['is_site']

        #TODO Remove hack to force site mode or ground_truth for testing
        # self.is_site = True
        self.ground_truth = False

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()  # ROS tf (transforms), not TensorFlow

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.waypoints_2d = None
        self.waypoint_tree = None
        self.vgg_model = None
        self.graph = None
        self.sess = None
        self.initialized = False

        if self.is_site:
            # Detector Stuff
            self.model_image_size = None
            model_path = os.path.expanduser('./weights/parking_lot.h5')
            anchors_path = os.path.expanduser('./model_data/lisa_anchors.txt')
            classes_path = os.path.expanduser('./model_data/lisa_classes.txt')

            self.class_names = utils.get_classes(classes_path)

            anchors = utils.get_anchors(anchors_path)
            if SHALLOW_DETECTOR:
                anchors = anchors * 2

            self.yolo_model, _ = create_model(
                anchors,
                self.class_names,
                load_pretrained=True,
                feature_extractor=FEATURE_EXTRACTOR,
                pretrained_path=model_path,
                freeze_body=True)

            # Check if model is fully convolutional, assuming channel last order.
            self.model_image_size = self.yolo_model.layers[0].input_shape[1:3]

            self.sess = K.get_session()

            # Generate output tensor targets for filtered bounding boxes.
            self.yolo_outputs = decode_yolo_output(self.yolo_model.output,
                                                   anchors,
                                                   len(self.class_names))

            self.input_image_shape = K.placeholder(shape=(2, ))
            self.boxes, self.scores, self.classes = yolo_eval(
                self.yolo_outputs,
                self.input_image_shape,
                score_threshold=.6,
                iou_threshold=.6)

            self.graph = tensorflow.get_default_graph()
        else:
            try:
                model_path = os.path.expanduser('./weights/vgg16_1.h5')
                self.vgg_model = load_model(model_path)
                self.graph = tensorflow.get_default_graph()
            except Exception:
                rospy.logerr(
                    "Could not load model. Have you downloaded the vgg16_1.h5 file to the weights folder? You can download it here: https://s3-eu-west-1.amazonaws.com/sdcnddata/vgg16_1.h5"
                )

        self.initialized = True

        rospy.spin()
Example #60
def run(args):
    # Variables declaration
    base_image_path = f"reference_images/base_image/{args.base_image}"
    style_reference_image_path = f"reference_images/style_image/{args.style_image}"
    iterations = args.iterations

    # Weights to compute the final loss
    total_variation_weight = 1
    style_weight = 2
    content_weight = 5

    # Dimensions of the generated picture.
    width, height = load_img(base_image_path).size
    resized_width = 400
    # Keep the aspect ratio: scale the height by the same factor as the width.
    resized_height = int(height * resized_width / width)

    # Get tensor representations of our images
    base_image = K.variable(
        preprocess_image(base_image_path, resized_width, resized_height))
    style_reference_image = K.variable(
        preprocess_image(style_reference_image_path, resized_width,
                         resized_height))

    # Placeholder for generated image
    combination_image = K.placeholder((1, resized_width, resized_height, 3))

    # Combine the 3 images into a single Keras tensor
    input_tensor = K.concatenate(
        [base_image, style_reference_image, combination_image], axis=0)

    # Build the VGG19 network with our 3 images as input
    # the model is loaded with pre-trained ImageNet weights
    model = vgg19.VGG19(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    # Get the outputs of each key layer, through unique names.
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
    loss = total_loss(outputs_dict, content_weight, resized_width,
                      resized_height, style_weight, total_variation_weight,
                      combination_image)

    # Get the gradients of the generated image
    grads = K.gradients(loss, combination_image)
    outputs = [loss]
    outputs += grads

    f_outputs = K.function([combination_image], outputs)

    evaluator = Evaluator(resized_width, resized_height, f_outputs)

    x = preprocess_image(base_image_path, resized_width, resized_height)

    # Optimize with scipy's fmin_l_bfgs_b (L-BFGS-B)
    for i in range(iterations):
        print('Iteration: ', i)
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                         x.flatten(),
                                         fprime=evaluator.grads,
                                         maxfun=25)

        print('Current loss value:', min_val)

        # Save current generated image
        img = deprocess_image(x.copy(), resized_width, resized_height)
        fname = 'results/' + str(i) + '.png'
        save(fname, img)
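
# The Evaluator class is defined elsewhere; a minimal sketch of the usual
# pattern, which computes loss and gradients in one f_outputs call and caches
# the gradients so fmin_l_bfgs_b can request them separately (the interface
# below is assumed, not the original implementation):
class EvaluatorSketch:
    def __init__(self, width, height, f_outputs):
        self.width, self.height, self.f_outputs = width, height, f_outputs
        self._grads = None

    def loss(self, x):
        # Reshape the optimizer's flat vector back into the placeholder's shape.
        x = x.reshape((1, self.width, self.height, 3))
        loss_value, grads_value = self.f_outputs([x])
        self._grads = np.array(grads_value).flatten().astype('float64')
        return loss_value

    def grads(self, x):
        return np.copy(self._grads)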