def test_keras_consistency(self):
        """ Exported model in Keras should get same result as original """

        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def keras_load_and_preproc(fpath):
            img = load_img(fpath, target_size=(299, 299))
            img_arr = img_to_array(img)
            img_iv3_input = iv3.preprocess_input(img_arr)
            return np.expand_dims(img_iv3_input, axis=0)

        imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

        model_ref = InceptionV3(weights="imagenet")
        preds_ref = model_ref.predict(imgs_iv3_input)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            model = InceptionV3(weights="imagenet")
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

        self.assertTrue(np.all(preds_tgt == preds_ref))
    def test_prediction_vs_tensorflow_inceptionV3(self):
        output_col = "prediction"
        image_df = image_utils.getSampleImageDF()

        # An example of how a pre-trained keras model can be used with TFImageTransformer
        with KSessionWrap() as (sess, g):
            with g.as_default():
                K.set_learning_phase(0)    # this is important but it's on the user to call it.
                # nChannels needed for input_tensor in the InceptionV3 call below
                image_string = utils.imageInputPlaceholder(nChannels = 3)
                resized_images = tf.image.resize_images(image_string,
                                                        InceptionV3Constants.INPUT_SHAPE)
                preprocessed = preprocess_input(resized_images)
                model = InceptionV3(input_tensor=preprocessed, weights="imagenet")
                graph = tfx.strip_and_freeze_until([model.output], g, sess, return_graph=True)

        transformer = TFImageTransformer(inputCol="image", outputCol=output_col, graph=graph,
                                         inputTensor=image_string, outputTensor=model.output,
                                         outputMode="vector")
        transformed_df = transformer.transform(image_df.limit(10))
        self.assertDfHasCols(transformed_df, [output_col])
        collected = transformed_df.collect()
        transformer_values, transformer_topK = self.transformOutputToComparables(collected,
                                                                                 "filePath",
                                                                                 output_col)

        tf_values, tf_topK = self._executeTensorflow(graph, image_string.name, model.output.name,
                                                     image_df)
        self.compareClassSets(tf_topK, transformer_topK)
        self.compareClassOrderings(tf_topK, transformer_topK)
        self.compareArrays(tf_values, transformer_values)
    def test_bare_keras_module(self):
        """ Keras GraphFunctions should give the same result as standard Keras models """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        for model_gen, preproc_fn in [(InceptionV3, iv3.preprocess_input),
                                      (Xception, xcpt.preprocess_input),
                                      (ResNet50, rsnt.preprocess_input)]:

            keras_model = model_gen(weights="imagenet")
            target_size = tuple(keras_model.input.shape.as_list()[1:-1])

            _preproc_img_list = []
            for fpath in img_fpaths:
                img = load_img(fpath, target_size=target_size)
                # WARNING: must apply expand dimensions first, or ResNet50 preprocessor fails
                img_arr = np.expand_dims(img_to_array(img), axis=0)
                _preproc_img_list.append(preproc_fn(img_arr))

            imgs_input = np.vstack(_preproc_img_list)

            preds_ref = keras_model.predict(imgs_input)

            gfn_bare_keras = GraphFunction.fromKeras(keras_model)

            with IsolatedSession(using_keras=True) as issn:
                K.set_learning_phase(0)
                feeds, fetches = issn.importGraphFunction(gfn_bare_keras)
                preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_input})

            self.assertTrue(np.all(preds_tgt == preds_ref))
def run(Data, Model, path):
    sess = K.get_session()
    K.set_learning_phase(False)
    data, model = Data(), Model(path)

    if Data == MNIST:
        attack = CarliniL2(sess, model, batch_size=100, max_iterations=2000,
                           binary_search_steps=5, initial_const=1., learning_rate=1e-1,
                           targeted=False)
    else:
        attack = CarliniL2(sess, model, batch_size=100, max_iterations=200,
                           binary_search_steps=3, initial_const=.01, learning_rate=1e-2,
                           targeted=True, confidence=2)

    now = time.time()

    for name,X,y in [["test",data.test_data, data.test_labels]]:
        print("OKAY",name)
        for k in range(0,len(y),5000):
            #if os.path.exists("tmp/"+path.split("/")[1]+"."+name+".adv.X."+str(k)+".npy"):
            #    print('skip',k)
            #    continue
            now = time.time()
            adv = attack.attack(X[k:k+100], y[k:k+100])
            #print('time',time.time()-now)
            #print('accuracy',np.mean(np.argmax(model.model.predict(adv),axis=1)==np.argmax(y[k:k+5000],axis=1)))
            #print('mean distortion',np.mean(np.sum((adv-X[k:k+5000])**2,axis=(1,2,3))**.5))
            np.save("/tmp/"+path.split("/")[1]+"."+name+".adv.X."+str(k),adv)
Example #5
def model_fn(input_dim,
             labels_dim,
             hidden_units=[100, 70, 50, 20],
             learning_rate=0.1):
  """Create a Keras Sequential model with layers.

  Args:
    input_dim: (int) Input dimensions for input layer.
    labels_dim: (int) Label dimensions for input layer.
    hidden_units: [int] the layer sizes of the DNN (input layer first)
    learning_rate: (float) the learning rate for the optimizer.

  Returns:
    A Keras model.
  """

  # "set_learning_phase" to False to avoid:
  # AbortionError(code=StatusCode.INVALID_ARGUMENT during online prediction.
  K.set_learning_phase(False)
  model = models.Sequential()

  for units in hidden_units:
    model.add(layers.Dense(units=units, input_dim=input_dim, activation=relu))
    input_dim = units

  # Add a dense final layer with sigmoid function.
  model.add(layers.Dense(labels_dim, activation='sigmoid'))
  compile_model(model, learning_rate)
  return model
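# A hypothetical usage sketch (not from the original source): `compile_model` is defined
# elsewhere, so this stand-in builds and compiles an equivalent small model directly and
# smoke-tests it on random data; all sizes below are illustrative assumptions.
import numpy as np
from keras import models, layers

_sketch = models.Sequential()
_sketch.add(layers.Dense(64, input_dim=10, activation='relu'))
_sketch.add(layers.Dense(1, activation='sigmoid'))
_sketch.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
_sketch.fit(np.random.rand(8, 10), np.random.randint(0, 2, size=(8, 1)), epochs=1, verbose=0)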
def main(_):

    if FLAGS.dataset == 'cifar10':
        (X_train, y_train), (_, _) = cifar10.load_data()
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
    else:
        with open('data/train.p', mode='rb') as f:
            train = pickle.load(f)
        X_train, X_val, y_train, y_val = train_test_split(train['features'], train['labels'], test_size=0.33, random_state=0)

    train_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_train')
    validation_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_validation')

    print("Resizing to", (w, h, ch))
    print("Saving to ...")
    print(train_output_file)
    print(validation_output_file)

    with tf.Session() as sess:
        K.set_session(sess)
        K.set_learning_phase(1)

        model = create_model()

        print('Bottleneck training')
        train_gen = gen(sess, X_train, y_train, batch_size)
        bottleneck_features_train = model.predict_generator(train_gen(), X_train.shape[0])
        data = {'features': bottleneck_features_train, 'labels': y_train}
        pickle.dump(data, open(train_output_file, 'wb'))

        print('Bottleneck validation')
        val_gen = gen(sess, X_val, y_val, batch_size)
        bottleneck_features_validation = model.predict_generator(val_gen(), X_val.shape[0])
        data = {'features': bottleneck_features_validation, 'labels': y_val}
        pickle.dump(data, open(validation_output_file, 'wb'))
def run_test(Data, Model, path):
    sess = K.get_session()
    K.set_learning_phase(False)
    data = Data()
    model = Model(path)

    N = 1000
    X = data.train_data[np.random.choice(np.arange(len(data.train_data)), N, replace=False)].reshape((N,-1))
    #Y = data.train_data[np.random.choice(np.arange(len(data.train_data)), N, replace=False)].reshape((N,-1))
    Y = data.test_data[np.random.choice(np.arange(len(data.test_data)), N, replace=False)].reshape((N,-1))

    #attack = FGS(sess, model, N, .275)
    attack = CarliniL2(sess, model, batch_size=100, binary_search_steps=2, initial_const=1,  targeted=False, max_iterations=500)
    

    idx = np.random.choice(np.arange(len(data.test_data)), N, replace=False)
    Y = attack.attack(data.test_data[idx], data.test_labels[idx]).reshape((N,-1))

    
    iterations = 1000
    
    sigma2 = 100
    mmd2u, mmd2u_null, p_value = kernel_two_sample_test(X, Y, iterations=iterations,
                                                        kernel_function='rbf',
                                                        gamma=1.0/sigma2,
                                                        verbose=True)
def cnn_model_fn(features, labels, mode):
  """Model function for CNN."""
  # Input Layer
  # Reshape X to 4-D tensor: [batch_size, width, height, channels]
  # MNIST images are 28x28 pixels, and have one color channel
  input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])

  if mode == tf.estimator.ModeKeys.TRAIN:
    K.set_learning_phase(1)
  else:
    K.set_learning_phase(0)

  conv1 = Convolution2D(32, (5, 5), activation='relu', input_shape=(28,28,1))(input_layer)
  pool1 = MaxPooling2D(pool_size=(2,2))(conv1)
  conv2 = Convolution2D(64, (5, 5), activation='relu')(pool1)
  pool2 = MaxPooling2D(pool_size=(2,2))(conv2)
  pool2_flat = Flatten()(pool2)
  dense = Dense(1024, activation='relu')(pool2_flat)
  dropout = Dropout(0.4)(dense)
  logits = Dense(10, activation='linear')(dropout)

  predictions = {
      # Generate predictions (for PREDICT and EVAL mode)
      "classes": tf.argmax(input=logits, axis=1),
      # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
      # `logging_hook`.
      "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
  }
  prediction_output = tf.estimator.export.PredictOutput({"classes": tf.argmax(input=logits, axis=1),
     "probabilities": tf.nn.softmax(logits, name="softmax_tensor")})

  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
        export_outputs={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_output})

  # Calculate Loss (for both TRAIN and EVAL modes)
  onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  tf.summary.scalar('loss', loss)
  tf.summary.histogram('conv1', conv1)
  tf.summary.histogram('dense', dense)

  # Configure the Training Op (for TRAIN mode)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    train_op = optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())

    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

  # Add evaluation metrics (for EVAL mode)
  eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
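# Hypothetical wiring sketch (assumed, not part of the original example): how a Keras-layer
# `model_fn` like `cnn_model_fn` above is typically handed to a TF 1.x Estimator. The
# model_dir and the random data below are placeholders.
import numpy as np
import tensorflow as tf

fake_images = np.random.rand(32, 784).astype(np.float32)
fake_labels = np.random.randint(0, 10, size=(32,)).astype(np.int32)

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": fake_images}, y=fake_labels, batch_size=16, num_epochs=1, shuffle=True)

mnist_estimator = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/keras_estimator_demo")
mnist_estimator.train(input_fn=train_input_fn)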
Example #9
def cnn_model(features, labels, mode):
    input_layer = tf.reshape(features['x'], shape=[-1, 28, 28, 1])

    if mode == tf.estimator.ModeKeys.TRAIN:
        K.set_learning_phase(1)
    else:
        K.set_learning_phase(0)

    conv1 = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', input_shape=(28, 28, 1),
                          activation='relu')(input_layer)
    conv2 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(conv1)
    pool1 = layers.MaxPool2D(pool_size=(2, 2))(conv2)
    dropout = layers.Dropout(0.5)(pool1)

    conv3 = layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(dropout)
    conv4 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu')(conv3)
    pool2 = layers.MaxPool2D(pool_size=(3, 3))(conv4)
    dropout2 = layers.Dropout(0.5)(pool2)

    flatten = layers.Flatten()(dropout2)

    dense1 = layers.Dense(256)(flatten)
    lrelu = layers.LeakyReLU()(dense1)
    dropout3 = layers.Dropout(0.5)(lrelu)
    dense2 = layers.Dense(256)(dropout3)
    lrelu2 = layers.LeakyReLU()(dense2)
    logits = layers.Dense(10, activation='linear')(lrelu2)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits=logits, name='softmax_tensor')
    }

    prediction_output = tf.estimator.export.PredictOutput({"classes": tf.argmax(input=logits, axis=1),
     "probabilities": tf.nn.softmax(logits, name="softmax_tensor")})

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_output
        })

    one_hot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=logits)

    tf.summary.scalar('loss', loss)
    tf.summary.histogram('conv1', conv1)
    tf.summary.histogram('dense', dense1)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metrics_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics_ops)
Example #10
def ds2_gru_model(input_dim=161, fc_size=1024, rnn_size=512, output_dim=29, initialization='glorot_uniform',
                  conv_layers=1, gru_layers=1, use_conv=True):
    """ DeepSpeech 2 implementation
        Architecture:
            Input Spectrogram TIMEx161
            1 Batch Normalisation layer on input
            1-3 Convolutional Layers
            1 Batch Normalisation layer
            1-7 BiDirectional GRU Layers
            1 Batch Normalisation layer
            1 Fully connected Dense
            1 Softmax output
        Details:
           - Uses Spectrogram as input rather than MFCC
           - Did not use BN on the first input
           - Network does not dynamically adapt to the maximum audio length in the first convolutional layer;
              the input is zero-padded to a fixed length of 2048 time steps when use_conv=True, otherwise set use_conv=False
        Reference:
            https://arxiv.org/abs/1512.02595
            https://github.com/robmsmt/KerasDeepSpeech
    """

    K.set_learning_phase(1)
    # Main acoustic input
    input_data = Input(name='the_input', shape=(None, input_dim))
    m = BatchNormalization(axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True)(input_data)

    if use_conv:
        conv = ZeroPadding1D(padding=(0, 2048))(m)
        for l in range(conv_layers):
            m = Conv1D(filters=fc_size, name='conv_{}'.format(l+1), kernel_size=11, padding='valid',
                       activation='relu', strides=2)(conv)
    else:
        for l in range(conv_layers):
            m = TimeDistributed(Dense(fc_size, name='fc_{}'.format(l + 1), activation='relu'))(m)

    m = BatchNormalization(axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True)(m)

    for l in range(gru_layers):
        m = Bidirectional(GRU(rnn_size, name='fc_{}'.format(l + 1), return_sequences=True, activation='relu',
                              kernel_initializer=initialization), merge_mode='sum')(m)

    m = BatchNormalization(axis=-1, momentum=0.99, epsilon=1e-3, center=True, scale=True)(m)

    # Last Layer 5+6 Time Dist Dense Layer & Softmax
    m = TimeDistributed(Dense(fc_size, activation=clipped_relu))(m)
    y_pred = TimeDistributed(Dense(output_dim, name="y_pred", activation="softmax"))(m)

    model = Model(inputs=input_data, outputs=y_pred)
    if use_conv:
        model.output_length = lambda x: cnn_output_length(x, 11, 'valid', 2)
    else:
        model.output_length = lambda x: x

    print(model.summary())

    return model
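# The model above references `clipped_relu` and `cnn_output_length`, which live elsewhere in
# the source repository. A plausible sketch of both helpers (an assumption based on the DS2
# paper and Keras' conv_utils, not the original code):
from keras import backend as K

def clipped_relu(x):
    # ReLU clipped at 20, as used in Deep Speech 2
    return K.relu(x, max_value=20)

def cnn_output_length(input_length, filter_size, padding, stride, dilation=1):
    # number of output time steps of a 1D convolution (mirrors keras.utils.conv_utils.conv_output_length)
    if input_length is None:
        return None
    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'same':
        output_length = input_length
    else:  # 'valid'
        output_length = input_length - dilated_filter_size + 1
    return (output_length + stride - 1) // stride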
def policy_batch_update(model, update, r_holder, X, R, batch_size, iterations):
    K.set_learning_phase(1) #set learning phase
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for iteration in range(iterations):
            indices = np.random.choice(len(X), size=batch_size,replace = False )
            X_batch, R_batch = X[indices], np.reshape(R[indices],(batch_size, 1))
            sess.run([update], feed_dict={model.input:X_batch, r_holder:R_batch})
            print('Iteration: {}'.format(iteration))
    K.set_learning_phase(0) #set learning phase
def run_pca(Data, num_components=10, invert=False):
    data = Data()

    sess = K.get_session()

    K.set_learning_phase(False)

    shape = (-1, 784)
    
    pca = sklearn.decomposition.PCA(n_components=num_components)

    pca.fit(data.train_data.reshape(shape)) # [:10000]

    if invert:
        model = MNISTModel("models/mnist-pca-cnn-top-"+str(num_components))
    else:
        model = make_model(num_components)
        model.load_weights("models/mnist-pca-top-"+str(num_components))
        model = Wrap(model,pca)

    tf_mean = tf.constant(pca.mean_,dtype=tf.float32)
    tf_components = tf.constant(pca.components_.T,dtype=tf.float32)

    def new_predict(xs):
        # map to PCA space
        xs = tf.reshape(xs,(-1,784))
        xs -= tf_mean
        xs = tf.matmul(xs, tf_components)
    
        # map back
        xs = tf.matmul(xs, tf.transpose(tf_components))
        xs += tf_mean
        xs = tf.reshape(xs, (-1, 28, 28, 1))
        return model.model(xs)

    if invert:
        model.predict = new_predict

    attack = CarliniL2(sess, model, batch_size=100, max_iterations=3000, 
                       binary_search_steps=6, targeted=False,
                       initial_const=1)

    N = 100

    test_adv = attack.attack(data.test_data[:N], data.test_labels[:N])

    print('accuracy',np.mean(np.argmax(sess.run(model.predict(tf.constant(data.test_data,dtype=np.float32))),axis=1)==np.argmax(data.test_labels,axis=1)))

    print(list(test_adv[0].flatten()))

    print('dist',np.mean(np.sum((test_adv-data.test_data[:N])**2,axis=(1,2,3))**.5))

    it = np.argmax(sess.run(model.predict(tf.constant(test_adv))),axis=1)
    print('success',np.mean(it==np.argmax(data.test_labels,axis=1)[:N]))
Example #13
def visualize_features(model, problem_name):
    out_path = 'output/figures/{}/features/'
    im_size = (52, 52)
    K.set_learning_phase(0)

    if model is None:
        model = ModelLipnet4(verbose=True, compile_on_build=False, include_top=True)
        model.build_model((1, im_size[0], im_size[1]), 3)
        model.restore('/home/sergii/Documents/Thesis/lipnet/output/models/{}_model_lipnet6.h5'.format(problem_name))

    vis = VisualizerFeatures()
    vis.visualize_model(model.model, out_path.format(problem_name), im_size)
    def getModelData(self, featurize):
        sess = tf.Session()
        with sess.as_default():
            K.set_learning_phase(0)
            inputImage = imageInputPlaceholder(nChannels=3)
            preprocessed = self.preprocess(inputImage)
            model = self.model(preprocessed, featurize)
        return dict(inputTensorName=inputImage.name,
                    outputTensorName=model.output.name,
                    session=sess,
                    inputTensorSize=self.inputShape(),
                    outputMode="vector")
Example #15
    def _loadTFGraph(self):
        with KSessionWrap() as (sess, g):
            assert K.backend() == "tensorflow", \
                "Keras backend is not tensorflow but KerasImageTransformer only supports " + \
                "tensorflow-backed Keras models."
            with g.as_default():
                K.set_learning_phase(0)  # Testing phase
                model = load_model(self.getModelFile())
                out_op_name = tfx.op_name(g, model.output)
                self._inputTensor = model.input.name
                self._outputTensor = model.output.name
                return tfx.strip_and_freeze_until([out_op_name], g, sess, return_graph=True)
Example #16
def _buildInceptionV3Session(featurize):
    sess = tf.Session()
    with sess.as_default():
        K.set_learning_phase(0)
        inputImage = imageInputPlaceholder(nChannels=3)
        preprocessed = inception_v3.preprocess_input(inputImage)
        model = InceptionV3(input_tensor=preprocessed, weights="imagenet",
                            include_top=(not featurize))
    return dict(inputTensorName=inputImage.name,
                outputTensorName=model.output.name,
                session=sess,
                inputTensorSize=InceptionV3Constants.INPUT_SHAPE,
                outputMode="vector")
Example #17
    def _fromKerasModelFile(cls, file_path):
        """
        Load a Keras model from a file path into a `GraphFunction`.

        :param file_path: the (HDF5) file path
        """
        assert file_path.endswith('.h5'), \
            'Keras model must be specified as HDF5 file'

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0) # Testing phase
            model = load_model(file_path)
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        return gfn
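# Hypothetical round-trip sketch (the import path for GraphFunction is an assumption; in
# spark-deep-learning it lives under sparkdl.graph.builder): persist a tiny Keras model as
# HDF5 so it satisfies the .h5 assert above, then hand the file to the loader.
from keras.models import Sequential
from keras.layers import Dense

tiny = Sequential([Dense(4, input_dim=3, activation='relu'), Dense(1, activation='sigmoid')])
tiny.save('/tmp/tiny_model.h5')  # placeholder path; must end in .h5
# from sparkdl.graph.builder import GraphFunction   # assumed module path
# gfn = GraphFunction._fromKerasModelFile('/tmp/tiny_model.h5')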
Example #18
def test_batchnorm_trainable():
    bn_mean = 0.5
    bn_std = 10.

    def get_model(bn_mean, bn_std):
        input = Input(shape=(1,))
        x = normalization.BatchNormalization()(input)
        model = Model(input, x)
        model.set_weights([np.array([1.]), np.array([0.]),
                           np.array([bn_mean]), np.array([bn_std ** 2])])
        return model
    # Simulates training-mode with trainable layer. Should use mini-batch statistics.
    K.set_learning_phase(1)
    model = get_model(bn_mean, bn_std)
    model.compile(loss='mse', optimizer='rmsprop')
    out = model.predict(input_4)
    assert_allclose((input_4 - np.mean(input_4)) / np.std(input_4), out, atol=1e-3)
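    # Note: `input_4` is a data fixture defined elsewhere in the Keras test suite; a
    # self-contained stand-in (an assumption) would be a small batch matching the (1,)-shaped
    # input, e.g. input_4 = np.random.normal(loc=bn_mean, scale=bn_std, size=(20, 1)).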
def run_hidden_pca(Data, Model, path=None):
    sess = K.get_session()
    K.set_learning_phase(False)

    data = Data()
    model = Model(path)
    model2 = Model(path)

    hidden_layer = pop(model2.model) # once to remove dense(10)
    hidden_layer = pop(hidden_layer) # once to remove ReLU
    train_hidden = hidden_layer.predict(data.test_data)
    #val_hidden = hidden_layer.predict(data.validation_data)
    test_hidden = hidden_layer.predict(data.test_data)
    
    pca = sklearn.decomposition.PCA(n_components=test_hidden.shape[1])
    
    pca.fit(train_hidden)

    #r_val = pca.transform(hidden_layer.predict(data.validation_data))
    r_test = pca.transform(hidden_layer.predict(data.test_data))

    attack = FGS(sess, model, eps=.2)
    #attack = CarliniL2(sess, model, batch_size=100, max_iterations=1000, 
    #                   binary_search_steps=2, targeted=False)

    N = 10000

    test_adv = attack.attack(data.test_data[:N], data.test_labels[:N])

    r_test_adv = pca.transform(hidden_layer.predict(test_adv[:N]))

    print(r_test_adv[0])

    show(test_adv[0])

    #compute_thresholds(r_val, r_val_adv)

    plt.figure(figsize=(4,3))
    plt.xlabel('Component Number')
    plt.ylabel('Mean Absolute Value (log scale)')

    plt.semilogy(range(r_test.shape[1]),np.mean(np.abs(r_test),axis=0))
    plt.semilogy(range(r_test_adv.shape[1]),np.mean(np.abs(r_test_adv),axis=0))
    
    plt.show()
Example #20
    def load(self, data_dir):
        """Load graph and weight data.

        Args:
            data_dir (:obj:`str`): location of Keras checkpoint (`.hdf5`) files
                and model (in `.json`) structure.  The default behavior
                is to take the latest of each, by OS timestamp.
        """
        # for tensorflow compatibility
        K.set_learning_phase(0)

        # find newest ckpt and graph files
        try:
            latest_ckpt = max(glob.iglob(
                os.path.join(data_dir, '*.h*5')), key=os.path.getctime)
            latest_ckpt_name = os.path.basename(latest_ckpt)
            latest_ckpt_time = str(
                datetime.fromtimestamp(os.path.getmtime(latest_ckpt)))
        except ValueError:
            raise FileNotFoundError('No checkpoint (.hdf5 or .h5) files '
                                    'available at {}'.format(data_dir))
        try:
            latest_json = max(glob.iglob(os.path.join(data_dir, '*.json')),
                              key=os.path.getctime)
            with open(latest_json, 'r') as f:
                model_json = json.loads(f.read())
                self._model = model_from_json(model_json)

            self._model.load_weights(latest_ckpt)
        except ValueError:
            try:
                self._model = load_model(latest_ckpt)
            except ValueError:
                raise FileNotFoundError('The (.hdf5 or .h5) files available at'
                                        '{} don\'t have the model'
                                        ' architecture.'
                                        .format(latest_ckpt))

        self._sess = K.get_session()
        self._tf_predict_var = self._model.outputs[0]
        self._tf_input_var = self._model.inputs[0]
        self._model_name = type(self).__name__
        self._latest_ckpt_name = latest_ckpt_name
        self._latest_ckpt_time = latest_ckpt_time
def run_pca(Data, Model, path=None):
    sess = K.get_session()
    K.set_learning_phase(False)

    data = Data()
    model = Model(path)

    shape = (-1, model.num_channels*model.image_size**2)
    
    pca = sklearn.decomposition.PCA(n_components=shape[1])

    pca.fit(data.train_data.reshape(shape))

    print(pca.explained_variance_ratio_)

    r_test = pca.transform(data.test_data.reshape(shape))

    #attack = FGS(sess, model, eps=.3)
    attack = CarliniL2(sess, model, batch_size=100, max_iterations=1000, 
                       binary_search_steps=2, targeted=False,
                       initial_const=10)

    N = 10000

    #test_adv = attack.attack(data.test_data[:N], data.test_labels[:N])
    test_adv = np.load("tmp/outlieradvtest.npy")

    r_test_adv = pca.transform(test_adv[:N].reshape(shape))

    fig = plt.figure(figsize=(4,3))
    fig.subplots_adjust(bottom=0.17,left=.19)
    
    plt.xlabel('Component Number')
    plt.ylabel('Mean Absolute Value (log scale)')

    plt.semilogy(range(r_test.shape[1]),np.mean(np.abs(r_test),axis=0),label='Valid')
    plt.semilogy(range(r_test_adv.shape[1]),np.mean(np.abs(r_test_adv),axis=0), label='Adversarial')

    plt.legend()
    
    pp = PdfPages('/tmp/a.pdf')
    plt.savefig(pp, format='pdf')
    pp.close()
    plt.show()
Example #22
def main():
    """Convert any Keras model to the frugally-deep model format."""

    usage = 'usage: [Keras model in HDF5 format] [image output directory]'
    if len(sys.argv) != 3:
        print(usage)
        sys.exit(1)
    else:
        assert K.backend() == "tensorflow"
        assert K.floatx() == "float32"
        assert K.image_data_format() == 'channels_last'

        in_path = sys.argv[1]
        out_dir = sys.argv[2]
        print('loading {}'.format(in_path))
        K.set_learning_phase(1)
        model = load_model(in_path)
        model = convert_sequential_to_model(model)
        process_layers(model, out_dir)
    def _loadTFGraph(self, sess, graph):
        """
        Loads the Keras model into memory, then uses the passed-in session to load the
        model's inference-related ops into the passed-in Tensorflow graph.

        :return: A tuple (graph, input_name, output_name) where graph is the TF graph
        corresponding to the Keras model's inference subgraph, input_name is the name of the
        Keras model's input tensor, and output_name is the name of the Keras model's output tensor.
        """
        keras_backend = K.backend()
        assert keras_backend == "tensorflow", \
            "Only tensorflow-backed Keras models are supported, tried to load Keras model " \
            "with backend %s." % (keras_backend)
        with graph.as_default():
            K.set_learning_phase(0)  # Inference phase
            model = load_model(self.getModelFile())
            out_op_name = tfx.op_name(model.output, graph)
            stripped_graph = tfx.strip_and_freeze_until([out_op_name], graph, sess,
                                                        return_graph=True)
            return stripped_graph, model.input.name, model.output.name
Example #24
def executeKerasInceptionV3(image_df, uri_col="filePath"):
    """
    Apply Keras InceptionV3 Model on input DataFrame.
    :param image_df: Dataset. contains a column (uri_col) for where the image file lives.
    :param uri_col: str. name of the column indicating where each row's image file lives.
    :return: ({str => np.array[float]}, {str => (str, str, float)}).
      image file uri to prediction probability array,
      image file uri to top K predictions (class id, class description, probability).
    """
    K.set_learning_phase(0)
    model = InceptionV3(weights="imagenet")

    values = {}
    topK = {}
    for row in image_df.select(uri_col).collect():
        raw_uri = row[uri_col]
        image = loadAndPreprocessKerasInceptionV3(raw_uri)
        values[raw_uri] = model.predict(image)
        topK[raw_uri] = decode_predictions(values[raw_uri], top=5)[0]
    return values, topK
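# A minimal single-image sketch of the same flow without Spark (the image path below is a
# placeholder assumption): load, preprocess, predict, and decode the top-5 InceptionV3 classes.
import numpy as np
from keras import backend as K
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
from keras.preprocessing.image import load_img, img_to_array

K.set_learning_phase(0)
iv3_model = InceptionV3(weights="imagenet")

img = load_img('/tmp/sample.jpg', target_size=(299, 299))            # placeholder path
batch = preprocess_input(np.expand_dims(img_to_array(img), axis=0))
probs = iv3_model.predict(batch)
print(decode_predictions(probs, top=5)[0])                           # [(class_id, description, probability), ...]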
Example #25
    def test_deep_interpreter_cnn(self):
        K.set_learning_phase(0)
        with DeepInterpreter(session=K.get_session()) as di:
            # 1. Load the persisted model
            # 2. Retrieve the input tensor from the loaded model

            retrieved_model = self._load('skater/tests/pre_trained_models/mnist_cnn/model_mnist_cnn_epoch_3')
            input_tensor = retrieved_model.layers[0].input
            output_tensor = retrieved_model.layers[-2].output

            # 3. We will use the last dense layer (pre-softmax) as the output layer
            # 4. Instantiate a model with the new input and output tensor
            new_model = Model(inputs=input_tensor, outputs=output_tensor)
            target_tensor = new_model(input_tensor)
            xs = self.x_test[0:2]
            ys = self.y_test[0:2]

            relevance_scores_elrp = di.explain('elrp', target_tensor * ys, input_tensor, xs, use_case='image')
            relevance_scores_ig = di.explain('ig', target_tensor * ys, input_tensor, xs, use_case='image')
        self.assertEquals(relevance_scores_elrp.shape, (2, 28, 28, 1))
        self.assertEquals(relevance_scores_ig.shape, (2, 28, 28, 1))
Example #26
def VGG16_predict(test_data_dir, weights_dir, N_classes):
    K.set_learning_phase(False)
    # build model
    model = VGG16_Model(img_rows=128, img_cols=128, train=False)
    # load weights
    model.load_weights(weights_dir)
    # load test data
    test, test_labels = load_image(test_data_dir, N_classes, one_hot=False)
    print("Test images loaded.")
    # predict labels of test images with new weights
    probs = model.predict(test)
    print("Labels predicted.")
    # calculate accuracy
    predicted_labels = []
    for i in range(np.shape(test_labels)[0]):
        cls_prob = probs[i]
        predicted_labels.append(np.argmax(cls_prob))
    acc = np.mean(np.cast['float32'](np.equal(test_labels, predicted_labels)))
    print("accuracy on test: " + str(acc))

    end = timer()
    print("Total time: ", end - start)
Example #27
    def __init__(self, model, fn_A, fn_B, batch_size, perceptual_loss):
        K.set_learning_phase(1)

        assert batch_size % 2 == 0, "batch_size must be an even number"
        self.batch_size = batch_size
        self.model = model

        self.use_lsgan = True
        self.use_mixup = True
        self.mixup_alpha = 0.2
        self.use_perceptual_loss = perceptual_loss
        self.use_mask_refinement = False  # optional: enable after ~15k iterations

        self.lrD = 1e-4 # Discriminator learning rate
        self.lrG = 1e-4 # Generator learning rate

        generator = GANTrainingDataGenerator(self.random_transform_args, 220, 6, 2)
        self.train_batchA = generator.minibatchAB(fn_A, batch_size)
        self.train_batchB = generator.minibatchAB(fn_B, batch_size)

        self.avg_counter = self.errDA_sum = self.errDB_sum = self.errGA_sum = self.errGB_sum = 0

        self.setup()
def train(name, resume):

    # paths
    log_path = "logs/{}.json".format(name)
    out_path = "snapshots/" + name + ".{epoch:06d}.h5"
    echo('log path', log_path)
    echo('out path', out_path)

    lib.log.info(log_path, {'_commandline': {'name': name, 'resume': resume}})

    # init
    echo('train', (name, resume))
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    # dataset
    echo('dataset loading...')
    (x_train, y_train), (x_test, y_test) = dataset.load()

    # model building
    echo('model building...')
    model = lib.model.build()
    model.summary()
    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # training
    echo('start learning...')
    callbacks = [
        lib.log.JsonLog(log_path),
        keras.callbacks.ModelCheckpoint(out_path, monitor='val_loss', save_weights_only=True)
    ]
    model.fit(x_train, y_train, batch_size=30, epochs=10,
              callbacks=callbacks,
              validation_data=(x_test, y_test))
def gen_model(name, license, model, model_file, version=VERSION, featurize=True):
    g = tf.Graph()
    with tf.Session(graph=g) as session:
        K.set_learning_phase(0)
        inTensor = tf.placeholder(dtype=tf.string, shape=[], name="%s_input" % name)
        decoded = tf.decode_raw(inTensor, tf.uint8)
        imageTensor = tf.to_float(
            tf.reshape(
                decoded,
                shape=[
                    1,
                    model.inputShape()[0],
                    model.inputShape()[1],
                    3]))
        m = model.model(preprocessed=model.preprocess(imageTensor), featurize=featurize)
        outTensor = tf.to_double(tf.reshape(m.output, [-1]), name="%s_sparkdl_output__" % name)
        gdef = tfx.strip_and_freeze_until([outTensor], session.graph, session, False)
    g2 = tf.Graph()
    with tf.Session(graph=g2) as session:
        tf.import_graph_def(gdef, name='')
        filename = "sparkdl-%s_%s.pb" % (name, version)
        print('writing out ', filename)
        tf.train.write_graph(g2.as_graph_def(), logdir="./", name=filename, as_text=False)
        with open("./" + filename, "r") as f:
            h = sha256(f.read()).digest()
            base64_hash = b64encode(h)
            print('h', base64_hash)
    model_file.write(indent(
        scala_template % {
            "license": license,
            "name": name,
            "height": model.inputShape()[0],
            "width": model.inputShape()[1],
            "filename": filename,
            "base64": base64_hash},2))
    return g2
Example #30
def main(video_path, output_path):
    try:
        os.mkdir('./img')
    except OSError:
        pass

    K.set_learning_phase(0)  # make sure its testing mode
    # face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
    detector = MTCNN()

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    img_idx = 0
    detected = ''  #make this not local variable
    time_detection = 0
    time_network = 0
    time_plot = 0
    skip_frame = 5  # every 5 frame do 1 detection and network forward propagation
    ad = 0.6

    #Parameters
    num_capsule = 3
    dim_capsule = 16
    routings = 2
    stage_num = [3, 3, 3]
    lambda_d = 1
    num_classes = 3
    image_size = 64
    num_primcaps = 7 * 3
    m_dim = 5
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d,
                             S_set)()
    model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    num_primcaps = 8 * 8 * 3
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    print('Loading models ...')

    weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
    model1.load_weights(weight_file1)
    print('Finished loading model 1.')

    weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
    model2.load_weights(weight_file2)
    print('Finished loading model 2.')

    weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
    model3.load_weights(weight_file3)
    print('Finished loading model 3.')

    inputs = Input(shape=(64, 64, 3))
    x1 = model1(inputs)  #1x1
    x2 = model2(inputs)  #var
    x3 = model3(inputs)  #w/o
    avg_model = Average()([x1, x2, x3])
    model = Model(inputs=inputs, outputs=avg_model)

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024 * 1)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768 * 1)

    print('Start detecting pose ...')
    detected_pre = []

    while True:
        # get video frame
        ret, input_img = cap.read()

        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0

            # detect faces using LBP detector
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detected = face_cascade.detectMultiScale(gray_img, 1.1)
            detected = detector.detect_faces(input_img)

            if len(detected_pre) > 0 and len(detected) == 0:
                detected = detected_pre

            faces = np.empty((len(detected), img_size, img_size, 3))

            input_img = draw_results_mtcnn(detected, input_img, faces, ad,
                                           img_size, img_w, img_h, model,
                                           time_detection, time_network,
                                           time_plot)
            cv2.imwrite('img/' + str(img_idx) + '.png', input_img)

        else:
            input_img = draw_results_mtcnn(detected, input_img, faces, ad,
                                           img_size, img_w, img_h, model,
                                           time_detection, time_network,
                                           time_plot)

        if len(detected) > len(detected_pre) or img_idx % (skip_frame *
                                                           3) == 0:
            detected_pre = detected

        key = cv2.waitKey(1)
Example #31
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i),
                                                 np.array(train_o))

    model = Sequential()
    model.add(
        Convolution2D(64,
                      11,
                      11,
                      subsample=(2, 2),
                      input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)

    datagen.fit(X_train)

    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size,shuffle=True),
                    samples_per_epoch=len(X_train), nb_epoch=5,verbose=1,validation_data=(test_i[0], test_o[0]))

    """

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(test_i[0], test_o[0]))

    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()

    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Example #32
import tensorflow as tf
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from keras.models import load_model
import keras.backend as K

def freeze(session, keep_var_names=None, output_names=None, clear_devices=True):
    graph = session.graph  # the session's graph holds the model's operations
    with graph.as_default():  # work within that graph
        # all variable names in the graph, except any the caller wants to keep as variables
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []  # output operations to retain in the frozen graph
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()  # the serialized GraphDef describing the graph structure
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ''  # clear device placements so the frozen graph is portable
        # convert variable nodes into constants baked into the graph
        frozen_graph = convert_variables_to_constants(sess=session, input_graph_def=input_graph_def,
                                                      output_node_names=output_names,
                                                      variable_names_whitelist=freeze_var_names)
        return frozen_graph

K.set_learning_phase(0)  # switch to inference mode before loading, so training-only ops are not built

model = load_model(filepath='Image-classifier.h5')

frozen_graph = freeze(K.get_session(), output_names=[model.output.op.name])
# write the frozen graph to disk as a binary .pb file
tf.train.write_graph(frozen_graph, '.', 'Image-classifier.pb', as_text=False)
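# Quick sanity check (an added sketch, not part of the original snippet): reload the frozen
# .pb and confirm the graph now consists only of constants and ops, with the output op present.
with tf.gfile.GFile('Image-classifier.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as check_graph:
    tf.import_graph_def(graph_def, name='')
    print([op.name for op in check_graph.get_operations()][-5:])  # the model's output op should appear here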
Example #33
import argparse
import os
import shutil

import keras
import tensorflow
from keras import backend as K
from keras.models import Sequential
from tensorflow.contrib.session_bundle import exporter
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.tools import freeze_graph

K.set_learning_phase(0)  # all new operations will be in test mode from now on


def export_model_to_tensorflow(path_to_trained_keras_model: str):
    print("Loading model for exporting to Protocol Buffer format...")
    model = keras.models.load_model(path_to_trained_keras_model)

    sess = K.get_session()

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = os.path.abspath(os.path.join(
Example #34
def _main():

    # Set testing mode (dropout/batchnormalization)
    K.set_learning_phase(TEST_PHASE)

    # Generate testing data
    test_datagen = utils.DroneDataGenerator(rescale=1. / 255)
    test_generator = test_datagen.flow_from_directory(
        FLAGS.test_dir,
        shuffle=False,
        color_mode=FLAGS.img_mode,
        target_size=(FLAGS.img_width, FLAGS.img_height),
        crop_size=(FLAGS.crop_img_height, FLAGS.crop_img_width),
        batch_size=FLAGS.batch_size)

    # Load json and create model
    json_model_path = os.path.join(FLAGS.experiment_rootdir,
                                   FLAGS.json_model_fname)
    model = utils.jsonToModel(json_model_path)

    # Load weights
    weights_load_path = os.path.join(FLAGS.experiment_rootdir,
                                     FLAGS.weights_fname)
    try:
        model.load_weights(weights_load_path)
        print("Loaded model from {}".format(weights_load_path))
    except:
        print("Impossible to find weight path. Returning untrained model")

    # Compile model
    model.compile(loss='mse', optimizer='adam')

    # Get predictions and ground truth
    n_samples = test_generator.samples
    nb_batches = int(np.ceil(n_samples / FLAGS.batch_size))

    predictions, ground_truth, t = utils.compute_predictions_and_gt(
        model, test_generator, nb_batches, verbose=1)

    # Param t. t=1 steering, t=0 collision
    t_mask = t == 1

    # ************************* Steering evaluation ***************************

    # Predicted and real steerings
    pred_steerings = predictions[t_mask, 0]
    real_steerings = ground_truth[t_mask, 0]

    # Compute random and constant baselines for steerings
    random_steerings = random_regression_baseline(real_steerings)
    constant_steerings = constant_baseline(real_steerings)

    # Create dictionary with filenames
    dict_fname = {
        'test_regression.json': pred_steerings,
        'random_regression.json': random_steerings,
        'constant_regression.json': constant_steerings
    }

    steering_dict = []
    # Evaluate predictions: EVA, residuals, and highest errors
    for fname, pred in dict_fname.items():
        abs_fname = os.path.join(FLAGS.experiment_rootdir, fname)
        if fname == 'test_regression.json':
            steering_dict.append(
                evaluate_regression(pred, real_steerings, abs_fname))
        else:
            evaluate_regression(pred, real_steerings, abs_fname)

    # Write predicted and real steerings
    dict_test = {
        'pred_steerings': pred_steerings.tolist(),
        'real_steerings': real_steerings.tolist()
    }
    #utils.write_to_file(dict_test,os.path.join(FLAGS.experiment_rootdir,
    #                                            'predicted_and_real_steerings.json'))

    # *********************** Collision evaluation ****************************

    # Predicted probabilities and real labels
    pred_prob = predictions[~t_mask, 1]
    pred_labels = np.zeros_like(pred_prob)
    pred_labels[pred_prob >= 0.5] = 1

    real_labels = ground_truth[~t_mask, 1]

    # Compute random, weighted and majorirty-class baselines for collision
    random_labels = random_classification_baseline(real_labels)

    # Create dictionary with filenames
    dict_fname = {
        'test_classification.json': pred_labels,
        'random_classification.json': random_labels
    }
    collision_dict = []

    # Evaluate predictions: accuracy, precision, recall, F1-score, and highest errors
    for fname, pred in dict_fname.items():
        abs_fname = os.path.join(FLAGS.experiment_rootdir, fname)
        if fname == 'test_classification.json':
            collision_dict.append(
                evaluate_classification(pred_prob, pred, real_labels,
                                        abs_fname))
        else:
            evaluate_classification(pred_prob, pred, real_labels, abs_fname)

    # Write predicted probabilities and real labels
    dict_test = {
        'pred_probabilities': pred_prob.tolist(),
        'real_labels': real_labels.tolist()
    }
    #utils.write_to_file(dict_test,os.path.join(FLAGS.experiment_rootdir,
    #                                           'predicted_and_real_labels.json'))
    return steering_dict, collision_dict
Example #35
    def main(self, name, opts):
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
            log.debug(opts)

        if opts.seed is not None:
            np.random.seed(opts.seed)

        if not opts.model_files:
            raise ValueError('No model files provided!')

        log.info('Loading model ...')
        K.set_learning_phase(0)
        model = mod.load_model(opts.model_files, log=log.info)

        weight_layer, act_layer = mod.get_first_conv_layer(model.layers, True)
        log.info('Using activation layer "%s"' % act_layer.name)
        log.info('Using weight layer "%s"' % weight_layer.name)

        try:
            dna_idx = model.input_names.index('dna')
        except BaseException:
            raise IOError('Model is not a valid DNA model!')

        fun_outputs = to_list(act_layer.output)
        if opts.store_preds:
            fun_outputs += to_list(model.output)
        fun = K.function([to_list(model.input)[dna_idx]], fun_outputs)

        log.info('Reading data ...')
        if opts.store_outputs or opts.store_preds:
            output_names = model.output_names
        else:
            output_names = None
        data_reader = mod.DataReader(
            output_names=output_names,
            use_dna=True,
            dna_wlen=to_list(model.input_shape)[dna_idx][1]
        )
        nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
        data_reader = data_reader(opts.data_files,
                                  nb_sample=nb_sample,
                                  batch_size=opts.batch_size,
                                  loop=False,
                                  shuffle=False)

        meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                                 nb_sample=nb_sample,
                                 batch_size=opts.batch_size,
                                 loop=False,
                                 shuffle=False)

        out_file = h5.File(opts.out_file, 'w')
        out_group = out_file

        weights = weight_layer.get_weights()
        out_group['weights/weights'] = weights[0]
        out_group['weights/bias'] = weights[1]

        def h5_dump(path, data, idx, dtype=None, compression='gzip'):
            if path not in out_group:
                if dtype is None:
                    dtype = data.dtype
                out_group.create_dataset(
                    name=path,
                    shape=[nb_sample] + list(data.shape[1:]),
                    dtype=dtype,
                    compression=compression
                )
            out_group[path][idx:idx+len(data)] = data

        log.info('Computing activations')
        progbar = ProgressBar(nb_sample, log.info)
        idx = 0
        for data in data_reader:
            if isinstance(data, tuple):
                inputs, outputs, weights = data
            else:
                inputs = data
            if isinstance(inputs, dict):
                inputs = list(inputs.values())
            batch_size = len(inputs[0])
            progbar.update(batch_size)

            if opts.store_inputs:
                for i, name in enumerate(model.input_names):
                    h5_dump('inputs/%s' % name,
                            dna.onehot_to_int(inputs[i]), idx)

            if opts.store_outputs:
                for name, output in outputs.items():
                    h5_dump('outputs/%s' % name, output, idx)

            fun_eval = fun(inputs)
            act = fun_eval[0]

            if opts.act_wlen:
                delta = opts.act_wlen // 2
                ctr = act.shape[1] // 2
                act = act[:, (ctr-delta):(ctr+delta+1)]

            if opts.act_fun:
                if opts.act_fun == 'mean':
                    act = act.mean(axis=1)
                elif opts.act_fun == 'wmean':
                    weights = linear_weights(act.shape[1])
                    act = np.average(act, axis=1, weights=weights)
                elif opts.act_fun == 'max':
                    act = act.max(axis=1)
                else:
                    raise ValueError('Invalid function "%s"!' % (opts.act_fun))

            h5_dump('act', act, idx)

            if opts.store_preds:
                preds = fun_eval[1:]
                for i, name in enumerate(model.output_names):
                    h5_dump('preds/%s' % name, preds[i].squeeze(), idx)

            for name, value in next(meta_reader).items():
                h5_dump(name, value, idx)

            idx += batch_size
        progbar.close()

        out_file.close()
        log.info('Done!')

        return 0
Example #36
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.models import load_model

#from IPython.display import SVG
#from keras.utils.vis_utils import model_to_dot
#from keras.utils import plot_model
#from resnets_utils import *
#from keras.initializers import glorot_uniform
#import scipy.misc
from matplotlib.pyplot import imshow
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
#import os
#from scipy.misc import imread, imsave, imresize
#from sklearn.utils import shuffle
#from sklearn.cross_validation import train_test_split
#from sklearn import preprocessing
#import glob
#import scipy.misc
import time
#from tkinter import filedialog
#from tkinter import *
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
tic = time.time()
size = 299
model = load_model('DR_model.h5')
Example #37
def mal_agent(X_shard, Y_shard, mal_data_X, mal_data_Y, t, gpu_id, return_dict,
              mal_visible, X_test, Y_test):

    args = gv.args

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t)

    holdoff_flag = 0
    if 'holdoff' in args.mal_strat:
        print('Checking holdoff')
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, shared_weights)
            if target_conf > 0.8:
                print('Holding off')
                holdoff_flag = 1

    # tf.reset_default_graph()

    K.set_learning_phase(1)

    print('Malicious Agent on GPU %s' % gpu_id)
    # set enviornment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS),
                           dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar_10_model()
    elif args.dataset == 'census':
        agent_model = census_model_1()

    logits = agent_model(x)
    prediction = tf.nn.softmax(logits)
    eval_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    config = tf.ConfigProto(gpu_options=gv.gpu_options)
    # config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    if t >= args.mal_delay and holdoff_flag == 0:
        if args.mal_obj == 'all':
            final_delta = mal_all_algs(x, y, logits, agent_model,
                                       shared_weights, sess, mal_data_X,
                                       mal_data_Y, t)
        elif args.mal_obj == 'single' or 'multiple' in args.mal_obj:
            final_delta, penul_delta = mal_single_algs(
                x, y, logits, agent_model, shared_weights, sess, mal_data_X,
                mal_data_Y, t, mal_visible, X_shard, Y_shard)
    elif t < args.mal_delay or holdoff_flag == 1:
        print('Delay/Hold-off')
        final_delta, _ = benign_train(x, y, agent_model, logits, X_shard,
                                      Y_shard, sess, shared_weights)

    final_weights = shared_weights + final_delta
    agent_model.set_weights(final_weights)

    print('---Eval at mal agent---')
    if 'single' in args.mal_obj:
        target, target_conf, actual, actual_conf = mal_eval_single(
            mal_data_X, mal_data_Y, final_weights)
        print(
            'Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
            % (target, target_conf, t, actual, actual_conf))
    elif 'multiple' in args.mal_obj:
        suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                            final_weights)
        print('%s of %s targets achieved' % (suc_count_local, args.mal_num))

    eval_success, eval_loss = eval_minimal(X_test, Y_test, final_weights)
    return_dict['mal_success'] = eval_success
    print('Malicious Agent: success {}, loss {}'.format(
        eval_success, eval_loss))
    write_dict = {}
    # just to maintain ordering
    write_dict['t'] = t + 1
    write_dict['eval_success'] = eval_success
    write_dict['eval_loss'] = eval_loss
    file_write(write_dict, purpose='mal_eval_loss')

    return_dict[str(gv.mal_agent_index)] = np.array(final_delta)
    np.save(gv.dir_name + 'mal_delta_t%s.npy' % t, final_delta)

    if 'auto' in args.mal_strat or 'multiple' in args.mal_obj:
        penul_weights = shared_weights + penul_delta
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, penul_weights)
            print(
                'Penul weights ---- Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
                % (target, target_conf, t, actual, actual_conf))
        elif 'multiple' in args.mal_obj:
            suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                                penul_weights)
            print('%s of %s targets achieved' %
                  (suc_count_local, args.mal_num))

        eval_success, eval_loss = eval_minimal(X_test, Y_test, penul_weights)
        print('Penul weights ---- Malicious Agent: success {}, loss {}'.format(
            eval_success, eval_loss))

    return
Example #38
    def setUpClass(cls):
        k.set_learning_phase(1)

        # Get MNIST
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
        cls.mnist = (x_train, y_train), (x_test, y_test)

        # Keras classifier
        cls.classifier_k = cls._cnn_mnist_k([28, 28, 1])
        cls.classifier_k.fit(x_train,
                             y_train,
                             batch_size=BATCH_SIZE,
                             nb_epochs=2)

        scores = cls.classifier_k._model.evaluate(x_train, y_train)
        logger.info('[Keras, MNIST] Accuracy on training set: %.2f%%',
                    (scores[1] * 100))
        scores = cls.classifier_k._model.evaluate(x_test, y_test)
        logger.info('[Keras, MNIST] Accuracy on test set: %.2f%%',
                    (scores[1] * 100))

        # Create basic CNN on MNIST using TensorFlow
        cls.classifier_tf = cls._cnn_mnist_tf([28, 28, 1])
        cls.classifier_tf.fit(x_train,
                              y_train,
                              nb_epochs=2,
                              batch_size=BATCH_SIZE)

        scores = get_labels_np_array(cls.classifier_tf.predict(x_train))
        acc = np.sum(np.argmax(scores, axis=1) == np.argmax(
            y_train, axis=1)) / y_train.shape[0]
        logger.info('[TF, MNIST] Accuracy on training set: %.2f%%',
                    (acc * 100))

        scores = get_labels_np_array(cls.classifier_tf.predict(x_test))
        acc = np.sum(np.argmax(scores, axis=1) == np.argmax(
            y_test, axis=1)) / y_test.shape[0]
        logger.info('[TF, MNIST] Accuracy on test set: %.2f%%', (acc * 100))

        # Create basic PyTorch model
        cls.classifier_py = cls._cnn_mnist_py()
        x_train, x_test = np.swapaxes(x_train, 1, 3), np.swapaxes(x_test, 1, 3)
        cls.classifier_py.fit(x_train,
                              y_train,
                              nb_epochs=2,
                              batch_size=BATCH_SIZE)

        scores = get_labels_np_array(cls.classifier_py.predict(x_train))
        acc = np.sum(np.argmax(scores, axis=1) == np.argmax(
            y_train, axis=1)) / y_train.shape[0]
        logger.info('[PyTorch, MNIST] Accuracy on training set: %.2f%%',
                    (acc * 100))

        scores = get_labels_np_array(cls.classifier_py.predict(x_test))
        acc = np.sum(np.argmax(scores, axis=1) == np.argmax(
            y_test, axis=1)) / y_test.shape[0]
        logger.info('\n[PyTorch, MNIST] Accuracy on test set: %.2f%%',
                    (acc * 100))
Example #39
    def detect_image(self, image, img):
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(
                image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        K.set_learning_phase(0)

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                #                 K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), img))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        # create blank txt file for the image
        filename, file_extension = os.path.splitext(img)

        # open(self.predictions_directory+filename+'.txt', 'a').close()
        classnm = []
        topnm = []
        leftnm = []
        bottomnm = []
        rightnm = []

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            classnm.append("\n")
            classnm.append(predicted_class)
            classnm.append("\t")
            box = out_boxes[i]
            score = out_scores[i]

            # label = '{} {:.2f}'.format(predicted_class, score)
            label = predicted_class
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top - 10).astype('int32'))
            left = max(0, np.floor(left - 10).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 10).astype('int32'))
            right = min(image.size[0], np.floor(right + 10).astype('int32'))
            print(label, (left, top), (right, bottom))
            classnm.append(top)
            classnm.append(",")
            classnm.append(left)
            classnm.append(",")
            classnm.append(bottom)
            classnm.append(",")
            classnm.append(right)
            # write bounding boxes to text file
            # with open(self.predictions_directory+filename+'.txt', 'a') as f:
            # f.write(label+' '+str(left)+' '+str(top)+' '+str(right)+' '+str(bottom)+'\n')

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = timer()
        print(end - start)

        # save images
        image.save(self.predictions_directory + img)

        return classnm
Example #40
from numpy.random import seed
seed(1)

from sklearn.model_selection import train_test_split as tts
import logging

import plotly.plotly as py
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import pandas as pd
import pydot

import keras
from keras import backend as k
k.set_learning_phase(1)
from keras.preprocessing.text import Tokenizer
from keras import initializers
from keras.optimizers import RMSprop
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Dropout, Input, Activation, Add, Concatenate
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.optimizers import Adam
#from keras.utils.vis_utils import plot_model


logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',\
    level=logging.INFO)

#######################model params###########################
def run_kde(Data, Model, path):
    global DECONST
    sess = K.get_session()
    K.set_learning_phase(False)
    data, model = Data(), Model(path)

    model2 = Model(path)

    hidden_layer = pop(model2.model)  # once to remove dense(10)
    hidden_layer = pop(hidden_layer)  # once to remove ReLU

    #compute_optimal_sigma(sess, model, hidden_layer, data)
    #MNIST SIGMA: 20

    de = [
        DensityEstimate(
            sess,
            hidden_layer,
            data.train_data[np.argmax(data.train_labels, axis=1) == i],
            model.image_size,
            model.num_channels,
            sigma=20) for i in range(10)
    ]
    de2 = [
        DensityEstimate(
            sess,
            hidden_layer,
            data.train_data[np.argmax(data.train_labels, axis=1) == i][:100],
            model.image_size,
            model.num_channels,
            sigma=20) for i in range(10)
    ]

    p = tf.placeholder(
        tf.float32,
        (None, model.image_size, model.image_size, model.num_channels))

    #print(np.log(de[0].predict(data.test_data[:10])))
    #print(sess.run(rmodel.predict(p)[1], {p: data.test_data[:10]}))
    #exit(0)

    N = 1
    print(model.model.predict(data.train_data[:N]))
    print(hidden_layer.predict(data.train_data[:N]))

    for i in range(10):
        print(de[i].predict(data.train_data[:N]))

    start_density = estimate_density_full(model, de,
                                          data.test_data[M:M + N]) + 1e-30
    print("starting density", np.log(start_density))

    DECONST = -np.log(start_density)

    l = np.zeros((N, 10))
    #l[np.arange(N),np.random.random_integers(0,9,N)] = 1
    for i in range(N):
        r = np.random.random_integers(0, 9)
        while r == np.argmax(data.test_labels[i]):
            r = np.random.random_integers(0, 9)
        l[i, r] = 1

    attack1 = CarliniL2(sess,
                        model,
                        batch_size=1,
                        max_iterations=3000,
                        binary_search_steps=3,
                        initial_const=1.0,
                        learning_rate=1e-1,
                        targeted=True)
    attack2 = CarliniL2New(sess,
                           model,
                           batch_size=1,
                           max_iterations=10000,
                           binary_search_steps=5,
                           initial_const=1.0,
                           learning_rate=1e-2,
                           targeted=True,
                           extra_loss=extra_loss(de2, np.argmax(l)))
    #l = data.test_labels[:N]
    #l = np.zeros((N,10))
    #l[np.arange(N),1] = 1
    print("RUN PHASE 1")
    adv = attack1.attack(data.test_data[M:M + N], l)
    print(
        'mean distortion',
        np.mean(
            np.sum((adv - data.test_data[M:M + N])**2, axis=(1, 2, 3))**.5))

    print("RUN PHASE 2")
    adv = attack2.attack(data.test_data[M:M + N], adv, l)

    np.save("/tmp/q" + str(M), adv)
    #adv = np.load("/tmp/qq.npy")

    print(
        'labels',
        np.mean(np.argmax(sess.run(model.predict(p), {p: adv}), axis=1) == l))

    print(
        'mean distortion',
        np.mean(
            np.sum((adv - data.test_data[M:M + N])**2, axis=(1, 2, 3))**.5))

    a = estimate_density_full(model, de, data.test_data[M:M + N]) + 1e-30
    b = estimate_density_full(model, de, adv) + 1e-30

    show(adv)

    print('de of test', np.mean(np.log(a)))
    print('de of adv', np.mean(np.log(b)))

    print('better ratio', np.mean(np.array(a) > np.array(b)))
    exit(0)

    #density = gaussian_kde(np.array(np.log(a))-np.array(np.log(b)))
    #density_a = gaussian_kde(np.log(a))
    #density_b = gaussian_kde(np.log(b))

    xs = np.linspace(-25, 25, 200)

    fig = plt.figure(figsize=(4, 3))
    fig.subplots_adjust(bottom=0.17, left=.15, right=.85)

    plt.xlabel('log(KDE(valid))-log(KDE(adversarial))')
    plt.ylabel('Occurrences')

    #plt.hist(np.log(a),100)
    #plt.hist(np.log(b),100)
    plt.hist(np.log(a) - np.log(b), 100)
    #plt.hist(np.array(np.log(a))-np.array(np.log(b)),100)
    #a = plt.plot(xs,density_a(xs), 'r--',color='blue', label='Valid')
    #b = plt.plot(xs,density_b(xs), color='red', label='Adversarial')
    #plt.plot(xs,density(xs))

    #plt.legend(handles=[a[0], b[0]])

    pp = PdfPages('/tmp/a.pdf')
    plt.savefig(pp, format='pdf')
    pp.close()
    plt.show()
Example #42
import numpy as np

import matplotlib.pyplot as plt
from keras.layers import Input
from keras.models import Model
from keras.utils import multi_gpu_model

from chinese_ocr.train.train import random_uniform_num, get_session, get_model
from chinese_ocr.densenet_common import densenet
from chinese_ocr.densenet_common.densenet_model import data_generator
from chinese_ocr.train.synthtext_config import SynthtextConfig
from chinese_ocr.densenet_common.dataset_format import DataSetSynthtext
from predict_tf_tool import DensenetOcr

reload(densenet)
from keras import backend as K  # needed for K.set_learning_phase below
K.set_learning_phase(0)

dataset_path = "/media/chenhao/study/code/other/out"

class_id_file = "char_7476.txt"
train_label_name = "label_train.txt"
val_label_name = "label_val.txt"
test_label_name = "label_test.txt"
dataset_format = 0
sub_img_folder = "default"

data_test = DataSetSynthtext()
data_test.load_data(class_id_file, dataset_path, test_label_name, subset=sub_img_folder)
# label_list = data.load_instance(33)
# print(label_list)
data_test.prepare()
def train_GAN(batch_size, epochs, img_classifier, dataset, eeg_embs, d_optim,
              g_optim):
    """
    Function to train the GAN and show the performance as it trains.

    Function first instantiates and compiles the generator, discriminator, GAN
    and parameters such as the noise dimension. It then uses nested for-loops
    to train in batches. For each batch we see fake and real combinations of
    labels, embeddings, and images being created. These are used to train all
    models. At the end of each epoch the performance metrics are printed and
    we see what the generator is currently producing, given the EEG embeddings.

    input: batch_size: mini-batch size.
    input: epochs: number of epochs.
    input: img_classifier: pretrained classifier to use in the auxiliary feature
           of the discriminator.
    input: dataset: dataset used, in the form [labels, EEG embeddings, images].
    input: eeg_embs: a list of the 20 eeg embeddings, one for each class.
    input: d_optim: the optimizer used to train the discriminator.
    input: g_optim: the optimizer used to train the generator.
    """

    K.set_learning_phase(False)

    all_labels, all_eeg_embs, all_real_images = dataset

    input_noise_dim = 126
    feature_encoding_dim = 126
    tot_num_images = all_real_images.shape[0]
    print('tot images: ', tot_num_images)
    num_classes = 20

    d = build_discriminator((64, 64), img_classifier)
    d.trainable = True
    d.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
              optimizer=d_optim)

    g = build_generator(input_noise_dim, feature_encoding_dim)
    g.compile(loss='categorical_crossentropy', optimizer=g_optim)

    d_on_g = build_gan(input_noise_dim, feature_encoding_dim, g, d)
    d_on_g.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
                   optimizer=g_optim)

    num_batches = int(tot_num_images / batch_size)

    print("Number of batches:", num_batches)

    for epoch in range(1, epochs + 1):
        for index in range(num_batches):
            # generate noise from a uniform distribution
            noise = np.random.uniform(-1, 1, (batch_size, input_noise_dim))

            # get some random embedding and label pairings from the dataset
            # (these are used to generate the fake images)
            rand_indices = np.random.randint(0, tot_num_images, batch_size)
            fake_labels = all_labels[rand_indices]
            fake_eeg_embs = all_eeg_embs[rand_indices]

            # get real images and corresponding labels and eeg embeddings
            real_labels_batch = all_labels[index * batch_size:(index + 1) *
                                           batch_size]
            real_images_batch = all_real_images[index *
                                                batch_size:(index + 1) *
                                                batch_size]

            # generate fake images using the generator
            generated_images = g.predict([noise, fake_eeg_embs], verbose=0)

            # train discriminator on real images (target is 1 - meaning real images)
            d_loss_real = d.train_on_batch(
                real_images_batch,
                [np.array([1] * batch_size),
                 np.array(real_labels_batch)])
            # train discriminator on fake images (target is 0 - meaning fake images)
            d_loss_fake = d.train_on_batch(generated_images, [
                np.array([0] * batch_size),
                np.array(fake_labels).reshape(batch_size, num_classes)
            ])
            d_loss = (d_loss_fake[0] + d_loss_real[0]) * 0.5

            d.trainable = False
            d.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
                      optimizer=d_optim)

            # train the GAN. The generator will generate fake images (target is 1 - meaning real images)
            g_loss = d_on_g.train_on_batch([noise, fake_eeg_embs], [
                np.array([1] * batch_size),
                np.array(fake_labels).reshape(batch_size, num_classes)
            ])
            d.trainable = True
            d.compile(loss=['binary_crossentropy', 'categorical_crossentropy'],
                      optimizer=d_optim)

        print(
            "Epoch {0}: discriminator_loss : {1:.4f}, generator_loss : {2:.4f}."
            .format(epoch, d_loss, g_loss[0]))

        print_results(g, eeg_embs)
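# --- Illustrative usage sketch (assumptions flagged, not part of the example) ---
# train_GAN expects dataset = [labels, eeg_embeddings, images]; the shapes
# below (20 one-hot classes, 126-d embeddings, 64x64x3 images) follow the
# constants used inside the function, but the random data, the Adam settings
# and the `pretrained_classifier` placeholder are illustrative only, and the
# build_*/print_results helpers must come from the original module.
import numpy as np
from keras.optimizers import Adam

n, num_classes, emb_dim = 256, 20, 126
labels = np.eye(num_classes)[np.random.randint(0, num_classes, n)]
eeg_embeddings = np.random.normal(size=(n, emb_dim))
images = np.random.uniform(-1, 1, size=(n, 64, 64, 3))
per_class_embs = eeg_embeddings[:num_classes]  # one embedding per class

train_GAN(batch_size=32, epochs=1, img_classifier=pretrained_classifier,
          dataset=[labels, eeg_embeddings, images], eeg_embs=per_class_embs,
          d_optim=Adam(0.0002, 0.5), g_optim=Adam(0.0002, 0.5))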
def train_generation():
    datagen = image.ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0.3,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.,
        zoom_range=0.2,
        channel_shift_range=0.,
        fill_mode='nearest',
        cval=0.0,
        horizontal_flip=False,
        vertical_flip=False,
        rescale=1. / 255,
        preprocessing_function=None,
        # data_format=K.image_data_format(),
    )

    train_generator = datagen.flow_from_directory(
        # '/Users/imperatore/tmp/num_ocr',  # this is the target directory
        r'F:\number_ok1',  # this is the target directory
        target_size=(48, 48),  # all images will be resized to 48*48
        batch_size=256,
        class_mode='categorical',
        color_mode='grayscale')

    validation_generator = datagen.flow_from_directory(
        # '/Users/imperatore/tmp/nums_classed',
        r'F:\number_ok1',
        target_size=(48, 48),
        batch_size=128,
        class_mode='categorical',
        color_mode='grayscale')

    num_class = 10
    input_tensor = Input((48, 48, 1))

    def resnet(input_tensor, units=32, kernel_size=(3, 3)):
        x = input_tensor
        for i in range(3):
            x = res_block(x, units, kernel_size=kernel_size)
            x = Dropout(drop)(x)
        x = MaxPool2D(pool_size=(2, 2))(x)
        return x

    def conv2d_bn(x,
                  units,
                  kernel_size,
                  strides=(1, 1),
                  padding='same',
                  name=None):
        if name is not None:
            bn_name = name + '_bn'
            conv_name = name + '_conv'
        else:
            bn_name = None
            conv_name = None

        x = Conv2D(units,
                   kernel_size,
                   padding=padding,
                   strides=strides,
                   activation='relu',
                   name=conv_name)(x)
        x = BatchNormalization(axis=3, name=bn_name)(x)
        return x

    def res_block(inpt,
                  units,
                  kernel_size,
                  strides=(1, 1),
                  with_conv_shortcut=False):
        x = conv2d_bn(inpt,
                      units=units,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding='same')
        x = conv2d_bn(x, units=units, kernel_size=kernel_size, padding='same')
        if with_conv_shortcut:
            shortcut = conv2d_bn(inpt,
                                 units=units,
                                 strides=strides,
                                 kernel_size=kernel_size)
            x = add([x, shortcut])
            return x
        else:
            x = add([x, inpt])
            return x

    drop = 0.1
    input_tensor = Input((48, 48, 1))
    x = input_tensor

    x = Conv2D(32,
               kernel_size=(3, 3),
               padding='same',
               strides=(1, 1),
               name=None)(x)
    x = Activation(activation='relu')(x)
    # x = BatchNormalization(axis=3, name=None)(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Dropout(drop)(x)

    x = Conv2D(64,
               kernel_size=(3, 3),
               padding='same',
               strides=(1, 1),
               name=None)(x)
    x = Activation(activation='relu')(x)
    # x = BatchNormalization(axis=3, name=None)(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Dropout(drop)(x)

    # x = Conv2D(256, kernel_size=(3, 3), padding='same', strides=(1, 1), name=None)(x)
    # x = Activation(activation='relu')(x)
    # # x = BatchNormalization(axis=3, name=None)(x)
    # x = MaxPool2D(pool_size=(2, 2))(x)
    # x = Dropout(drop)(x)

    x = Conv2D(512,
               kernel_size=(3, 3),
               padding='same',
               strides=(1, 1),
               name=None)(x)
    x = Activation(activation='relu')(x)
    # x = BatchNormalization(axis=3, name=None)(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Dropout(drop)(x)

    # x = resnet(x)

    x = Flatten()(x)
    x = Dense(1000, kernel_initializer='he_normal')(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(drop)(x)

    x = Dense(10, kernel_initializer='he_normal')(x)

    model = Model(inputs=input_tensor, outputs=x)
    print(model.layers)

    kb.set_learning_phase(0)

    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])
    # model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    print('\n'.join([str(tmp) for tmp in model.layers]))
    print('model length: %s' % len(model.layers))

    early_stopping = EarlyStopping(monitor='val_loss', patience=30)
    model.fit_generator(train_generator,
                        steps_per_epoch=512,
                        epochs=20,
                        validation_data=validation_generator,
                        nb_val_samples=100,
                        verbose=True,
                        callbacks=[early_stopping])

    # always save your weights after training or during training
    model.save('cnn3_gen_1.4.h5')
import numpy as np
from keras.datasets import mnist
import keras
import matplotlib.pyplot as plt
from keras import backend
from keras.models import load_model
import tensorflow as tf
import sys
sys.path.append('/home/labadmin/src/cleverhans')
import cleverhans
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from keras.optimizers import Adam

###################################### Trained Model ############################################
backend.set_learning_phase(False)
keras_model = load_model(
    '/home/labadmin/Desktop/iccad_2018/fgsm_retrain/simple_nn_iccad.h5')

#################################### Importing the dataset ######################################
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

########################### Set TF random seed to improve reproducibility #######################
tf.set_random_seed(1234)
Example #46
def setKerasOptions():
    K._LEARNING_PHASE = tf.constant(0)
    K.set_learning_phase(False)
    K.set_learning_phase(0)
    K.set_image_data_format('channels_last')
    adam = Adam(lr=0.0001, clipvalue=0.5)
    print("Loaded model from disk")

    y_pred = model.get_layer('softmax').output

    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)

    return model
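# --- Illustrative sketch (assumptions flagged, not part of the example) ---
# The compile call above uses loss={'ctc': lambda y_true, y_pred: y_pred},
# which relies on the model already emitting the CTC loss as an output layer
# named 'ctc'. A minimal version of that wiring, with made-up shapes
# (32 timesteps, 128-d features, 64 output classes, labels up to 10 long):
import keras.backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

def ctc_lambda(args):
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

feats = Input(shape=(32, 128), name='features')
softmax_out = Dense(64, activation='softmax', name='softmax')(feats)
labels = Input(shape=(10,), name='labels')
input_length = Input(shape=(1,), name='input_length')
label_length = Input(shape=(1,), name='label_length')
ctc_loss = Lambda(ctc_lambda, output_shape=(1,), name='ctc')(
    [softmax_out, labels, input_length, label_length])
toy_model = Model([feats, labels, input_length, label_length], ctc_loss)
toy_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam')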


mode = 'train'
load_previous = raw_input('Type yes/no if you want to load previous model: ')

print mode

K.set_learning_phase(1)  # all new operations will be in train mode from now on

data_path = train_path
lab_file = train_lab_file

data_gen = DataGenerator(minibatch_size=minibatch_size,
                         img_dim=img_dim,
                         maxlen=maxlen,
                         val_split=val_split,
                         nb_classes=nb_classes,
                         data_path=data_path,
                         lab_file=lab_file)

# Resume training.
if load_previous == 'yes':
    model = load_model()
Example #48

def deprocess_image(x):
    # Util function to convert a tensor into a valid image.
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    x /= 2.
    x += 0.5
    x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x

K.set_learning_phase(0)

# Build the InceptionV3 network with our placeholder.
# The model will be loaded with pre-trained ImageNet weights.
model = inception_v3.InceptionV3(weights='imagenet',
                                 include_top=False)
dream = model.input
print('Model loaded.')

# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in settings['features']:
    # Add the L2 norm of the features of a layer to the loss.
Example #49
from PIL import Image

import os
import re


##########
#target_model_time = "1527999111"
#target_layer_name = "conv2d_60"
##########
target_model_time = "1528320726"
target_layer_name = "conv2d_116"
##########


backend.set_learning_phase(1)

output_dir = "output2"
output_map_dir = "output_map"
if not os.path.exists(output_dir):
	os.mkdir(output_dir)
if not os.path.exists(output_dir+"/"+target_model_time):
	os.mkdir(output_dir+"/"+target_model_time)
if not os.path.exists(output_map_dir):
	os.mkdir(output_map_dir)

model = load_model("model/"+target_model_time+".h5")

count_total_1 = 0
count_correct_1 = 0
accuracy_1 = 0.0
Example #50
def render_dream(avatar):
    K.clear_session()

    def eval_loss_and_grads(x):
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
        grad_values = outs[1]
        return loss_value, grad_values

    def gradient_ascent(x, iterations, step, max_loss=None):
        for i in range(iterations):
            loss_value, grad_values = eval_loss_and_grads(x)
            if max_loss is not None and loss_value > max_loss:
                break
            x += step * grad_values
        return x

    def preprocess_image(image_path):
        # Util function to open, resize and format pictures
        # into appropriate tensors.
        img = img_to_array(image_path)
        img = np.expand_dims(img, axis=0)
        img = inception_v3.preprocess_input(img)
        return img

    def deprocess_image(x):
        # Util function to convert a tensor into a valid image.
        if K.image_data_format() == 'channels_first':
            x = x.reshape((3, x.shape[2], x.shape[3]))
            x = x.transpose((1, 2, 0))
        else:
            x = x.reshape((x.shape[1], x.shape[2], 3))
        x /= 2.
        x += 0.5
        x *= 255.
        x = np.clip(x, 0, 255).astype('uint8')
        return x

    def resize_img(img, size):
        img = np.copy(img)
        if K.image_data_format() == 'channels_first':
            factors = (1, 1, float(size[0]) / img.shape[2],
                       float(size[1]) / img.shape[3])
        else:
            factors = (1, float(size[0]) / img.shape[1],
                       float(size[1]) / img.shape[2], 1)
        return scipy.ndimage.zoom(img, factors, order=1)

    settings = {
        'features': {
            'mixed2': randint(1, 5) / 10,
            'mixed3': randint(1, 10) / 10,
            'mixed4': randint(1, 20) / 10,
            'mixed5': randint(1, 50) / 10,
        },
    }
    K.set_learning_phase(0)

    # Build the InceptionV3 network with our placeholder.
    # The model will be loaded with pre-trained ImageNet weights.
    # model = inception_v3.InceptionV3(weights='imagenet',
    #                                include_top=False)

    model = load_model('model.hdf5', compile=False)

    dream = model.input

    # Get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    # Define the loss.
    loss = K.variable(0.)
    for layer_name in settings['features']:
        # Add the L2 norm of the features of a layer to the loss.
        if layer_name not in layer_dict:
            raise ValueError('Layer ' + layer_name + ' not found in model.')
        coeff = settings['features'][layer_name]
        x = layer_dict[layer_name].output
        # We avoid border artifacts by only involving non-border pixels in the loss.
        scaling = K.prod(K.cast(K.shape(x), 'float32'))
        if K.image_data_format() == 'channels_first':
            loss += coeff * K.sum(K.square(x[:, :, 2:-2, 2:-2])) / scaling
        else:
            loss += coeff * K.sum(K.square(x[:, 2:-2, 2:-2, :])) / scaling

    # Compute the gradients of the dream wrt the loss.
    grads = K.gradients(loss, dream)[0]
    # Normalize gradients.
    grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

    # Set up function to retrieve the value
    # of the loss and gradients given an input image.
    outputs = [loss, grads]
    fetch_loss_and_grads = K.function([dream], outputs)

    step = 0.01  # Gradient ascent step size
    num_octave = 3  # Number of scales at which to run gradient ascent
    octave_scale = 1.4  # Size ratio between scales
    iterations = 20  # Number of ascent steps per scale
    max_loss = 10.

    img = preprocess_image(avatar)
    if K.image_data_format() == 'channels_first':
        original_shape = img.shape[2:]
    else:
        original_shape = img.shape[1:3]
    successive_shapes = [original_shape]
    for i in range(1, num_octave):
        shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
        successive_shapes.append(shape)
    successive_shapes = successive_shapes[::-1]
    original_img = np.copy(img)
    shrunk_original_img = resize_img(img, successive_shapes[0])

    for shape in successive_shapes:
        img = resize_img(img, shape)
        img = gradient_ascent(img,
                              iterations=iterations,
                              step=step,
                              max_loss=max_loss)
        upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
        same_size_original = resize_img(original_img, shape)
        lost_detail = same_size_original - upscaled_shrunk_original_img

        img += lost_detail
        # keep the shrunk original in sync so the next octave re-injects only
        # the detail lost at that scale
        shrunk_original_img = resize_img(original_img, shape)

    return deprocess_image(np.copy(img))
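# --- Illustrative usage sketch (assumptions flagged, not part of the example) ---
# render_dream passes its argument straight to img_to_array, so a PIL image
# works as input. 'avatar.png', the 299x299 target size and the output path
# are placeholders; 'model.hdf5' must exist as the function assumes.
from PIL import Image
from keras.preprocessing.image import load_img

avatar = load_img('avatar.png', target_size=(299, 299))
dreamed = render_dream(avatar)  # uint8 HxWx3 array
Image.fromarray(dreamed).save('avatar_dream.png')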
Example #51
def define_and_train(iteration, model_name, load_for_inference, training_data, training_labels, val_data, val_labels, batch_size, classes, input_shape, alpha, strides, train_datagen, epochs, progress_verbose, checkpoint_verbose, train_steps_per_epoch, val_steps_per_epoch, num_gpus):
    K.set_learning_phase(1)
    model, parallel_model, model_file = define_model(iteration, model_name, "cifar", input_shape, classes, alpha, strides, num_gpus, weights=None)
    get_model_weights_CNN_cifar(model, parallel_model, model_name, load_for_inference, model_file, training_data, training_labels, val_data, val_labels, train_datagen, batch_size, epochs, progress_verbose, checkpoint_verbose, train_steps_per_epoch, val_steps_per_epoch, num_gpus)
    return model
import hdf5storage
from tqdm import tqdm

# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
K.set_learning_phase(False)

# Directories of pretrained models/data
model_dir = 'trained_models/lord/model/'
data_loc = 'trained_models/lord/data/celeba_test.npz'
cbk_loc = 'trained_codebooks/one_sample_fixed.mat'

# Load data
data = np.load(data_loc)
x_d_test = np.copy(data['imgs'] / 255.)
y_d_test = np.copy(data['classes'])
# Rearrange y_test as ordinal classes (since absolute value of class doesn't matter)
_, y_d_test_ordinal = np.unique(y_d_test, return_inverse=True)

# Instantiate and load VGGFace with VGG16 core
latent_dim = 128
Example #53
def learnModelMulti(MODEL_FOLDER, MODEL_NAME, LOGS_FOLDER, train_set,
                    valid_set):

    if (MODEL_NAME == 'RESNET'):
        # set model to inference / test mode manually (required for BN layers)
        K.set_learning_phase(0)
        base_model = applications.ResNet50(include_top=False,
                                           weights='imagenet',
                                           input_shape=(IMG_SIZE, IMG_SIZE, 3))
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        #x = layers.Dropout(0.5)(x)
        #base_model.summary()
    elif (MODEL_NAME == 'Xception'):
        # set model to inference / test mode manually (required for BN layers)
        K.set_learning_phase(0)
        base_model = applications.Xception(weights='imagenet',
                                           include_top=False,
                                           input_shape=(IMG_SIZE, IMG_SIZE, 3))
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        #ox = layers.Dropout(0.7)(x)
    else:
        base_model = applications.VGG16(weights='imagenet',
                                        include_top=False,
                                        input_shape=(IMG_SIZE, IMG_SIZE, 3))
        x = base_model.output
        x = layers.Flatten()(x)  # flatten 3D output to 1D

    print('***** Base model {} loaded:'.format(MODEL_NAME))

    #n_layers_base = len(base_model.layers)

    if (MODEL_NAME == 'Xception' or MODEL_NAME == 'RESNET'):
        # set model to training mode manually (required for BN layers)
        K.set_learning_phase(1)

    MLT_shared_repr = layers.Dense(NUM_HIDDEN_UNITS, name='shared_repr')(x)
    MLT_shared_repr = layers.BatchNormalization()(MLT_shared_repr)
    MLT_shared_repr = layers.Activation('relu')(MLT_shared_repr)

    #    MLT_shared_repr = layers.Dense(NUM_HIDDEN_UNITS, activation = 'relu', name='shared_repr')(x)
    #    MLT_shared_repr = layers.Activation('relu')(MLT_shared_repr)

    drop_out_layer = layers.Dropout(DROPOUT)(MLT_shared_repr)
    artist_prediction = layers.Dense(
        nClassesArtist,
        activation='softmax',
        name='artist',
        kernel_regularizer=regularizers.l2(0.001))(drop_out_layer)
    year_prediction = layers.Dense(1, name='year')(
        drop_out_layer)  # regression hence no activation function
    type_prediction = layers.Dense(nClassesType,
                                   activation='sigmoid',
                                   name='type')(drop_out_layer)
    mat_prediction = layers.Dense(nClassesMat,
                                  activation='sigmoid',
                                  name='mat')(drop_out_layer)

    global custom_model
    custom_model = Model(
        base_model.input,
        [artist_prediction, year_prediction, type_prediction, mat_prediction])

    #print("***** Full model")
    #custom_model.summary()

    print('# trainable weights '
          'before freezing the conv base:',
          len(custom_model.trainable_weights))

    for layer in base_model.layers:
        layer.trainable = False

    print('# trainable weights '
          'after freezing the conv base:', len(custom_model.trainable_weights))

    artist_loss_weight = 1
    year_loss_weight = 0  #0.1 #0.05
    type_loss_weight = 1
    mat_loss_weight = 1

    f = open(LOGS_FOLDER + 'Run_info.txt', 'a')
    msg = '\nartist_loss_weight = {:.2f}\n'.format(artist_loss_weight)
    msg += 'year_loss_weight = {:.2f}\n'.format(year_loss_weight)
    msg += 'type_loss_weight = {:.2f}\n'.format(type_loss_weight)
    msg += 'mat_loss_weight = {:.2f}\n'.format(mat_loss_weight)
    print(msg)
    f.write(msg)
    f.close()

    #optimizer = tf.train.RMSPropOptimizer(learning_rate=2e-3, decay=0.9)
    #custom_model.compile(optimizer='rmsprop',
    #sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    mae_tol = cm.mae_tol_param(0.15)
    #loss_artits_w = cm.categorical_crossentropy_w_wrap(artist_weights_matrix)

    if (FOCAL and WEIGHTING):
        artistLoss = cm.w_categorical_focal_loss(alpha=1, gamma=2)
    elif (FOCAL and not WEIGHTING):
        artistLoss = cm.categorical_focal_loss(alpha=1, gamma=2)
    elif (not FOCAL and WEIGHTING):
        artistLoss = lambda y_true, y_pred: cm.w_categorical_crossentropy(
            y_true, y_pred, weights=artist_weights_matrix)
    elif (not FOCAL and not WEIGHTING):
        artistLoss = cm.categorical_crossentropy_abs

    custom_model.compile(optimizer='adam',
                         loss={
                             'artist': artistLoss,
                             'year': 'mae',
                             'type': 'binary_crossentropy',
                             'mat': 'binary_crossentropy'
                         },
                         loss_weights={
                             'artist': artist_loss_weight,
                             'year': year_loss_weight,
                             'type': type_loss_weight,
                             'mat': mat_loss_weight
                         },
                         metrics={
                             'artist': [cm.accuracy_abs, cm.accuracy_w],
                             'year': ['mae', mae_tol],
                             'type': cm.precision,
                             'mat': cm.precision
                         })

    train_gen = data_generator(MODEL_NAME, train_set, dataAugm=True)
    valid_gen = data_generator(MODEL_NAME, valid_set, dataAugm=False)

    #callbacks = [keras.callbacks.TensorBoard(log_dir='../logs',histogram_freq=1]
    #csv_logger = callbacks.CSVLogger(LOGS_FOLDER + MODEL_NAME +'.log')

    callbacks_list = [
        callbacks.CSVLogger(LOGS_FOLDER + MODEL_NAME + '.log'),
        #callbacks.EarlyStopping(monitor='val_artist_accuracy_w',patience=4),
        callbacks.ModelCheckpoint(filepath=MODEL_FOLDER + MODEL_NAME + '.h5',
                                  monitor='val_artist_accuracy_w',
                                  save_best_only=True)
    ]

    history = custom_model.fit_generator(
        train_gen,
        validation_data=valid_gen,
        validation_steps=ceil(valid_set.shape[0] / BATCH_SIZE),
        steps_per_epoch=ceil(2 * train_set.shape[0] / BATCH_SIZE),
        epochs=NUM_EPOCHS,
        verbose=1,
        callbacks=callbacks_list)

    print('***** Training logs saved as ' + LOGS_FOLDER + MODEL_NAME + '.log')

    #custom_model.save(LOGS_FOLDER + 'artistsRD.h5')
    print('***** Model saved as artistsRD.h5')
Example #54
    def train(self, model):
        '''
        This function carries out the training of the GAN, including the
        discriminator training step.
        '''
        batch_size = 16
        self.output_path = os.path.join(
            self.output_path,
            datetime.datetime.now().strftime("%Y-%m-%d--%Hh%Mm"))
        os.mkdir(self.output_path)
        os.mkdir(os.path.join(self.output_path, "images"))
        self.writer = tf.summary.FileWriter(self.output_path)
        train_data_path = os.path.join(self.image_path, "Train_1")
        validation_data_path = os.path.join(self.image_path, "Validation_1")

        # partition = {"train": [], "validation": []}
        # for image in os.listdir(train_data_path):
        # 	partition["train"].append(os.path.join(train_data_path, image))

        # for image in os.listdir(validation_data_path):
        # 	partition["validation"].append(os.path.join(validation_data_path, image))

        # train_dataset = tf.data.Dataset.from_tensor_slices(partition["train"])
        # train_dataset = train_dataset.apply(tf.data.experimental.shuffle_and_repeat(len(partition["train"])))
        # train_dataset = train_dataset.map(mse_parse_function_gan, num_parallel_calls=8)
        # train_dataset = train_dataset.batch(batch_size)
        # train_dataset = train_dataset.prefetch(1)
        # train_iterator = train_dataset.make_one_shot_iterator()

        # validation_dataset = tf.data.Dataset.from_tensor_slices(partition["validation"])
        # validation_dataset = validation_dataset.shuffle(len(partition["validation"]))
        # validation_dataset = validation_dataset.map(mse_parse_function_gan, num_parallel_calls=4).repeat()
        # validation_dataset = validation_dataset.batch(batch_size)
        # validation_dataset = validation_dataset.prefetch(1)
        # validation_iterator = train_dataset.make_one_shot_iterator()

        datagen = image.ImageDataGenerator(rescale=(1. / 255))
        val_datagen = image.ImageDataGenerator(rescale=(1. / 255))
        epochs = 2000
        # TODO: Try with 2, 3
        disc_training_steps = 1
        num_train_batches = 258500 // batch_size
        num_validation_batches = 10000 // batch_size
        # num_train_batches = 32//batch_size
        # num_validation_batches = 32//batch_size

        #one sided smoothing (https://arxiv.org/pdf/1606.03498.pdf)
        real_images_labels = np.full((batch_size, 1), 0.9)
        generated_images_labels = np.full((batch_size, 1), 0)
        start_time = time.time()

        def val_batch_generator(batch_size):
            for val_batch in val_datagen.flow_from_directory(
                    validation_data_path,
                    target_size=(image_size, image_size),
                    class_mode="input",
                    batch_size=batch_size):
                val_lab = color.rgb2lab(val_batch[0]).astype(np.float32)
                X = preprocess_and_return_X_batch(val_lab)
                Y = val_lab[:, :, :, 1:] / 128
                generated_ab_val = self.generator.predict(X, steps=1)
                disc_x_val = np.concatenate((Y, generated_ab_val), axis=0)
                disc_y_val = np.concatenate((np.ones(
                    (batch_size, 1)), np.zeros((batch_size, 1))),
                                            axis=0)
                yield ([disc_x_val, disc_y_val])

        gen_loss = [0.0, 0.0, 0.0]  # [total, binary, mse] before the first generator update
        for e in range(epochs):
            curr_batch = 0
            for batch in datagen.flow_from_directory(train_data_path,
                                                     target_size=(image_size,
                                                                  image_size),
                                                     class_mode="input",
                                                     batch_size=batch_size):
                lab = color.rgb2lab(batch[0]).astype(np.float32)
                X_train = preprocess_and_return_X_batch(lab)
                Y_train = lab[:, :, :, 1:] / 128
                generated_ab = self.generator.predict(X_train, steps=1)

                # disc_x_train = np.concatenate((Y_train, generated_ab), axis=0)
                # disc_y_train = np.concatenate((real_images_labels, generated_images_labels), axis=0)

                # shuffle_indices = np.arange(disc_x_train.shape[0])
                # np.random.shuffle(shuffle_indices)

                # disc_x_train_shuffled = np.squeeze(disc_x_train[shuffle_indices])
                # disc_y_train_shuffled = np.squeeze(disc_y_train[shuffle_indices])

                #train on real batch then on fake batch (https://github.com/soumith/ganhacks/blob/master/README.md point 4)
                #Set learning phase manually due to Dropout and BatchNormalization layers
                K.set_learning_phase(1)
                #TODO: maybe train whenever acc falls below a certain threshold (90?)
                disc_loss_r, disc_acc_r = self.discriminator.train_on_batch(
                    Y_train, real_images_labels)
                disc_loss_f, disc_acc_f = self.discriminator.train_on_batch(
                    generated_ab, generated_images_labels)

                disc_loss_summary = tf.Summary(value=[
                    tf.Summary.Value(tag="disc_loss",
                                     simple_value=(disc_loss_r + disc_loss_f) /
                                     2)
                ])
                # disc_acc_summary = tf.Summary(value=[tf.Summary.Value(tag="disc_acc", simple_value=(disc_acc_r + disc_acc_f) / 2)])

                if curr_batch % 3 == 2:
                    gen_loss = model.train_on_batch(
                        X_train, [np.ones((batch_size, 1)), Y_train])
                    gen_loss_summary = tf.Summary(value=[
                        tf.Summary.Value(tag="gen_loss",
                                         simple_value=gen_loss[0])
                    ])

                K.set_learning_phase(0)

                current_step = (e) * num_train_batches + curr_batch
                self.writer.add_summary(disc_loss_summary,
                                        current_step * disc_training_steps)
                # self.writer.add_summary(disc_acc_summary, current_step*disc_training_steps)
                if curr_batch % 3 == 2:
                    self.writer.add_summary(gen_loss_summary, current_step)

                curr_batch = curr_batch + 1
                sys.stdout.write(
                    "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss_bin: %f loss_mse: %f loss: %f] Estimated time left: %s"
                    % (e, epochs, curr_batch, num_train_batches,
                       (disc_loss_r + disc_loss_f) / 2, gen_loss[1],
                       gen_loss[2], gen_loss[0],
                       str(
                           datetime.timedelta(
                               seconds=(
                                   (time.time() - start_time) / curr_batch) *
                               (num_train_batches - curr_batch)))))
                sys.stdout.flush()
                if curr_batch >= num_train_batches:
                    print(
                        str(self.discriminator.metrics_names) + " : " + str(
                            self.discriminator.evaluate_generator(
                                val_batch_generator(batch_size),
                                steps=num_validation_batches)))
                    model.save(os.path.join(self.output_path, "model.h5"))
                    self.discriminator.save(
                        os.path.join(self.output_path, "discriminator.h5"))
                    self.generator.save(
                        os.path.join(self.output_path, "generator.h5"))

                    images = np.concatenate(
                        (X_train * 50 + 50, generated_ab * 128), axis=3)
                    os.mkdir(
                        os.path.join(self.output_path, "images",
                                     "epoch_{}".format(e)))
                    for i in range(batch_size):
                        curr_image = color.lab2rgb(images[i])
                        io.imsave(
                            os.path.join(self.output_path, "images",
                                         "epoch_{}".format(e),
                                         "image_{}.jpg".format(i)), curr_image)
                    break
Example #55
import time
import math

from utility import plot_util
from utility.data_loader import load_data_multi_timestamp
from utility.test_util import convert_rel_to_44matrix, iround
from utility.networks import build_model_cross_att

# keras
from keras import backend as K

K.set_image_dim_ordering('tf')
K.set_session(
    K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True,
                                         log_device_placement=False)))  #
K.set_learning_phase(0)  # Run testing mode

SCALER = 1.0  # scale label: 1, 100, 10000
RADIUS_2_DEGREE = 180.0 / math.pi


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seqs',
                        type=str,
                        required=True,
                        help='h5 file sequences, e.g, 1, 6, 13')
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        help='model architecture')
from keras import callbacks
from keras.utils import plot_model
import time
from futils.util import dice
import argparse
import os

### set important paths

dir_path = os.path.dirname(os.path.realpath(__file__))

path_model = os.path.join(dir_path,'models')
test_dir =  os.path.join(dir_path,'data/train')
val_dir = os.path.join(dir_path,'data/val')

K.set_learning_phase(1)  # try with 1

parser = argparse.ArgumentParser(
    description='End2End Supervised Lobe Segmentation')

parser.add_argument(
    '-path',
    '--path',
    help='Model Path',
    type=str,
    default='/models/')
parser.add_argument(
    '-train',
    '--train_dir',
    help='Train Data',
    type=str,
Example #57
from optparse import OptionParser
import pickle

from keras.callbacks import ReduceLROnPlateau
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from keras.utils import generic_utils

sys.setrecursionlimit(40000)

K.set_learning_phase(0)  #set learning phase -- 0 for test ; 1 for Train
parser = OptionParser()

parser.add_option("-p",
                  "--path",
                  dest="train_path",
                  help="Path to training data.")
parser.add_option("-o",
                  "--parser",
                  dest="parser",
                  help="Parser to use. One of simple or pascal_voc",
                  default="pascal_voc")
parser.add_option("-n",
                  "--num_rois",
                  type="int",
                  dest="num_rois",
Example #58
from __future__ import print_function
import os
import time
import h5py  #conda install -c anaconda h5py=2.6.0y
import numpy as np
import tensorflow as tf
from scipy.misc import imsave
from keras import backend as K
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.convolutional import Convolution3D
from keras.layers.pooling import MaxPooling3D
from keras.layers.core import Dense, Dropout, Activation, Flatten

# solves Issues in Keras model loading in Tensorflow Serving
K.set_learning_phase(0)


#**************************************************************
#misc functions
#**************************************************************
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)


# util function to convert a tensor into a valid image
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
Example #59
                print('the true=',
                      ' '.join([self.idx2w[num] for num in row_true]))
        print('the acc = ', np.mean(acc_list))
        return y_pred, y_true


if __name__ == '__main__':

    path1 = 'data_aishell'
    path2 = 'data_thchs30'
    path3 = 'data_pinyin'

    if not os.path.exists(path3):
        os.mkdir(path3)

    K.set_learning_phase(1)  #set learning phase

    os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # 选择使用的GPU

    wav_files = get_thchs_wav_paths(path2)
    wav_len = len(wav_files)
    train2_wavs = wav_files[:int(0.8 * wav_len)]
    val2_wavs = wav_files[int(0.8 * wav_len):int(0.9 * wav_len)]
    test2_wavs = wav_files[int(0.9 * wav_len):]

    train1_wavs, val1_wavs, test1_wavs = get_wav_files(path1)

    #     name_to_seq, tok = get_thchs_corpus(path2,wav_files, maxlen=48)

    trans2_lines = get_thchs_trans_text(path2, wav_files)
    trans1_lines = get_trans_text(path1)
Example #60
def dream(cli_params):
    base_image_path = cli_params.base_image_path
    result_prefix = cli_params.result_prefix
    iterations = cli_params.iterations
    step = cli_params.step
    num_octave = cli_params.num_octave
    octave_scale = cli_params.octave_scale

    if not os.path.isdir(base_image_path):
        raise Exception('base must be a directory')

    # Find the first image in the given directory.
    file_types = ('*.jpg', '*.png')
    image_files = []
    for file_type in file_types:
        image_files.extend(
            glob.glob('{}/*{}'.format(base_image_path, file_type)))
    if not image_files:
        types_as_str = ', '.join(file_types)
        raise Exception('no image files ({}) under {}'.format(
            types_as_str, base_image_path))
    image_file = image_files[0]

    # These are the names of the layers
    # for which we try to maximize activation,
    # as well as their weight in the final loss
    # we try to maximize.
    # You can tweak these settings to obtain new visual effects.
    settings = {
        'features': {
            'mixed2': 0.2,
            'mixed3': 0.5,
            'mixed4': 2.,
            'mixed5': 1.5,
        },
    }
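    # With these coefficients, 'mixed4' contributes most to the objective (weight 2.0),
    # followed by 'mixed5' (1.5), 'mixed3' (0.5) and 'mixed2' (0.2); the loss loop
    # below turns each entry into roughly coeff * mean squared activation of the
    # layer's non-border features.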

    def preprocess_image(image_path):
        # Util function to open, resize and format pictures
        # into appropriate tensors.
        img = load_img(image_path)
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = inception_v3.preprocess_input(img)
        return img

    def postprocess_image(x):
        # Util function to convert a tensor into a valid image.
        if K.image_data_format() == 'channels_first':
            x = x.reshape((3, x.shape[2], x.shape[3]))
            x = x.transpose((1, 2, 0))
        else:
            x = x.reshape((x.shape[1], x.shape[2], 3))
        x /= 2.
        x += 0.5
        x *= 255.
        x = np.clip(x, 0, 255).astype('uint8')
        return x

    K.set_learning_phase(0)

    # Build the InceptionV3 network (no classification top).
    # The model will be loaded with pre-trained ImageNet weights.
    model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
    dream = model.input
    print('Model loaded.')

    # Get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    # Define the loss.
    loss = K.variable(0.)
    for layer_name in settings['features']:
        # Add the L2 norm of the features of a layer to the loss.
        if layer_name not in layer_dict:
            raise ValueError('Layer ' + layer_name + ' not found in model.')
        coeff = settings['features'][layer_name]
        x = layer_dict[layer_name].output
        # We avoid border artifacts by only involving non-border pixels in the loss.
        scaling = K.prod(K.cast(K.shape(x), 'float32'))
        if K.image_data_format() == 'channels_first':
            loss = loss + coeff * K.sum(K.square(x[:, :, 2:-2,
                                                   2:-2])) / scaling
        else:
            loss = loss + coeff * K.sum(K.square(x[:, 2:-2,
                                                   2:-2, :])) / scaling

    # Compute the gradients of the loss with respect to the dream (the input image).
    grads = K.gradients(loss, dream)[0]
    # Normalize gradients.
    grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

    # Set up function to retrieve the value
    # of the loss and gradients given an input image.
    outputs = [loss, grads]
    fetch_loss_and_grads = K.function([dream], outputs)

    def eval_loss_and_grads(x):
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
        grad_values = outs[1]
        return loss_value, grad_values

    def resize_img(img, size):
        img = np.copy(img)
        if K.image_data_format() == 'channels_first':
            factors = (1, 1, float(size[0]) / img.shape[2],
                       float(size[1]) / img.shape[3])
        else:
            factors = (1, float(size[0]) / img.shape[1],
                       float(size[1]) / img.shape[2], 1)
        return scipy.ndimage.zoom(img, factors, order=1)

    def gradient_ascent(x, shape_number, iterations, step, max_loss=None):
        for i in range(iterations):
            loss_value, grad_values = eval_loss_and_grads(x)
            if max_loss is not None and loss_value > max_loss:
                break
            print(
                json.dumps({
                    'shape_number': shape_number,
                    'iteration': i,
                    'loss_value': loss_value.item(),
                }))
            x += step * grad_values
        return x

    """Process:

    - Load the original image.
    - Define a number of processing scales (i.e. image shapes),
        from smallest to largest.
    - Resize the original image to the smallest scale.
    - For every scale, starting with the smallest (i.e. current one):
        - Run gradient ascent
        - Upscale image to the next scale
        - Reinject the detail that was lost at upscaling time
    - Stop when we are back to the original size.

    To obtain the detail lost during upscaling, we simply
    take the original image, shrink it down, upscale it,
    and compare the result to the (resized) original image.
    """

    # Playing with these hyperparameters will also allow you to achieve new effects
    max_loss = 10.

    img = preprocess_image(image_file)
    if K.image_data_format() == 'channels_first':
        original_shape = img.shape[2:]
    else:
        original_shape = img.shape[1:3]

    successive_shapes = [original_shape]
    for i in range(1, num_octave):
        shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
        successive_shapes.append(shape)

    successive_shapes = successive_shapes[::-1]
    original_img = np.copy(img)
    shrunk_original_img = resize_img(img, successive_shapes[0])

    shape_number = 0
    for shape in successive_shapes:
        print('Processing image shape', shape)
        shape_number += 1
        img = resize_img(img, shape)
        img = gradient_ascent(img,
                              shape_number=shape_number,
                              iterations=iterations,
                              step=step,
                              max_loss=max_loss)
        upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
        same_size_original = resize_img(original_img, shape)
        lost_detail = same_size_original - upscaled_shrunk_original_img
        img += lost_detail
        shrunk_original_img = resize_img(original_img, shape)

    save_img(result_prefix + '.png', postprocess_image(np.copy(img)))