Example #1
def get_evaluation_context_getter():
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        return tf.get_default_graph().as_default

    if K.backend() == 'theano':
        return contextmanager(lambda: (yield))
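Both branches return a zero-argument callable that yields a context manager, so calling code can stay backend-agnostic. A hedged usage sketch (the `contextmanager` import below is also what the Theano branch above relies on):

from contextlib import contextmanager

# Hypothetical usage: obtain the factory once, then enter the context around
# any backend evaluation.
get_context = get_evaluation_context_getter()
with get_context():
    pass  # run K.function evaluations here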
Example #2
  def __init__(self, model, outchannels=[], verbose=1):

    # Backend: either "tensorflow" or "theano".
    self.backend = K.backend()

    # Load the model; both keras.models.Model and keras.models.Sequential are supported.
    if isinstance(model, Sequential):
      self.model = model.model
    elif isinstance(model, Model):
      self.model = model
    else:
      print("Invalid input model")
      return -1

    # load input tensors
    self.input_tensors = []
    for i in self.model.inputs:
      self.input_tensors.append(i)
    # The learning phase flag is a bool tensor (0 = test, 1 = train)
    # to be passed as input to any Keras function that uses
    # a different behavior at train time and test time.
    self.input_tensors.append(K.learning_phase())

    # If outchannels is specified, use it.
    # Otherwise evaluate all outputs.
    self.outchannels = outchannels
    if len(self.outchannels) == 0:
      if verbose: print("Evaluated output channel (0-based index): All")
      if K.backend() == "tensorflow":
        self.outchannels = range(self.model.output.shape[1]._value)
      elif K.backend() == "theano":
        self.outchannels = range(self.model.output._keras_shape[1])
    else:
      if verbose:
        print("Evaluated output channels (0-based index):")
        print(','.join([str(i) for i in self.outchannels]))

    # Build gradient functions for desired output channels.
    self.get_gradients = {}
    if verbose: print("Building gradient functions")

    # Evaluate over all channels.
    for c in self.outchannels:
      # Get the tensor that calculates the gradient.
      if K.backend() == "tensorflow":
        gradients = self.model.optimizer.get_gradients(self.model.output[:, c], self.model.input)
      if K.backend() == "theano":
        gradients = self.model.optimizer.get_gradients(self.model.output[:, c].sum(), self.model.input)

      # Build the computational graph that evaluates the gradient tensors for given inputs.
      self.get_gradients[c] = K.function(inputs=self.input_tensors, outputs=gradients)

      # This takes a lot of time for a big model with many tasks,
      # so let's print the progress.
      if verbose:
        sys.stdout.write('\r')
        sys.stdout.write("Progress: " + str(int((c + 1) * 1.0 / len(self.outchannels) * 1000) * 1.0 / 10) + "%")
        sys.stdout.flush()
    # Done
    if verbose: print("\nDone.")
Example #3
def test_load_layers():
    from keras.layers import ConvLSTM2D, TimeDistributed, Bidirectional, Conv2D, Input
    from keras.models import Model

    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        inputs = Input(shape=(10, 20, 20, 1))
    else:
        inputs = Input(shape=(10, 1, 20, 20))
    td_conv = TimeDistributed(Conv2D(15, (5, 5)))(inputs)
    bi_convlstm2d = Bidirectional(ConvLSTM2D(10, (3, 3)), merge_mode='concat')(td_conv)
    model = Model(inputs=inputs, outputs=bi_convlstm2d)

    weight_value_tuples = []

    # TimeDistributed Conv2D layer
    # use 'channels_first' data format to check that the function is being called correctly for Conv2D
    # old: (filters, stack_size, kernel_rows, kernel_cols)
    # new: (kernel_rows, kernel_cols, stack_size, filters)
    weight_tensor_td_conv_old = list()
    weight_tensor_td_conv_old.append(np.zeros((15, 1, 5, 5)))
    weight_tensor_td_conv_old.append(np.zeros((15,)))
    td_conv_layer = model.layers[1]
    td_conv_layer.layer.data_format = 'channels_first'
    weight_tensor_td_conv_new = topology.preprocess_weights_for_loading(
        td_conv_layer,
        weight_tensor_td_conv_old,
        original_keras_version='1')
    symbolic_weights = td_conv_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_td_conv_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_td_conv_new)

    # Bidirectional ConvLSTM2D layer
    # The old ConvLSTM2D took a list of 12 weight tensors per direction;
    # preprocessing returns 3 concatenated, larger tensors per direction.
    weight_tensor_bi_convlstm_old = []
    for j in range(2):  # bidirectional
        for i in range(4):
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 15, 10)))  # kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 10, 10)))  # recurrent kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((10,)))  # bias

    bi_convlstm_layer = model.layers[2]
    weight_tensor_bi_convlstm_new = topology.preprocess_weights_for_loading(
        bi_convlstm_layer,
        weight_tensor_bi_convlstm_old,
        original_keras_version='1')

    symbolic_weights = bi_convlstm_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_bi_convlstm_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_bi_convlstm_new)

    K.batch_set_value(weight_value_tuples)

    assert np.all(K.eval(model.layers[1].weights[0]) == weight_tensor_td_conv_new[0])
    assert np.all(K.eval(model.layers[1].weights[1]) == weight_tensor_td_conv_new[1])
    assert np.all(K.eval(model.layers[2].weights[0]) == weight_tensor_bi_convlstm_new[0])
    assert np.all(K.eval(model.layers[2].weights[1]) == weight_tensor_bi_convlstm_new[1])
    assert np.all(K.eval(model.layers[2].weights[2]) == weight_tensor_bi_convlstm_new[2])
    assert np.all(K.eval(model.layers[2].weights[3]) == weight_tensor_bi_convlstm_new[3])
    assert np.all(K.eval(model.layers[2].weights[4]) == weight_tensor_bi_convlstm_new[4])
    assert np.all(K.eval(model.layers[2].weights[5]) == weight_tensor_bi_convlstm_new[5])
Example #4
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # with the comparison `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
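Keras losses take only `(y_true, y_pred)`, so `clip_value` is usually bound by a small wrapper before compiling. A minimal sketch, assuming a clip value of 1.0 and an already-built `model` (both illustrative):

def huber_loss_1(y_true, y_pred):
    # Illustrative wrapper binding clip_value=1.0 (an assumed choice).
    return K.mean(huber_loss(y_true, y_pred, clip_value=1.0), axis=-1)

# model.compile(optimizer='adam', loss=huber_loss_1)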
Example #5
def clear_session_after_test():
    """Test wrapper to clean up after TensorFlow and CNTK tests.

    This wrapper runs for all the tests in the keras test suite.
    """
    yield
    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        K.clear_session()
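In a pytest suite such a generator is typically registered as an autouse fixture (e.g. in conftest.py) so it wraps every test. A sketch of that registration, an assumption rather than something shown in the source:

import pytest

# Assumed registration: expose the wrapper above as an autouse yield fixture.
clear_session_after_test = pytest.fixture(autouse=True)(clear_session_after_test)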
Example #6
    def focal_loss_fixed(y_true, y_pred):
        if K.backend() == "tensorflow":
            import tensorflow as tf
            pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
            return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt))
        if K.backend() == "theano":
            import theano.tensor as T
            pt = T.where(T.eq(y_true, 1), y_pred, 1 - y_pred)
            return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt))
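`alpha` and `gamma` are free variables here, so this function must be returned by an enclosing factory. A hedged sketch of such a factory, swapping the per-backend branches for the backend-agnostic `K.switch` (the default values are common choices, not taken from the source):

def focal_loss(gamma=2., alpha=.25):
    def focal_loss_fixed(y_true, y_pred):
        # K.switch works on both backends, avoiding explicit branching.
        pt = K.switch(K.equal(y_true, 1), y_pred, 1 - y_pred)
        return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt))
    return focal_loss_fixed

# model.compile(loss=focal_loss(), optimizer='adam')  # hypothetical usage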
Example #7
def check_backend():
    """Check the current Keras backend and warn the user if it is not TensorFlow."""
    if K.backend() != 'tensorflow':
        print("\nWARNING: you're using a Keras backend different than\n"
              "Tensorflow, which is not recommended. Please verify\n"
              "your configuration file according to: https://keras.io/backend/\n"
              "to make sure you're using Tensorflow Keras backend.\n")
    return K.backend()
Example #8
    def explain(self, sample, outc=0, reference=False, num_steps=50, verbose=0):
        
        # Each element for each input stream.
        samples = []
        numsteps = []
        step_sizes = []
        
        # If multiple inputs are present, feed them as list of np arrays. 
        if isinstance(sample, list):
            # If a reference is present, it must have the same length as the sample.
            if reference is not False:
                assert len(sample) == len(reference)
            for i in range(len(sample)):
                if reference is False:
                    _output = integrated_gradients.linearly_interpolate(sample[i], False, num_steps)
                else:
                    _output = integrated_gradients.linearly_interpolate(sample[i], reference[i], num_steps)
                samples.append(_output[0])
                numsteps.append(_output[1])
                step_sizes.append(_output[2])
        
        # Or you can feed just a single numpy array.
        elif isinstance(sample, np.ndarray):
            _output = integrated_gradients.linearly_interpolate(sample, reference, num_steps)
            samples.append(_output[0])
            numsteps.append(_output[1])
            step_sizes.append(_output[2])
            
        # The desired channel must be in the list of output channels.
        assert outc in self.outchannels
        if verbose: print("Explaining the " + str(self.outchannels[outc]) + "th output.")
            
        # Assemble the inputs and append the learning-phase flag (0 = test).
        _input = []
        for s in samples:
            _input.append(s)
        _input.append(0)
        
        if K.backend() == "tensorflow": 
            gradients = self.get_gradients[outc](_input)
        elif K.backend() == "theano":
            gradients = self.get_gradients[outc](_input)
            if len(self.model.inputs) == 1:
                gradients = [gradients]
        
        explanation = []
        for i in range(len(gradients)):
            _temp = np.sum(gradients[i], axis=0)
            explanation.append(np.multiply(_temp, step_sizes[i]))
            

        if isinstance(sample, list):
            return explanation
        elif isinstance(sample, np.ndarray):
            return explanation[0]
        return -1
Example #9
def set_keras_backend(backend):
    if K.backend() != backend:
        os.environ["KERAS_BACKEND"] = backend
        importlib.reload(K)
        assert K.backend() == backend
    if backend == "tensorflow":
        K.get_session().close()
        cfg = K.tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        K.set_session(K.tf.Session(config=cfg))
        K.clear_session()
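A hedged usage sketch; the helper assumes `os`, `importlib`, and `K` are already imported at module level:

import os
import importlib
import keras.backend as K

# Hypothetical call: switch to TensorFlow and get a session whose GPU memory
# grows on demand instead of being fully pre-allocated.
set_keras_backend("tensorflow")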
Example #10
def laplacian1d(X):
    Xout = K.zeros(K.eval(K.shape(X)))
    if K.backend() == 'theano':
        Xout = T.set_subtensor(Xout[1:-1], X[2:] + X[:-2])
        Xout = T.set_subtensor(Xout[0], X[1] + X[0])
        Xout = T.set_subtensor(Xout[-1], X[-1] + X[-2])
    elif K.backend() == 'tensorflow':
        # A sliced `.assign(...)` returns a new tensor without mutating Xout
        # in the graph, so build the result functionally instead.
        Xout = K.concatenate([K.expand_dims(X[1] + X[0], axis=0),
                              X[2:] + X[:-2],
                              K.expand_dims(X[-1] + X[-2], axis=0)], axis=0)
    return Xout - 2*X
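The two boundary entries reuse their nearest neighbour, so after subtracting 2*X the interior is the standard second difference. A NumPy sanity-check sketch of the same stencil (assumes a 1-D input, as the slicing above does):

import numpy as np

def laplacian1d_np(x):
    # Same stencil as above, in NumPy: standard second difference inside,
    # one-sided treatment at the two ends.
    out = np.zeros_like(x)
    out[1:-1] = x[2:] + x[:-2]
    out[0] = x[1] + x[0]
    out[-1] = x[-1] + x[-2]
    return out - 2 * x

x = np.arange(6, dtype=float) ** 2
print(laplacian1d_np(x))  # interior entries are all 2.0 for a quadratic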
Example #11
def diffcc(X):
    Xout = K.zeros(K.eval(K.shape(X)))
    if K.backend() == 'theano':
        Xout = T.set_subtensor(Xout[:, 1:-1], X[:, 2:] + X[:, :-2])
        Xout = T.set_subtensor(Xout[:, 0], X[:, 1] + X[:, 0])
        Xout = T.set_subtensor(Xout[:, -1], X[:, -1] + X[:, -2])
    elif K.backend() == 'tensorflow':
        # As in laplacian1d, sliced .assign does not update Xout in the graph,
        # so build the result with a concatenation along the column axis.
        Xout = K.concatenate([K.expand_dims(X[:, 1] + X[:, 0], axis=1),
                              X[:, 2:] + X[:, :-2],
                              K.expand_dims(X[:, -1] + X[:, -2], axis=1)], axis=1)
    return Xout - 2*X
Example #12
def diffrr(X):
    Xout = K.zeros(K.eval(K.shape(X)))
    if K.backend() == 'theano':
        Xout = T.set_subtensor(Xout[1:-1, :], X[2:, :] + X[:-2, :])
        Xout = T.set_subtensor(Xout[0, :], X[1, :] + X[0, :])
        Xout = T.set_subtensor(Xout[-1, :], X[-1, :] + X[-2, :])
    elif K.backend() == 'tensorflow':
        # Same fix as diffcc, along the row axis.
        Xout = K.concatenate([K.expand_dims(X[1, :] + X[0, :], axis=0),
                              X[2:, :] + X[:-2, :],
                              K.expand_dims(X[-1, :] + X[-2, :], axis=0)], axis=0)
    return Xout - 2*X
Example #13
def classifier_layers(x, input_shape, trainable=False):

    # Compile times on Theano tend to be very high, so we use smaller ROI
    # pooling regions as a workaround (hence the smaller stride in the region
    # that follows the ROI pool).
    if K.backend() == 'tensorflow':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(2, 2), trainable=trainable)
    elif K.backend() == 'theano':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(1, 1), trainable=trainable)

    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b', trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c', trainable=trainable)
    x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)

    return x
Example #14
def get(identifier):
    """Retrieves a Keras Optimizer instance.
    # Arguments
        identifier: Optimizer identifier, one of
            - String: name of an optimizer
            - Dictionary: configuration dictionary.
            - Keras Optimizer instance (it will be returned unchanged).
            - TensorFlow Optimizer instance
                (it will be wrapped as a Keras Optimizer).
    # Returns
        A Keras Optimizer instance.
    # Raises
        ValueError: If `identifier` cannot be interpreted.
    """
    if K.backend() == 'tensorflow':
        # Wrap TF optimizer instances
        if isinstance(identifier, tf.train.Optimizer):
            return TFOptimizer(identifier)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    elif isinstance(identifier, six.string_types):
        config = {'class_name': str(identifier), 'config': {}}
        return deserialize(config)
    if isinstance(identifier, Optimizer):
        return identifier
    else:
        raise ValueError('Could not interpret optimizer identifier:',
                         identifier)
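A usage sketch covering the three plain-Python identifier kinds (the SGD settings are illustrative):

opt1 = get('rmsprop')                                  # string name
opt2 = get({'class_name': 'SGD',
            'config': {'lr': 0.01, 'momentum': 0.9}})  # config dictionary
opt3 = get(opt1)                                       # instance, returned unchanged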
Example #15
def _time_distributed_dense(w, x, b):
    if K.backend() == 'tensorflow':
        x = K.dot(x, w)
        x = K.bias_add(x, b)
    else:
        print("time_distributed_dense doesn't backend tensorflow")
    return x
Example #16
    def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        ref_th = KTH.variable(ref)
        ref_tf = KTF.variable(ref)

        inds = [1, 3, 7, 9]
        inds_th = KTH.variable(inds, dtype='int32')
        inds_tf = KTF.variable(inds, dtype='int32')
        th_z = KTH.gather(ref_th, inds_th)
        th_result = KTH.eval(th_z)
        tf_result = KTF.eval(KTF.gather(ref_tf, inds_tf))

        assert_allclose(tf_result, th_result, atol=1e-05)

        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_result.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 3, 4))
            indices = K.placeholder(shape=(5, 6), dtype='int32')
            y = K.gather(x, indices)
            assert y._keras_shape == (5, 6, 3, 4)
Example #17
def test_clone_sequential_model():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(4, input_shape=(4,)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(4))

    if K.backend() == 'tensorflow':
        # Everything should work in a new session.
        K.clear_session()

    # With placeholder creation
    new_model = keras.models.clone_model(model)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new tensor
    input_a = keras.Input(shape=(4,))
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new, non-Keras tensor
    input_a = keras.backend.variable(val_a)
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(None, val_out)
Example #18
    def test_experiment_fit(self, get_model, get_loss_metric,
                            get_custom_l, get_callback_fix):
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        expe = Experiment(model)

        for mod in [None, model]:
            for data_val_loc in [None, data_val]:
                expe.fit([data], [data_val_loc], model=mod, nb_epoch=2,
                         batch_size=batch_size, metrics=metrics,
                         custom_objects=cust_objects, overwrite=True,
                         callbacks=get_callback_fix)

        expe.backend_name = 'another_backend'
        expe.load_model()
        expe.load_model(expe.mod_id, expe.data_id)

        assert expe.data_id is not None
        assert expe.mod_id is not None
        assert expe.params_dump is not None

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #19
def test_from_config(layer_class):
    # cntk does not support stateful yet.
    stateful_flags = (False, True) if K.backend() != 'cntk' else (False,)
    for stateful in stateful_flags:
        l1 = layer_class(units=1, stateful=stateful)
        l2 = layer_class.from_config(l1.get_config())
        assert l1.get_config() == l2.get_config()
Example #20
 def inner_loss(y_true, y_pred):
     # Workaround until https://github.com/plaidml/plaidml/pull/284 is accepted
     if K.backend() == "plaidml.keras.backend":
         y_true = K.reshape(y_true, y_pred.shape.dims)
     n_true = K.concatenate([y_true[:, :, :, i:i+1] * mask for i in range(3)], axis=-1)
     n_pred = K.concatenate([y_pred[:, :, :, i:i+1] * mask for i in range(3)], axis=-1)
     return loss_func(n_true, n_pred)
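`mask` and `loss_func` are free variables in `inner_loss`, so it has to be produced by an enclosing factory. A hedged reconstruction (the factory name and signature are assumptions):

def penalized_loss(mask, loss_func):
    # Hypothetical factory binding the free variables used by inner_loss.
    def inner_loss(y_true, y_pred):
        if K.backend() == "plaidml.keras.backend":
            y_true = K.reshape(y_true, y_pred.shape.dims)
        n_true = K.concatenate([y_true[:, :, :, i:i+1] * mask for i in range(3)], axis=-1)
        n_pred = K.concatenate([y_pred[:, :, :, i:i+1] * mask for i in range(3)], axis=-1)
        return loss_func(n_true, n_pred)
    return inner_loss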
Example #21
def test_basic_batchnorm():
    layer_test(normalization.BatchNormalization,
               kwargs={'momentum': 0.9,
                       'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'momentum': 0.9,
                       'epsilon': 0.1,
                       'axis': 1},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'gamma_initializer': 'ones',
                       'beta_initializer': 'ones',
                       'moving_mean_initializer': 'zeros',
                       'moving_variance_initializer': 'ones'},
               input_shape=(3, 4, 2, 4))
    if K.backend() != 'theano':
        layer_test(normalization.BatchNormalization,
                   kwargs={'momentum': 0.9,
                           'epsilon': 0.1,
                           'axis': 1,
                           'scale': False,
                           'center': False},
                   input_shape=(3, 4, 2, 4))
Example #22
def _get_output_shape(model_fn):
    if K.backend() == 'cntk':
        # Create model in a subprocess so that
        # the memory consumed by InceptionResNetV2 will be
        # released back to the system after this test
        # (to deal with OOM error on CNTK backend).
        # TODO: remove the use of multiprocessing from these tests
        # once a memory clearing mechanism
        # is implemented in the CNTK backend.
        def target(queue):
            model = model_fn()
            queue.put(model.output_shape)
        queue = Queue()
        p = Process(target=target, args=(queue,))
        p.start()
        p.join()
        # The error in a subprocess won't propagate
        # to the main process, so we check if the model
        # is successfully created by checking if the output shape
        # has been put into the queue
        assert not queue.empty(), 'Model creation failed.'
        return queue.get_nowait()
    else:
        model = model_fn()
        return model.output_shape
Example #23
def generator_deconv(noise_dim, img_dim, bn_mode, batch_size, model_name="generator_deconv", dset="mnist"):
    """DCGAN generator based on Deconv2D

    Args:
        noise_dim: Dimension of the noise input
        img_dim: dimension of the image output
        bn_mode: keras batchnorm mode
        batch_size: needed to reshape after the deconv2D
        model_name: model name (default: {"generator_deconv"})
        dset: dataset (default: {"mnist"})

    Returns:
        keras model
    """

    assert K.backend() == "tensorflow", "Deconv not implemented with theano"

    s = img_dim[1]
    f = 512

    if dset == "mnist":
        start_dim = int(s / 4)
        nb_upconv = 2
    else:
        start_dim = int(s / 16)
        nb_upconv = 4

    reshape_shape = (start_dim, start_dim, f)
    bn_axis = -1
    output_channels = img_dim[-1]

    gen_input = Input(shape=noise_dim, name="generator_input")

    # Noise input and reshaping
    x = Dense(f * start_dim * start_dim, input_dim=noise_dim, bias=False)(gen_input)
    x = Reshape(reshape_shape)(x)
    x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
    x = Activation("relu")(x)

    # Transposed conv blocks: Deconv2D->BN->ReLU
    for i in range(nb_upconv - 1):
        nb_filters = int(f / (2 ** (i + 1)))
        s = start_dim * (2 ** (i + 1))
        o_shape = (batch_size, s, s, nb_filters)
        x = Deconvolution2D(nb_filters, 3, 3,
                            output_shape=o_shape, subsample=(2, 2), border_mode="same", bias=False, init=conv2D_init)(x)
        x = BatchNormalization(mode=2, axis=-1)(x)
        x = Activation("relu")(x)

    # Last block
    s = start_dim * (2 ** (nb_upconv))
    o_shape = (batch_size, s, s, output_channels)
    x = Deconvolution2D(output_channels, 3, 3,
                        output_shape=o_shape, subsample=(2, 2), border_mode="same", bias=False, init=conv2D_init)(x)
    x = Activation("tanh")(x)

    generator_model = Model(input=[gen_input], output=[x], name=model_name)
    visualize_model(generator_model)

    return generator_model
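A hypothetical call for MNIST-sized output, left commented out because `conv2D_init` and `visualize_model` are defined elsewhere in the source project:

# generator = generator_deconv(noise_dim=(100,), img_dim=(28, 28, 1),
#                              bn_mode=2, batch_size=32)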
Example #24
    def test_build_predict_func(self, get_model):
        """Test the build of a model"""
        new_session()
        X_tr = np.ones((train_samples, input_dim))
        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_name = model.__class__.__name__

        pred_func = KTB.build_predict_func(model)

        tensors = [X_tr]
        if model_name != 'Model':
            tensors.append(1.)

        res = pred_func(tensors)

        assert len(res[0]) == len(X_tr)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #25
def test_weighted_metrics_with_no_sample_weight():
    decimal = decimal_precision[K.backend()]

    model = create_sequential_model()
    model.compile(loss=loss, optimizer='rmsprop', metrics=[loss], weighted_metrics=[loss])

    (x_train, y_train), (x_test, y_test), _ = _get_test_data()

    history = model.fit(x_train, y_train, batch_size=batch_size,
                        epochs=epochs // 3, verbose=0)

    h = history.history
    assert_array_almost_equal(h['loss'], h[loss_full_name], decimal=decimal)
    assert_array_almost_equal(h['loss'], h['weighted_' + loss_full_name], decimal=decimal)

    history = model.fit(x_train, y_train, batch_size=batch_size,
                        epochs=epochs // 3, verbose=0, validation_split=0.1)

    h = history.history
    assert_array_almost_equal(h['val_loss'], h['val_' + loss_full_name], decimal=decimal)
    assert_array_almost_equal(h['val_loss'], h['val_weighted_' + loss_full_name], decimal=decimal)

    model.train_on_batch(x_train[:32], y_train[:32])
    model.test_on_batch(x_train[:32], y_train[:32])

    scores = model.evaluate(x_test, y_test, verbose=0)
    loss_score, metric_score, weighted_metric_score = scores

    assert_almost_equal(loss_score, metric_score, decimal=decimal)
    assert_almost_equal(loss_score, weighted_metric_score, decimal=decimal)
Example #26
    def call(self, inputs, **kwargs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of the capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to get standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = K.softmax(b, 1)
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)
        return o
Example #27
    def test_experiment_fit_gen(self, get_model, get_loss_metric,
                                get_custom_l, get_callback_fix):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        model_name = model.__class__.__name__
        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        for val in [1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            if val == 1:
                val, data_2, data_stream_2 = make_gen(batch_size)
            expe.fit_gen([gen], [val], nb_epoch=2,
                         model=model,
                         metrics=metrics,
                         custom_objects=cust_objects,
                         samples_per_epoch=64,
                         nb_val_samples=128,
                         verbose=2, overwrite=True,
                         callbacks=get_callback_fix)

            close_gens(gen, data, data_stream)
            if val == 1:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #28
    def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_z = KTH.repeat_elements(arr_th, reps, axis=rep_axis)
                th_rep = KTH.eval(th_z)
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
                if hasattr(th_z, '_keras_shape'):
                    assert th_z._keras_shape == th_rep.shape

                # test theano shape inference when
                # input shape has None entries
                if K.backend() == 'theano':
                    shape = list(shape)
                    shape[rep_axis] = None
                    x = K.placeholder(shape=shape)
                    y = K.repeat_elements(x, reps, axis=rep_axis)
                    assert y._keras_shape == tuple(shape)
Example #29
def main():
    """Generate different test models and save them to the given directory."""
    if len(sys.argv) != 3:
        print('usage: [model name] [destination file path]')
        sys.exit(1)
    else:
        model_name = sys.argv[1]
        dest_path = sys.argv[2]

        get_model_functions = {
            'small': get_test_model_small,
            'sequential': get_test_model_sequential,
            'full': get_test_model_full
        }

        if model_name not in get_model_functions:
            print('unknown model name: ', model_name)
            sys.exit(2)

        assert K.backend() == "tensorflow"
        assert K.floatx() == "float32"
        assert K.image_data_format() == 'channels_last'

        np.random.seed(0)

        model_func = get_model_functions[model_name]
        model = model_func()
        model.save(dest_path, include_optimizer=False)

        # Make sure models can be loaded again,
        # see https://github.com/fchollet/keras/issues/7682
        model = load_model(dest_path)
        model.summary()
Example #30
def test_weighted_metrics_with_multiple_outputs():
    decimal = decimal_precision[K.backend()]

    inputs = Input(shape=(5,))
    x = Dense(5)(inputs)
    output1 = Dense(1, name='output1')(x)
    output2 = Dense(1, name='output2')(x)

    model = Model(inputs=inputs, outputs=[output1, output2])

    metrics = {'output1': [loss], 'output2': [loss]}
    weighted_metrics = {'output2': [loss]}
    loss_map = {'output1': loss, 'output2': loss}

    model.compile(loss=loss_map, optimizer='sgd', metrics=metrics, weighted_metrics=weighted_metrics)

    x = np.array([[1, 1, 1, 1, 1]])
    y = {'output1': np.array([0]), 'output2': np.array([1])}
    weight = 5

    history = model.fit(x, y, sample_weight={'output2': np.array([weight])})

    unweighted_metric = history.history['output2_' + loss_full_name][0]
    weighted_metric = history.history['output2_weighted_' + loss_full_name][0]

    assert_almost_equal(unweighted_metric * weight, weighted_metric, decimal=decimal)
Example #31
def Xception(include_top=True, weights='imagenet', input_tensor=None):
    '''Instantiate the Xception architecture,
    optionally loading weights pre-trained
    on ImageNet. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    dimension ordering `(width, height, channels)`.
    You should set `image_dim_ordering="tf"` in your Keras config
    located at ~/.keras/keras.json.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.

    # Returns
        A Keras model instance.
    '''
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if K.backend() != 'tensorflow':
        raise RuntimeError('The Xception model is only available with '
                           'the TensorFlow backend.')
    if K.image_dim_ordering() != 'tf':
        warnings.warn('The Xception model is only available for the '
                      'input dimension ordering "tf" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'dimension ordering "th" (channels, width, height). '
                      'You should set `image_dim_ordering="tf"` in your Keras '
                      'config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "tf" dimension ordering.')
        K.set_image_dim_ordering('tf')
        old_dim_ordering = 'th'
    else:
        old_dim_ordering = None

    # Determine proper input shape
    if include_top:
        input_shape = (299, 299, 3)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, 3, 3, subsample=(2, 2), bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, 3, 3, bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128,
                      1,
                      1,
                      subsample=(2, 2),
                      border_mode='same',
                      bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     border_mode='same',
                     name='block2_pool')(x)
    x = merge([x, residual], mode='sum')

    residual = Conv2D(256,
                      1,
                      1,
                      subsample=(2, 2),
                      border_mode='same',
                      bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     border_mode='same',
                     name='block3_pool')(x)
    x = merge([x, residual], mode='sum')

    residual = Conv2D(728,
                      1,
                      1,
                      subsample=(2, 2),
                      border_mode='same',
                      bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     border_mode='same',
                     name='block4_pool')(x)
    x = merge([x, residual], mode='sum')

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728,
                            3,
                            3,
                            border_mode='same',
                            bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728,
                            3,
                            3,
                            border_mode='same',
                            bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728,
                            3,
                            3,
                            border_mode='same',
                            bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = merge([x, residual], mode='sum')

    residual = Conv2D(1024,
                      1,
                      1,
                      subsample=(2, 2),
                      border_mode='same',
                      bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     border_mode='same',
                     name='block13_pool')(x)
    x = merge([x, residual], mode='sum')

    x = SeparableConv2D(1536,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048,
                        3,
                        3,
                        border_mode='same',
                        bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(1000, activation='softmax', name='predictions')(x)

    # Create model
    model = Model(img_input, x)

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')
        model.load_weights(weights_path)

    if old_dim_ordering:
        K.set_image_dim_ordering(old_dim_ordering)
    return model
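A hypothetical usage, kept commented out since it downloads the ImageNet weights; the model expects preprocessed batches of shape (n, 299, 299, 3):

# model = Xception(include_top=True, weights='imagenet')
# preds = model.predict(x)  # x: preprocessed batch, shape (n, 299, 299, 3)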
Example #32
if K.image_dim_ordering() == "th":  # reconstructed guard: the snippet begins mid-`if`
    weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
                       THEANO_WEIGHTS_PATH_NO_TOP,
                       cache_subdir='models')
else:
    if args.model == "vgg19":
        weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                           TF_19_WEIGHTS_PATH_NO_TOP,
                           cache_subdir='models')
    else:
        weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                           TF_WEIGHTS_PATH_NO_TOP,
                           cache_subdir='models')

model.load_weights(weights)

if K.backend() == 'tensorflow' and K.image_dim_ordering() == "th":
    warnings.warn('You are using the TensorFlow backend, yet you '
                  'are using the Theano '
                  'image dimension ordering convention '
                  '(`image_dim_ordering="th"`). '
                  'For best performance, set '
                  '`image_dim_ordering="tf"` in '
                  'your Keras config '
                  'at ~/.keras/keras.json.')
    convert_all_kernels_in_model(model)

print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
Example #33
def FCN(basenet='vgg16', trainable_base=True,
        num_output=21, input_shape=(None, None, 3),
        weights='imagenet'):
    """Instantiate the FCN8s architecture with keras.

    # Arguments
        basenet: type of basenet, one of {'vgg16'}
        trainable_base: Bool whether the basenet weights are trainable
        num_output: number of classes
        input_shape: input image shape
        weights: pre-trained weights to load (None for training from scratch)
    # Returns
        A Keras model instance
    """
    _handle_data_format()
    basenet = _get_basenet(basenet)
    # input
    input = Input(shape=input_shape)
    # Get skip_layers=[drop7, pool4, pool3] from the base net: VGG16
    skip_layers = basenet(skip_architecture=True)(input)

    drop7 = skip_layers[0]
    score_fr = Conv2D(filters=num_output, kernel_size=(1, 1),
                      padding='valid',
                      name='score_fr')(drop7)
    upscore2 = Conv2DTranspose(filters=num_output, kernel_size=(4, 4),
                               strides=(2, 2), padding='valid', use_bias=False,
                               data_format=K.image_data_format(),
                               name='upscore2')(score_fr)
    # scale pool4 skip for compatibility
    pool4 = skip_layers[1]
    scale_pool4 = Lambda(lambda x: x * 0.01, name='scale_pool4')(pool4)
    score_pool4 = Conv2D(filters=num_output, kernel_size=(1, 1),
                         padding='valid', name='score_pool4')(scale_pool4)
    score_pool4c = _crop(upscore2, offset=(5, 5),
                         name='score_pool4c')(score_pool4)
    fuse_pool4 = add([upscore2, score_pool4c])
    upscore_pool4 = Conv2DTranspose(filters=num_output, kernel_size=(4, 4),
                                    strides=(2, 2), padding='valid',
                                    use_bias=False,
                                    data_format=K.image_data_format(),
                                    name='upscore_pool4')(fuse_pool4)
    # scale pool3 skip for compatibility
    pool3 = skip_layers[2]
    scale_pool3 = Lambda(lambda x: x * 0.0001, name='scale_pool3')(pool3)
    score_pool3 = Conv2D(filters=num_output, kernel_size=(1, 1),
                         padding='valid', name='score_pool3')(scale_pool3)
    score_pool3c = _crop(upscore_pool4, offset=(9, 9),
                         name='score_pool3c')(score_pool3)
    fuse_pool3 = add([upscore_pool4, score_pool3c])
    # score
    upscore8 = Conv2DTranspose(filters=num_output, kernel_size=(16, 16),
                               strides=(8, 8), padding='valid',
                               use_bias=False,
                               data_format=K.image_data_format(),
                               name='upscore8')(fuse_pool3)
    score = _crop(input, offset=(31, 31), name='score')(upscore8)

    # model
    model = Model(input, score, name='fcn_vgg16')

    # load weights
    if weights == 'imagenet':
        weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                                basenet.WEIGHTS_PATH,
                                cache_subdir='models')
        layer_names = load_weights(model, weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
        # Freezing basenet weights
        if not trainable_base:
            for layer in model.layers:
                if layer.name in layer_names:
                    layer.trainable = False

    return model
Example #34
 def inner(*args, **kwargs):
     if K.backend() == "tensorflow":
         return f(tf, *args, **kwargs)
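`f` and `tf` are free variables, so `inner` is the body of a decorator that injects the TensorFlow module into the wrapped function. A hedged reconstruction (the decorator name is an assumption):

import functools

def tensorflow_only(f):
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if K.backend() == "tensorflow":
            import tensorflow as tf
            return f(tf, *args, **kwargs)
    return inner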
Example #35
def SqueezeNet(
        input_tensor=None,
        input_shape=None,
        weights='imagenet',
        classes=1000,
        use_bn_on_input=False,  # to avoid preprocessing
        first_stride=2):
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if input_tensor is None:
        raw_img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            raw_img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            raw_img_input = input_tensor
    if use_bn_on_input:
        img_input = BatchNormalization()(raw_img_input)
    else:
        img_input = raw_img_input

    x = Convolution2D(64, (3, 3),
                      strides=(first_stride, first_stride),
                      padding='valid',
                      name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = BatchNormalization()(x)

    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=192)

    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
    x = BatchNormalization()(x)
    x = Dropout(0.5, name='drop9')(x)

    x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    x = GlobalAveragePooling2D()(x)

    out = Activation('softmax', name='loss')(x)
    #    out = Dense(1, activation='softmax', name='loss')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = raw_img_input

    model = Model(inputs, out, name='squeezenet')

    # load weights
    if weights == 'imagenet':

        weights_path = get_file(
            'squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
            WEIGHTS_PATH,
            cache_subdir='models')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
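A hypothetical call that passes the `weights`/`classes` checks above: a randomly initialized SqueezeNet for 10 classes on 224x224 RGB inputs:

# model = SqueezeNet(input_shape=(224, 224, 3), weights=None, classes=10)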
Example #36
def get_nvidia_model(summary=True):
    """
    Get the keras Model corresponding to the NVIDIA architecture described in:
    Bojarski, Mariusz, et al. "End to end learning for self-driving cars."
    The paper describes the network architecture but doesn't go into detail on some aspects.
    Input normalization and ELU activations are my personal implementation choices.
    :param summary: show model summary
    :return: keras Model of NVIDIA architecture
    """
    init = 'glorot_uniform'

    if K.backend() == 'theano':
        input_frame = Input(shape=(CONFIG['input_channels'], NVIDIA_H,
                                   NVIDIA_W))
    else:
        input_frame = Input(shape=(NVIDIA_H, NVIDIA_W,
                                   CONFIG['input_channels']))

    # standardize input
    x = Lambda(lambda z: z / 127.5 - 1.)(input_frame)

    x = Convolution2D(24,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(36,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(48,
                      5,
                      5,
                      border_mode='valid',
                      subsample=(2, 2),
                      init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)

    x = Flatten()(x)

    x = Dense(100, init=init)(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(50, init=init)(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(10, init=init)(x)
    x = ELU()(x)
    out = Dense(1, init=init)(x)

    model = Model(input=input_frame, output=out)

    if summary:
        model.summary()

    return model
Example #37
def DenseNet(input_shape=None,
             depth=40,
             nb_dense_block=3,
             growth_rate=12,
             nb_filter=-1,
             nb_layers_per_block=-1,
             bottleneck=False,
             reduction=0.0,
             dropout_rate=0.0,
             weight_decay=1e-4,
             subsample_initial_block=False,
             include_top=True,
             weights=None,
             input_tensor=None,
             pooling=None,
             classes=10,
             activation='softmax'):
    '''Instantiate the DenseNet architecture.

    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` dim ordering)
            or `(3, 224, 224)` (with `channels_first` dim ordering).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 8.
            E.g. `(224, 224, 3)` would be one valid value.
        depth: number of layers in the DenseNet
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. -1 indicates initial
            number of filters will default to 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be -1, a positive integer, or a list.
            If -1, calculates nb_layer_per_block from the network depth.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be nb_dense_block
        bottleneck: flag to add bottleneck blocks in between dense blocks
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Changes model type to suit different datasets.
            Should be set to True for ImageNet, and False for CIFAR datasets.
            When set to True, the initial convolution will be strided and
            adds a MaxPooling2D before the initial dense block.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization) or
            'imagenet' (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        activation: Type of activation at the top layer. Can be one of
            'softmax' or 'sigmoid'. Note that if sigmoid is used,
             classes must be 1.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    '''

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')

    if activation == 'sigmoid' and classes != 1:
        raise ValueError(
            'sigmoid activation can only be used when classes = 1')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = __create_dense_net(classes, img_input, include_top, depth,
                           nb_dense_block, growth_rate, nb_filter,
                           nb_layers_per_block, bottleneck, reduction,
                           dropout_rate, weight_decay, subsample_initial_block,
                           pooling, activation)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='densenet')

    # load weights
    if weights == 'imagenet':
        weights_loaded = False

        if (depth == 121) and (nb_dense_block == 4) and (growth_rate == 32) and (nb_filter == 64) and \
                (bottleneck is True) and (reduction == 0.5) and (subsample_initial_block):
            if include_top:
                weights_path = get_file(
                    'DenseNet-BC-121-32.h5',
                    DENSENET_121_WEIGHTS_PATH,
                    cache_subdir='models',
                    md5_hash='a439dd41aa672aef6daba4ee1fd54abd')
            else:
                weights_path = get_file(
                    'DenseNet-BC-121-32-no-top.h5',
                    DENSENET_121_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models',
                    md5_hash='55e62a6358af8a0af0eedf399b5aea99')
            model.load_weights(weights_path, by_name=True)
            weights_loaded = True

        if (depth == 161) and (nb_dense_block == 4) and (growth_rate == 48) and (nb_filter == 96) and \
                (bottleneck is True) and (reduction == 0.5) and (subsample_initial_block):
            if include_top:
                weights_path = get_file(
                    'DenseNet-BC-161-48.h5',
                    DENSENET_161_WEIGHTS_PATH,
                    cache_subdir='models',
                    md5_hash='6c326cf4fbdb57d31eff04333a23fcca')
            else:
                weights_path = get_file(
                    'DenseNet-BC-161-48-no-top.h5',
                    DENSENET_161_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models',
                    md5_hash='1a9476b79f6b7673acaa2769e6427b92')
            model.load_weights(weights_path, by_name=True)
            weights_loaded = True

        if (depth == 169) and (nb_dense_block == 4) and (growth_rate == 32) and (nb_filter == 64) and \
                (bottleneck is True) and (reduction == 0.5) and (subsample_initial_block):
            if include_top:
                weights_path = get_file(
                    'DenseNet-BC-169-32.h5',
                    DENSENET_169_WEIGHTS_PATH,
                    cache_subdir='models',
                    md5_hash='914869c361303d2e39dec640b4e606a6')
            else:
                weights_path = get_file(
                    'DenseNet-BC-169-32-no-top.h5',
                    DENSENET_169_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models',
                    md5_hash='89c19e8276cfd10585d5fadc1df6859e')
            model.load_weights(weights_path, by_name=True)
            weights_loaded = True

        if weights_loaded:
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)

            if (K.image_data_format() == 'channels_first' and
                    K.backend() == 'tensorflow'):
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')

            print("Weights for the model were loaded successfully")

    return model
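
A usage sketch for the pretrained branch above. The enclosing function's name is not visible in this excerpt, so `DenseNet` below is a hypothetical handle for it; the keyword arguments match the DenseNet-BC-121-32 branch:

model = DenseNet(input_shape=(224, 224, 3), depth=121, nb_dense_block=4,
                 growth_rate=32, nb_filter=64, bottleneck=True, reduction=0.5,
                 subsample_initial_block=True, include_top=True,
                 weights='imagenet')
model.summary()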
Example #38
        x[rowi, :padding] = 0

    # 50% of the time the correct output is the input.
    # The other 50% of the time it's (2 * input) % 10.
    y = (x * np.random.randint(1, 3, size=x.shape)) % 10
    ys = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        ys[i, target] = 1
    ys = ys.reshape(y.shape + (10, ))

    history = model.fit(x,
                        ys,
                        validation_split=0.05,
                        batch_size=10,
                        verbose=0,
                        epochs=3)
    ground_truth = -np.log(0.5)
    assert (np.abs(history.history['loss'][-1] - ground_truth) < 0.06)
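
The asserted value follows from the construction above: each input digit has two equally likely correct outputs (the digit itself, or twice the digit mod 10), so an optimal model assigns probability 0.5 to each candidate class and the cross-entropy converges to -log(0.5). A quick check of the constant:

import numpy as np

print(-np.log(0.5))  # ~0.6931, the `ground_truth` asserted above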


@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TF backend')
def test_embedding_with_clipnorm():
    model = Sequential()
    model.add(layers.Embedding(input_dim=1, output_dim=1))
    model.compile(optimizer=optimizers.SGD(clipnorm=0.1), loss='mse')
    model.fit(np.array([[0]]), np.array([[[0.5]]]), epochs=1)


if __name__ == '__main__':
    pytest.main([__file__])
Example #39
def _zero_grad(e, vs):
    if K.backend() == 'cntk':
        return K.stop_gradient(e)
    else:
        # Add a term that is identically zero but depends on every tensor in
        # `vs`, so `e` stays connected to them in the graph and gradients
        # w.r.t. `vs` remain defined (and evaluate to zero).
        z = 0 * K.sum(K.concatenate([K.batch_flatten(v) for v in vs]))
        return K.stop_gradient(e) + z
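
To see what the zero-valued term buys, ask for gradients of the stopped expression with respect to `vs`: without the connecting term, some backends would report a disconnected graph. A minimal sketch, assuming a TensorFlow or Theano backend (`v` and the expression are illustrative):

import keras.backend as K

v = K.variable([[1.0, 2.0]])
e = K.sum(v * v)
# _zero_grad(e, [v]) is constant w.r.t. v, but the zero term keeps v in the
# graph, so the gradient is defined and evaluates to zeros.
grads = K.gradients(_zero_grad(e, [v]), [v])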
Example #40
    def call(self, x, mask=None):
        # TODO: validate input shape

        assert (len(x) == 3)
        L_flat = x[0]
        mu = x[1]
        a = x[2]

        if self.mode == 'full':
            # Create L and L^T matrix, which we use to construct the positive-definite matrix P.
            L = None
            LT = None
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, L_acc, LT_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)],
                                         x)
                    diag = K.exp(T.diag(x_)) + K.epsilon()
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)],
                                         diag)
                    return x_, x_.T

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                results, _ = theano.scan(fn=fn,
                                         sequences=L_flat,
                                         outputs_info=outputs_info)
                L, LT = results
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Number of elements in a triangular matrix.
                nb_elems = (self.nb_actions * self.nb_actions +
                            self.nb_actions) // 2

                # Add a leading zero element to each row of L_flat. We use this
                # zero element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1, )), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except (TypeError, ValueError):
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Create mask that can be used to gather elements from L_flat and put them
                # into a lower triangular matrix.
                tril_mask = np.zeros((self.nb_actions, self.nb_actions),
                                     dtype='int32')
                tril_mask[np.tril_indices(self.nb_actions)] = range(
                    1, nb_elems + 1)

                # Finally, process each element of the batch.
                init = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]

                # Create mask for the diagonal elements in L_flat. This is used to exponentiate
                # only the diagonal elements, which is done before gathering.
                diag_indices = [0]
                for row in range(1, self.nb_actions):
                    diag_indices.append(diag_indices[-1] + (row + 1))
                diag_mask = np.zeros(1 + nb_elems)  # +1 for the leading zero
                diag_mask[np.array(diag_indices) + 1] = 1
                diag_mask = K.variable(diag_mask)

                def fn(a, x):
                    # Multiply by the mask first to avoid exponentiating everything, even
                    # though we'll multiply by it again later, this saves on computation and
                    # can help prevent NaNs that might crop up during exponentiation.
                    x_ = x * diag_mask
                    # Exponentiate only the elements kept by the mask, with the rest being set
                    # to e^0 = 1 as a result.
                    x_ = K.exp(x_) + K.epsilon()
                    # Remove all the e^0 = 1 entries that were created by exponentiating the
                    # non-diagonal elements of x_ and set them to 0 instead.
                    x_ *= diag_mask
                    # Add the original, non-diagonal elements.
                    x_ += x * (1. - diag_mask)
                    # Finally, gather everything into a lower triangular matrix.
                    L_ = tf.gather(x_, tril_mask)
                    return [L_, tf.transpose(L_)]

                tmp = tf.scan(fn, L_flat, initializer=init)
                if isinstance(tmp, (list, tuple)):
                    # TensorFlow 0.10 now returns a tuple of tensors.
                    L, LT = tmp
                else:
                    # Old TensorFlow < 0.10 returns a shared tensor.
                    L = tmp[:, 0, :, :]
                    LT = tmp[:, 1, :, :]
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(
                    K.backend()))
            assert L is not None
            assert LT is not None
            P = K.batch_dot(L, LT)
        elif self.mode == 'diag':
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, P_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)],
                                         x)
                    return x_

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                P, _ = theano.scan(fn=fn,
                                   sequences=L_flat,
                                   outputs_info=outputs_info)
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Create mask that can be used to gather elements from L_flat and put them
                # into a diagonal matrix.
                diag_mask = np.zeros((self.nb_actions, self.nb_actions),
                                     dtype='int32')
                diag_mask[np.diag_indices(self.nb_actions)] = range(
                    1, self.nb_actions + 1)

                # Add a leading zero element to each row of L_flat. We use this
                # zero element when gathering L_flat into the diagonal matrix.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1, )), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except (TypeError, ValueError):
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Finally, process each element of the batch.
                def fn(a, x):
                    x_ = tf.gather(x, diag_mask)
                    return x_

                P = tf.scan(fn,
                            L_flat,
                            initializer=K.zeros(
                                (self.nb_actions, self.nb_actions)))
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(
                    K.backend()))
        assert P is not None
        assert K.ndim(P) == 3

        # Combine a, mu and P into a scalar (over the batches). What we compute here is
        # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
        # TensorFlow handles vector * P slightly suboptimally, hence we convert the vectors to
        # 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
        # operations happen over the batch size, which is dimension 0.
        prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
        prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
        A = -.5 * K.batch_flatten(prod)
        assert K.ndim(A) == 2
        return A
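
As a sanity check on the algebra above, here is a small numpy sketch (not part of the layer) of the batched quadratic form: with P = L L^T built from a lower-triangular L with positive diagonal, P is positive definite, so the advantage A = -0.5 * (a - mu)^T P (a - mu) is never positive:

import numpy as np

nb_actions = 3
L = np.tril(np.random.rand(nb_actions, nb_actions)) + np.eye(nb_actions)
P = L @ L.T                      # positive definite by construction
a, mu = np.random.rand(nb_actions), np.random.rand(nb_actions)
d = a - mu
A = -0.5 * d @ P @ d
print(A <= 0)                    # True: the advantage peaks at a == mu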
    def __init__(self,
                 img_height,
                 img_width,
                 this_scale,
                 angles=[
                     0, np.pi / 6, np.pi / 3, np.pi / 2, 2 * np.pi / 3,
                     5 * np.pi / 6
                 ],
                 aspect_ratios=[2.0, 4.0, 6.0],
                 this_steps=None,
                 this_offsets=None,
                 variances=[0.1, 0.1, 0.2, 0.2],
                 normalize_coords=False,
                 **kwargs):
        '''
        All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is undefined.
        Some of these arguments are explained in more detail in the documentation of the `DRBoxEncoder` class.

        Arguments:
            img_height (int): The height of the input images.
            img_width (int): The width of the input images.
            this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes
                as a fraction of the shorter side of the input image.
            angles (list, optional): A list of floats > 0, the angles in radians
                for which corresponding anchor boxes are to be created.
            aspect_ratios (list, optional): The list of aspect ratios for which default boxes are to be
                generated for this layer.
            variances (list, optional): A list of 5 floats >0. The anchor box offset for each coordinate will be divided by
                its respective variance value.
            normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
                i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
        '''
        if K.backend() != 'tensorflow':
            raise TypeError(
                "This layer only supports TensorFlow at the moment, but you are using the {} backend."
                .format(K.backend()))

        if (this_scale < 0) or (this_scale > 1):
            raise ValueError(
                "`this_scale` must be in [0, 1] but `this_scale` == {}".format(
                    this_scale))

        if len(variances) != 5:
            raise ValueError(
                "5 variance values must be pased, but {} values were received."
                .format(len(variances)))
        variances = np.array(variances)
        if np.any(variances <= 0):
            raise ValueError(
                "All variances must be >0, but the variances given are {}".
                format(variances))

        self.img_height = img_height
        self.img_width = img_width
        self.this_scale = this_scale
        self.aspect_ratios = aspect_ratios
        self.this_steps = this_steps
        self.this_offsets = this_offsets
        self.variances = variances
        self.normalize_coords = normalize_coords
        self.angles = angles
        # Compute the number of boxes per cell
        self.n_boxes = len(aspect_ratios) * len(angles)
        super(AnchorBoxes, self).__init__(**kwargs)
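
A minimal construction sketch (illustrative values; note that the default `variances` in the signature above has only four entries while the check demands five, so a five-element list must be passed explicitly):

layer = AnchorBoxes(img_height=300, img_width=300, this_scale=0.5,
                    variances=[0.1, 0.1, 0.2, 0.2, 0.2])
print(layer.n_boxes)  # 3 default aspect ratios x 6 default angles = 18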
Example #42
import pytest
import numpy as np
from numpy.testing import assert_allclose

from keras.utils.test_utils import layer_test
from keras import backend as K
from keras.layers import convolutional
from keras.models import Sequential
from keras.backend import load_backend

# TensorFlow does not support full convolution.
if K.backend() == 'theano':
    _convolution_paddings = ['valid', 'same', 'full']
else:
    _convolution_paddings = ['valid', 'same']


@pytest.mark.skipif((K.backend() == 'cntk' and load_backend.dev.type() == 0),
                    reason='CNTK only supports dilated conv on GPU')
@pytest.mark.parametrize(
    'layer_kwargs,input_length,expected_output',
    [
        # Causal
        ({
            'filters': 1,
            'kernel_size': 2,
            'dilation_rate': 1,
            'padding': 'causal',
            'kernel_initializer': 'ones',
            'use_bias': False
        }, 4, [[[0], [1], [3], [5]]]),
Example #43
    config = ld.get_config()
    ld = layers.Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = layers.Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})


@pytest.mark.skipif((K.backend() == 'theano'),
                    reason="theano cannot compute "
                    "the output shape automatically.")
def test_lambda_output_shape():
    layer_test(layers.Lambda,
               kwargs={'function': lambda x: K.mean(x, axis=-1)},
               input_shape=(3, 2, 4))


def test_dense():
    layer_test(layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))

    layer_test(layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))

    layer_test(layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
Example #44
def WideResidualNetwork(depth=28, width=8, dropout_rate=0.0,
                        include_top=True, weights='cifar10',
                        input_tensor=None, input_shape=None,
                        classes=10):
    """Instantiate the Wide Residual Network architecture,
        optionally loading weights pre-trained
        on CIFAR-10. Note that when using TensorFlow,
        for best performance you should set
        `image_dim_ordering="tf"` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The dimension ordering
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            depth: number of layers in the network
            width: multiplier to the ResNet width (number of filters)
            dropout_rate: dropout rate
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization) or
                "cifar10" (pre-training on CIFAR-10).
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(32, 32, 3)` with `tf` dim ordering
                or `(3, 32, 32)` with `th` dim ordering).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 8.
                E.g. `(200, 200, 3)` would be one valid value.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.

        # Returns
            A Keras model instance.
        """

    if weights not in {'cifar10', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `cifar10` '
                         '(pre-training on CIFAR-10).')

    if weights == 'cifar10' and include_top and classes != 10:
        raise ValueError('If using `weights` as CIFAR-10 with `include_top` '
                         'as true, `classes` should be 10')

    if (depth - 4) % 6 != 0:
        raise ValueError('Depth of the network must be such that '
                         '(depth - 4) is divisible by 6.')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      dim_ordering=K.image_dim_ordering(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = __create_wide_residual_network(classes, img_input, include_top, depth, width,
                                       dropout_rate)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='wide-resnet')

    # load weights
    if weights == 'cifar10':
        if (depth == 28) and (width == 8) and (dropout_rate == 0.0):
            # Default parameters match. Weights for this model exist:

            if K.image_dim_ordering() == 'th':
                if include_top:
                    weights_path = get_file('wide_resnet_28_8_th_dim_ordering_th_kernels.h5',
                                            TH_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    weights_path = get_file('wide_resnet_28_8_th_dim_ordering_th_kernels_no_top.h5',
                                            TH_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'tensorflow':
                    warnings.warn('You are using the TensorFlow backend, yet you '
                                  'are using the Theano '
                                  'image dimension ordering convention '
                                  '(`image_dim_ordering="th"`). '
                                  'For best performance, set '
                                  '`image_dim_ordering="tf"` in '
                                  'your Keras config '
                                  'at ~/.keras/keras.json.')
                    convert_all_kernels_in_model(model)
            else:
                if include_top:
                    weights_path = get_file('wide_resnet_28_8_tf_dim_ordering_tf_kernels.h5',
                                            TF_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    weights_path = get_file('wide_resnet_28_8_tf_dim_ordering_tf_kernels_no_top.h5',
                                            TF_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'theano':
                    convert_all_kernels_in_model(model)

    return model
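
A usage sketch for the pretrained branch above; WRN-28-8 with no dropout is the only configuration for which weights exist:

model = WideResidualNetwork(depth=28, width=8, dropout_rate=0.0,
                            weights='cifar10', classes=10)
model.summary()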
Example #45
    def build_network(self,
                      alpha=1.0,
                      depth_multiplier=1,
                      dropout=1e-3,
                      include_top=True,
                      input_shape=(224, 224, 3),
                      input_tensor=None,
                      output_shape=2,
                      pooling=None,
                      weights=None):
        """Instantiates the MobileNet architecture.
        Note that only TensorFlow is supported for now,
        therefore it only works with the data format
        `image_data_format='channels_last'` in your Keras config
        at `~/.keras/keras.json`.
        To load a MobileNet model via `load_model`, import the custom
        objects `relu6` and `DepthwiseConv2D` and pass them to the
        `custom_objects` parameter.
        E.g.
        model = load_model('mobilenet.h5', custom_objects={
                           'relu6': mobilenet.relu6,
                           'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
        # Arguments
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(224, 224, 3)` (with `channels_last` data format)
                or (3, 224, 224) (with `channels_first` data format).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 32.
                E.g. `(200, 200, 3)` would be one valid value.
            alpha: controls the width of the network.
                - If `alpha` < 1.0, proportionally decreases the number
                    of filters in each layer.
                - If `alpha` > 1.0, proportionally increases the number
                    of filters in each layer.
                - If `alpha` = 1, default number of filters from the paper
                     are used at each layer.
            depth_multiplier: depth multiplier for depthwise convolution
                (also called the resolution multiplier)
            dropout: dropout rate
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization),
                  'imagenet' (pre-training on ImageNet),
                  or the path to the weights file to be loaded.
            input_tensor: optional Keras tensor (i.e. output of
                `layers.Input()`)
                to use as image input for the model.
            pooling: Optional pooling mode for feature extraction
                when `include_top` is `False`.
                - `None` means that the output of the model
                    will be the 4D tensor output of the
                    last convolutional layer.
                - `avg` means that global average pooling
                    will be applied to the output of the
                    last convolutional layer, and thus
                    the output of the model will be a
                    2D tensor.
                - `max` means that global max pooling will
                    be applied.
            output_shape: optional number of classes to classify images
                into, only relevant if `include_top` is True.
        # Returns
            A Keras model instance.
        # Raises
            ValueError: in case of invalid argument for `weights`,
                or invalid input shape.
            RuntimeError: If attempting to run this model with a
                backend that does not support separable convolutions.
        """

        if K.backend() != 'tensorflow':
            raise RuntimeError(
                'Only TensorFlow backend is currently supported, '
                'as other backends do not support '
                'depthwise convolution.')

        # Determine proper input shape and default size.
        if input_shape is None:
            default_size = 224
        else:
            if K.image_data_format() == 'channels_first':
                rows = input_shape[1]
                cols = input_shape[2]
            else:
                rows = input_shape[0]
                cols = input_shape[1]

            if rows == cols and rows in [128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224

        if K.image_data_format() == 'channels_last':
            row_axis, col_axis = (0, 1)
        else:
            row_axis, col_axis = (1, 2)
        rows = input_shape[row_axis]
        cols = input_shape[col_axis]

        if weights == 'imagenet':
            if depth_multiplier != 1:
                raise ValueError('If imagenet weights are being loaded, '
                                 'depth multiplier must be 1')

            if alpha not in [0.25, 0.50, 0.75, 1.0]:
                raise ValueError('If imagenet weights are being loaded, '
                                 'alpha can be one of '
                                 '`0.25`, `0.50`, `0.75` or `1.0` only.')

            if rows != cols or rows not in [128, 160, 192, 224]:
                raise ValueError(
                    'If imagenet weights are being loaded, '
                    'input must have a static square shape (one of '
                    '(128,128), (160,160), (192,192), or (224, 224)).'
                    ' Input shape provided = %s' % (input_shape, ))

        if K.image_data_format() != 'channels_last':
            warnings.warn(
                'The MobileNet family of models is only available '
                'for the input data format "channels_last" '
                '(width, height, channels). '
                'However your settings specify the default '
                'data format "channels_first" (channels, width, height).'
                ' You should set `image_data_format="channels_last"` '
                'in your Keras config located at ~/.keras/keras.json. '
                'The model being returned right now will expect inputs '
                'to follow the "channels_last" data format.')
            K.set_image_data_format('channels_last')
            old_data_format = 'channels_first'
        else:
            old_data_format = None

        if input_tensor is None:
            img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                img_input = input_tensor

        x = _conv_block(img_input, 32, alpha, strides=(2, 2))
        x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

        x = _depthwise_conv_block(x,
                                  128,
                                  alpha,
                                  depth_multiplier,
                                  strides=(2, 2),
                                  block_id=2)
        x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

        x = _depthwise_conv_block(x,
                                  256,
                                  alpha,
                                  depth_multiplier,
                                  strides=(2, 2),
                                  block_id=4)
        x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

        x = _depthwise_conv_block(x,
                                  512,
                                  alpha,
                                  depth_multiplier,
                                  strides=(2, 2),
                                  block_id=6)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

        x = _depthwise_conv_block(x,
                                  1024,
                                  alpha,
                                  depth_multiplier,
                                  strides=(2, 2),
                                  block_id=12)
        x = _depthwise_conv_block(x,
                                  1024,
                                  alpha,
                                  depth_multiplier,
                                  block_id=13)

        if include_top:
            if K.image_data_format() == 'channels_first':
                shape = (int(1024 * alpha), 1, 1)
            else:
                shape = (1, 1, int(1024 * alpha))

            x = GlobalAveragePooling2D()(x)
            x = Reshape(shape, name='reshape_1')(x)
            x = Dropout(dropout, name='dropout')(x)
            x = Conv2D(output_shape, (1, 1), padding='same',
                       name='conv_preds')(x)
            x = Activation('softmax', name='act_softmax')(x)
            x = Reshape((output_shape, ), name='reshape_2')(x)
        else:
            if pooling == 'avg':
                x = GlobalAveragePooling2D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling2D()(x)

        # Ensure that the model takes into account
        # any potential predecessors of `input_tensor`.
        if input_tensor is not None:
            inputs = get_source_inputs(input_tensor)
        else:
            inputs = img_input

        # Create model.
        self.model = Model(inputs,
                           x,
                           name='mobilenet_%0.2f_%s' % (alpha, rows))

        # load weights
        if weights is not None:
            self.model.load_weights(weights)

        if old_data_format:
            K.set_image_data_format(old_data_format)
        return self.model
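
A minimal sketch of calling this builder, assuming `net` is an instance of the enclosing class (its name is not shown in this excerpt) and a TensorFlow backend:

model = net.build_network(alpha=0.5, input_shape=(224, 224, 3),
                          output_shape=2, include_top=True)
# With alpha=0.5 the final depthwise block has int(1024 * 0.5) = 512 filters.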
def VGG16(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          pooling=None,
          classes=1000,
          **kwargs):
    """Instantiates the VGG16 architecture.

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)`
            (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Block 1
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(img_input)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = layers.Flatten(name='flatten')(x)
        x = layers.Dense(4096, activation='relu', name='fc1')(x)
        x = layers.Dense(4096, activation='relu', name='fc2')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = models.Model(inputs, x, name='vgg16')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = keras_utils.get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='64373286793e3c8b2b4e3219cbf3544b')
        else:
            weights_path = keras_utils.get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='6d6bbae143d832006294945121d1f1fc')
        model.load_weights(weights_path)
        if backend.backend() == 'theano':
            keras_utils.convert_all_kernels_in_model(model)
    elif weights is not None:
        model.load_weights(weights)

    return model
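
A common feature-extraction sketch with the function above: drop the classifier head and pool the last convolutional block into one vector per image:

model = VGG16(include_top=False, weights='imagenet',
              input_shape=(224, 224, 3), pooling='avg')
# The output is a 2D tensor of shape (batch, 512): pooled block5 features.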
Example #47
import numpy as np
import pytest
from keras import backend as K

from keras_contrib import backend as KC
from keras_contrib.layers import SubPixelUpscaling
from keras_contrib.utils.test_utils import layer_test

# Use the channels_first data format with Theano, channels_last otherwise.
if K.backend() == 'theano':
    _convolution_border_modes = ['valid', 'same']
    data_format = 'channels_first'
else:
    _convolution_border_modes = ['valid', 'same']
    data_format = 'channels_last'


@pytest.mark.parametrize('scale_factor', [2, 3, 4])
def test_sub_pixel_upscaling(scale_factor):
    num_samples = 2
    num_row = 16
    num_col = 16
    input_dtype = K.floatx()

    nb_channels = 4 * (scale_factor ** 2)
    input_data = np.random.random((num_samples, nb_channels, num_row, num_col))
    input_data = input_data.astype(input_dtype)

    if K.image_data_format() == 'channels_last':
        input_data = input_data.transpose((0, 2, 3, 1))
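
The `nb_channels = 4 * (scale_factor ** 2)` line reflects the sub-pixel constraint: the channel count must be divisible by the square of the scale factor, because upscaling rearranges channel blocks into spatial positions. A numpy sketch of that rearrangement for a channels_last layout (the exact element ordering may differ from the layer's convention):

import numpy as np

def depth_to_space(x, r):
    # (N, H, W, C * r**2) -> (N, H * r, W * r, C)
    n, h, w, c = x.shape
    x = x.reshape(n, h, w, r, r, c // (r * r))
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h * r, w * r, c // (r * r))

print(depth_to_space(np.zeros((2, 16, 16, 16)), 2).shape)  # (2, 32, 32, 4)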
Example #48
from keras import __version__
from keras import backend as K

from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

import os
import sys

# Needed for the version assertion below (LV aliases LooseVersion).
from distutils.version import LooseVersion as LV

import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns

print('Using Keras version:', __version__, 'backend:', K.backend())
assert (LV(__version__) >= LV("2.0.0"))

# ## GloVe word embeddings


GLOVE_DIR = "/wrk/makoskel/glove.6B"

print('Indexing word vectors.')

embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
    def __init__(self,
                 confidence_thresh=0.01,
                 iou_threshold=0.45,
                 top_k=200,
                 nms_max_output_size=400,
                 coords='centroids',
                 normalize_coords=True,
                 img_height=None,
                 img_width=None,
                 **kwargs):
        '''
        All default argument values follow the Caffe implementation.

        Arguments:
            confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
                positive class in order to be considered for the non-maximum suppression stage for the respective class.
                A lower value will result in a larger part of the selection process being done by the non-maximum suppression
                stage, while a larger value will result in a larger part of the selection process happening in the confidence
                thresholding stage.
            iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
                with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
                to the box score.
            top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
                non-maximum suppression stage.
            nms_max_output_size (int, optional): The maximum number of predictions that will be left after performing non-maximum
                suppression.
            coords (str, optional): The box coordinate format that the model outputs. Must be 'centroids'
                i.e. the format `(cx, cy, w, h)` (box center coordinates, width, and height). Other coordinate formats are
                currently not supported.
            normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
                and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
                relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
                Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
                coordinates. Requires `img_height` and `img_width` if set to `True`.
            img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
            img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
        '''
        if K.backend() != 'tensorflow':
            raise TypeError(
                "This layer only supports TensorFlow at the moment, but you are using the {} backend."
                .format(K.backend()))

        if normalize_coords and ((img_height is None) or (img_width is None)):
            raise ValueError(
                "If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`"
                .format(img_height, img_width))

        if coords != 'centroids':
            raise ValueError(
                "The DetectionOutput layer currently only supports the 'centroids' coordinate format."
            )

        # We need these members for the config.
        self.confidence_thresh = confidence_thresh
        self.iou_threshold = iou_threshold
        self.top_k = top_k
        self.normalize_coords = normalize_coords
        self.img_height = img_height
        self.img_width = img_width
        self.coords = coords
        self.nms_max_output_size = nms_max_output_size

        # We need these members for TensorFlow.
        self.tf_confidence_thresh = tf.constant(self.confidence_thresh,
                                                name='confidence_thresh')
        self.tf_iou_threshold = tf.constant(self.iou_threshold,
                                            name='iou_threshold')
        self.tf_top_k = tf.constant(self.top_k, name='top_k')
        self.tf_normalize_coords = tf.constant(self.normalize_coords,
                                               name='normalize_coords')
        self.tf_img_height = tf.constant(self.img_height,
                                         dtype=tf.float32,
                                         name='img_height')
        self.tf_img_width = tf.constant(self.img_width,
                                        dtype=tf.float32,
                                        name='img_width')
        self.tf_nms_max_output_size = tf.constant(self.nms_max_output_size,
                                                  name='nms_max_output_size')

        super(DecodeDetections, self).__init__(**kwargs)
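
A minimal construction sketch (illustrative values; `normalize_coords=True` requires the image size, as enforced above):

decoder = DecodeDetections(confidence_thresh=0.01, iou_threshold=0.45,
                           top_k=200, nms_max_output_size=400,
                           normalize_coords=True,
                           img_height=300, img_width=300)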
Example #50
def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2', OS=16, alpha=1.):
    """ Instantiates the Deeplabv3+ architecture

    Optionally loads weights pre-trained
    on PASCAL VOC. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    # Arguments
        weights: one of 'pascal_voc' (pre-trained on pascal voc)
            or None (random initialization)
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: shape of input image, in HxWxC format.
            The PASCAL VOC model was trained on (512, 512, 3) images.
        classes: number of desired classes. If classes != 21,
            last layer is initialized randomly
        backbone: backbone to use. one of {'xception','mobilenetv2'}
        OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.
            Used only for xception backbone.
        alpha: controls the width of the MobileNetV2 network. This is known as the
            width multiplier in the MobileNetV2 paper.
                - If `alpha` < 1.0, proportionally decreases the number
                    of filters in each layer.
                - If `alpha` > 1.0, proportionally increases the number
                    of filters in each layer.
                - If `alpha` = 1, default number of filters from the paper
                    are used at each layer.
            Used only for mobilenetv2 backbone

    # Returns
        A Keras model instance.

    # Raises
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
        ValueError: in case of invalid argument for `weights` or `backbone`

    """

    if not (weights in {'pascal_voc', None}):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `pascal_voc` '
                         '(pre-trained on PASCAL VOC)')

    if K.backend() != 'tensorflow':
        raise RuntimeError('The Deeplabv3+ model is only available with '
                           'the TensorFlow backend.')

    if not (backbone in {'xception', 'mobilenetv2'}):
        raise ValueError('The `backbone` argument should be either '
                         '`xception`  or `mobilenetv2` ')

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backbone == 'xception':
        if OS == 8:
            entry_block3_stride = 1
            middle_block_rate = 2  # ! Not mentioned in paper, but required
            exit_block_rates = (2, 4)
            atrous_rates = (12, 24, 36)
        else:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
            atrous_rates = (6, 12, 18)

        x = Conv2D(32, 
                   (3, 3), 
                   strides=(2, 2),
                   name='entry_flow_conv1_1', 
                   use_bias=False, 
                   kernel_regularizer=l2(WEIGHT_DECAY),
                   padding='same')(img_input)
        # x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
        x = GroupNormalization(groups=8)(x)
        x = Activation('relu')(x)

        x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
        # x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
        x = GroupNormalization(groups=8)(x)
        x = Activation('relu')(x)

        x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
                            skip_connection_type='conv', stride=2,
                            depth_activation=False)
        x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
                                   skip_connection_type='conv', stride=2,
                                   depth_activation=False, return_skip=True)

        x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
                            skip_connection_type='conv', stride=entry_block3_stride,
                            depth_activation=False)
        for i in range(16):
            x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
                                skip_connection_type='sum', stride=1, rate=middle_block_rate,
                                depth_activation=False)

        x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
                            skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
                            depth_activation=False)
        x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
                            skip_connection_type='none', stride=1, rate=exit_block_rates[1],
                            depth_activation=True)

    else:
        OS = 8
        first_block_filters = _make_divisible(32 * alpha, 8)
        x = Conv2D(first_block_filters,
                   kernel_size=3,
                   strides=(2, 2), 
                   padding='same',
                   use_bias=False, 
                   kernel_regularizer=l2(WEIGHT_DECAY),
                   name='Conv')(img_input)
        # x = BatchNormalization(
        #     epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
        x = GroupNormalization(groups=8)(x)
        x = Activation(relu6, name='Conv_Relu6')(x)

        x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
                                expansion=1, block_id=0, skip_connection=False)

        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                                expansion=6, block_id=1, skip_connection=False)
        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                                expansion=6, block_id=2, skip_connection=True)

        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                                expansion=6, block_id=3, skip_connection=False)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=4, skip_connection=True)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=5, skip_connection=True)

        # stride in block 6 changed from 2 -> 1, so we need to use rate = 2
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,  # 1!
                                expansion=6, block_id=6, skip_connection=False)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=7, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=8, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=9, skip_connection=True)

        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=10, skip_connection=False)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=11, skip_connection=True)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=12, skip_connection=True)

        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2,  # 1!
                                expansion=6, block_id=13, skip_connection=False)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=14, skip_connection=True)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=15, skip_connection=True)

        x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=16, skip_connection=False)

    # end of feature extractor

    # branching for Atrous Spatial Pyramid Pooling

    # Image Feature branch
    #out_shape = int(np.ceil(input_shape[0] / OS))
    b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
    b4 = Conv2D(256, 
                (1, 1), 
                padding='same',
                use_bias=False, 
                kernel_regularizer=l2(WEIGHT_DECAY),
                name='image_pooling')(b4)
    # b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
    b4 = GroupNormalization(groups=8)(b4)
    b4 = Activation('relu')(b4)
    b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)

    # simple 1x1
    b0 = Conv2D(256, 
                (1, 1), 
                padding='same', 
                use_bias=False, 
                kernel_regularizer=l2(WEIGHT_DECAY),
                name='aspp0')(x)
    # b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
    b0 = GroupNormalization(groups=8)(b0)
    b0 = Activation('relu', name='aspp0_activation')(b0)

    # the MobileNetV2 variant keeps only the two branches above (image
    # pooling and the 1x1 conv); the atrous SepConv branches are Xception-only
    if backbone == 'xception':
        # rate = 6 (12)
        b1 = SepConv_BN(x, 256, 'aspp1',
                        rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
        # rate = 12 (24)
        b2 = SepConv_BN(x, 256, 'aspp2',
                        rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
        # rate = 18 (36)
        b3 = SepConv_BN(x, 256, 'aspp3',
                        rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)

        # concatenate ASPP branches & project
        x = Concatenate()([b4, b0, b1, b2, b3])
    else:
        x = Concatenate()([b4, b0])

    x = Conv2D(256, 
               (1, 1), 
               padding='same',
               use_bias=False, 
               kernel_regularizer=l2(WEIGHT_DECAY),
               name='concat_projection')(x)
    # x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
    x = GroupNormalization(groups=8)(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    # DeepLab v.3+ decoder

    if backbone == 'xception':
        # Feature projection
        # x4 (x2) block
        x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),
                                            int(np.ceil(input_shape[1] / 4))))(x)
        dec_skip1 = Conv2D(48, 
                           (1, 1), 
                           padding='same',
                           use_bias=False, 
                           kernel_regularizer=l2(WEIGHT_DECAY),
                           name='feature_projection0')(skip1)
        # dec_skip1 = BatchNormalization(
        #     name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
        dec_skip1 = GroupNormalization(groups=8)(dec_skip1)
        dec_skip1 = Activation('relu')(dec_skip1)
        x = Concatenate()([x, dec_skip1])
        x = SepConv_BN(x, 256, 'decoder_conv0',
                       depth_activation=True, epsilon=1e-5)
        x = SepConv_BN(x, 256, 'decoder_conv1',
                       depth_activation=True, epsilon=1e-5)

    # you can use it with an arbitrary number of classes
    if classes == 21:
        last_layer_name = 'logits_semantic'
    else:
        last_layer_name = 'custom_logits_semantic'

    x = Conv2D(classes, 
               (1, 1), 
               padding='same', 
               kernel_regularizer=l2(WEIGHT_DECAY),
               name=last_layer_name)(x)
    x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='deeplabv3plus')

    # load weights

    if weights == 'pascal_voc':
        if backbone == 'xception':
            weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_X,
                                    cache_subdir='models')
        else:
            weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_MOBILE,
                                    cache_subdir='models')
        model.load_weights(weights_path, by_name=True)
    return model
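

# A minimal usage sketch for the builder above. The enclosing function's name
# is not visible in this snippet, so `Deeplabv3` is an assumed name; the
# argument names mirror the parameters referenced in the body (weights,
# input_shape, backbone, classes). Loading 'pascal_voc' weights downloads them.
import numpy as np
model = Deeplabv3(weights='pascal_voc', input_shape=(512, 512, 3),
                  backbone='mobilenetv2', classes=21)
dummy = np.zeros((1, 512, 512, 3), dtype='float32')
print(model.predict(dummy).shape)  # (1, 512, 512, 21) per-pixel class scores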
Example #51
def update_setup(config_filepath):
    """Update default settings with user settings and check they are valid.

    Load settings from configuration file at ``config_filepath``, and check
    that parameter choices are valid. Non-specified settings are filled in with
    defaults.
    """

    from textwrap import dedent

    # config.read will not throw an error if the filepath does not exist, and
    # user values will not override defaults. So check here:
    assert os.path.isfile(config_filepath), \
        "Config filepath {} does not exist.".format(config_filepath)

    # Load defaults.
    config = load_config(
        os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'config_defaults')))

    # Overwrite with user settings.
    config.read(config_filepath)

    keras_backend = config.get('simulation', 'keras_backend')
    keras_backends = config_string_to_set_of_strings(
        config.get('restrictions', 'keras_backends'))
    assert keras_backend in keras_backends, \
        "Keras backend {} not supported. Choose from {}.".format(
            keras_backend, keras_backends)
    os.environ['KERAS_BACKEND'] = keras_backend
    # The keras import has to happen after setting the backend environment
    # variable!
    import keras.backend as k
    assert k.backend() == keras_backend, \
        "Keras backend set to {} in snntoolbox config file, but has already " \
        "been set to {} by a previous keras import. Set backend " \
        "appropriately in the keras config file.".format(keras_backend,
                                                         k.backend())

    # Name of input file must be given.
    filename_ann = config.get('paths', 'filename_ann')
    assert filename_ann != '', "Filename of input model not specified."

    # Check that simulator choice is valid.
    simulator = config.get('simulation', 'simulator')
    simulators = config_string_to_set_of_strings(
        config.get('restrictions', 'simulators'))
    assert simulator in simulators, \
        "Simulator '{}' not supported. Choose from {}".format(simulator,
                                                              simulators)

    # Warn user that it is not possible to use Brian2 simulator by loading a
    # pre-converted network from disk.
    if simulator == 'brian2' and not config.getboolean('tools', 'convert'):
        print(
            dedent("""\n
            SNN toolbox Warning: When using Brian 2 simulator, you need to
            convert the network each time you start a new session. (No
            saving/reloading methods implemented.) Setting convert = True.
            \n"""))
        config.set('tools', 'convert', str(True))

    # Set default path if user did not specify it.
    if config.get('paths', 'path_wd') == '':
        config.set('paths', 'path_wd', os.path.dirname(config_filepath))

    # Check specified working directory exists.
    path_wd = config.get('paths', 'path_wd')
    assert os.path.exists(path_wd), \
        "Working directory {} does not exist.".format(path_wd)

    # Check that choice of input model library is valid.
    model_lib = config.get('input', 'model_lib')
    model_libs = config_string_to_set_of_strings(
        config.get('restrictions', 'model_libs'))
    assert model_lib in model_libs, "ERROR: Input model library '{}' ".format(
        model_lib) + "not supported yet. Possible values: {}".format(
            model_libs)

    # Check input model is found and has the right format for the specified
    # model library.
    if config.getboolean('tools', 'evaluate_ann') \
            or config.getboolean('tools', 'parse'):
        if model_lib == 'caffe':
            caffemodel_filepath = os.path.join(path_wd,
                                               filename_ann + '.caffemodel')
            caffemodel_h5_filepath = os.path.join(
                path_wd, filename_ann + '.caffemodel.h5')
            assert os.path.isfile(caffemodel_filepath) or os.path.isfile(
                caffemodel_h5_filepath), "File {} or {} not found.".format(
                    caffemodel_filepath, caffemodel_h5_filepath)
            prototxt_filepath = os.path.join(path_wd,
                                             filename_ann + '.prototxt')
            assert os.path.isfile(prototxt_filepath), \
                "File {} not found.".format(prototxt_filepath)
        elif model_lib == 'keras':
            h5_filepath = str(os.path.join(path_wd, filename_ann + '.h5'))
            assert os.path.isfile(h5_filepath), \
                "File {} not found.".format(h5_filepath)
        elif model_lib == 'lasagne':
            h5_filepath = os.path.join(path_wd, filename_ann + '.h5')
            pkl_filepath = os.path.join(path_wd, filename_ann + '.pkl')
            assert os.path.isfile(h5_filepath) or \
                os.path.isfile(pkl_filepath), \
                "File {} not found.".format('.h5 or .pkl')
            py_filepath = os.path.join(path_wd, filename_ann + '.py')
            assert os.path.isfile(py_filepath), \
                "File {} not found.".format(py_filepath)
        else:
            print("For the specified input model library {}, no test is "
                  "implemented to check if input model files exist in the "
                  "specified working directory!".format(model_lib))

    # Set default path if user did not specify it.
    if config.get('paths', 'dataset_path') == '':
        config.set('paths', 'dataset_path', os.path.dirname(__file__))

    # Check that the data set path is valid.
    dataset_path = os.path.abspath(config.get('paths', 'dataset_path'))
    config.set('paths', 'dataset_path', dataset_path)
    assert os.path.exists(dataset_path), "Path to data set does not exist: " \
                                         "{}".format(dataset_path)

    # Check that data set path contains the data in the specified format.
    assert os.listdir(dataset_path), "Data set directory is empty."
    normalize = config.getboolean('tools', 'normalize')
    dataset_format = config.get('input', 'dataset_format')
    if dataset_format == 'npz' and normalize and not os.path.exists(
            os.path.join(dataset_path, 'x_norm.npz')):
        raise RuntimeWarning(
            "No data set file 'x_norm.npz' found in specified data set path " +
            "{}. Add it, or disable normalization.".format(dataset_path))
    if dataset_format == 'npz' and not (
            os.path.exists(os.path.join(dataset_path, 'x_test.npz'))
            and os.path.exists(os.path.join(dataset_path, 'y_test.npz'))):
        raise RuntimeWarning(
            "Data set file 'x_test.npz' or 'y_test.npz' was not found in "
            "specified data set path {}.".format(dataset_path))

    sample_idxs_to_test = eval(config.get('simulation', 'sample_idxs_to_test'))
    num_to_test = config.getint('simulation', 'num_to_test')
    if sample_idxs_to_test != []:
        if len(sample_idxs_to_test) != num_to_test:
            print(
                dedent("""
            SNN toolbox warning: Settings mismatch. Adjusting 'num_to_test' to 
            equal the number of 'sample_idxs_to_test'."""))
            config.set('simulation', 'num_to_test',
                       str(len(sample_idxs_to_test)))

    # Create log directory if it does not exist.
    if config.get('paths', 'log_dir_of_current_run') == '':
        config.set(
            'paths', 'log_dir_of_current_run',
            os.path.join(path_wd, 'log', 'gui',
                         config.get('paths', 'runlabel')))
    log_dir_of_current_run = config.get('paths', 'log_dir_of_current_run')
    if not os.path.isdir(log_dir_of_current_run):
        os.makedirs(log_dir_of_current_run)

    # Specify filenames for models at different stages of the conversion.
    if config.get('paths', 'filename_parsed_model') == '':
        config.set('paths', 'filename_parsed_model', filename_ann + '_parsed')
    if config.get('paths', 'filename_snn') == '':
        config.set('paths', 'filename_snn',
                   '{}_{}'.format(filename_ann, simulator))

    # Make sure the number of samples to test is not lower than the batch size.
    batch_size = config.getint('simulation', 'batch_size')
    if config.getint('simulation', 'num_to_test') < batch_size:
        print(
            dedent("""\
            SNN toolbox Warning: 'num_to_test' set lower than 'batch_size'.
            In simulators that test samples batch-wise (e.g. INIsim), this
            can lead to undesired behavior. Setting 'num_to_test' equal to
            'batch_size'."""))
        config.set('simulation', 'num_to_test', str(batch_size))

    plot_var = get_plot_keys(config)
    plot_vars = config_string_to_set_of_strings(
        config.get('restrictions', 'plot_vars'))
    assert all([v in plot_vars for v in plot_var]), \
        "Plot variable(s) {} not understood.".format(
            [v for v in plot_var if v not in plot_vars])
    if 'all' in plot_var:
        plot_vars_all = plot_vars.copy()
        plot_vars_all.remove('all')
        config.set('output', 'plot_vars', str(plot_vars_all))

    log_var = get_log_keys(config)
    log_vars = config_string_to_set_of_strings(
        config.get('restrictions', 'log_vars'))
    assert all([v in log_vars for v in log_var]), \
        "Log variable(s) {} not understood.".format(
            [v for v in log_var if v not in log_vars])
    if 'all' in log_var:
        log_vars_all = log_vars.copy()
        log_vars_all.remove('all')
        config.set('output', 'log_vars', str(log_vars_all))

    # Change matplotlib plot properties, e.g. label font size
    try:
        import matplotlib
    except ImportError:
        matplotlib = None
        if len(plot_vars) > 0:
            import warnings
            warnings.warn(
                "Package 'matplotlib' not installed; disabling "
                "plotting. Run 'pip install matplotlib' to enable "
                "plotting.", ImportWarning)
            config.set('output', 'plot_vars', str({}))
    if matplotlib is not None:
        matplotlib.rcParams.update(eval(config.get('output',
                                                   'plotproperties')))

    # Check settings for parameter sweep
    param_name = config.get('parameter_sweep', 'param_name')
    try:
        config.get('cell', param_name)
    except KeyError:
        print("Unkown parameter name {} to sweep.".format(param_name))
        raise RuntimeError

    spike_code = config.get('conversion', 'spike_code')
    spike_codes = config_string_to_set_of_strings(
        config.get('restrictions', 'spike_codes'))
    assert spike_code in spike_codes, \
        "Unknown spike code {} selected. Choose from {}.".format(spike_code,
                                                                 spike_codes)
    if spike_code == 'temporal_pattern':
        num_bits = str(config.getint('conversion', 'num_bits'))
        config.set('simulation', 'duration', num_bits)
        config.set('simulation', 'batch_size', '1')
    elif 'ttfs' in spike_code:
        config.set('cell', 'tau_refrac',
                   str(config.getint('simulation', 'duration')))
    assert keras_backend != 'theano' or spike_code == 'temporal_mean_rate', \
        "Keras backend 'theano' only works when the 'spike_code' parameter " \
        "is set to 'temporal_mean_rate' in snntoolbox config."

    with open(os.path.join(log_dir_of_current_run, '.config'), str('w')) as f:
        config.write(f)

    return config
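

# A minimal usage sketch, assuming an INI-style config file named 'config'
# next to the script; the returned configuration drives the rest of the
# toolbox pipeline.
config = update_setup('config')
print(config.get('paths', 'path_wd'), config.get('simulation', 'simulator'))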
Example #52
def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):
    """Instantiates the ResNet50 architecture.

    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 197.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
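

# A minimal usage sketch: ResNet50 as a fixed feature extractor, using only
# arguments documented above; the random batch stands in for suitably
# preprocessed images.
import numpy as np
feature_model = ResNet50(include_top=False, weights=None,
                         input_shape=(224, 224, 3), pooling='avg')
features = feature_model.predict(np.random.rand(2, 224, 224, 3))
print(features.shape)  # (2, 2048): one pooled feature vector per image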
required = None


class gae(candle.Benchmark):
    def set_locals(self):
        if required is not None:
            self.required = set(required)
        if additional_definitions is not None:
            self.additional_definitions = additional_definitions


# thread optimization
import os
from keras import backend as K
if K.backend() == 'tensorflow' and 'NUM_INTRA_THREADS' in os.environ:
    import tensorflow as tf
    sess = tf.Session(config=tf.ConfigProto(
        inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
        intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS'])))
    K.set_session(sess)
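
# Both environment variables must be set before this module is imported
# (note the guard above only checks NUM_INTRA_THREADS); illustrative values:
#
#   os.environ['NUM_INTRA_THREADS'] = '8'   # thread pool within a single op
#   os.environ['NUM_INTER_THREADS'] = '2'   # ops executed in parallel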


# this is a candle requirement
def initialize_parameters():
    gae_common = candle.Benchmark('./',
                                  'gae_params.txt',
                                  'keras',
                                  prog='gae_baseline_keras2',
                                  desc='GAE Network')
    def __init__(self,
                 img_height,
                 img_width,
                 this_scale,
                 next_scale,
                 aspect_ratios=[0.5, 1.0, 2.0],
                 two_boxes_for_ar1=True,
                 this_steps=None,
                 this_offsets=None,
                 clip_boxes=False,
                 variances=[0.1, 0.1, 0.2, 0.2],
                 coords='centroids',
                 normalize_coords=False,
                 **kwargs):
        '''
        All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is undefined.
        Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class.

        Arguments:
            img_height (int): The height of the input images.
            img_width (int): The width of the input images.
            this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes
                as a fraction of the shorter side of the input image.
            next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
                `self.two_boxes_for_ar1 == True`.
            aspect_ratios (list, optional): The list of aspect ratios for which default boxes are to be
                generated for this layer.
            two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
                If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated
                using the scaling factor for the respective layer, the second one using the
                geometric mean of said scaling factor and the next bigger scaling factor.
            clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
            variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
                its respective variance value.
            coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the input format
                of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height),
                'corners' for the format `(xmin, ymin, xmax, ymax)`, or 'minmax' for the format `(xmin, xmax, ymin, ymax)`.
            normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
                i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
        '''
        if K.backend() != 'tensorflow':
            raise TypeError("This layer only supports TensorFlow at the moment, but you are using the {} backend.".format(K.backend()))

        if (this_scale < 0) or (next_scale < 0) or (this_scale > 1):
            raise ValueError("`this_scale` must be in [0, 1] and `next_scale` must be >0, but `this_scale` == {}, `next_scale` == {}".format(this_scale, next_scale))

        if len(variances) != 4:
            raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
        variances = np.array(variances)
        if np.any(variances <= 0):
            raise ValueError("All variances must be >0, but the variances given are {}".format(variances))

        self.img_height = img_height
        self.img_width = img_width
        self.this_scale = this_scale
        self.next_scale = next_scale
        self.aspect_ratios = aspect_ratios
        self.two_boxes_for_ar1 = two_boxes_for_ar1
        self.this_steps = this_steps
        self.this_offsets = this_offsets
        self.clip_boxes = clip_boxes
        self.variances = variances
        self.coords = coords
        self.normalize_coords = normalize_coords
        # Compute the number of boxes per cell
        if (1 in aspect_ratios) and two_boxes_for_ar1:
            self.n_boxes = len(aspect_ratios) + 1
        else:
            self.n_boxes = len(aspect_ratios)
        super(AnchorBoxes, self).__init__(**kwargs)
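

# A minimal usage sketch for the AnchorBoxes layer whose constructor is shown
# above, applied to an SSD predictor feature map. `conv4_3_norm` is a
# hypothetical tensor; scales and ratios are illustrative values.
# anchors = AnchorBoxes(img_height=300, img_width=300,
#                       this_scale=0.1, next_scale=0.2,
#                       aspect_ratios=[0.5, 1.0, 2.0],
#                       two_boxes_for_ar1=True,
#                       variances=[0.1, 0.1, 0.2, 0.2],
#                       normalize_coords=True)(conv4_3_norm)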
Example #55
        bias_constraint=lambda x: 0. * x + 2.,
    )
    model.add(dense)
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.train_on_batch(x_train[:10], y_train[:10])
    kernel, bias = dense.get_weights()
    assert_allclose(kernel, 1.)
    assert_allclose(bias, 2.)


@pytest.mark.skipif(K.backend() == 'mxnet',
                    reason='MXNet backend does not support constraints. '
                    'Keyword arguments such as `kernel_constraint` '
                    'and `bias_constraint` are not supported.')
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason="Only Tensorflow raises a "
                    "ValueError if the gradient is null.")
def test_no_grad():
    inp = Input([3])
    x = Dense(10)(inp)
    x = Lambda(
        lambda l: 1.0 * K.reshape(K.cast(K.argmax(l), 'float32'), [-1, 1]),
        output_shape=lambda x: [x[0], 1])(x)
    mod = Model(inp, x)
    mod.compile('sgd', 'mse')
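    # The snippet is cut off here; presumably the test then checks that
    # fitting through the gradient-free argmax op raises, e.g.:
    with pytest.raises(ValueError):
        mod.fit(np.zeros([10, 3]), np.zeros([10, 1]),
                batch_size=10, epochs=10)
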
def Inception_Inflated3d(include_top=True,
                         weights=None,
                         input_tensor=None,
                         input_shape=None,
                         dropout_prob=0.0,
                         endpoint_logit=True,
                         classes=400):
    """Instantiates the Inflated 3D Inception v1 architecture.

    Optionally loads weights pre-trained
    on Kinetics. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    Note that the default input frame (image) size for this model is 224x224.

    # Arguments
        include_top: whether to include the classification
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'kinetics_only' (pre-training on the Kinetics dataset only),
            or 'imagenet_and_kinetics' (pre-training on the ImageNet and Kinetics datasets).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(NUM_FRAMES, 224, 224, 3)` (with `channels_last` data format)
            or `(NUM_FRAMES, 3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels.
            NUM_FRAMES should be no smaller than 8. The authors used 64
            frames per example for training and testing on the Kinetics
            dataset. Also, width and height should be no smaller than 32.
            E.g. `(64, 150, 150, 3)` would be one valid value.
        dropout_prob: optional, dropout probability applied in dropout layer
            after global average pooling layer. 
            0.0 means no dropout is applied, 1.0 means dropout is applied to all features.
            Note: Since Dropout is applied just before the classification
            layer, it is only useful when `include_top` is set to True.
        endpoint_logit: (boolean) optional. If True, the model's forward pass
            ends at the logits. Otherwise, softmax is applied to the logits to
            produce class probability predictions. Setting this parameter to
            True is particularly useful when you want to combine the results
            of the RGB model and the optical flow model.
            - `True`: end the model's forward pass at the logit output
            - `False`: go further after the logits to produce softmax predictions
            Note: This parameter is only useful when `include_top` is set to True.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in WEIGHTS_NAME or weights is None
            or os.path.exists(weights)):
        raise ValueError(
            'The `weights` argument should be either '
            '`None` (random initialization) or %s' % str(WEIGHTS_NAME) + ' '
            'or a valid path to a file containing `weights` values')

    if weights in WEIGHTS_NAME and include_top and classes != 400:
        raise ValueError(
            'If using `weights` as one of these %s, with `include_top`'
            ' as true, `classes` should be 400' % str(WEIGHTS_NAME))

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_frame_size=224,
                                      min_frame_size=32,
                                      default_num_frames=64,
                                      min_num_frames=8,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 4

    # Downsampling via convolution (spatial and temporal)
    x = conv3d_bn(img_input,
                  64,
                  7,
                  7,
                  7,
                  strides=(2, 2, 2),
                  padding='same',
                  name='Conv3d_1a_7x7')

    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3),
                     strides=(1, 2, 2),
                     padding='same',
                     name='MaxPool2d_2a_3x3')(x)
    x = conv3d_bn(x,
                  64,
                  1,
                  1,
                  1,
                  strides=(1, 1, 1),
                  padding='same',
                  name='Conv3d_2b_1x1')
    x = conv3d_bn(x,
                  192,
                  3,
                  3,
                  3,
                  strides=(1, 1, 1),
                  padding='same',
                  name='Conv3d_2c_3x3')

    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3),
                     strides=(1, 2, 2),
                     padding='same',
                     name='MaxPool2d_3a_3x3')(x)

    # Mixed 3b
    branch_0 = conv3d_bn(x,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         96,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         16,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         32,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_3b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_3b')

    # Mixed 3c
    branch_0 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         192,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         96,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_3c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_3c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_3c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_3c')

    # Downsampling (spatial and temporal)
    x = MaxPooling3D((3, 3, 3),
                     strides=(2, 2, 2),
                     padding='same',
                     name='MaxPool2d_4a_3x3')(x)

    # Mixed 4b
    branch_0 = conv3d_bn(x,
                         192,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         96,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         208,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         16,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         48,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4b')

    # Mixed 4c
    branch_0 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         112,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         224,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         24,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4c')

    # Mixed 4d
    branch_0 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_0a_1x1')

    branch_1 = conv3d_bn(x,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         256,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4d_1b_3x3')

    branch_2 = conv3d_bn(x,
                         24,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4d_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4d_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4d_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4d')

    # Mixed 4e
    branch_0 = conv3d_bn(x,
                         112,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_0a_1x1')

    branch_1 = conv3d_bn(x,
                         144,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         288,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4e_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         64,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4e_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4e_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         64,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4e_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4e')

    # Mixed 4f
    branch_0 = conv3d_bn(x,
                         256,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_0a_1x1')

    branch_1 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         320,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4f_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_4f_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_4f_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_4f_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_4f')

    # Downsampling (spatial and temporal)
    x = MaxPooling3D((2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='MaxPool2d_5a_2x2')(x)

    # Mixed 5b
    branch_0 = conv3d_bn(x,
                         256,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_0a_1x1')

    branch_1 = conv3d_bn(x,
                         160,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         320,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5b_1b_3x3')

    branch_2 = conv3d_bn(x,
                         32,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5b_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_5b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5b_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_5b')

    # Mixed 5c
    branch_0 = conv3d_bn(x,
                         384,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_0a_1x1')

    branch_1 = conv3d_bn(x,
                         192,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_1a_1x1')
    branch_1 = conv3d_bn(branch_1,
                         384,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5c_1b_3x3')

    branch_2 = conv3d_bn(x,
                         48,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_2a_1x1')
    branch_2 = conv3d_bn(branch_2,
                         128,
                         3,
                         3,
                         3,
                         padding='same',
                         name='Conv3d_5c_2b_3x3')

    branch_3 = MaxPooling3D((3, 3, 3),
                            strides=(1, 1, 1),
                            padding='same',
                            name='MaxPool2d_5c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3,
                         128,
                         1,
                         1,
                         1,
                         padding='same',
                         name='Conv3d_5c_3b_1x1')

    x = layers.concatenate([branch_0, branch_1, branch_2, branch_3],
                           axis=channel_axis,
                           name='Mixed_5c')

    if include_top:
        # Classification block
        x = AveragePooling3D((2, 7, 7),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)
        x = Dropout(dropout_prob)(x)

        x = conv3d_bn(x,
                      classes,
                      1,
                      1,
                      1,
                      padding='same',
                      use_bias=True,
                      use_activation_fn=False,
                      use_bn=False,
                      name='Conv3d_6a_1x1')

        num_frames_remaining = int(x.shape[1])
        x = Reshape((num_frames_remaining, classes))(x)

        # logits (raw scores for each class)
        x = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),
                   output_shape=lambda s: (s[0], s[2]))(x)

        if not endpoint_logit:
            x = Activation('softmax', name='prediction')(x)
    else:
        h = int(x.shape[2])
        w = int(x.shape[3])
        x = AveragePooling3D((2, h, w),
                             strides=(1, 1, 1),
                             padding='valid',
                             name='global_avg_pool')(x)

    inputs = img_input
    # create model
    model = Model(inputs, x, name='i3d_inception')

    # load weights
    if weights in WEIGHTS_NAME:
        if weights == WEIGHTS_NAME[0]:  # rgb_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only_no_top.h5'

        elif weights == WEIGHTS_NAME[1]:  # flow_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only_no_top.h5'

        elif weights == WEIGHTS_NAME[2]:  # rgb_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics_no_top.h5'

        elif weights == WEIGHTS_NAME[3]:  # flow_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics_no_top.h5'

        downloaded_weights_path = get_file(model_name,
                                           weights_url,
                                           cache_subdir='models')
        model.load_weights(downloaded_weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if (K.image_data_format() == 'channels_first'
                and K.backend() == 'tensorflow'):
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your keras config '
                          'at ~/.keras/keras.json.')

    elif weights is not None:
        model.load_weights(weights)

    return model
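

# A minimal usage sketch for the I3D builder above, assuming the
# channels_last data format: classify one random 64-frame RGB clip (random
# values stand in for rescaled video frames).
import numpy as np
i3d = Inception_Inflated3d(include_top=True, weights=None,
                           input_shape=(64, 224, 224, 3), classes=400)
clip = np.random.rand(1, 64, 224, 224, 3).astype('float32')
print(i3d.predict(clip).shape)  # (1, 400) logits (endpoint_logit=True)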
Example #57
 def call(self, x, mask=None):
     if hasattr(x, '_keras_shape'):
         input_shape = x._keras_shape
     elif hasattr(K, 'int_shape'):
         input_shape = K.int_shape(x)
     layer_width = input_shape[self.waxis]
     layer_height = input_shape[self.haxis]
     img_width = self.img_size[0]
     img_height = self.img_size[1]
     # define prior boxes shapes
     box_widths = []
     box_heights = []
     for ar in self.aspect_ratios:
         if ar == 1 and len(box_widths) == 0:
             box_widths.append(self.min_size)
             box_heights.append(self.min_size)
         elif ar == 1 and len(box_widths) > 0:
             box_widths.append(np.sqrt(self.min_size * self.max_size))
             box_heights.append(np.sqrt(self.min_size * self.max_size))
         elif ar != 1:
             box_widths.append(self.min_size * np.sqrt(ar))
             box_heights.append(self.min_size / np.sqrt(ar))
     box_widths = 0.5 * np.array(box_widths)
     box_heights = 0.5 * np.array(box_heights)
     # define centers of prior boxes
     step_x = img_width / layer_width
     step_y = img_height / layer_height
     linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x, layer_width)
     liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
                        layer_height)
     centers_x, centers_y = np.meshgrid(linx, liny)
     centers_x = centers_x.reshape(-1, 1)
     centers_y = centers_y.reshape(-1, 1)
      # define the xmin, ymin, xmax, ymax of the prior boxes
     num_priors_ = len(self.aspect_ratios)
     prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
     prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))
     prior_boxes[:, ::4] -= box_widths
     prior_boxes[:, 1::4] -= box_heights
     prior_boxes[:, 2::4] += box_widths
     prior_boxes[:, 3::4] += box_heights
     prior_boxes[:, ::2] /= img_width
     prior_boxes[:, 1::2] /= img_height
     prior_boxes = prior_boxes.reshape(-1, 4)
     if self.clip:
         prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
     # define variances
     num_boxes = len(prior_boxes)
     if len(self.variances) == 1:
         variances = np.ones((num_boxes, 4)) * self.variances[0]
     elif len(self.variances) == 4:
         variances = np.tile(self.variances, (num_boxes, 1))
     else:
         raise Exception('Must provide one or four variances.')
     prior_boxes = np.concatenate((prior_boxes, variances), axis=1)
     prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)
     if K.backend() == 'tensorflow':
         pattern = [tf.shape(x)[0], 1, 1]
         prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern)
     elif K.backend() == 'theano':
         #TODO
         pass
     return prior_boxes_tensor
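

# A rough size sketch for the call above (illustrative numbers): each prior
# carries 8 values, 4 box coordinates plus 4 variances.
layer_width, layer_height, num_priors_ = 38, 38, 3
num_boxes = layer_width * layer_height * num_priors_
print(num_boxes)  # 4332 -> the returned tensor has shape (batch_size, 4332, 8)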
Example #58
from keras.initializers import he_normal
from keras import optimizers
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file

num_classes = 10
batch_size = 128
epochs = 200
iterations = 391
dropout = 0.5
weight_decay = 0.0001
log_filepath = r'./vgg19_retrain_logs/'

from keras import backend as K
if K.backend() == 'tensorflow':
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)  # register the session; otherwise the import above is unused


def scheduler(epoch):
    if epoch < 80:
        return 0.1
    if epoch < 160:
        return 0.01
    return 0.001
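

# The step schedule above is meant to be attached via the imported
# LearningRateScheduler callback, e.g. (model and data names hypothetical):
# change_lr = LearningRateScheduler(scheduler)
# model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
#           callbacks=[change_lr, TensorBoard(log_dir=log_filepath)])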

Example #59
import os
import urllib2
import tarfile
import zipfile
try:
    import cPickle as pickle
except ImportError:
    import pickle

# imports for the K, tf and np names used below
import numpy as np
import tensorflow as tf
from keras import backend as K

from eval_methods import *

os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"

if K.backend() == "tensorflow":
    os.environ["CUDA_VISIBLE_DEVICES"] = '2'
    config = K.tf.ConfigProto()
    config.gpu_options.allow_growth = True
#     config.gpu_options.per_process_gpu_memory_fraction = 0.95
    session = K.tf.Session(config=config)
    K.set_session(session)


# Set parameters:
tf.set_random_seed(10086)
np.random.seed(10086)

max_features = 5000
maxlen = 400
batch_size = 128
Example #60
"""
"""
import os

from keras import backend as K
from keras.layers import Embedding

from ._mixin_common import mixedomatic

if K.backend() == 'tensorflow':
    import tensorflow as tf
    from tensorflow.contrib.tensorboard.plugins import projector
    from keras.callbacks import TensorBoard


__all__ = ('TensorBoardEmbedding', 'find_embedding_layers', )


def find_embedding_layers(layers):
    '''Recursively find embedding layers.

    :param layers: The keras model layers. Typically obtained via model.layers
    :type layers: list
    '''
    elayers = []
    for layer in layers:
        if isinstance(layer, Embedding):
            elayers.append(layer)

        slayers = getattr(layer, 'layers', [])
        elayers += find_embedding_layers(slayers)
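    return elayers  # assumed closing line: return every embedding layer found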