Example #1
    def build(self, input_shape):
        self.kernel = []
        if self.conn_type == "S":
            # scale-only parameter
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
        elif self.conn_type == "B":
            # bias-only parameter
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "S+B" or self.conn_type == "B+S":
            # scale and bias parameters
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        else:
            raise ValueError(
                "Unknown connection type for CrowdsRegression layer!")

        super(CrowdsRegression,
              self).build(input_shape)  # Be sure to call this somewhere!
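A note on usage: build() above only creates the per-annotator parameters. A minimal sketch of a matching call(), assuming the layer broadcasts the single bottleneck prediction to one output per annotator (the method body is an assumption, not part of the original source):

    def call(self, x):
        # x: (batch, 1); each kernel: (1, num_annotators) -> broadcasts to
        # (batch, num_annotators)
        if self.conn_type == "S":
            return x * self.kernel[0]  # scale only
        elif self.conn_type == "B":
            return x + self.kernel[0]  # bias only
        else:  # "S+B" / "B+S": scale, then shift
            return x * self.kernel[0] + self.kernel[1]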
Example #2
    def create_model(self):
        self.init_tensorflow()
        self.input = Input(shape=(self.train['x_train'].shape[1], 1))
        self.lstm = LSTM(self._p['units'],
                         batch_size=self._p['batch_size'],
                         recurrent_initializer=Ones(),
                         kernel_initializer=Ones(),
                         use_bias=False,
                         recurrent_activation=self._p['activation'],
                         dropout=0.25)(self.input)
        self.lstm = Dense(1)(self.lstm)  # single regression output
        self.model = Model(inputs=self.input, outputs=self.lstm)
        self.model.compile(loss='mae',
                           optimizer=RMSprop(learning_rate=self._p['lr']),
                           metrics=['mae'])
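A hedged usage sketch (the 'y_train' key and the 'epochs' entry are assumptions; only 'x_train', 'batch_size' and 'lr' appear in the snippet):

    def train_model(self):
        # assumes create_model() has already been called
        self.model.fit(self.train['x_train'], self.train['y_train'],
                       batch_size=self._p['batch_size'],
                       epochs=self._p.get('epochs', 10))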
Example #3
    def _deconv(self, feature_map, f, k, s):
        """
        The deconvolution operation to upsample the average feature map downstream.
        f = # of filters from previous leaky layer (int); unused here, since the
            averaged map has a single channel
        k = size of kernel from previous leaky layer
        s = amount of stride from previous leaky layer
        """

        x = Input(shape=(None, None, 1))

        # Mirror the previous layer's kernel size and stride so the transposed
        # convolution undoes its downsampling.
        y = Conv2DTranspose(
            filters=1,
            kernel_size=(k, k),
            strides=(s, s),
            padding='same',
            kernel_initializer=Ones(),  # set all weights to 1
            bias_initializer=Zeros()  # set all biases to 0
        )(x)

        deconv_model = Model(inputs=[x], outputs=[y])

        inps = [deconv_model.input, K.learning_phase()]  # input placeholder
        outs = [deconv_model.layers[-1].output]  # output placeholder

        deconv_func = K.function(inps, outs)  # evaluation function

        return deconv_func([feature_map, 0])[0]  # 0 = test phase
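A hedged usage sketch (the instance name, map shape, and parameter values are hypothetical):

    # upsample a 1-channel 14x14 average feature map with stride 2
    avg_map = np.random.rand(1, 14, 14, 1).astype('float32')
    upsampled = visualizer._deconv(avg_map, f=64, k=3, s=2)  # -> (1, 28, 28, 1)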
Example #4
    def build(self, input_shape):
        if self.conn_type == "MW":
            # matrix of weights per annotator
            self.kernel = self.add_weight(
                "CrowdLayer",
                (self.output_dim, self.output_dim, self.num_annotators),
                initializer=init_identities,
                trainable=True)
        elif self.conn_type == "VW":
            # vector of weights (one scale per class) per annotator
            self.kernel = self.add_weight(
                "CrowdLayer", (self.output_dim, self.num_annotators),
                initializer=Ones(),
                trainable=True)
        elif self.conn_type == "VB":
            # two vectors of weights (one scale and one bias per class) per annotator
            self.kernel = []
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "VW+B":
            # two vectors of weights (one scale and one bias per class) per annotator
            self.kernel = []
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "SW":
            # single weight value per annotator
            self.kernel = self.add_weight("CrowdLayer",
                                          (self.num_annotators, 1),
                                          initializer=Ones(),
                                          trainable=True)
        else:
            raise ValueError(
                "Unknown connection type for CrowdsClassification layer!")

        super(CrowdsClassification,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #5
    def __init__(self, nhiddenLayers=10):
        super(LogSumExpLayer, self).__init__()
        # Activation functions
        log_activ = backend.log
        exp_activ = backend.exp

        # Define exponential and logarithmic layers; Dense accepts any
        # callable as its activation, so the backend ops are passed directly.
        self.layerexp = Dense(nhiddenLayers,
                              kernel_initializer=Ones(),
                              bias_initializer=Ones(),
                              activation=exp_activ,
                              name='exp')  # first hidden layer
        self.layerlog = Dense(1,
                              use_bias=False,
                              kernel_initializer=Ones(),
                              activation=log_activ,
                              trainable=False,
                              name='log')  # output layer
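Chaining these two layers computes log(Σ_j exp(w_j·x + b_j)): a smooth maximum (log-sum-exp) over affine maps of the input, with the first layer trainable and the summing log layer frozen. A minimal sketch of a matching call() (the method body is an assumption, not part of the snippet):

    def call(self, inputs):
        # log( sum_j exp(W1 x + b1)_j ) -- the log-sum-exp of the hidden units
        return self.layerlog(self.layerexp(inputs))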
Example #6
    def build(self, input_shape):
        self._g = self.add_weight(name='gain',
                                  shape=(input_shape[-1], ),
                                  initializer=Ones(),
                                  trainable=True)
        self._b = self.add_weight(name='bias',
                                  shape=(input_shape[-1], ),
                                  initializer=Zeros(),
                                  trainable=True)
        super().build(input_shape)  # mark the layer as built
Example #7
    def build(self, input_shape):
        self.gamma = self.add_weight(name='gamma',
                                     shape=input_shape[-1:],
                                     initializer=Ones(),
                                     trainable=True)
        self.beta = self.add_weight(name='beta',
                                    shape=input_shape[-1:],
                                    initializer=Zeros(),
                                    trainable=True)
        super().build(input_shape)
Example #8
    def build(self, input_shape):

        self.gamma = self.add_weight(
            name="gamma", shape=input_shape[-1:], initializer=Ones(), trainable=True
        )

        self.beta = self.add_weight(
            name="beta", shape=input_shape[-1:], initializer=Zeros(), trainable=True
        )

        super(LayerNormalization, self).build(input_shape)
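Examples #6-#8 all build the same pair of layer-normalization parameters: a gain/gamma vector initialized to ones and a bias/beta vector initialized to zeros. A minimal sketch of the call() that usually accompanies such a build(), assuming K is the Keras backend (the epsilon value and the method body are assumptions):

    def call(self, x, epsilon=1e-6):
        # normalize over the feature axis, then rescale and shift
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + epsilon) + self.beta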
Example #9
    def __init__(self, input_shape, feature_dim=128, weight_decay=1e-4):
        """
        Constructor, initialize member variables.

        :param input_shape: (Integer) The width of the first dense layer, i.e. the
                dimensionality of the incoming features.
        :param feature_dim: (Integer) The size of the latent representation. 128 by default.
        :param weight_decay: (Float) The weight decay for the l2 regularization loss. 1e-4 by default.
        """
        super(ResNetNonLinearHead, self).__init__()

        self.batch_norm_decay = 0.9
        self.batch_norm_epsilon = 1e-5
        self.weight_decay = weight_decay

        self.head = tf.keras.Sequential([
            Dense(
                input_shape,
                use_bias=False,
                kernel_initializer=RandomNormal(stddev=.01),
                kernel_regularizer=tf.keras.regularizers.l2(self.weight_decay),
                bias_regularizer=tf.keras.regularizers.l2(self.weight_decay)),
            SyncBatchNormalization(center=True,
                                   scale=True,
                                   momentum=self.batch_norm_decay,
                                   epsilon=self.batch_norm_epsilon,
                                   gamma_initializer=Ones()),
            Activation("relu"),
            Dense(
                feature_dim,
                use_bias=False,
                kernel_initializer=RandomNormal(stddev=.01),
                kernel_regularizer=tf.keras.regularizers.l2(self.weight_decay),
                bias_regularizer=tf.keras.regularizers.l2(self.weight_decay)),
            SyncBatchNormalization(center=False,
                                   scale=True,
                                   momentum=self.batch_norm_decay,
                                   epsilon=self.batch_norm_epsilon,
                                   gamma_initializer=Ones())
        ])
Example #10
    def __init__(self, xdim, ydim, adim):
        super().__init__()

        self.input_output_layer = xdim + ydim + adim
        self.hidden_layer = 128
        self.shapes = [
            self.input_output_layer, self.hidden_layer, self.input_output_layer
        ]

        self.zeros = Zeros()
        self.ones = Ones()

        self.is_built = False
Example #11
    def _deconv(self, feature_map):
        """The deconvolution operation to upsample the average feature map downstream"""
        x = Input(shape=(None, None, 1))
        y = Conv2DTranspose(filters=1,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            kernel_initializer=Ones(),
                            bias_initializer=Zeros())(x)

        deconv_model = Model(inputs=[x], outputs=[y])

        inps = [deconv_model.input, K.learning_phase()]  # input placeholder
        outs = [deconv_model.layers[-1].output]  # output placeholder
        deconv_func = K.function(inps, outs)  # evaluation function

        return deconv_func([feature_map, 0])[0]
Example #12
def build_nn(q, initialize_cl, cl_df, dat_x=None):
    if dat_x is not None and dat_x.shape[1] == 5:
        features = Input(shape=(5, ), dtype="int32")

        encoders = []
        encoded = []
        for var_idx in range(5):
            if var_idx in [0, 1, 4]:
                current_encoder = preprocessing.CategoryEncoding(
                    output_mode="binary", sparse=True)
            else:
                current_encoder = preprocessing.Normalization()
            encoders.append(current_encoder)
            encoders[var_idx].adapt(dat_x[:, var_idx])
            encoded.append(encoders[var_idx](features[:, var_idx]))

        features_encoded = concatenate(encoded)
        hidden_layer = Dense(units=q, activation='tanh')(features_encoded)
    elif dat_x is not None and dat_x.shape[1] > 5:
        features = Input(shape=(dat_x.shape[1], ))
        hidden_layer = Dense(units=q, activation='tanh')(features)
    else:
        # dat_x is required to size the input layer
        raise ValueError("dat_x must be provided with at least 5 columns")

    if not initialize_cl:
        output_layer = Dense(units=1, activation=backend.exp)(hidden_layer)
    else:
        output_layer = Dense(units=1,
                             activation=backend.exp,
                             bias_initializer=Constant(value=cl_df),
                             kernel_initializer=Zeros())(hidden_layer)

    volumes = Input(shape=(1, ))
    offset_layer = Dense(units=1,
                         activation='linear',
                         use_bias=False,
                         trainable=False,
                         kernel_initializer=Ones())(volumes)

    merged = Multiply()([output_layer, offset_layer])

    model = Model(inputs=[features, volumes], outputs=merged)
    model.compile(loss='mse', optimizer='rmsprop', metrics=["mse"])

    return model
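A hedged usage sketch (dat_x, volumes and claims are hypothetical arrays): the second input carries the exposure volume, which the frozen ones-initialized Dense layer passes through unchanged before it multiplies the network's exp-activated prediction.

model = build_nn(q=20, initialize_cl=False, cl_df=None, dat_x=dat_x)
model.fit([dat_x, volumes], claims, epochs=10, batch_size=256)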
Example #13
def get_initializer(init_name='truncate_norm', init_stddev=0.05, seed=1024):
    if init_name in ('truncate_norm', 'truncate_normal'):
        return TruncatedNormal(stddev=init_stddev, seed=seed)
    elif init_name in ('glorot_norm', 'glorot_normal', 'xavier_norm',
                       'xavier_normal'):
        return glorot_normal(seed=seed)
    elif init_name in ('he_norm', 'he_normal'):
        return he_normal(seed=seed)
    elif init_name in ('truncate_uniform',):
        # NOTE: falls back to a truncated normal; Keras has no truncated
        # uniform initializer.
        return TruncatedNormal(stddev=init_stddev, seed=seed)
    elif init_name in ('glorot_uniform',):
        return glorot_uniform(seed=seed)
    elif init_name in ('he_uniform',):
        return he_uniform(seed=seed)
    elif init_name in ('zero', 'zeros'):
        return Zeros()
    elif init_name in ('one', 'ones'):
        return Ones()
    else:
        raise ValueError('unsupported initializer: {}'.format(init_name))
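A quick usage sketch (the layer width is arbitrary, and Dense is assumed to be imported from tensorflow.keras.layers):

dense = Dense(64,
              kernel_initializer=get_initializer('he_normal'),
              bias_initializer=get_initializer('zeros'))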
Example #14
    def batch_norm_relu(self,
                        inputs,
                        relu=True,
                        init_zero=False,
                        center=True,
                        scale=True,
                        data_format='channels_last'):
        """
        Performs a batch normalization followed by a ReLU.

        :param inputs: (Tensor) The input of size `[batch, channels, ...]`.
        :param relu: (Boolean) If False, omits the ReLU operation.
        :param init_zero: (Boolean) If True, initializes scale parameter of batch
                normalization with 0 instead of 1 (default).
        :param center: (Boolean) Whether to add learnable bias factor.
        :param scale:  (Boolean) Whether to add learnable scaling factor.
        :param data_format: (String) Either "channels_first" for `[batch, channels, height,
                width]` or "channels_last" for `[batch, height, width, channels]`.
        :return: output: A normalized `Tensor` with the same `data_format`.
        """
        if init_zero:
            gamma_initializer = Zeros()
        else:
            gamma_initializer = Ones()

        if data_format == 'channels_first':
            axis = 1
        else:
            axis = 3

        inputs = SyncBatchNormalization(
            axis=axis,
            center=center,
            scale=scale,
            momentum=self.batch_norm_decay,
            epsilon=self.batch_norm_epsilon,
            gamma_initializer=gamma_initializer)(inputs)
        if relu:
            inputs = Activation('relu')(inputs)

        return inputs
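A hedged usage sketch inside a residual branch (the tensor name is hypothetical): zero-initializing gamma on the final batch norm makes the branch start as an identity mapping, a common ResNet training trick.

        # final BN of a residual branch: no ReLU, gamma initialized to zero
        x = self.batch_norm_relu(x, relu=False, init_zero=True)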
Example #15
import pygad.kerasga
from pygad.kerasga import KerasGA
import numpy
import pygad
from tensorflow.keras.initializers import Ones
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy, MeanAbsoluteError
from tensorflow.keras import Model
from tensorflow.keras.layers import InputLayer, Dense
from tensorflow.keras.utils import plot_model, pack_x_y_sample_weight
from tensorflow.keras.models import Sequential
from tensorflow.keras.activations import sigmoid
from pygad.kerasga import predict


# Create a Keras model
model = Sequential([InputLayer(input_shape=(2, )),
                    Dense(units=2, use_bias=True, bias_initializer=Ones(), activation=sigmoid),
                    Dense(units=1, use_bias=True, bias_initializer=Ones(), activation=sigmoid)])

# Create an instance of the pygad.kerasga.KerasGA class.
kerasGA = KerasGA(model=model, num_solutions=9)

# Prepare the Training Data
# XOR problem inputs
data_inputs = numpy.array([[0, 0],
                           [0, 1],
                           [1, 0],
                           [1, 1]])

# XOR problem outputs
data_outputs = numpy.array([[0],
                            [1],
                            [1],
                            [0]])
Example #16
    return solution_fitness


def callback_generation(ga_instance):
    print("Generation = {generation}".format(
        generation=ga_instance.generations_completed))
    print("Fitness    = {fitness}".format(
        fitness=ga_instance.best_solution()[1]))


# Build the keras model using the functional API.
input_layer = Input(2)
dense_layer = Dense(2,
                    activation=sigmoid,
                    use_bias=True,
                    bias_initializer=Ones())(input_layer)
output_layer = Dense(1,
                     activation=sigmoid,
                     use_bias=True,
                     bias_initializer=Ones())(dense_layer)

model = Model(inputs=input_layer, outputs=output_layer)

# Create an instance of the pygad.kerasga.KerasGA class to build the initial population.
keras_ga = pygad.kerasga.KerasGA(model=model, num_solutions=9)

# XOR problem inputs
data_inputs = numpy.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])

# XOR problem outputs
data_outputs = numpy.array([[0.0], [1.0], [1.0], [0.0]])
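The snippet opens with the tail of a fitness function; a minimal sketch of the complete function, following the usual pygad.kerasga pattern (older PyGAD releases use the two-argument signature shown here, and BinaryCrossentropy is assumed to be imported from tensorflow.keras.losses):

def fitness_func(solution, sol_idx):
    # load the GA solution into the model's weights and predict
    predictions = pygad.kerasga.predict(model=model,
                                        solution=solution,
                                        data=data_inputs)
    bce = BinaryCrossentropy()
    # higher fitness for lower loss
    solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 1e-8)
    return solution_fitness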
Example #17

model = Sequential([
    Conv2D(filters=16,
           kernel_size=(3, 3),
           input_shape=(192, 192, 3),
           activation=relu,
           bias_initializer=Zeros(),
           kernel_initializer=RandomNormal(stddev=1., mean=0., seed=None),
           padding='same'),
    MaxPooling2D(pool_size=(3, 3), padding='same'),
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation=relu,
           bias_initializer=Zeros(),
           kernel_initializer=RandomUniform(minval=-0.06,
                                            maxval=0.06,
                                            seed=None),
           padding='same'),
    MaxPooling2D(pool_size=(3, 3)),
    Conv2D(filters=64,
           kernel_size=(3, 3),
           activation=relu,
           bias_initializer=Ones(),
           kernel_initializer=custom_initializer,  # assumed to be defined earlier in the source
           padding='same'),
    MaxPooling2D(pool_size=(3, 3), padding='same'),
])

model.summary()
Example #18
    def __init__(self, fmaps, names=''):
        super(AffineTransform, self).__init__(name=names)
        self.fmaps = fmaps
        # Dense layer predicts two values (e.g. scale and bias) per feature map.
        self.dense = Dense(fmaps * 2,
                           kernel_initializer=RandomNormal(0, 1.0),
                           bias_initializer=Ones())
        self.activation = LeakyReLU(alpha=0.2)
        self.reshape = Reshape((self.fmaps, 2))
Example #19
import numpy as np
from tensorflow.keras.layers import Flatten, MaxPooling2D, Conv2D, Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.initializers import Ones, he_normal, GlorotNormal
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.optimizers import SGD
import matplotlib.pyplot as plt


mini_batch_size = 16
num_epochs = 40
learning_rate = 0.0005

kernel_init = Ones(), he_normal(), GlorotNormal()  # candidate initializers, picked by index below
kernel_regularizer = l1(0.001), l2(0.001)  # candidate regularizers, picked by index below

(train_data, train_label), (test_data, test_label) = cifar10.load_data()
train_label = to_categorical(train_label, 10)
test_label = to_categorical(test_label, 10)

train_data = train_data.astype('float32') / 255
test_data = test_data.astype('float32') / 255

input_shape = np.shape(train_data)[1:]

x_input = Input(shape=input_shape)
x_cov1 = Conv2D(6, (5, 5), kernel_initializer=kernel_init[1], kernel_regularizer=kernel_regularizer[1], activation='relu')(x_input)
x_pool1 = MaxPooling2D()(x_cov1)
x_cov2 = Conv2D(16, (5, 5), kernel_initializer=kernel_init[1], kernel_regularizer=kernel_regularizer[1], activation='relu')(x_pool1)