Example #1
 def __build_cnn2D(self):
     inputs = tf.keras.Input(shape=(self.image_width, self.image_height,
                                    self.history_length))
     x = layers.Lambda(lambda layer: layer / 255)(inputs)
     x = layers.Conv2D(
         filters=16,
         kernel_size=(4, 4),
         strides=(2, 2),
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     x = layers.MaxPool2D((2, 2))(x)
     x = layers.Conv2D(
         filters=8,
         kernel_size=(2, 2),
         strides=(1, 1),
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     x = layers.MaxPool2D((2, 2))(x)
     x = layers.Flatten()(x)
     x = layers.Dense(
         64,
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     predictions = layers.Dense(
         self.num_actions,
         activation='linear',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     model = tf.keras.Model(inputs=inputs, outputs=predictions)
     model.compile(
         optimizer=optimizers.Adam(self.learning_rate), loss=losses.Huber()
     )  # loss to be removed; it is only needed by the buggy TF version installed on the Jetson
     model.summary()
     return model
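With its defaults (mode='fan_in', truncated normal), VarianceScaling(scale=2.) is the same configuration Keras ships as he_normal, which is why it is paired with the ReLU layers above. A standalone sketch of the same pattern; the image size, frame-stack length and action count below are placeholder assumptions, not values from the original class:

import tensorflow as tf
from tensorflow.keras import layers, initializers, optimizers, losses

IMAGE_WIDTH, IMAGE_HEIGHT, HISTORY_LENGTH, NUM_ACTIONS = 84, 84, 4, 4  # hypothetical

inputs = tf.keras.Input(shape=(IMAGE_WIDTH, IMAGE_HEIGHT, HISTORY_LENGTH))
x = layers.Lambda(lambda t: t / 255)(inputs)  # rescale uint8 pixels to [0, 1]
x = layers.Conv2D(16, (4, 4), strides=(2, 2), activation='relu',
                  kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
x = layers.MaxPool2D((2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dense(64, activation='relu',
                 kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
q_values = layers.Dense(NUM_ACTIONS, activation='linear',
                        kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
model = tf.keras.Model(inputs, q_values)
model.compile(optimizer=optimizers.Adam(1e-4), loss=losses.Huber())
model.summary()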
Example #2
 def __build_cnn1D(self):
     inputs = tf.keras.Input(shape=(self.state_size, self.history_length))
     x = layers.Conv1D(
         filters=16,
         kernel_size=4,
         strides=2,
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(inputs)
     x = layers.Conv1D(
         filters=32,
         kernel_size=2,
         strides=1,
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     x = layers.Flatten()(x)
     x = layers.Dense(
         64,
         activation='relu',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     predictions = layers.Dense(
         self.num_actions,
         activation='linear',
         kernel_initializer=initializers.VarianceScaling(scale=2.))(x)
     model = tf.keras.Model(inputs=inputs, outputs=predictions)
     model.compile(
         optimizer=optimizers.Adam(self.learning_rate), loss=losses.Huber()
     )  # loss to be removed; it is only needed by the buggy TF version installed on the Jetson
     model.summary()
     return model
Example #3
def get_quantized_initializer(w_initializer, w_range):
    """Gets the initializer and scales it by the range."""

    if isinstance(w_initializer, six.string_types):

        if w_initializer == "he_normal":
            return initializers.VarianceScaling(scale=2 * w_range,
                                                mode="fan_in",
                                                distribution="normal",
                                                seed=None)
        if w_initializer == "he_uniform":
            return initializers.VarianceScaling(scale=2 * w_range,
                                                mode="fan_in",
                                                distribution="uniform",
                                                seed=None)
        elif w_initializer == "glorot_normal":
            return initializers.VarianceScaling(scale=w_range,
                                                mode="fan_avg",
                                                distribution="normal",
                                                seed=None)
        elif w_initializer == "glorot_uniform":
            return initializers.VarianceScaling(scale=w_range,
                                                mode="fan_avg",
                                                distribution="uniform",
                                                seed=None)
        elif w_initializer == "random_uniform":
            return initializers.RandomUniform(-w_range, w_range)

    return w_initializer
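A hypothetical usage of the helper above (imports as in the snippet): recognised string names are expanded into initializers scaled by w_range, anything else is returned unchanged.

he = get_quantized_initializer("he_normal", 1.0)
# -> VarianceScaling(scale=2.0, mode="fan_in", distribution="normal")
uniform = get_quantized_initializer("random_uniform", 0.05)
# -> RandomUniform(-0.05, 0.05)
passthrough = get_quantized_initializer(initializers.GlorotUniform(), 1.0)
# -> the GlorotUniform instance, unchanged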
Example #4
def get_initializer(name, **kwargs):
    """ 
    Return a kernel initializer by name
    """
    custom_inits = {
        # initializers for EfficientNet
        'en_conv':
        initializers.VarianceScaling(scale=2.,
                                     mode='fan_out',
                                     distribution='untruncated_normal'),
        'en_dense':
        initializers.VarianceScaling(scale=1. / 2.,
                                     mode='fan_out',
                                     distribution='uniform')
    }
    if isinstance(name, str):
        name = name.lower()
        if name in custom_inits:
            return custom_inits[name]
        elif name == 'orthogonal':
            gain = kwargs.get('gain', 1.)
            return initializers.orthogonal(gain)
        return initializers.get(name)
    else:
        return name
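A hypothetical usage of the helper above: the custom EfficientNet keys return the VarianceScaling configurations from custom_inits, 'orthogonal' honours the optional gain, and any other string falls through to initializers.get().

conv_init = get_initializer('en_conv')       # VarianceScaling(scale=2., mode='fan_out')
dense_init = get_initializer('en_dense')     # VarianceScaling(scale=0.5, mode='fan_out')
orth_init = get_initializer('orthogonal', gain=1.4)
plain_init = get_initializer('he_uniform')   # initializers.get('he_uniform')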
Example #5
 def __init__(self,
              width,
              depth,
              num_anchors=9,
              separable_conv=True,
              freeze_bn=False,
              **kwargs):
     super(BoxNet, self).__init__(**kwargs)
     self.width = width
     self.depth = depth
     self.num_anchors = num_anchors
     self.separable_conv = separable_conv
     options = {
         'kernel_size': 3,
         'strides': 1,
         'padding': 'same',
         'bias_initializer': 'zeros',
     }
     if separable_conv:
         kernel_initializer = {
             'depthwise_initializer': initializers.VarianceScaling(),
             'pointwise_initializer': initializers.VarianceScaling(),
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.SeparableConv2D(filters=width,
                                    name=f'{self.name}/box-{i}',
                                    **options) for i in range(depth)
         ]
         self.head = layers.SeparableConv2D(filters=num_anchors * 4,
                                            name=f'{self.name}/box-predict',
                                            **options)
     else:
         kernel_initializer = {
             'kernel_initializer':
             initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.Conv2D(filters=width,
                           name=f'{self.name}/box-{i}',
                           **options) for i in range(depth)
         ]
         self.head = layers.Conv2D(filters=num_anchors * 4,
                                   name=f'{self.name}/box-predict',
                                   **options)
     self.bns = [[
         layers.BatchNormalization(momentum=MOMENTUM,
                                   epsilon=EPSILON,
                                   name=f'{self.name}/box-{i}-bn-{j}')
         for j in range(3, 8)
     ] for i in range(depth)]
     # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
     #             for i in range(depth)]
     self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
     self.reshape = layers.Reshape((-1, 4))
     self.level = 0
Example #6
def dense_embedding(n_features=6,
                    n_features_cat=2,
                    activation='relu',
                    number_of_pupcandis=100,
                    embedding_input_dim={0: 13, 1: 3},
                    emb_out_dim=8,
                    with_bias=True,
                    t_mode=0,
                    units=[64, 32, 16]):
    n_dense_layers = len(units)

    inputs_cont = Input(shape=(number_of_pupcandis, n_features-2), name='input')
    pxpy = Input(shape=(number_of_pupcandis, 2), name='input_pxpy')

    embeddings = []
    inputs = [inputs_cont, pxpy]
    for i_emb in range(n_features_cat):
        input_cat = Input(shape=(number_of_pupcandis, 1), name='input_cat{}'.format(i_emb))
        inputs.append(input_cat)
        embedding = Embedding(
            input_dim=embedding_input_dim[i_emb],
            output_dim=emb_out_dim,
            embeddings_initializer=initializers.RandomNormal(
                mean=0,
                stddev=0.4/emb_out_dim),
            name='embedding{}'.format(i_emb))(input_cat)
        embedding = Reshape((number_of_pupcandis, emb_out_dim))(embedding)
        embeddings.append(embedding)

    x = Concatenate()([inputs_cont] + embeddings)

    for i_dense in range(n_dense_layers):
        x = Dense(units[i_dense], activation='linear', kernel_initializer='lecun_uniform')(x)
        x = BatchNormalization(momentum=0.95)(x)
        x = Activation(activation=activation)(x)

    if t_mode == 0:
        x = GlobalAveragePooling1D(name='pool')(x)
        x = Dense(2, name='output', activation='linear')(x)

    if t_mode == 1:
        if with_bias:
            b = Dense(2, name='met_bias', activation='linear', kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
            pxpy = Add()([pxpy, b])
        w = Dense(1, name='met_weight', activation='linear', kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
        w = BatchNormalization(trainable=False, name='met_weight_minus_one', epsilon=False)(w)
        x = Multiply()([w, pxpy])

        x = GlobalAveragePooling1D(name='output')(x)
    outputs = x

    keras_model = Model(inputs=inputs, outputs=outputs)

    keras_model.get_layer('met_weight_minus_one').set_weights([np.array([1.]), np.array([-1.]), np.array([0.]), np.array([1.])])

    return keras_model
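The closing set_weights call pins the frozen 'met_weight_minus_one' BatchNormalization (epsilon=False, i.e. 0) to [gamma, beta, moving_mean, moving_variance] = [1, -1, 0, 1], so at inference it simply outputs the learned weight minus one. A minimal sketch of that behaviour; the layer shape and test value here are made up:

import numpy as np
import tensorflow as tf

bn = tf.keras.layers.BatchNormalization(trainable=False, epsilon=0)
bn.build((None, 1))
# Weight order for a BatchNormalization layer with scale and center enabled:
# [gamma, beta, moving_mean, moving_variance]
bn.set_weights([np.array([1.]), np.array([-1.]), np.array([0.]), np.array([1.])])
print(bn(np.array([[3.0]], dtype='float32'), training=False))  # -> approx. [[2.]]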
Example #7
    def __init__(self):
        super(kerasModel, self).__init__()
        self.layersList = []
        self.layersList.append(
            kl.Dense(9,
                     activation="relu",
                     input_shape=(4, ),
                     use_bias=False,
                     kernel_initializer=ki.VarianceScaling(),
                     name="dense_1"))
        self.layersList.append(
            kl.Dense(1,
                     activation="sigmoid",
                     kernel_initializer=ki.VarianceScaling(),
                     use_bias=False,
                     name="out"))

        self.loss = discountedLoss()
        self.optimizer = ko.Adam(learning_rate=1e-2)
        self.train_loss = kme.Mean(name='train_loss')
        self.validation_loss = kme.Mean(name='val_loss')
        self.metric = kme.Accuracy(name="accuracy")

        @tf.function()
        def predict(x):
            """
            This is where we run
            through our whole dataset and return it, when training and testing.
            """
            for l in self.layersList:
                x = l(x)
            return x

        self.predict = predict

        @tf.function()
        def train_step(x, labels, adv):
            """
                This is a TensorFlow function, run once for each epoch for the
                whole input. We move forward first, then calculate gradients with
                Gradient Tape to move backwards.
            """
            with tf.GradientTape() as tape:
                predictions = self.predict(x)
                loss = self.loss.call(y_true=labels,
                                      y_pred=predictions,
                                      adv=adv)
            gradients = tape.gradient(loss, self.trainable_variables)
            self.optimizer.apply_gradients(
                zip(gradients, self.trainable_variables))
            self.train_loss(loss)
            return loss

        self.train_step = train_step
Example #8
def create_keras_two_layer_dense_model(*,
    input_size,
    output_size,
    verbose=False,
    **kwargs
):

    # ...................................................
    # Create model
    model = Sequential()
    
    #.. First hidden layer
    model.add(Dense(
        input_dim=input_size,
        units=kwargs['h1_unit_size'], 
        activation=kwargs["h1_activation"], 
        kernel_initializer=initializers.VarianceScaling(scale=2.0, seed=0)
    ))
    model.add(tf.keras.layers.Dropout(kwargs["h1_Dropout"]))

    #.. Output layer
    model.add(Dense( 
        units=output_size, 
        activation=kwargs["out_activation"],
        kernel_regularizer=tf.keras.regularizers.l2(0.001),
        kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
    ))

    # Print network summary
    if verbose:
        model.summary()

    # ...................................................
    # Define Loss Function and Training Operation
    """ # [option]: Use only default values,  
        model.compile( optimizer='sgd', 
            loss='sparse_categorical_crossentropy', 
            metrics=['acc'])
    """
    model.compile(
        optimizer= kwargs["optimizer"],
        loss= losses.sparse_categorical_crossentropy,
        metrics= kwargs["metrics"] # even one arg must be in the list
    )
    
    return model
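A hypothetical call with the kwargs keys the builder reads; all values below are made up (e.g. a 784-pixel input classified into 10 classes):

model = create_keras_two_layer_dense_model(
    input_size=784,
    output_size=10,
    verbose=True,
    h1_unit_size=128,
    h1_activation='relu',
    h1_Dropout=0.5,
    out_activation='softmax',
    optimizer='adam',
    metrics=['acc'],
)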
Example #9
    def block(self, x, iw, ib, filters, upsample=True):
        if upsample:
            y = self.upsample(self.upsample_size)(x)
        else:
            y = layers.Activation('linear')(x)

        # Style vector for use in tRGB
        w_rgb = layers.Dense(filters,
                             kernel_initializer=initializers.VarianceScaling(
                                 200 / y.shape[2]))(iw)
        # Reshape style vector
        w = layers.Dense(x.shape[-1], kernel_initializer='he_uniform')(iw)
        # Crop noise to fit image
        b = layers.Lambda(self.crop)([ib, y])
        # Pass noise through a dense layer to fit filter size
        d = layers.Dense(filters, kernel_initializer='zeros')(b)

        y = self.mod_conv2d(filters)([y, w])
        y = layers.add([y, d])
        y = layers.LeakyReLU(0.2)(y)

        w = layers.Dense(filters, kernel_initializer='he_uniform')(iw)
        d = layers.Dense(filters, kernel_initializer='zeros')(b)

        y = self.mod_conv2d(filters)([y, w])
        y = layers.add([y, d])
        y = layers.LeakyReLU(0.2)(y)

        return y, self.tRGB(y, w_rgb)
Example #10
    def conv2d_bn(self,
                  x,
                  nb_filter,
                  num_row,
                  num_col,
                  padding='same',
                  strides=(1, 1),
                  use_bias=False):

        if K.image_data_format() == 'channels_first':
            channel_axis = 1

        else:
            channel_axis = -1

        x = Conv2D(nb_filter, (num_row, num_col),
                   strides=strides,
                   padding=padding,
                   use_bias=use_bias,
                   kernel_regularizer=regularizers.l2(0.00004),
                   kernel_initializer=initializers.VarianceScaling(
                       scale=2.0,
                       mode='fan_in',
                       distribution='normal',
                       seed=None))(x)

        x = BatchNormalization(axis=channel_axis, momentum=0.9997,
                               scale=False)(x)
        x = Activation('relu')(x)
        return x
Example #11
 def build(self, input_shape):
     self.denses = [
         KL.Dense(1024,
                  kernel_initializer=initializers.VarianceScaling(),
                  kernel_regularizer=self._kernel_regularizer,
                  activation='relu') for _ in range(2)
     ]
     super().build(input_shape)
Example #12
    def __init__(self,
                 width,
                 depth,
                 num_classes=8,
                 num_anchors=9,
                 freeze_bn=False,
                 **kwargs):
        super(ClassNet, self).__init__(**kwargs)
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
        }

        kernel_initializer = {
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }
        options.update(kernel_initializer)
        self.convs = [
            layers.SeparableConv2D(filters=self.width,
                                   bias_initializer='zeros',
                                   name=f'{self.name}/class-{i}',
                                   **options) for i in range(self.depth)
        ]
        self.head = layers.SeparableConv2D(
            filters=self.num_classes * self.num_anchors,
            bias_initializer=PriorProbability(probability=0.01),
            name=f'{self.name}/class-predict',
            **options)

        self.bns = [[
            BatchNormalization(freeze=freeze_bn,
                               momentum=MOMENTUM,
                               epsilon=EPSILON,
                               name=f'{self.name}/class-{i}-bn-{j}')
            for j in range(3, 8)
        ] for i in range(self.depth)]
        self.activation = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, self.num_classes))
        self.activation_sigmoid = layers.Activation('sigmoid')
        self.level = 0
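PriorProbability(probability=0.01) on the class-predict bias is the focal-loss trick of starting every class logit at a small prior probability. The class itself is not shown in this snippet, so the following is only a hedged sketch of what such an initializer typically looks like:

import numpy as np
import tensorflow as tf
from tensorflow.keras import initializers

class PriorProbability(initializers.Initializer):
    """Hypothetical sketch: fill the bias so that sigmoid(bias) == probability."""

    def __init__(self, probability=0.01):
        self.probability = probability

    def __call__(self, shape, dtype=None):
        # b = -log((1 - p) / p)  =>  sigmoid(b) == p
        value = -np.log((1 - self.probability) / self.probability)
        return tf.fill(shape, tf.cast(value, dtype or tf.float32))

    def get_config(self):
        return {'probability': self.probability}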
Example #13
 def __init__(self, width, depth, num_classes=20, num_anchors=9, separable_conv=True, freeze_bn=False, **kwargs):
     super(ClassNet, self).__init__(**kwargs)
     self.width = width
     self.depth = depth
     self.num_classes = num_classes
     self.num_anchors = num_anchors
     self.separable_conv = separable_conv
     options = {
         'kernel_size': 3,
         'strides': 1,
         'padding': 'same',
     }
     if self.separable_conv:
         kernel_initializer = {
             'depthwise_initializer': initializers.VarianceScaling(),
             'pointwise_initializer': initializers.VarianceScaling(),
         }
         options.update(kernel_initializer)
         self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
                                              **options)
                       for i in range(depth)]
         self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
                                            bias_initializer=PriorProbability(probability=0.01),
                                            name=f'{self.name}/class-predict', **options)
     else:
         kernel_initializer = {
             'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
         }
         options.update(kernel_initializer)
         self.convs = [layers.Conv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
                                     **options)
                       for i in range(depth)]
         self.head = layers.Conv2D(filters=num_classes * num_anchors,
                                   bias_initializer=PriorProbability(probability=0.01),
                                    name=f'{self.name}/class-predict', **options)
     self.bns = [
         [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
          in range(3, 8)]
         for i in range(depth)]
     # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/class-{i}-bn-{j}') for j in range(3, 8)]
     #             for i in range(depth)]
     self.relu = layers.Lambda(swish)
     self.reshape = layers.Reshape((-1, num_classes))
     self.activation = layers.Activation('sigmoid')
     self.level = 0
Example #14
    def tRGB(self, x, w):
        size = x.shape[2]
        scale = self.max_size // size
        vs = initializers.VarianceScaling(200 / size)

        x = self.mod_conv2d(3, (1, 1), kernel_initializer=vs,
                            demod=False)([x, w])
        x = self.upsample((scale, scale))(x)

        return x
Example #15
    def build(self, input_shape):
        self._conv_classification_head = KL.Conv2D(
            self._multiples * self._num_classes, (1, 1),
            padding='valid',
            activation=None,
            kernel_initializer=self._kernel_initializer_classification_head,
            kernel_regularizer=self._kernel_regularizer,
            name=f'{self.name}classification_head')
        self._conv_box_prediction_head = KL.Conv2D(
            (self._num_classes - 1) * self._multiples * 4, (1, 1),
            padding='valid',
            activation=None,
            kernel_initializer=self._kernel_initializer_box_prediction_head,
            kernel_regularizer=self._kernel_regularizer,
            name=f'{self.name}box_prediction_head')

        if self._use_mask:
            self._segmentation_layers = [
                KL.Conv2D(256, (3, 3),
                          padding='valid',
                          activation='relu',
                          kernel_initializer=initializers.VarianceScaling(
                              scale=2., mode='fan_out'),
                          kernel_regularizer=self._kernel_regularizer),
                KL.Conv2DTranspose(
                    256, (2, 2),
                    strides=(2, 2),
                    padding='valid',
                    activation='relu',
                    kernel_initializer=initializers.VarianceScaling(
                        scale=2., mode='fan_out'),
                    kernel_regularizer=self._kernel_regularizer),
                KL.Conv2D(self._num_classes, (3, 3),
                          padding='valid',
                          activation='relu',
                          kernel_initializer=initializers.VarianceScaling(
                              scale=2., mode='fan_out'),
                          kernel_regularizer=self._kernel_regularizer)
            ]

        super().build(input_shape)
Example #16
    def __init__(self, width, depth, num_anchors=9, name='box_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'bias_initializer': 'zeros',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_anchors * 4, name=f'{self.name}/box-predict', **options)

        self.bns = [[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}') for j in
             range(3, 8)] for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, 4))
Example #17
def tiny_classifier(source_layers, num_priors, num_classes=21):
    mbox_conf = []
    mbox_loc = []
    for i, x in enumerate(source_layers):
        # source_layers

        # name = x.name.split('/')[0]  # extract just the name (e.g. block3b_add)
        name = x.name.split(':')[0]  # extract just the name (e.g. block3b_add)

        x1 = SeparableConv2D(num_priors[i] * num_classes, 3, padding='same',
                             depthwise_initializer=initializers.VarianceScaling(),
                             pointwise_initializer=initializers.VarianceScaling(),
                             name=name + '_mbox_conf_1')(x)

        x1 = Flatten(name=name + '_mbox_conf_flat')(x1)
        mbox_conf.append(x1)

        x2 = SeparableConv2D(num_priors[i] * 4, 3, padding='same',
                             depthwise_initializer=initializers.VarianceScaling(),
                             pointwise_initializer=initializers.VarianceScaling(),
                             name=name + '_mbox_loc_1')(x)

        x2 = Flatten(name=name + '_mbox_loc_flat')(x2)
        mbox_loc.append(x2)

    mbox_loc = Concatenate(axis=1, name='mbox_loc')(mbox_loc)
    mbox_loc = Reshape((-1, 4), name='mbox_loc_final')(mbox_loc)

    mbox_conf = Concatenate(axis=1, name='mbox_conf')(mbox_conf)
    mbox_conf = Reshape((-1, num_classes), name='mbox_conf_logits')(mbox_conf)

    predictions = Concatenate(axis=2, name='predictions', dtype=tf.float32)([mbox_loc, mbox_conf])

    return predictions
Example #18
	def _get_qfunc(self):

		inputs = Input(shape=self.obs_shape, dtype='uint8')
		cast_input = tf.dtypes.cast(inputs, tf.float32)
		input_scaled = cast_input/255.0

		conv1 = layers.Conv2D(filters=32,kernel_size=(8,8),strides=4,kernel_initializer=initializers.VarianceScaling(scale=2,distribution='untruncated_normal'),
		activation=tf.nn.relu)(input_scaled)

		conv2 = layers.Conv2D(filters=64,kernel_size=(4,4),strides=2,kernel_initializer=initializers.VarianceScaling(scale=2,distribution='untruncated_normal'),
		activation=tf.nn.relu)(conv1)

		conv3 = layers.Conv2D(filters=64,kernel_size=(3,3),strides=1,kernel_initializer=initializers.VarianceScaling(scale=2,distribution='untruncated_normal'),
		activation=tf.nn.relu)(conv2)

		flatten = layers.Flatten()(conv3)
		dense = layers.Dense(units=512,kernel_initializer=initializers.VarianceScaling(scale=2,distribution='untruncated_normal'),
		activation=tf.nn.relu)(flatten)

		outputs = layers.Dense(units=self.n_actions)(dense)
		model = tf.keras.Model(inputs, outputs)

		return model
Example #19
    def f(x):
        initializer = initializers.VarianceScaling(2,
                                                   mode='fan_avg',
                                                   distribution='normal')

        d = layers.Dense(size,
                         kernel_initializer=initializer,
                         dtype=config.policy)(x)
        if dropout_rate:
            d = layers.Dropout(dropout_rate, dtype=config.policy)(d)
        d = layers.BatchNormalization(dtype=tf.float32)(d)
        if activation is not None:
            d = activation(d)
        return d
Example #20
    def __init__(self, width, depth, num_classes=20, num_anchors=9, name='class_net', **kwargs):
        self.name = name
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }

        self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}', **options) for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_classes * num_anchors, bias_initializer=PriorProbability(probability=0.01), name=f'{self.name}/class-predict', **options)

        self.bns = [[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
             in range(3, 8)] for i in range(depth)]

        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, num_classes))
        self.activation = layers.Activation('sigmoid')
Example #21
    def conv2d_bn(self, x, nb_filter, num_row, num_col, padding='same',
                  strides=(1, 1), use_bias=False, activation_use='relu',
                  use_bn=True, use_activation=True):

        x = Conv2D(nb_filter, (num_row, num_col),
                   strides=strides,
                   padding=padding,
                   use_bias=use_bias,
                   kernel_regularizer=regularizers.l2(0.00004),
                   kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
        
        if use_bn:
            x = BatchNormalization(axis=self.channel_axis, momentum=0.9997, scale=False)(x)
        if use_activation:
            x = Activation(activation_use)(x)
        return x
Example #22
def create_keras_one_layer_dense_model(*,
    input_size,
    output_size,
    verbose=False,
    **kwargs
):

    """
        
        Notes:
        https://www.tensorflow.org/tutorials/keras/save_and_load
    """
    
    
    # ...................................................
    # Create model
    model = Sequential()

    #.. add fully connected layer
    model.add(Dense(
        input_dim=input_size,  # e.g. 784 pixels
        units=output_size, 
        activation=kwargs["out_activation"],
        kernel_regularizer=tf.keras.regularizers.l2(0.001),
        kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
    ))

    # Print network summary
    if verbose:
        model.summary()


    # ...................................................
    # Define Loss Function and Training Operation
    """ # [option]: Use only default values,  
        model.compile( optimizer='sgd', 
            loss='sparse_categorical_crossentropy', 
            metrics=['acc'])
    """
    model.compile(
        optimizer= kwargs["optimizer"],
        loss= losses.sparse_categorical_crossentropy,
        metrics= kwargs["metrics"] # even one arg must be in the list
    )
    
    return model
Example #23
def run_deep_WaveNet(X_train, y_train, X_val, y_val, X_test, y_test):
    X_train = np.expand_dims(X_train, axis=2)
    X_val = np.expand_dims(X_val, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    model = wrappers.scikit_learn.KerasClassifier(build_fn=build_deep_WaveNet)

    he_avg_init = initializers.VarianceScaling(scale=2.,
                                               mode='fan_avg',
                                               distribution="uniform")

    opti = [
        optimizers.Adam(learning_rate=10),
        optimizers.Adam(learning_rate=1),
        optimizers.Adam(learning_rate=0.1),
        optimizers.Adam(learning_rate=0.01),
        optimizers.Adam(learning_rate=0.0001),
        optimizers.Adam(learning_rate=0.00001),
    ]
    init = ['he_normal', he_avg_init]
    epochs = [10000]
    batches = [25, 50, 150]

    param_grid = dict(optimizer=opti,
                      epochs=epochs,
                      batch_size=batches,
                      init=init)

    callback = callbacks.EarlyStopping(monitor='loss', patience=50)

    grid = GridSearchCV(estimator=model, param_grid=param_grid)
    grid_result = grid.fit(X_train, y_train, callbacks=[callback])

    print("Best: {} using {}".format(grid_result.best_score_,
                                     grid_result.best_params_))

    model = grid.best_estimator_.model
    test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)

    print("Model Config : {}".format(model.optimizer.get_config()))
    print("Test Loss : {} Test Acc : {}".format(test_loss, test_acc))
Example #24
 def f(x):
     padding = 'same' if strides == 1 or config.pad else 'valid'
     initializer = initializers.VarianceScaling(2,
                                                mode='fan_avg',
                                                distribution='normal')
     c = layers.Conv2D(filters,
                       kernel,
                       padding=padding,
                       strides=strides,
                       kernel_initializer=initializer,
                       dtype=config.policy)(x)
     if dropout_rate:
         c = layers.Dropout(dropout_rate,
                            noise_shape=[None, 1, 1, None],
                            dtype=config.policy)(c)
     c = layers.BatchNormalization(dtype=tf.float32)(c)
     if not norelu:
         c = layers.ReLU(dtype=config.policy)(c)
     if pool:
         c = layers.MaxPooling2D(dtype=config.policy)(c)
     return c
Example #25
def build_best_WaveNet():
    he_avg_init = initializers.VarianceScaling(scale=2.,
                                               mode='fan_avg',
                                               distribution="uniform")

    model = models.Sequential()

    model.add(layers.InputLayer(input_shape=(265, 1)))

    for rate in (1, 2, 4, 8) * 2:
        model.add(
            layers.Conv1D(filters=20,
                          kernel_size=2,
                          activation="relu",
                          dilation_rate=rate))

    model.add(layers.Conv1D(filters=10, kernel_size=1))

    model.add(layers.Flatten())
    model.add(
        layers.Dense(64, kernel_initializer=he_avg_init, activation='sigmoid'))
    model.add(layers.Dense(1))

    return model
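A quick smoke test of the builder above, using random data with the declared (265, 1) input shape (the batch size is arbitrary):

import numpy as np

model = build_best_WaveNet()
dummy = np.random.rand(4, 265, 1).astype('float32')
print(model.predict(dummy).shape)  # (4, 1): one output per sample
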
Example #26
import tensorflow as tf

from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
import tensorflow.keras.backend as K

init = initializers.VarianceScaling(scale=0.5,
                                    mode='fan_in',
                                    distribution='normal')


def dense_bn(x, channels, name=None, l2_reg=1e-4):
    x = layers.Dense(channels,
                     use_bias=False,
                     name='{}_fc'.format(name),
                     kernel_regularizer=regularizers.l2(l2_reg))(x)
    x = layers.BatchNormalization(scale=True, name='{}_bn'.format(name))(x)
    return layers.Activation('relu', name=name)(x)


def dense_selu(x, channels, name=None, l2_reg=1e-4):
    x = layers.Dense(channels,
                     name='{}/fc'.format(name),
                     kernel_initializer=init,
                     kernel_regularizer=regularizers.l2(l2_reg))(x)
    return layers.Activation('selu', name=name + '/selu')(x)


def flatten_pixels(nchannels):
Example #27
discount_factor = 0.99
n_step = 10
one_step_weight = 1.0 / 2.0
n_step_weight = 1.0 / 2.0
expert_weight = 0.0
model_restore_path = None

# network architecture
conv_layers = {
    'filters': [32, 64, 64, 1024],
    'kernel_sizes': [8, 4, 3, 7],
    'strides': [4, 2, 1, 1],
    'paddings': ['valid' for _ in range(4)],
    'activations': ['relu' for _ in range(4)],
    'initializers':
    [initializers.VarianceScaling(scale=2.0) for _ in range(4)],
    'names': ['conv_%i' % (i) for i in range(1, 5)]
}
dense_layers = None

# exploration parameters
eps_schedule = [[1, 0.1, 1000000], [0.1, 0.01, 5000000],
                [0.01, 0.001, 5000000]]

# training session parameters
target_interval = 10000
warmup_steps = 50000
pretrain_steps = None
learning_interval = 4
num_steps = 12000000
num_episodes = 1500
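The eps_schedule entries appear to follow an [eps_start, eps_end, duration_in_steps] convention, but that reading is an assumption, not something stated in the snippet. Under that assumption, a piecewise-linear annealing helper could look like this:

def epsilon_at(step, schedule):
    # Assumed format: each entry is [eps_start, eps_end, duration_in_steps].
    offset = 0
    for eps_start, eps_end, duration in schedule:
        if step < offset + duration:
            frac = (step - offset) / duration
            return eps_start + frac * (eps_end - eps_start)
        offset += duration
    return schedule[-1][1]

print(epsilon_at(500_000, eps_schedule))     # 0.55 under the assumed format
print(epsilon_at(20_000_000, eps_schedule))  # clamps to the final value, 0.001
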
Example #28
    def get_model(self):
        # Input Layer
        user_input = Input(shape=(1, ), dtype='int32', name='user_input')
        item_input = Input(shape=(1, ), dtype='int32', name='item_input')
        text_input = Input(shape=(self.args.sim_feature_size, ),
                           dtype='float32',
                           name='text_input')

        # Embedding layer
        MF_Embedding_User = Embedding(
            input_dim=self.num_users,
            output_dim=self.args.mf_embedding_dim,
            name='mf_embedding_user',
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(0.01),
            input_length=1)
        MF_Embedding_Item = Embedding(
            input_dim=self.num_items,
            output_dim=self.args.mf_embedding_dim,
            name='mf_embedding_item',
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(0.01),
            input_length=1)
        # MF part
        mf_user_latent = tf.keras.layers.Flatten()(
            MF_Embedding_User(user_input))
        mf_item_latent = tf.keras.layers.Flatten()(
            MF_Embedding_Item(item_input))  # why Flatten?
        mf_vector = concatenate([mf_user_latent, mf_item_latent
                                 ])  # element-wise multiply    ???

        for idx in range(len(self.args.mf_fc_unit_nums)):  # learn non-linear interactions
            layer = Dense(self.args.mf_fc_unit_nums[idx],
                          activation='relu',
                          name="layer%d" % idx)
            mf_vector = layer(mf_vector)

        # Text part
        # text_input = Dense(10, activation='relu', kernel_regularizer=l2(0.01))(text_input)  # sim? should it be processed by another MLP first?

        # Concatenate MF and TEXT parts
        predict_vector = concatenate([mf_vector, text_input])

        for idx in range(len(self.args.final_MLP_layers)):  # add an MLP after merging?
            layer = Dense(self.args.final_MLP_layers[idx],
                          activation='relu')  # name="layer%d"  % idx
            predict_vector = layer(predict_vector)

        predict_vector = tf.keras.layers.Dropout(0.5)(
            predict_vector)  # use dropout?

        if self.args.final_activation == 'softmax':
            predict_vector = Dense(2, activation='softmax',
                                   name="prediction")(predict_vector)
        elif self.args.final_activation == 'sigmoid':
            predict_vector = Dense(1,
                                   activation='sigmoid',
                                   kernel_initializer='lecun_uniform',
                                   name="prediction")(predict_vector)

        # # Final prediction layer
        # predict_vector = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input, text_input],
                      outputs=predict_vector)
        return model
Example #29
    def get_model(self):
        num_layer = len(self.layers)  # Number of layers in the MLP
        # Input variables
        user_input = Input(shape=(1, ), dtype='int32', name='user_input')
        item_input = Input(shape=(1, ), dtype='int32', name='item_input')

        # Embedding layer
        MF_Embedding_User = Embedding(
            input_dim=self.num_users,
            output_dim=self.args.mf_embedding_dim,
            name='mf_embedding_user',
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(self.args.NCF_reg_mf),
            input_length=1)  #

        MF_Embedding_Item = Embedding(
            input_dim=self.num_items,
            output_dim=self.args.mf_embedding_dim,
            name='mf_embedding_item',
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(self.args.NCF_reg_mf),
            input_length=1)  #

        MLP_Embedding_User = Embedding(
            input_dim=self.num_users,
            output_dim=int(self.args.mf_fc_unit_nums[0] / 2),
            name="mlp_embedding_user",
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(self.args.NCF_reg_layers[0]),
            input_length=1)  #

        MLP_Embedding_Item = Embedding(
            input_dim=self.num_items,
            output_dim=int(self.args.mf_fc_unit_nums[0] / 2),
            name='mlp_embedding_item',
            embeddings_initializer=initializers.VarianceScaling(
                scale=0.01, distribution='normal'),
            embeddings_regularizer=l2(self.args.NCF_reg_layers[0]),
            input_length=1)  #

        # MF part
        mf_user_latent = tf.keras.layers.Flatten()(
            MF_Embedding_User(user_input))
        mf_item_latent = tf.keras.layers.Flatten()(
            MF_Embedding_Item(item_input))
        #   mf_vector = merge([mf_user_latent, mf_item_latent], mode='mul')  # element-wise multiply
        mf_vector = Multiply()([mf_user_latent, mf_item_latent])

        # MLP part
        mlp_user_latent = tf.keras.layers.Flatten()(
            MLP_Embedding_User(user_input))
        mlp_item_latent = tf.keras.layers.Flatten()(
            MLP_Embedding_Item(item_input))
        #   mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode='concat')
        mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])

        for idx in range(1, num_layer):
            layer = Dense(self.args.mf_fc_unit_nums[idx],
                          activation='relu',
                          name="layer%d" %
                          idx)  # kernel_regularizer=l2(reg_layers[idx]),
            mlp_vector = layer(mlp_vector)

        # Concatenate MF and MLP parts
        # mf_vector = Lambda(lambda x: x * alpha)(mf_vector)
        # mlp_vector = Lambda(lambda x : x * (1-alpha))(mlp_vector)
        #   predict_vector = merge([mf_vector, mlp_vector], mode='concat')
        predict_vector = Concatenate()([mf_vector, mlp_vector])

        # Final prediction layer
        prediction = Dense(1,
                           activation='sigmoid',
                           kernel_initializer='lecun_uniform',
                           name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input], outputs=prediction)
        return model
Example #30
def create_convNN(*, input_size, output_size, kwargs, verbose=False):
    '''Build a CNN with two convolutions, one optional hidden layer and one output layer.
       Note: kwargs is intentionally kept as a packed dictionary so the
       parameter source is easy to read in the code.
    '''
    run = True
    K.clear_session()

    if run == True:
        # Convolutional Network, ........................
        model = keras.Sequential()

        #.. 1st cnn, layer
        model.add(
            keras.layers.Conv2D(filters=kwargs['Conv2D_1__filters'],
                                kernel_size=kwargs['Conv2D_1__kernel_size'],
                                strides=kwargs['Conv2D_1__stride'],
                                activation=kwargs['Conv2D_1__activation'],
                                input_shape=input_size))

        #.. maxpool 1.
        model.add(
            keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_1__pool_size']))

        #.. 2nd cnn layer,
        model.add(
            keras.layers.Conv2D(
                filters=kwargs['Conv2D_2__filters'],
                kernel_size=kwargs['Conv2D_2__kernel_size'],
                strides=kwargs['Conv2D_2__stride'],
                activation=kwargs['Conv2D_2__activation'],
            ))

        #.. maxpool 2,
        model.add(
            keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_2__pool_size']))

        # flatten the results, .........................
        model.add(keras.layers.Flatten())

        # dense nn, ....................................

        if kwargs["model"] == "two_dense_layers":

            #.. First hidden layer
            model.add(
                Dense(units=kwargs['h1_unit_size'],
                      activation=kwargs["h1_activation"],
                      kernel_regularizer=tf.keras.regularizers.l2(
                          kwargs['h1_l2']),
                      kernel_initializer=initializers.VarianceScaling(
                          scale=2.0, seed=0)))
            model.add(tf.keras.layers.Dropout(kwargs["h1_Dropout"]))

        else:
            pass

        #.. Output layer
        model.add(
            Dense(units=output_size,
                  activation=kwargs["out_activation"],
                  kernel_regularizer=tf.keras.regularizers.l2(
                      kwargs['out_l2']),
                  kernel_initializer=initializers.VarianceScaling(scale=1.0,
                                                                  seed=0)))

        # Define Loss Function and Training Operation
        model.compile(
            optimizer=kwargs["optimizer"],
            loss=losses.categorical_crossentropy,
            metrics=kwargs["metrics"]  # even one arg must be in the list
        )

        if verbose:
            model.summary()

        return model
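A hypothetical call with the kwargs keys this builder reads; all values below are made up (e.g. 28x28 grayscale images and 10 classes):

cnn = create_convNN(
    input_size=(28, 28, 1),
    output_size=10,
    kwargs=dict(
        Conv2D_1__filters=32, Conv2D_1__kernel_size=3, Conv2D_1__stride=1,
        Conv2D_1__activation='relu', MaxPool2D_1__pool_size=2,
        Conv2D_2__filters=64, Conv2D_2__kernel_size=3, Conv2D_2__stride=1,
        Conv2D_2__activation='relu', MaxPool2D_2__pool_size=2,
        model='two_dense_layers', h1_unit_size=128, h1_activation='relu',
        h1_l2=0.001, h1_Dropout=0.5, out_activation='softmax', out_l2=0.001,
        optimizer='adam', metrics=['acc'],
    ),
    verbose=True,
)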