Example #1
def build(self, input_shape):
    assert isinstance(input_shape, list)
    # two scalar weights, each with its magnitude clamped to [0.78, 4]
    self.W1 = self.add_weight(name='W1',
                              shape=(1,),
                              initializer='uniform',
                              trainable=True,
                              constraint=min_max_norm(min_value=0.78, max_value=4))
    self.W2 = self.add_weight(name='W2',
                              shape=(1,),
                              initializer='uniform',
                              trainable=True,
                              constraint=min_max_norm(min_value=0.78, max_value=4))
    super(RotationThetaWeightLayer, self).build(input_shape)
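For readers unfamiliar with min_max_norm: it rescales a weight tensor so the L2 norm along axis (default 0) lands inside [min_value, max_value]; for a shape-(1,) weight that simply clamps its magnitude. A minimal standalone sketch, assuming a stock Keras 2 install, of what the constraint above does to out-of-range values:

import numpy as np
from keras import backend as K
from keras.constraints import min_max_norm

# hypothetical 1x2 kernel; each column's L2 norm is just its magnitude
w = K.variable(np.array([[0.1, 6.0]]))
clamp = min_max_norm(min_value=0.78, max_value=4.0)
print(K.eval(clamp(w)))  # -> [[0.78, 4.0]]: 0.1 is pulled up, 6.0 clipped down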
Example #2
def Heap_Block(Samples):
    Model_input = Input((1500,))

    # Initial layer
    X = Dense(1500,
              kernel_constraint=min_max_norm(0, 0.6),
              bias_constraint=min_max_norm(0, 0.6))(Model_input)
    X = Activation('relu')(X)

    X = Id_Block(X)  # stack three identity blocks
    X = Id_Block(X)
    X = Id_Block(X)
    X = Dense(5, activation='softmax')(X)  # classification layer
    model = Model(inputs=Model_input, outputs=X)
    return model
Example #3
def Id_Block(X):
    X_copy = X  # keep the input for the skip connection

    X = Dense(1500,
              use_bias=True,
              kernel_constraint=min_max_norm(0, 0.6),
              bias_constraint=min_max_norm(0, 0.6))(X)
    X = Activation('relu')(X)

    X = Dense(1500,
              use_bias=True,
              kernel_constraint=min_max_norm(0, 0.6),
              bias_constraint=min_max_norm(0, 0.6))(X)
    X = Add()([X, X_copy])  # residual (skip) connection
    X = Activation('relu')(X)

    return X
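Examples #2 and #3 belong together: Heap_Block builds the model and Id_Block supplies the residual blocks. A hypothetical end-to-end call (the optimizer and loss are assumptions, not from the original snippets):

model = Heap_Block(None)  # the Samples argument is unused in Example #2
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()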
Example #4
def build(self, input_shape):
    initializer_uniform = RandomUniform(minval=0, maxval=1)
    constraint_min_max = min_max_norm(min_value=0.0, max_value=1.0)
    self.b = self.add_weight(name='b',
                             shape=(input_shape[-1], ),
                             initializer=initializer_uniform,
                             constraint=constraint_min_max,
                             trainable=True)
    super(ANDNoisy, self).build(input_shape)
Example #5
def build(self, input_shape):
    '''
    input_shape describes the number of junctions.
    '''
    assert isinstance(input_shape, list)
    self.W1 = self.add_weight(name='junction_weight_first_element',
                              shape=(1, ),
                              initializer='uniform',
                              trainable=True,
                              constraint=min_max_norm(min_value=0,
                                                      max_value=1))
    self.W2 = self.add_weight(name='junction_weight_second_element',
                              shape=(1, ),
                              initializer='uniform',
                              trainable=True,
                              constraint=min_max_norm(min_value=0,
                                                      max_value=1))
    super(JunctionWeightLayer, self).build(input_shape)
Example #6
def test_min_max_norm():
    array = get_example_array()
    for m in get_test_values():
        norm_instance = constraints.min_max_norm(min_value=m, max_value=m * 2)
        normed = norm_instance(K.variable(array))
        value = K.eval(normed)
        # every column's L2 norm must land in [m, 2m] (small tolerance above)
        l2 = np.sqrt(np.sum(np.square(value), axis=0))
        assert l2[l2 < m].size == 0
        assert l2[l2 > m * 2 + 1e-5].size == 0
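The test leans on two helpers from its suite. Minimal stand-ins (values and shapes are assumptions, not the originals) that make the snippet runnable on its own:

import numpy as np
from keras import backend as K
from keras import constraints

def get_test_values():
    # assumed: a few representative norm targets
    return [0.1, 0.5, 3.0, 8.0]

def get_example_array():
    # assumed: a fixed random matrix; its columns are what min_max_norm rescales
    np.random.seed(1337)
    return np.random.random((100, 100)) * 100.0 - 50.0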
Example #8
def create_dfn(input_shape):

    inputs = Input(shape=(input_shape, ))
    # first sub net

    hidden_1 = Dense(
        units=200,
        activation='relu',  # 100
        activity_regularizer=regularizers.l1(1e-10))(inputs)
    #drop1 = Dropout(.1)(hidden_1)
    hidden_2 = Dense(
        units=200,
        activation='relu',  # 100
        activity_regularizer=regularizers.l1(1e-10))(hidden_1)
    hidden_3 = Dense(
        units=100,
        activation='relu',  # 60
        activity_regularizer=regularizers.l1(1e-10))(hidden_2)

    theta_layer = Dense(1, activation='linear')(hidden_3)

    theta_outputs = Dense(1,
                          activation='linear',
                          kernel_constraint=min_max_norm(max_value=1,
                                                         min_value=1),
                          bias_constraint=min_max_norm(max_value=1,
                                                       min_value=-1),
                          kernel_initializer='ones',
                          bias_initializer='zeros')(theta_layer)
    #bias_initializer= initializers.Constant(value= -0.05 )

    response_outputs = Dense(
        input_shape,
        kernel_constraint=min_max_norm(max_value=3, min_value=1),
        bias_constraint=min_max_norm(max_value=3, min_value=-3),
        kernel_initializer=initializers.Constant(value=1.5),
        bias_initializer=initializers.Constant(value=0),
        activation='sigmoid')(theta_outputs)

    deepFowardFeedNets = Model(inputs,
                               outputs=[theta_outputs, response_outputs])
    projectTheta = Model(inputs, theta_layer)

    return deepFowardFeedNets, projectTheta
Example #9
def Decoder(X):
    X = Id_Block(X, 0.05)
    X = Id_Block(X, 0.05)
    X = Id_Block(X, 0.05)
    X = Conv1D(kernel_size=3,
               kernel_constraint=min_max_norm(0, 0.05),
               activation='relu',
               padding='same',
               filters=1)(X)
    return X
Example #10
def Id_Block(X, limit=0.09):
    X_copy = X  # keep the input for the ResNet-style skip connection
    X = Conv1D(kernel_size=3,
               kernel_constraint=min_max_norm(0, limit),
               activation='relu',
               padding='same',
               filters=32)(X)
    X = Conv1D(kernel_size=3,
               kernel_constraint=min_max_norm(0, limit),
               activation='relu',
               padding='same',
               filters=32)(X)
    X = Conv1D(kernel_size=3,
               kernel_constraint=min_max_norm(0, limit),
               padding='same',
               filters=32)(X)
    X = Add()([X, X_copy])
    X = Activation('relu')(X)
    return X
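Note the channel bookkeeping: inside Id_Block the Add() merge requires X_copy and the third Conv1D output to share a shape, so the tensor entering Id_Block must already carry 32 channels. A hypothetical wiring of Example #9's Decoder (the input length 900 is assumed from Example #13's Reshape):

from keras.layers import Input
from keras.models import Model

inp = Input((900, 32))  # 32 channels so the residual Add() shapes match
out = Decoder(inp)      # Decoder from Example #9
decoder_model = Model(inp, out)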
Example #11
    def build(self, input_shapes):
        X_shape = input_shapes[0]
        # number of atom props * output width
        kernel_shape = (X_shape[-1], self.width)

        # atom (self) weights
        self.kernel_dense = self.add_weight(
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            name="dense_kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.width, ),
                                        initializer=self.bias_initializer,
                                        name="bias",
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        constraint = min_max_norm(min_value=0.0,
                                  max_value=1.0,
                                  rate=1.0,
                                  axis=0)
        if self.conv_wts == "single":
            self.kernel_neigh = self.add_weight(
                shape=[1],
                initializer=self.kernel_initializer,
                name="kernel_neigh",
                regularizer=self.kernel_regularizer,
                constraint=constraint)
            self.kernel_self = self.add_weight(
                shape=[1],
                initializer=self.kernel_initializer,
                name="kernel_self",
                regularizer=self.kernel_regularizer,
                constraint=constraint)
        elif self.conv_wts == "all":
            self.kernel_neigh = self.add_weight(
                shape=(self.width, ),
                initializer=self.kernel_initializer,
                name="kernel_neigh",
                regularizer=self.kernel_regularizer,
                constraint=constraint)
            self.kernel_self = self.add_weight(
                shape=(self.width, ),
                initializer=self.kernel_initializer,
                name="kernel_self",
                regularizer=self.kernel_regularizer,
                constraint=constraint)

        self.built = True
Example #12
    def build(self, input_shape):
        self.f = self.add_weight(name='f', shape=(1,),
                                 initializer=initializers.Constant(value=0.5),
                                 constraint=constraints.min_max_norm(min_value=0.0, max_value=1.0, rate=0.9),
                                 trainable=True)
        self.smax = self.add_weight(name='smax', shape=(1,),
                                    initializer=initializers.Constant(value=0.5),
                                    constraint=constraints.min_max_norm(min_value=1 / 15, max_value=1.0, rate=0.9),
                                    trainable=True)
        self.qmax = self.add_weight(name='qmax', shape=(1,),
                                    initializer=initializers.Constant(value=0.5),
                                    constraint=constraints.min_max_norm(min_value=0.2, max_value=1.0, rate=0.9),
                                    trainable=True)
        self.ddf = self.add_weight(name='ddf', shape=(1,),
                                   initializer=initializers.Constant(value=0.5),
                                   constraint=constraints.min_max_norm(min_value=0.0, max_value=1.0, rate=0.9),
                                   trainable=True)
        self.tmin = self.add_weight(name='tmin', shape=(1,),
                                    initializer=initializers.Constant(value=0.5),
                                    constraint=constraints.min_max_norm(min_value=0.0, max_value=1.0, rate=0.9),
                                    trainable=True)
        self.tmax = self.add_weight(name='tmax', shape=(1,),
                                    initializer=initializers.Constant(value=0.5),
                                    constraint=constraints.min_max_norm(min_value=0.0, max_value=1.0, rate=0.9),
                                    trainable=True)

        super(PRNNLayer, self).build(input_shape)
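Unlike the earlier examples, these constraints pass rate=0.9, so each update only moves the weight 90% of the way toward the clipped norm instead of clamping it outright. A standalone numeric check (assumed, not from the original layer):

import numpy as np
from keras import backend as K
from keras.constraints import min_max_norm

w = K.variable(np.array([[2.0]]))
soft = min_max_norm(min_value=0.0, max_value=1.0, rate=0.9)
# desired norm = 0.9 * clip(2.0, 0, 1) + 0.1 * 2.0 = 1.1
print(K.eval(soft(w)))  # -> [[1.1]] (up to backend epsilon)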
Example #13
def Discriminator(X):
    X = Id_Block(X)
    X = Id_Block(X)
    X = Id_Block(X)
    X = Conv1D(kernel_size=3,
               kernel_constraint=min_max_norm(0, 0.09),
               activation='relu',
               padding='same',
               filters=1)(X)
    X = keras.layers.Reshape((900, ))(X)
    X = Dense(80, activation='relu')(X)
    X = Dense(30, activation='relu')(X)
    X = Dense(1, activation='sigmoid')(X)
    return X
Example #14
def get_decoder(decoder, N, K):
    if decoder == 'SC':
        return SCDecoder(N, K), True
    else:
        try:
            data_path = os.environ['DEEPRAN_DATA_PATH']
        except KeyError:
            data_path = '/home/iwodiany/Projects/DeepRAN/data/'

        model_name = '%s-%s-%s.h5' % (decoder, N, K)
        model_path = data_path + 'models/' + model_name

        nn_decoder = NNDecoder(N, K)

        if os.path.isfile(model_path):
            nn_decoder.decoder = load_model(model_path,
                                            custom_objects={'errors': errors})

        if decoder == 'NN':
            if nn_decoder.decoder is None:
                nn_decoder.compose([512, 256, 128])
                nn_decoder.train(train_llrs=True)
                nn_decoder.decoder.save(model_path)
            return nn_decoder, True
        elif decoder == 'NN_LLR':
            if nn_decoder.decoder is None:
                nn_decoder.compose([512, 256, 128],
                                   final_activation='hard_sigmoid',
                                   constraint=min_max_norm(-1.0, 1.0),
                                   use_bias=False)
                nn_decoder.train(train_llrs=True)
                nn_decoder.decoder.save(model_path)
            return nn_decoder, True
        else:
            raise ValueError(
                'Unsupported decoder type! Supported decoders: SC, NN, NN_LLR!'
            )
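A hypothetical call, assuming the SCDecoder/NNDecoder classes and training pipeline from the surrounding project (N=128, K=64 are made-up values; training kicks in only when no saved model is found):

nn_decoder, ok = get_decoder('NN_LLR', 128, 64)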
Example #15
y_train = keras.utils.to_categorical(y_train, num_classes)
Y_test = keras.utils.to_categorical(Y_test, num_classes)

model = Sequential()
model.add(
    Dense(512,
          activation='relu',
          input_shape=(784, ),
          kernel_constraint=max_norm(2.)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu', bias_constraint=non_neg()))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu', kernel_constraint=unit_norm()))
model.add(Dropout(0.2))
model.add(
    Dense(10, activation='softmax', bias_constraint=min_max_norm(0.0, 1.0)))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
Example #16
import glob
import os
import sys

import rfutils
from keras import backend as K
from keras.constraints import min_max_norm
from keras.layers import (BatchNormalization, Conv1D, Dense, Dropout,
                          Embedding, Flatten, Input, MaxPooling1D,
                          concatenate)
from keras.losses import binary_crossentropy
from keras.models import Model
from keras.optimizers import Adam

gl = glob.glob(os.path.join('train', '*.[sf]'))
poscount, locidx = rfutils.count_locpos(gl)

embedding_dim = 4
filter_sizes = (3, 8)
num_filters = 1
dropout_prob = (0.01, 0.01)
hidden_dims = 10

K.set_floatx('float64')

in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(axis=1,
                           gamma_constraint=min_max_norm(),
                           beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(locidx.watermark, embedding_dim,
                       input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
drop = Dropout(dropout_prob[0])(merged)
conv_list = []
for filtsz in filter_sizes:
    tmp = Conv1D(num_filters, filtsz, activation='relu')(drop)
    tmp = Flatten()(MaxPooling1D()(tmp))
    conv_list.append(tmp)
out = Dense(1, activation='sigmoid')(Dense(hidden_dims, activation='relu')(
    Dropout(dropout_prob[1])(concatenate(conv_list))))
ml = Model(inputs=[in_locs, in_vals], outputs=out)
ml.compile(Adam(lr=0.01), metrics=['acc'], loss=binary_crossentropy)
Example #17
import glob
import os
import sys

import rfutils
from keras import backend as K
from keras.constraints import min_max_norm
from keras.layers import (BatchNormalization, Dense, Dropout, Embedding,
                          Flatten, Input, concatenate, multiply)
from keras.losses import mse
from keras.models import Model
from keras.optimizers import Adam

gl = glob.glob(os.path.join('train', '*.[sf]'))
poscount, locidx = rfutils.count_locpos(gl)

embedding_dim = 4
dropout_prob = 0.4
dense_count = int(sys.argv[3]) if len(sys.argv) > 3 else 50
optr = Adam(lr=0.03)

K.set_floatx('float64')

in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(
    axis=1, gamma_constraint=min_max_norm(),
    beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(
    locidx.watermark, embedding_dim, input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
dense_list = []
for i in range(dense_count):
    dense_list.append(
        Dropout(dropout_prob)(Dense(1, activation='sigmoid')(Flatten()(
            merged))))
mult = multiply(dense_list)
ml = Model(inputs=[in_locs, in_vals], outputs=mult)
ml.compile(optr, metrics=['acc'], loss=mse)

locs, vals, labels = rfutils.read_data(gl, poscount, locidx)
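A plausible continuation (epochs, batch size, and split are assumptions): train on the freshly read data, feeding the two named inputs in the order the Model above declares them.

ml.fit([locs, vals], labels, epochs=10, batch_size=32, validation_split=0.1)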
Example #18
    def embedding_model_lstm(self,
                             words,
                             embedding_weights_a=None,
                             embedding_weights_b=None,
                             trainable=False,
                             skip_embed=False,
                             return_sequences_b=False):

        lstm_unit_a = units
        lstm_unit_b = units * 2
        embed_unit = int(hparams['embed_size'])

        x_shape = (tokens_per_sentence, )
        decoder_dim = units * 2  # (tokens_per_sentence, units *2)

        if hparams['dense_activation'] is None or hparams[
                'dense_activation'] == 'none':
            decoder_dim = embed_unit

        valid_word_a = Input(shape=x_shape)
        #valid_word_b = Input(shape=x_shape)

        embeddings_a = Embedding(words,
                                 embed_unit,
                                 weights=[embedding_weights_a],
                                 input_length=tokens_per_sentence,
                                 trainable=trainable)

        embed_a = embeddings_a(valid_word_a)

        ### encoder for training ###
        lstm_a = Bidirectional(
            LSTM(
                units=lstm_unit_a,
                return_sequences=True,
                dropout=0.5
                #return_state=True,
                #recurrent_dropout=0.2,
                #input_shape=(None,)
            ),
            merge_mode='concat',
            trainable=True)

        recurrent_a = lstm_a(embed_a)

        #############
        #conv1d_b = Conv1D(tokens_per_sentence,lstm_unit_b)(recurrent_a)

        lstm_b = AttentionDecoder(
            units=lstm_unit_b,
            output_dim=decoder_dim,
            kernel_constraint=min_max_norm(),
            dropout=0.5
            #return_sequences=return_sequences_b,
            #return_state=True
        )

        recurrent_b = lstm_b(recurrent_a)

        if hparams['dense_activation'] is not None and hparams[
                'dense_activation'] != 'none':
            dense_b = Dense(
                embed_unit,
                input_shape=(tokens_per_sentence, ),
                activation=hparams['dense_activation']  #softmax, tanh, or relu
                #name='dense_layer_b',
            )

            decoder_b = dense_b(recurrent_b)  # recurrent_b

            dropout_b = Dropout(0.5)(decoder_b)

            model = Model([valid_word_a], dropout_b)  # decoder_b

        else:
            model = Model([valid_word_a], recurrent_b)

        ### boilerplate ###

        adam = optimizers.Adam(lr=learning_rate)

        # loss try 'categorical_crossentropy', 'mse', 'binary_crossentropy'
        # optimizer try 'rmsprop'
        model.compile(optimizer=adam, loss='mse', metrics=['acc'])

        return model, None, None  #, None, model_inference
Example #19
input_bits = Input(dtype='float32', shape=(M, ), name='input')
inner_layer = Dense(M, activation='relu')(input_bits)
inner_layer = Dense(n_channel, activation='linear')(inner_layer)
to_channel = BatchNormalization(
    axis=-1,
    momentum=0.99,
    epsilon=0.001,
    center=True,
    scale=True,
    beta_initializer='zeros',
    gamma_initializer='ones',
    moving_mean_initializer='zeros',
    moving_variance_initializer='ones',
    beta_regularizer=None,
    gamma_regularizer=None,
    beta_constraint=min_max_norm(min_value=0, max_value=0),  # pins beta at zero (no learned shift)
    gamma_constraint=unit_norm(axis=-1))(inner_layer)

encoder = Model(input_bits, to_channel)
# Add a named identity op so the output tensor can be located by name in TensorFlow
#output = Lambda(tf.identity,output_shape=(n_channel,), arguments=None, name = 'lambda_output')(to_channel)
tf.identity(to_channel, name='output')

encoder.summary()

# Export directory
export_dir = os.path.dirname(os.path.realpath(__file__))
# Load weights from Keras model
encoder.load_weights(export_dir + '/export/keras_weights_encoder.h5')
# Now save as tensorflow model
sess = K.get_session()
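A plausible continuation (TF 1.x API; the target path is an assumption) that finishes the export the script is setting up:

import tensorflow as tf

# hypothetical: write a SavedModel so the 'output' identity above is
# reachable by name from plain TensorFlow
tf.saved_model.simple_save(sess,
                           export_dir + '/export/tf_encoder',
                           inputs={'input': encoder.input},
                           outputs={'output': encoder.output})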