Esempio n. 1
0
def get_encoder(shape, C=1, name="encoder"):
    """Build and compile a one-hidden-layer encoder network.

    Args:
        shape: Length of the flat input vector.
        C: Upper bound for the MinMaxNorm weight/bias constraints.
        name: Unused here; kept for interface compatibility with callers.

    Returns:
        A compiled Keras Model mapping the input to a 100-unit ReLU encoding.
    """
    x = Input(shape=(shape, ))
    encoded = Dense(100,
                    activation='relu',
                    kernel_constraint=MinMaxNorm(0, C),
                    bias_constraint=MinMaxNorm(0, C))(x)
    encoder = Model(x, encoded)
    encoder.compile(optimizer="adam", loss='mean_squared_error')
    return encoder
Esempio n. 2
0
def get_task(shape, C=1, activation=None, name="task"):
    """Build and compile a single-output task head.

    Args:
        shape: Length of the flat input vector.
        C: Upper bound for the MinMaxNorm weight/bias constraints.
        activation: Activation for the single output unit (None = linear).
        name: Unused here; kept for interface compatibility with callers.

    Returns:
        A compiled Keras Model with one output unit.
    """
    x = Input(shape=(shape, ))
    head = Dense(1,
                 activation=activation,
                 kernel_constraint=MinMaxNorm(0, C),
                 bias_constraint=MinMaxNorm(0, C))(x)
    task = Model(x, head)
    task.compile(optimizer="adam", loss='mean_squared_error')
    return task
Esempio n. 3
0
def get_base_model(shape, activation=None, C=1, name="BaseModel"):
    """Build and compile a two-layer regressor (100 ReLU units -> 1 output).

    Args:
        shape: Length of the flat input vector.
        activation: Activation for the output unit (None = linear).
        C: Upper bound for the MinMaxNorm weight/bias constraints.
        name: Name given to the resulting Keras Model.

    Returns:
        A compiled Keras Model trained with mean squared error.
    """
    x = Input(shape=(shape, ))
    hidden = Dense(100,
                   activation='relu',
                   kernel_constraint=MinMaxNorm(0, C),
                   bias_constraint=MinMaxNorm(0, C))(x)
    output = Dense(1,
                   activation=activation,
                   kernel_constraint=MinMaxNorm(0, C),
                   bias_constraint=MinMaxNorm(0, C))(hidden)
    base = Model(x, output, name=name)
    base.compile(optimizer='adam', loss='mean_squared_error')
    return base
    def build(self, input_shape):
        """Create the coded-aperture weight ``H`` for this layer.

        The initializer (and constraint/regularizer) is chosen by
        ``self.type_code``; for gray-scale codes the weight is only created
        when ``self.type_reg == 'Physical'``.

        NOTE(review): if no branch matches, ``self.H`` is never created —
        confirm callers always pass a supported ``type_code``/``type_reg``.
        """

        # --------Binary 0,1 -------
        if self.type_code == 'Binary_0':
            # NOTE(review): the init array is 4-D (1, kern, kern, shots) while
            # the weight is declared 5-D (1, kern, kern, 1, shots); this relies
            # on tf.constant_initializer accepting a value whose element count
            # matches the target shape — confirm.
            H_init = np.random.rand(1, self.kern, self.kern, self.shots)
            #H_init = np.random.normal(0, 1, (1, self.kern, self.kern, 1, self.shots)) / np.sqrt(
                #self.kern * self.kern)+0.5
            H_init = tf.constant_initializer(H_init)
            self.H = self.add_weight(name='H', shape=(1,self.kern, self.kern, 1,self.shots), initializer=H_init, trainable=True,
                                        regularizer=self.my_regularizer)

        # --------Binary -1 1 -------
        if self.type_code == 'Binary_1':
            # Gaussian init scaled by 1/kern, matching the kernel area.
            H_init = np.random.normal(0, 1, (1, self.kern, self.kern,1,self.shots)) / np.sqrt(
                self.kern * self.kern)
            H_init = tf.constant_initializer(H_init)

            self.H = self.add_weight(name='H', shape=(1, self.kern, self.kern,1,self.shots),
                                     initializer=H_init, trainable=True,
                                     regularizer=self.my_regularizer)

            # --------Gray scale -------
        if self.type_code == 'Gray_scale':

            # NOTE(review): 4-D init vs 5-D declared weight shape again —
            # see the Binary_0 note above.
            H_init = np.random.normal(0, 1, (1, self.kern, self.kern, self.shots)) / np.sqrt(
                self.kern * self.kern)
            H_init = tf.constant_initializer(H_init)

            if self.type_reg == 'Physical':
                # Physically realizable gray-scale code: clip entries to [0, 1].
                self.H = self.add_weight(name='H', shape=(1, self.kern, self.kern,1,self.shots),
                                        initializer=H_init, trainable=True,
                                        constraint=MinMaxNorm(min_value=0.0, max_value=1.0, rate=1.0))
Esempio n. 5
0
def create_model(input_shape, num_classes):
    """Create a small convolutional classifier for MNIST-shaped data.

    Every kernel and bias is clipped to [-0.5, 0.5] along axis 0 via a
    shared MinMaxNorm constraint.

    Args:
        input_shape: Shape of the (flat) input tensor; reshaped to 28x28x1.
        num_classes: Number of softmax output units.

    Returns:
        An uncompiled Keras Model.
    """
    # Shared clip applied to all kernels and biases below.
    clip = MinMaxNorm(min_value=-0.5, max_value=0.5, axis=0)

    inputs = Input(shape=input_shape)
    net = Reshape((28, 28, 1))(inputs)

    # Two conv+pool stages with increasing filter counts.
    for filters in (32, 64):
        net = Conv2D(filters,
                     kernel_size=(3, 3),
                     activation="relu",
                     kernel_constraint=clip,
                     bias_constraint=clip)(net)
        net = MaxPooling2D(pool_size=(2, 2))(net)

    net = Flatten()(net)
    net = Dropout(0.4)(net)
    net = Dense(num_classes,
                activation='softmax',
                kernel_constraint=clip,
                bias_constraint=clip)(net)

    return Model(inputs=inputs, outputs=net)
Esempio n. 6
0
 def __init__(self, num_units, input_size, output_size):
     """Three-layer MLP: two ReLU hidden layers and a linear output layer.

     Hidden-layer kernels are L2-regularized and clipped to [-5, 5]; the
     output layer is L2-regularized only.
     """
     super(MLP, self).__init__()
     clip = MinMaxNorm(min_value=-5.0, max_value=5.0)
     self.layer1 = layers.Dense(num_units,
                                activation='relu',
                                input_shape=(input_size, ),
                                kernel_regularizer=L2(l2=0.01),
                                kernel_constraint=clip)
     self.layer2 = layers.Dense(num_units,
                                activation='relu',
                                input_shape=(num_units, ),
                                kernel_regularizer=L2(l2=0.01),
                                kernel_constraint=clip)
     self.layer3 = layers.Dense(output_size,
                                input_shape=(num_units, ),
                                kernel_regularizer=L2(l2=0.01))
Esempio n. 7
0
def PReLUlip(k_coef_lip=1.0):
    """
    PReLU activation whose learnable slope stays within a Lipschitz bound.

    Args:
        k_coef_lip: Lipschitz coefficient enforced on the alpha parameter.
    """
    bound = MinMaxNorm(min_value=-k_coef_lip, max_value=k_coef_lip)
    return PReLU(alpha_constraint=bound)
Esempio n. 8
0
    def build(self, input_shape, **kwargs):
        """Create four scalar threshold weights, each clipped to its own band.

        The bands intentionally overlap (e.g. 0.2-0.5 vs 0.4-0.8), so the
        thresholds can meet but stay loosely ordered.
        """
        bands = (
            ("threshold1", 0.0, 0.3),
            ("threshold2", 0.2, 0.5),
            ("threshold3", 0.4, 0.8),
            ("threshold4", 0.8, 2.0),
        )
        for attr, low, high in bands:
            weight = self.add_weight(attr,
                                     shape=[1],
                                     initializer=self.kernel_initializer,
                                     constraint=MinMaxNorm(min_value=low,
                                                           max_value=high,
                                                           rate=1.0),
                                     dtype=self.dtype,
                                     trainable=self.trainable,
                                     **kwargs)
            setattr(self, attr, weight)
        self.built = True
Esempio n. 9
0
 def build(self, shape):
     """Create a per-channel mixing weight ``w1`` in [0, 1].

     ``w2`` is derived as the complement rather than learned, so the two
     weights always sum to one.
     """
     clip01 = MinMaxNorm(min_value=0.0, max_value=1.0, rate=1.0, axis=0)
     self.w1 = self.add_weight(name='w1',
                               shape=(shape[3], ),
                               initializer="ones",
                               trainable=True,
                               constraint=clip01)
     self.w2 = 1 - self.w1
    def build(self, input_shape):
        """Create the single-pixel measurement code ``H`` for this layer.

        The number of measurements ``M`` is derived from the input volume and
        the compression ratio; the initializer and constraint/regularizer are
        selected by ``self.type_code`` (and ``self.type_reg`` for gray-scale).

        NOTE(review): for ``type_code == 'Gray_scale'`` with a non-'Physical'
        ``type_reg`` — or for an unrecognized ``type_code`` — ``self.H`` is
        never created; confirm callers only use the supported combinations.
        """

        # Number of measurements, proportional to the input volume.
        M = round((self.input_dim[0] * self.input_dim[1] * self.input_dim[2]) *
                  self.compression)
        # ---------------------------  Types of codes initial values ---------------------------------------

        # --------Binary -1 1 -------
        if self.type_code == 'Binary_1':
            # Gaussian init scaled by 1/kern (kernel-area normalization).
            H_init = np.random.normal(0, 1,
                                      (1, self.kern, self.kern, M)) / np.sqrt(
                                          self.kern * self.kern)
            H_init = tf.constant_initializer(H_init)

            self.H = self.add_weight(name='H',
                                     shape=(1, self.kern, self.kern, M),
                                     initializer=H_init,
                                     trainable=True,
                                     regularizer=self.my_regularizer)

        # --------Binary 0,1  -------
        if self.type_code == 'Binary_0':
            #H_init = np.random.rand(1, self.kern, self.kern, M)
            # Same Gaussian init as Binary_1; the binarization is presumably
            # enforced by self.my_regularizer — TODO confirm.
            H_init = np.random.normal(0, 1,
                                      (1, self.kern, self.kern, M)) / np.sqrt(
                                          self.kern * self.kern)
            H_init = tf.constant_initializer(H_init)

            self.H = self.add_weight(name='H',
                                     shape=(1, self.kern, self.kern, M),
                                     initializer=H_init,
                                     trainable=True,
                                     regularizer=self.my_regularizer)

        # --------Gray scale -------
        if self.type_code == 'Gray_scale':

            H_init = np.random.normal(0, 1,
                                      (1, self.kern, self.kern, M)) / np.sqrt(
                                          self.kern * self.kern)
            H_init = tf.constant_initializer(H_init)

            if self.type_reg == 'Physical':
                # Physically realizable code: entries clipped to [0, 1].
                self.H = self.add_weight(name='H',
                                         shape=(1, self.kern, self.kern, M),
                                         initializer=H_init,
                                         trainable=True,
                                         constraint=MinMaxNorm(min_value=0.0,
                                                               max_value=1.0,
                                                               rate=1.0))

        super(Single_Pixel_Layer_trunc, self).build(input_shape)
Esempio n. 11
0
def createBaseline(learning_rate, l2_factor, dropout_factor):
    """Create and compile the baseline CNN from the paper.

    Args:
        learning_rate: Adam learning rate.
        l2_factor: L2 regularization factor for conv kernels and biases.
        dropout_factor: Dropout rate between the fully-connected layers.

    Returns:
        A compiled Sequential binary classifier for 512x512x1 inputs,
        tracked with accuracy, MSE, precision and recall.
    """
    model = Sequential()

    # Input 2D convolutional layer with input shape matching the images and
    # l2 regularization enabled.
    model.add(Conv2D(96, (11, 11), input_shape=(512, 512, 1), kernel_regularizer=l2(l2_factor), bias_regularizer=l2(l2_factor)))

    # Use batch normalization for the first layer, then do activation and pooling.
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((3, 3)))

    # Add next 2D convolutional layer with smaller filter size and the same
    # regularization as in the first layer.
    model.add(Conv2D(384, (5, 5), kernel_regularizer=l2(l2_factor), bias_regularizer=l2(l2_factor)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D((3, 3)))

    # The next three layers take a VGG-like approach: multiple 3x3
    # convolutional layers in sequence before pooling, with the filter count
    # decreasing to account for compression of information deeper in the
    # network. All layers use l2 regularization.
    model.add(Conv2D(384, (3, 3), activation='relu', kernel_regularizer=l2(l2_factor), bias_regularizer=l2(l2_factor)))
    model.add(Conv2D(256, (3, 3), activation='relu', kernel_regularizer=l2(l2_factor), bias_regularizer=l2(l2_factor)))
    model.add(Conv2D(256, (3, 3), kernel_regularizer=l2(l2_factor), bias_regularizer=l2(l2_factor)))

    # Apply activation and pooling.
    model.add(Activation('relu'))
    model.add(MaxPooling2D((3, 3)))

    # Flatten the outputs from 2-dimensional layers for the dense layers.
    model.add(Flatten())

    # Fully connected layers with dropout in-between; the final unit outputs
    # a sigmoid probability.
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(dropout_factor))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(dropout_factor))
    model.add(Dense(1, activation='sigmoid', bias_constraint=MinMaxNorm(min_value=0.1, max_value=0.9)))

    # BUG FIX: the output layer already applies a sigmoid, so the loss must
    # treat predictions as probabilities. from_logits=True would wrongly
    # re-interpret those probabilities as raw logits, skewing the gradient.
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=BinaryCrossentropy(from_logits=False),
                  metrics=['accuracy', 'mse', Precision(), Recall()])

    return model
Esempio n. 12
0
 def __init__(self, output_dim, dicparams, **kwargs):
     """Configure a GenDense layer from a parameter dictionary.

     Args:
         output_dim: Number of output units.
         dicparams: Dict of optional settings (MinNorm, MaxNorm, NormRate,
             bias_initializer, use_bias, activation).
     """
     # NOTE(review): the defaults give min_value=-1 and max_value=1e-5,
     # which is an unusually narrow/asymmetric band — confirm intended.
     norm = MinMaxNorm(min_value=-dicparams.get("MinNorm", 1),
                       max_value=dicparams.get("MaxNorm", 1e-5),
                       rate=dicparams.get("NormRate", 1.0),
                       axis=0)
     self.units = output_dim
     self.kconstraint = constraints.get(norm)
     self.kinitializer1 = Identity()
     self.kinitializer2 = initializers.get(GenKinit())
     self.binitializer = get_init(dicparams.get("bias_initializer", "zeros"))
     self.use_bias = dicparams.get("use_bias", False)
     self.activation = dicparams.get("activation")
     super(GenDense, self).__init__(**kwargs)
Esempio n. 13
0
 def get_base_model(input_shape=(166, ), output_shape=(1, ), C=1):
     """Build and compile a two-hidden-layer regressor.

     Args:
         input_shape: Shape of the raw input; flattened before the dense stack.
         output_shape: Shape of the target; the output layer has prod(output_shape) units.
         C: Upper bound for the MinMaxNorm kernel/bias constraints.

     Returns:
         A compiled Keras Model trained with mean squared error.
     """
     inputs = Input(shape=input_shape)
     net = Flatten()(inputs)
     # Two identical 100-unit ReLU hidden layers.
     for _ in range(2):
         net = Dense(100,
                     activation='relu',
                     kernel_constraint=MinMaxNorm(0, C),
                     bias_constraint=MinMaxNorm(0, C))(net)
     net = Dense(np.prod(output_shape),
                 activation=None,
                 kernel_constraint=MinMaxNorm(0, C),
                 bias_constraint=MinMaxNorm(0, C))(net)
     model = Model(inputs, net)
     model.compile(optimizer=Adam(0.001), loss='mean_squared_error')
     return model
Esempio n. 14
0
 def get_base_model(
         input_shape=(768, ), output_shape=10, activation="softmax", C=1):
     """Build and compile a two-hidden-layer classifier.

     Args:
         input_shape: Shape of the flat input features.
             NOTE(review): the default (768,) looks like a typo for 784
             (28*28) — confirm against the data pipeline before changing.
         output_shape: Number of output classes. Previously ignored (the
             output layer hard-coded 10 units); now honored, with the
             default preserving the old behavior.
         activation: Activation of the output layer.
         C: Upper bound for the MinMaxNorm kernel/bias constraints.

     Returns:
         A compiled Keras Model using categorical cross-entropy.
     """
     inputs = Input(input_shape)
     modeled = Dense(100,
                     activation='relu',
                     kernel_constraint=MinMaxNorm(0, C),
                     bias_constraint=MinMaxNorm(0, C))(inputs)
     modeled = Dense(100,
                     activation='relu',
                     kernel_constraint=MinMaxNorm(0, C),
                     bias_constraint=MinMaxNorm(0, C))(modeled)
     # BUG FIX: use the output_shape parameter instead of a hard-coded 10.
     modeled = Dense(output_shape,
                     activation=activation,
                     kernel_constraint=MinMaxNorm(0, C),
                     bias_constraint=MinMaxNorm(0, C))(modeled)
     model = Model(inputs, modeled)
     model.compile(optimizer=Adam(0.001),
                   loss='categorical_crossentropy',
                   metrics=["accuracy"])
     return model
Esempio n. 15
0
    def compile_elmo(self, print_summary=False):
        """Build and compile the bidirectional ELMo language model.

        Constructs token inputs (word- or character-level, selected by
        ``self.parameters['token_encoding']``), stacks of forward and backward
        LSTMs with residual projection blocks, and a shared sampled-softmax
        output layer, then compiles the model with Adagrad.

        Args:
            print_summary: If True, print the Keras model summary.

        NOTE(review): if ``token_encoding`` is neither 'word' nor 'char',
        ``word_inputs``/``lstm_inputs``/... are never defined and the code
        below raises NameError — confirm callers validate this setting.
        """

        if self.parameters['token_encoding'] == 'word':
            # Train word embeddings from scratch
            word_inputs = Input(shape=(None, ),
                                name='word_indices',
                                dtype='int32')
            embeddings = Embedding(self.parameters['vocab_size'],
                                   self.parameters['hidden_units_size'],
                                   trainable=True,
                                   name='token_encoding')
            inputs = embeddings(word_inputs)

            # Token embeddings for Input
            drop_inputs = SpatialDropout1D(
                self.parameters['dropout_rate'])(inputs)
            lstm_inputs = TimestepDropout(
                self.parameters['word_dropout_rate'])(drop_inputs)

            # Pass outputs as inputs to apply sampled softmax
            next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')
            previous_ids = Input(shape=(None, 1),
                                 name='previous_ids',
                                 dtype='float32')
        elif self.parameters['token_encoding'] == 'char':
            # Train character-level representation
            word_inputs = Input(shape=(
                None,
                self.parameters['token_maxlen'],
            ),
                                dtype='int32',
                                name='char_indices')
            inputs = self.char_level_token_encoder()(word_inputs)

            # Token embeddings for Input
            drop_inputs = SpatialDropout1D(
                self.parameters['dropout_rate'])(inputs)
            lstm_inputs = TimestepDropout(
                self.parameters['word_dropout_rate'])(drop_inputs)

            # Pass outputs as inputs to apply sampled softmax
            next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')
            previous_ids = Input(shape=(None, 1),
                                 name='previous_ids',
                                 dtype='float32')

        # Reversed input for backward LSTMs
        re_lstm_inputs = Lambda(function=ELMo.reverse)(lstm_inputs)
        mask = Lambda(function=ELMo.reverse)(drop_inputs)

        # Forward LSTMs
        for i in range(self.parameters['n_lstm_layers']):
            if self.parameters['cuDNN']:
                lstm = LSTM(units=self.parameters['lstm_units_size'],
                            return_sequences=True,
                            kernel_constraint=MinMaxNorm(
                                -1 * self.parameters['cell_clip'],
                                self.parameters['cell_clip']),
                            recurrent_constraint=MinMaxNorm(
                                -1 * self.parameters['cell_clip'],
                                self.parameters['cell_clip']))(lstm_inputs)
            else:
                lstm = LSTM(units=self.parameters['lstm_units_size'],
                            return_sequences=True,
                            activation="tanh",
                            recurrent_activation='sigmoid',
                            kernel_constraint=MinMaxNorm(
                                -1 * self.parameters['cell_clip'],
                                self.parameters['cell_clip']),
                            recurrent_constraint=MinMaxNorm(
                                -1 * self.parameters['cell_clip'],
                                self.parameters['cell_clip']))(lstm_inputs)
            # Re-apply the padding mask that the LSTM may have washed out.
            lstm = Camouflage(mask_value=0)(inputs=[lstm, drop_inputs])
            # Projection to hidden_units_size
            proj = TimeDistributed(
                Dense(self.parameters['hidden_units_size'],
                      activation='linear',
                      kernel_constraint=MinMaxNorm(
                          -1 * self.parameters['proj_clip'],
                          self.parameters['proj_clip'])))(lstm)
            # Merge Bi-LSTMs feature vectors with the previous ones
            lstm_inputs = add([proj, lstm_inputs],
                              name='f_block_{}'.format(i + 1))
            # Apply variational drop-out between BI-LSTM layers
            lstm_inputs = SpatialDropout1D(
                self.parameters['dropout_rate'])(lstm_inputs)

        # Backward LSTMs
        for i in range(self.parameters['n_lstm_layers']):
            if self.parameters['cuDNN']:
                re_lstm = LSTM(
                    units=self.parameters['lstm_units_size'],
                    return_sequences=True,
                    kernel_constraint=MinMaxNorm(
                        -1 * self.parameters['cell_clip'],
                        self.parameters['cell_clip']),
                    recurrent_constraint=MinMaxNorm(
                        -1 * self.parameters['cell_clip'],
                        self.parameters['cell_clip']))(re_lstm_inputs)
            else:
                re_lstm = LSTM(
                    units=self.parameters['lstm_units_size'],
                    return_sequences=True,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    kernel_constraint=MinMaxNorm(
                        -1 * self.parameters['cell_clip'],
                        self.parameters['cell_clip']),
                    recurrent_constraint=MinMaxNorm(
                        -1 * self.parameters['cell_clip'],
                        self.parameters['cell_clip']))(re_lstm_inputs)
            # Re-apply the (reversed) padding mask after the LSTM.
            re_lstm = Camouflage(mask_value=0)(inputs=[re_lstm, mask])
            # Projection to hidden_units_size
            re_proj = TimeDistributed(
                Dense(self.parameters['hidden_units_size'],
                      activation='linear',
                      kernel_constraint=MinMaxNorm(
                          -1 * self.parameters['proj_clip'],
                          self.parameters['proj_clip'])))(re_lstm)
            # Merge Bi-LSTMs feature vectors with the previous ones
            re_lstm_inputs = add([re_proj, re_lstm_inputs],
                                 name='b_block_{}'.format(i + 1))
            # Apply variational drop-out between BI-LSTM layers
            re_lstm_inputs = SpatialDropout1D(
                self.parameters['dropout_rate'])(re_lstm_inputs)

        # Reverse backward LSTMs' outputs = Make it forward again
        re_lstm_inputs = Lambda(function=ELMo.reverse,
                                name="reverse")(re_lstm_inputs)

        # Project to Vocabulary with Sampled Softmax; optionally tie the
        # output weights to the input embeddings (word encoding only).
        sampled_softmax = SampledSoftmax(
            num_classes=self.parameters['vocab_size'],
            num_sampled=int(self.parameters['num_sampled']),
            tied_to=embeddings if self.parameters['weight_tying']
            and self.parameters['token_encoding'] == 'word' else None)
        outputs = sampled_softmax([lstm_inputs, next_ids])
        re_outputs = sampled_softmax([re_lstm_inputs, previous_ids])

        # The losses are produced inside SampledSoftmax, hence loss=None here.
        self._model = Model(inputs=[word_inputs, next_ids, previous_ids],
                            outputs=[outputs, re_outputs])
        self._model.compile(optimizer=tf.keras.optimizers.Adagrad(
            lr=self.parameters['lr'], clipvalue=self.parameters['clip_value']),
                            loss=None)
        if print_summary:
            self._model.summary()
def create_calibration_model(
        storage_coefs, loss_coefs, radius, length, thickness, poisson, inertia,
        storage_data, storage_bounds, storage_table_shape, loss_data,
        loss_bounds, loss_table_shape, delta_stiffness_mlp, delta_damping_mlp,
        stiffness_low, stiffness_up, damping_low, damping_up, input_min,
        input_range, select_freq, select_temp, batch_input_shape, myDtype):
    """Assemble and compile a physics-based FRF-amplitude calibration model.

    Interpolates storage/loss moduli from lookup tables, derives stiffness
    and damping from them, corrects both with learned MLP deltas, and feeds
    the result into an FRF-amplitude layer. Only the Stiffness layer (and
    the supplied MLPs) are trainable.

    NOTE(review): storage_coefs, loss_coefs, radius, length and thickness
    are not used in the visible body — confirm whether they are vestigial.
    """
    batch_adjusted_shape = (batch_input_shape[1], )
    inputLayer = Input(shape=(batch_input_shape[1], ))

    # Min-max normalize the raw inputs for the delta MLPs.
    normalizedInputLayer = Lambda(
        lambda x, input_min=input_min, input_range=input_range:
        (x - input_min) / input_range)(inputLayer)

    # Select the frequency and temperature columns from the input.
    freqLayer = inputsSelection(batch_adjusted_shape, select_freq)(inputLayer)
    tempLayer = inputsSelection(batch_adjusted_shape, select_temp)(inputLayer)

    # Angular frequency omega = 2*pi*f.
    omegaLayer = Lambda(lambda x: 2 * np.pi * x)(freqLayer)

    moduliInputLayer = Concatenate(axis=-1)([tempLayer, freqLayer])

    # Storage modulus from a fixed (non-trainable) interpolation table.
    storageModulusLayer = TableInterpolation(table_shape=storage_table_shape,
                                             dtype=myDtype,
                                             trainable=False)
    storageModulusLayer.build(input_shape=moduliInputLayer.shape)
    storageModulusLayer.set_weights([storage_data, storage_bounds])
    storageModulusLayer = storageModulusLayer(moduliInputLayer)

    # Loss modulus from a second fixed interpolation table.
    lossModulusLayer = TableInterpolation(table_shape=loss_table_shape,
                                          dtype=myDtype,
                                          trainable=False)
    lossModulusLayer.build(input_shape=moduliInputLayer.shape)
    lossModulusLayer.set_weights([loss_data, loss_bounds])
    lossModulusLayer = lossModulusLayer(moduliInputLayer)

    # Trainable stiffness layer seeded with the Poisson ratio.
    stiffnessLayer = Stiffness(input_shape=storageModulusLayer.shape,
                               dtype=myDtype,
                               trainable=True,
                               kernel_constraint=MinMaxNorm(min_value=-1.0,
                                                            max_value=0.5,
                                                            rate=1.0))
    stiffnessLayer.build(input_shape=storageModulusLayer.shape)
    stiffnessLayer.set_weights(
        [np.asarray([poisson], dtype=stiffnessLayer.dtype)])
    stiffnessLayer = stiffnessLayer(storageModulusLayer)

    # Learned stiffness correction, rescaled from [0,1]-ish MLP output to
    # the physical [stiffness_low, stiffness_up] band, then added on.
    deltaStiffnessLayer = delta_stiffness_mlp(normalizedInputLayer)
    scaledDeltaStiffnessLayer = Lambda(
        lambda x, stiffness_low=stiffness_low, stiffness_up=stiffness_up: x *
        (stiffness_up - stiffness_low) + stiffness_low)(deltaStiffnessLayer)
    correctedStiffnessLayer = Lambda(lambda x: x[0] + x[1])(
        [stiffnessLayer, scaledDeltaStiffnessLayer])

    dampingInputLayer = Concatenate(axis=-1)(
        [correctedStiffnessLayer, storageModulusLayer, lossModulusLayer])

    # Fixed damping layer seeded with the inertia constant.
    dampingLayer = Damping(input_shape=dampingInputLayer.shape,
                           dtype=myDtype,
                           trainable=False)
    dampingLayer.build(input_shape=dampingInputLayer.shape)
    dampingLayer.set_weights([np.asarray([inertia], dtype=dampingLayer.dtype)])
    dampingLayer = dampingLayer(dampingInputLayer)

    # Learned damping correction, rescaled and added like the stiffness one.
    deltaDampingLayer = delta_damping_mlp(normalizedInputLayer)
    scaledDeltaDampingLayer = Lambda(
        lambda x, damping_low=damping_low, damping_up=damping_up: x *
        (damping_up - damping_low) + damping_low)(deltaDampingLayer)
    correctedDampingLayer = Lambda(lambda x: x[0] + x[1])(
        [dampingLayer, scaledDeltaDampingLayer])

    FRFAmpInputLayer = Concatenate(axis=-1)(
        [omegaLayer, correctedStiffnessLayer, correctedDampingLayer])

    # Fixed FRF-amplitude layer, also seeded with the inertia constant.
    FRFAmpLayer = FRFAmplitude(input_shape=FRFAmpInputLayer.shape,
                               dtype=myDtype,
                               trainable=False)
    FRFAmpLayer.build(input_shape=FRFAmpInputLayer.shape)
    FRFAmpLayer.set_weights([np.asarray([inertia], dtype=FRFAmpLayer.dtype)])
    FRFAmpLayer = FRFAmpLayer(FRFAmpInputLayer)

    functionalModel = Model(inputs=[inputLayer], outputs=[FRFAmpLayer])

    functionalModel.compile(
        loss='mean_squared_error',
        optimizer=Adam(5e-2),
        metrics=['mean_absolute_error', 'mean_squared_error'])
    return functionalModel
Esempio n. 17
0
    def optimization_ideal(x, y, z, max_iter=3, d=1):
        # _without_intercept
        """RANSAC-style search for the best zero-intercept linear fit.

        Repeatedly fits a no-intercept line (the bias is constrained to 0)
        on a random subset, absorbs held-out points within distance ``d`` of
        the line, and keeps the model with the highest mean goodness ``z``.

        Args:
            x, y: Coordinate sequences of the candidate points.
            z: Per-point goodness values driving the custom loss.
            max_iter: Number of random restarts.
            d: Perpendicular-distance threshold for absorbing held-out points.

        Returns:
            [best_fit, best_goodness, improvement_iters, fit_cost] where
            best_fit is [slope, bias] of the best model found.
        """
        iterations = 0
        best_goodness = -np.Inf
        improvement_iters = []  # renamed from `iter` (shadowed the builtin)
        fit_cost = []
        model = None

        while iterations < max_iter:
            # Bias clipped to [0, 0] => the fit has no intercept.
            model1 = keras.Sequential([
                keras.layers.Dense(1,
                                   activation='linear',
                                   input_dim=1,
                                   bias_constraint=MinMaxNorm(min_value=0.0,
                                                              max_value=0.0,
                                                              rate=1.0,
                                                              axis=0))
            ])
            selected = random.sample(range(0, len(z)), int(len(z) - 3))

            x_train, y_train, z_train = [], [], []
            x_test, y_test, z_test = [], [], []
            for i in range(len(z)):
                if i in selected:
                    x_train.append(x[i])
                    y_train.append(y[i])
                    z_train.append(z[i])
                else:
                    x_test.append(x[i])
                    y_test.append(y[i])
                    z_test.append(z[i])

            x_train = np.array(x_train).reshape(-1, 1)
            y_train = np.array(y_train).reshape(-1, 1)
            z_train = np.array(z_train).reshape(-1, 1)
            x_test = tf.convert_to_tensor(np.array(x_test).reshape(1, -1), dtype=tf.float32)
            y_test = tf.convert_to_tensor(np.array(y_test).reshape(1, -1), dtype=tf.float32)
            z_test = tf.convert_to_tensor(np.array(z_test).reshape(1, -1), dtype=tf.float32)

            def custom_loss(z_test):
                # Trade squared error against the held-out goodness values.
                def loss(y_true, y_pred):
                    return 0.2 * (y_true - y_pred) ** 2 - 0.8 * z_test  #(for sigma-sigma mu_h=mu_x),(for sigma-sigma mu_h=mu_h_pred)
                return loss

            model1.compile(loss=custom_loss(z_test), optimizer='sgd')
            model1.fit(x_train, y_train, batch_size=len(x_train), epochs=1000)

            slope = model1.get_weights()[0][0][0]
            bias = model1.get_weights()[1][0]
            all_points = range(len(x))  # renamed from `set` (shadowed the builtin)
            for point1 in np.setdiff1d(all_points, selected):
                # Perpendicular distance from the point to the fitted line.
                dis = abs(slope * x[point1] - y[point1] + bias) / ((slope * slope + 1) ** 0.5)
                if dis <= d:
                    # BUG FIX: np.append returns a new array; the original code
                    # discarded the result, so nearby points were never added.
                    # axis=0 keeps the (n, 1) shape that the goodness
                    # computation below relies on.
                    x_train = np.append(x_train, [[x[point1]]], axis=0)
                    y_train = np.append(y_train, [[y[point1]]], axis=0)
                    z_train = np.append(z_train, [[z[point1]]], axis=0)

            thisgoodness = sum(z_train) / len(z_train)

            iterations += 1

            if thisgoodness[0] > best_goodness:
                best_goodness = thisgoodness[0]
                improvement_iters.append(iterations)
                fit_cost.append(best_goodness)
                model = model1

        bestFit = [model.get_weights()[0][0][0], model.get_weights()[1][0]]

        return [bestFit, np.round(best_goodness, decimals=3), improvement_iters, fit_cost]