Example #1
    def test_nested(self):
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal(
            (5, 1))).squeeze(axis=-1) > 0).astype('int32')

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=5,
                input_shape=(5, ),
                activation='tanh',
                bias_constraint=constraints.max_norm(100.0),
                name='Dense',
            ))
        model.add(layers.Dense(
            units=2,
            activation='softmax',
            name='Output',
        ))
        if TF_KERAS:
            import tensorflow as tf
            base_opt = tf.keras.optimizers.Adam(amsgrad=True, decay=1e-4)
        else:
            base_opt = keras.optimizers.Adam(amsgrad=True, decay=1e-4)
        model.compile(
            optimizer=LRMultiplier(LRMultiplier(base_opt, {'Dense': 1.2}),
                                   {'Output': 2.0}),
            loss='sparse_categorical_crossentropy',
        )
        model.fit(
            inputs,
            outputs,
            validation_split=0.1,
            epochs=1000,
            callbacks=[
                callbacks.ReduceLROnPlateau(patience=2, verbose=True),
                callbacks.EarlyStopping(patience=5),
            ],
        )

        model_path = os.path.join(
            tempfile.gettempdir(),
            'test_lr_multiplier_%f.h5' % np.random.random())
        model.save(model_path)
        model = models.load_model(model_path,
                                  custom_objects=self._get_custom_objects())

        predicted = model.predict(inputs).argmax(axis=-1)
        self.assertLess(np.sum(np.abs(outputs - predicted)), 20)
Example #2
    def test_lr_plateau(self):
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal(
            (5, 1))).squeeze(axis=-1) > 0).astype('int32')

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=2,
                input_shape=(5, ),
                use_bias=False,
                activation='softmax',
                name='Output',
            ))
        model.compile(
            optimizer=LRMultiplier(AdamV2(), {'Output': 100.0}),
            loss='sparse_categorical_crossentropy',
        )
        model.fit(
            inputs,
            outputs,
            validation_split=0.1,
            epochs=1000,
            callbacks=[
                callbacks.ReduceLROnPlateau(patience=2, verbose=True),
                callbacks.EarlyStopping(patience=5),
            ],
        )

        predicted = model.predict(inputs).argmax(axis=-1)
        self.assertLess(np.sum(np.abs(outputs - predicted)), 20)
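
The two tests above exercise the core keras-lr-multiplier API: LRMultiplier wraps a Keras optimizer (or an optimizer identifier such as 'adam') together with a dict mapping layer names, matched by prefix, to learning-rate multipliers. A minimal sketch, assuming standalone Keras with the library installed:

from keras import models, layers
from keras_lr_multiplier import LRMultiplier

model = models.Sequential()
model.add(layers.Dense(5, input_shape=(5,), activation='tanh', name='Dense'))
model.add(layers.Dense(2, activation='softmax', name='Output'))
model.compile(
    # Weights whose names start with 'Output' train at 2x the base rate;
    # everything else uses the wrapped optimizer's learning rate unchanged.
    optimizer=LRMultiplier('adam', {'Output': 2.0}),
    loss='sparse_categorical_crossentropy',
)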
Example #3
def slow_fast_nn(lr_s=1,
                 lr_f=18,
                 n_inp=30,
                 n_slow=30,
                 n_fast=30,
                 train_slow=True):
    inp = Input(shape=(n_inp, ), name='input')

    slow_layer = Dense(n_slow, activation="tanh", name='slow')
    slow = slow_layer(inp)
    fast = Dense(n_fast, activation="tanh", name='fast')(inp)

    # To be able to assign a different learning rate to the entire slow or fast path
    s2_layer = Dense(1, activation="linear", name='slow_2')
    s2 = s2_layer(slow)
    f2 = Dense(1, activation="linear", name='fast_2')(fast)
    added = keras.layers.Add()([s2, f2])
    out = Dense(1, activation="sigmoid", name="output")(added)
    model = Model(inputs=inp, outputs=out)
    if train_slow:
        model.compile(optimizer=LRMultiplier(
            'adam', {
                'slow': lr_s,
                'fast': lr_f,
                'slow_2': lr_s,
                'fast_2': lr_f
            }),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
    else:
        # Freeze the layer objects, not their output tensors; setting
        # `.trainable` on a tensor has no effect in Keras.
        slow_layer.trainable = False
        s2_layer.trainable = False
        model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.05),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
    return model
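
A minimal usage sketch for the builder above; the training arrays X_train and y_train are assumptions, not part of the original snippet:

model = slow_fast_nn(lr_s=1, lr_f=18, train_slow=True)
# 'slow'/'slow_2' weights update at 1x the base Adam rate and
# 'fast'/'fast_2' weights at 18x, since LRMultiplier matches layer names.
model.fit(X_train, y_train, epochs=10, batch_size=32)  # X_train, y_train assumed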
Example #4
def test_d2_reliance(old_model,
                     train_list,
                     test_l,
                     test_h,
                     figType,
                     batch_size=1500,
                     lr_s=1,
                     lr_f=10,
                     epoch=1):
    model = clone_model(old_model)
    model.compile(
        optimizer=LRMultiplier('adam', {
            'slow': lr_s,
            'fast': lr_f,
            'slow_2': lr_s,
            'fast_2': lr_f
        }),
        #SGD(lr=0.1,),
        loss='binary_crossentropy',
        metrics=['accuracy'])

    history = Test_NBatchLogger(test_l=test_l, test_h=test_h)
    fs_hist = model.fit(train_list[0],
                        train_list[1],
                        batch_size=batch_size,
                        epochs=epoch,
                        validation_data=(train_list[2], train_list[3]),
                        callbacks=[history])
    fig, (ax1, ax2) = plt.subplots(2)
    ax1.plot(history.acc)
    ax1.plot(history.losses)

    ax2.plot(history.pred_l)
    ax2.plot(history.pred_h)

    ax1.set_title('%s_Training_accuracy' % (figType))
    ax1.set(xlabel='Batch', ylabel='Accuracy')
    ax1.legend(['Accuracy', 'Loss'], loc='lower left')

    ax2.set_title('%s_Test_Probability' % (figType))
    ax2.set(xlabel='Batch', ylabel='Probability')
    ax2.set_ylim((0, 1))
    ax2.legend(['Low_F0', 'High_F0'], loc='lower left')

    plt.tight_layout()

    figname = train_fig_dir + '%s.png' % (figType)
    plt.savefig(figname)
    plt.close()
    test_l_pred = model.predict(test_l)
    test_h_pred = model.predict(test_h)

    return test_l_pred, test_h_pred
Example #5
    def test_restore_weights(self):
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal(
            (5, 1))).squeeze(axis=-1) > 0).astype('int32')
        weight = np.random.standard_normal((5, 2))

        if TF_KERAS:
            from tensorflow.python.ops import random_ops
            random_ops.random_seed.set_random_seed(0xcafe)

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=2,
                input_shape=(5, ),
                use_bias=False,
                activation='softmax',
                weights=[weight],
                name='Output',
            ))
        model.compile(optimizer=AdamV2(),
                      loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, shuffle=False, epochs=30)
        one_pass_loss = model.evaluate(inputs, outputs)

        if TF_KERAS:
            from tensorflow.python.ops import random_ops
            random_ops.random_seed.set_random_seed(0xcafe)

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=2,
                input_shape=(5, ),
                use_bias=False,
                activation='softmax',
                weights=[weight],
                name='Output',
            ))
        model.compile(optimizer=LRMultiplier(AdamV2(), {}),
                      loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, shuffle=False, epochs=15)
        model_path = os.path.join(
            tempfile.gettempdir(),
            'test_lr_multiplier_%f.h5' % np.random.random())
        model.save(model_path)
        model = models.load_model(model_path,
                                  custom_objects=self._get_custom_objects())
        model.fit(inputs, outputs, shuffle=False, epochs=15)
        two_pass_loss = model.evaluate(inputs, outputs)
        self.assertAlmostEqual(one_pass_loss, two_pass_loss, places=2)
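
The round trip above depends on custom_objects: a model compiled with LRMultiplier cannot be deserialized unless the class is supplied at load time. A minimal sketch (the file path is a placeholder):

from keras import models
from keras_lr_multiplier import LRMultiplier

# 'model.h5' is an assumed path; the dict key must match the class name.
model = models.load_model('model.h5', custom_objects={'LRMultiplier': LRMultiplier})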
Example #6
    def test_nested(self):
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal((5, 1))).squeeze(axis=-1) > 0).astype('int32')

        model = models.Sequential()
        model.add(layers.Dense(
            units=5,
            input_shape=(5,),
            activation='tanh',
            name='Dense',
        ))
        model.add(layers.Dense(
            units=2,
            activation='softmax',
            name='Output',
        ))
        model.compile(
            optimizer=LRMultiplier(LRMultiplier('adam', {'Dense': 1.2}), {'Output': 2.0}),
            loss='sparse_categorical_crossentropy',
        )
        model.fit(
            inputs,
            outputs,
            validation_split=0.1,
            epochs=1000,
            callbacks=[
                callbacks.ReduceLROnPlateau(patience=2, verbose=True),
                callbacks.EarlyStopping(patience=5),
            ],
        )

        if not EAGER_MODE:
            model_path = os.path.join(tempfile.gettempdir(), 'test_lr_multiplier_%f.h5' % np.random.random())
            model.save(model_path)
            model = models.load_model(model_path, custom_objects={'LRMultiplier': LRMultiplier})

        predicted = model.predict(inputs).argmax(axis=-1)
        self.assertLess(np.sum(np.abs(outputs - predicted)), 20)
Example #7
    def train(self, imgs, classes, batch_size, n_epochs, model_dir,
              tensorboard_dir):
        img_id = Input(shape=(1, ))
        class_id = Input(shape=(1, ))

        content_code = self.content_embedding(img_id)
        class_code = self.class_embedding(class_id)
        if self.config.adain_enabled:
            class_adain_params = self.class_modulation(class_code)
            generated_img = self.generator([content_code, class_adain_params])
        else:
            generated_img = self.generator([content_code, class_code])

        model = Model(inputs=[img_id, class_id], outputs=generated_img)

        model.compile(
            optimizer=LRMultiplier(
                optimizer=optimizers.Adam(beta_1=0.5, beta_2=0.999),
                multipliers={
                    'content-embedding': 10,
                    'class-embedding': 10,
                },
            ),
            loss=self.__perceptual_loss_multiscale)

        lr_scheduler = CosineLearningRateScheduler(max_lr=1e-4,
                                                   min_lr=1e-5,
                                                   total_epochs=n_epochs)
        early_stopping = EarlyStopping(monitor='loss',
                                       mode='min',
                                       min_delta=1,
                                       patience=100,
                                       verbose=1)

        tensorboard = EvaluationCallback(imgs, classes, self.content_embedding,
                                         self.class_embedding,
                                         self.class_modulation, self.generator,
                                         tensorboard_dir,
                                         self.config.adain_enabled)

        checkpoint = CustomModelCheckpoint(self, model_dir)

        model.fit(x=[np.arange(imgs.shape[0]), classes],
                  y=imgs,
                  batch_size=batch_size,
                  epochs=n_epochs,
                  callbacks=[
                      lr_scheduler, early_stopping, checkpoint, tensorboard,
                      WandbCallback()
                  ],
                  verbose=1)
Example #8
    def test_compare_rate(self):
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal(
            (5, 1))).squeeze(axis=-1) > 0).astype('int32')
        weight = np.random.standard_normal((5, 2))

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=2,
                input_shape=(5, ),
                use_bias=False,
                activation='softmax',
                weights=[weight],
                name='Output',
            ))
        model.compile(optimizer=AdamV2(),
                      loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, epochs=30)
        default_loss = model.evaluate(inputs, outputs)

        model = models.Sequential()
        model.add(
            layers.Dense(
                units=2,
                input_shape=(5, ),
                use_bias=False,
                activation='softmax',
                weights=[weight],
                name='Output',
            ))
        model.compile(optimizer=LRMultiplier(AdamV2(), {'Output': 2.0}),
                      loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, epochs=30)

        model_path = os.path.join(
            tempfile.gettempdir(),
            'test_lr_multiplier_%f.h5' % np.random.random())
        model.save(model_path)
        model = models.load_model(model_path,
                                  custom_objects=self._get_custom_objects())

        quick_loss = model.evaluate(inputs, outputs)
        self.assertLess(quick_loss, default_loss)

        predicted = model.predict(inputs).argmax(axis=-1)
        self.assertLess(np.sum(np.abs(outputs - predicted)), 300)
Example #9
def ff_nn_one(n_slow=40,
              n_fast=40,
              n_inp=30,
              lr_s=1,
              lr_f=10,
              penalty=0.001,
              activation='linear'):
    inp = Input(shape=(n_inp, ), name='input')
    slow = Dense(n_slow, activation=activation, name='slow')(inp)
    fast = Dense(n_fast,
                 kernel_regularizer=regularizers.l1(penalty),
                 activation=activation,
                 name='fast')(inp)
    s2 = Dense(n_slow, activation=activation, name='s2')(slow)
    f2 = Dense(n_fast,
               kernel_regularizer=regularizers.l1(penalty),
               activation=activation,
               name='f2')(fast)
    added = Concatenate()([s2, f2])
    # Use Concatenate rather than Add so that the slow and fast units are
    # stacked side by side instead of summed elementwise. With 30 slow and
    # 30 fast units, Concatenate yields width 60 while Add yields width 30;
    # with 30 slow and 10 fast units, Concatenate yields width 40, and Add
    # would fail outright because the shapes differ.

    out = Dense(1, activation="sigmoid", name="output")(added)
    model = Model(inputs=inp, outputs=out)

    # For now we use a single fixed learning rate for the slow-fast
    # pretraining, which is essentially a slow-slow model.
    model.compile(
        optimizer=LRMultiplier(Adam(lr=5e-5), {
            'slow': lr_s,
            'fast': lr_f,
            's2': lr_s,
            'f2': lr_f
        }),
        #SGD(lr=0.05),
        loss='binary_crossentropy',
        metrics=['accuracy'])
    return model
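
A short sketch of the Concatenate-versus-Add point made in the comment inside ff_nn_one, assuming standalone Keras:

from keras.layers import Input, Dense, Concatenate

inp = Input(shape=(30,))
slow = Dense(30)(inp)  # shape (None, 30)
fast = Dense(10)(inp)  # shape (None, 10)

merged = Concatenate()([slow, fast])  # shape (None, 40): the widths add up
# Add()([slow, fast]) would raise a ValueError here, because elementwise
# addition requires both inputs to have the same shape.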
Example #10
def slow_fast_nn_one(lr_s=1,
                     lr_f=10,
                     n_inp=30,
                     n_slow=40,
                     n_fast=40,
                     penalty=0.001,
                     activation='linear',
                     train_slow=False):
    inp = Input(shape=(n_inp, ), name='input')
    slow_layer = Dense(n_slow, activation=activation, name='slow')
    slow = slow_layer(inp)
    fast = Dense(n_fast,
                 kernel_regularizer=regularizers.l1(penalty),
                 activation=activation,
                 name='fast')(inp)
    # To be able to assign a different learning rate to the entire slow or fast path
    s2_layer = Dense(n_slow, activation=activation, name='s2')
    s2 = s2_layer(slow)
    f2 = Dense(n_fast,
               kernel_regularizer=regularizers.l1(penalty),
               activation=activation,
               name='f2')(fast)
    # added = Add()([slow, fast])
    added = Concatenate()([s2, f2])
    out = Dense(1, activation="sigmoid", name="output")(added)
    model = Model(inputs=inp, outputs=out)
    if train_slow:
        # haven't been able to use the LRMultiplier function yet
        model.compile(optimizer=LRMultiplier(Adam(lr=5e-5), {
            'slow': lr_s,
            'fast': lr_f,
            's2': lr_s,
            'f2': lr_f
        }),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
    else:
        # Freeze the layer object, not its output tensor; silence these
        # lines to unfreeze the slow pathway.
        slow_layer.trainable = False
        # s2_layer.trainable = False
        model.compile(optimizer=SGD(lr=0.1),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
    return model
Example #11
    def test_restore_weights(self):
        if EAGER_MODE:
            return
        inputs = np.random.standard_normal((1024, 5))
        outputs = (inputs.dot(np.random.standard_normal((5, 1))).squeeze(axis=-1) > 0).astype('int32')
        weight = np.random.standard_normal((5, 2))

        model = models.Sequential()
        model.add(layers.Dense(
            units=2,
            input_shape=(5,),
            use_bias=False,
            activation='softmax',
            weights=[weight],
            name='Output',
        ))
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, shuffle=False, epochs=30)
        one_pass_loss = model.evaluate(inputs, outputs)

        model = models.Sequential()
        model.add(layers.Dense(
            units=2,
            input_shape=(5,),
            use_bias=False,
            activation='softmax',
            weights=[weight],
            name='Output',
        ))
        model.compile(optimizer=LRMultiplier('adam', {}), loss='sparse_categorical_crossentropy')
        model.fit(inputs, outputs, shuffle=False, epochs=15)
        model_path = os.path.join(tempfile.gettempdir(), 'test_lr_multiplier_%f.h5' % np.random.random())
        model.save(model_path)
        model = models.load_model(model_path, custom_objects={'LRMultiplier': LRMultiplier})
        model.fit(inputs, outputs, shuffle=False, epochs=15)
        two_pass_loss = model.evaluate(inputs, outputs)
        self.assertAlmostEqual(one_pass_loss, two_pass_loss, places=2)
Example #12
    def set_variable_learning_rates(self, model, conv_layer_m, dense_layer_m):
        """The set_fixed_params function is used to freeze the weights of the convolution layer if the initial part of the network is to be used only as a feature extractor

			:param model: keras model with preset parameters
			:type model: keras.model (required)

			:param conv_layer_m: Learning rate multiplier for convolution layer
			:type conv_layer_m: float(required)

			:param dense_layer_m: Learning rate multiplier for dense layer
			:type dense_layer_m: float(required)

			:returns: Updated model with variable learning rates
			:rtype: keras.model
		"""
        lr_dict = {}

        for layer in model.layers:
            if 'conv' in layer.name:
                lr_dict[layer.name] = conv_layer_m
            if 'dense' in layer.name:
                lr_dict[layer.name] = dense_layer_m

        # The input layer's name ('conv3d_1_input') also matches 'conv',
        # so drop it from the multiplier map.
        lr_dict.pop('conv3d_1_input', None)

        variable_optimizer = LRMultiplier(self.optimizer, lr_dict)

        model.compile(loss=self.loss_function,
                      optimizer=variable_optimizer,
                      metrics=['mae'])
        return model
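
For illustration, with conv_layer_m=0.1 and dense_layer_m=1.0 and a model whose layers are named conv3d_1, conv3d_2 and dense_1 (hypothetical names), the loop above would build:

lr_dict = {
    'conv3d_1': 0.1,  # conv_layer_m
    'conv3d_2': 0.1,
    'dense_1': 1.0,   # dense_layer_m
}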
Example #13
    # The opening of this statement was truncated in the source. The
    # ModelCheckpoint call below is a hedged reconstruction; the checkpoint
    # file path is an assumption.
    checkpoint = ModelCheckpoint(os.path.join(save_weights_path, 'weights_best.h5'),
                                 monitor='val_cls_loss', mode='auto', save_weights_only=True, save_best_only=True, period=1)
    tensorboard = TensorBoard(log_dir=save_weights_path)
    logs = CSVLogger(filename=os.path.join(save_weights_path, 'training.log'))
    reduce_lr = ReduceLROnPlateau(monitor='val_cls_loss', mode='auto', factor=0.1, patience=10, verbose=1)
    # reduce_lr = LearningRateScheduler(schedule=step_decay, verbose=1)
    # early_stopping = EarlyStopping(monitor='val_cls_loss', min_delta=0, patience=5, verbose=1)

    if num_gpu > 1:
        training_model = multi_gpu_model(model, gpus=num_gpu)
        checkpoint = ParallelModelCheckpoint(checkpoint, model)
    else:
        training_model = model

    optimizer = optimizers.SGD(lr=initial_learning_rate, momentum=0.9)
    # optimizer = optimizers.RMSprop(lr=initial_learning_rate)
    # optimizer = optimizers.Adam(lr=initial_learning_rate)
    train_generator = data_generator_wrapper(train_data, batch_size, num_cls, is_train=True)
    val_generator = data_generator_wrapper(val_data, batch_size, num_cls, is_train=False)
    training_model.compile(optimizer=LRMultiplier(optimizer, multipliers={'mask':10., 'cls':10., 'adv':10.}),
                           loss={'cls': celoss,
                                 'adv': celoss,
                                 'loc': l1loss},
                           metrics={'cls': unswap_acc},
                           loss_weights={'cls': 1.,
                                         'adv': 1.,
                                         'loc': 1.})
    training_model.fit_generator(generator=train_generator, steps_per_epoch=len(train_data) // batch_size,
                                 initial_epoch=0, epochs=500, verbose=1, callbacks=[checkpoint, tensorboard, logs, reduce_lr],
                                 validation_data=val_generator, validation_steps=len(val_data) // batch_size,
                                 use_multiprocessing=True, workers=8, max_queue_size=16)
Example #14
    def build_model(self):

        # build net
        if self.config.model.type == "inceptionResnetV2":
            self.build_inceptionResNetV2()
        elif self.config.model.type == "vgg":
            self.build_vgg()
        elif self.config.model.type == "vgg_attention":
            self.build_vgg_attention()
        elif self.config.model.type == "efficientNet":
            self.build_efficientNet()
        elif self.config.model.type == "efficientNetSTN":
            self.build_efficientNetSTN()
        elif self.config.model.type == "dummy":
            self.build_dummy_model()
        else:
            raise ValueError('model type is not supported')

        # configure optimizer
        if self.config.trainer.learning_rate_schedule_type == 'LearningRateScheduler':
            lr_ = 0.0
            adam1 = optimizers.Adam(lr=0.0)
        elif self.config.trainer.learning_rate_schedule_type == 'ReduceLROnPlateau':
            print('decrease_plateau')
            lr_ = self.config.trainer.learning_rate
            adam1 = optimizers.Adam(lr=self.config.trainer.learning_rate)
        else:
            raise ValueError('invalid learning rate configuration')

        adamW_ = AdamW(
            lr=lr_,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            decay=self.config.trainer.learning_rate_decay,
            weight_decay=self.config.model.weight_decay,
            batch_size=self.config.data_loader.batch_size,
            samples_per_epoch=self.config.data_loader.sampels_per_epcoh,
            epochs=self.config.trainer.num_epochs)

        if self.config.model.type == "efficientNetSTN":
            lr_mult = self.config.model.LR_multiplier_stn
            adamW_ = LRMultiplier(
                adamW_, {
                    'model_3': lr_mult,
                    'loc2': lr_mult,
                    'loc3': lr_mult,
                    'loc4': lr_mult
                })

        # configure loss and metrics
        if self.config.model.loss == 'triplet':
            self.configure_triplet_loss()
        elif self.config.model.loss == 'cosface' or self.config.model.loss == 'softmax':
            self.configure_cosface_or_softmax_loss()
        else:
            raise ValueError('invalid loss type')

        self.model.compile(loss=self.loss_func,
                           loss_weights=self.loss_weights,
                           optimizer=adamW_,
                           metrics=self.metrics)

Example #15
prediction = Dense(3, activation='softmax', name='new_dene_2')(x)
model_1 = Model(inputs=inception_model_1.input,outputs=prediction)

layers = ['new_pool_1', 'new_batch_1', 'new_dense_1', 'new_batch_2', 'new_drop_2', 'new_dene_2']
lr_dict = {}  # added: the snippet uses lr_dict before any visible definition
for layer in layers:
    lr_dict[layer] = 0.0002 * 10  # 10x the 0.0002 base rate for the new layers

!pip install keras-lr-multiplier

from keras_lr_multiplier import LRMultiplier

model_1.summary()

model_1.compile(loss='categorical_crossentropy', optimizer=LRMultiplier('adam', lr_dict), metrics=['accuracy'])

train_set_1 = preprocess_input(preprocess_initial(train_files))

test_set_1 = preprocess_input(preprocess_initial(test_files))

valid_set_1 = preprocess_input(preprocess_initial(valid_files))

model_1.get_layer('new_dene_2').set_weights([np.array(weights[10],dtype=np.float32),np.zeros([3],dtype=np.float32)])
model_1.get_layer('new_dense_1').set_weights([np.array(weights[4],dtype=np.float32),np.zeros([512],dtype=np.float32)])

# Freeze the first 249 layers and unfreeze the rest:
for layer in model_1.layers[:249]:
    layer.trainable = False
for layer in model_1.layers[249:]:
    layer.trainable = True
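
A small sanity-check sketch for the freeze/unfreeze split above; the printed format is an assumption:

frozen = sum(1 for layer in model_1.layers if not layer.trainable)
print('frozen layers:', frozen, 'of', len(model_1.layers))
# Note: changes to `trainable` only take effect once the model is compiled again.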
Example #16
n_tags = len(tag_lb.classes_)
n_spks = len(spk_lb.classes_)
crf_lr_multiplier = 1
test_data = data_generator('test', X, Y, SPK, SPK_C, mode, batch_size)
input_X = Input(shape=(None, None), dtype='int32')
s2v_module = get_s2v_module(encoder_type, word_embedding_matrix, n_hidden,
                            dropout_rate)
output = TimeDistributed(s2v_module)(input_X)

bilstm_layer = Bidirectional(
    LSTM(units=n_hidden, activation='tanh', return_sequences=True))
dropout_layer = Dropout(dropout_rate)

input_SPK_C = Input(shape=(None, ), dtype='int32')
dense_layer_crf = Dense(units=n_tags if batch_size == 1 else n_tags + 1)
crf = OurCRF(ignore_last_label=False if batch_size == 1 else True)
output = crf(dense_layer_crf(dropout_layer(bilstm_layer(output))))

model = Model([input_X, input_SPK_C], output)
model.compile(optimizer=LRMultiplier('adam', {'our_crf': crf_lr_multiplier}),
              loss=crf.loss_wrapper(input_SPK_C),
              metrics=[])

# import keras

model.load_weights('5.h5')
print(model.summary())
# predictions=model.predict_generator(test_data,steps=10)
# print(predictions)
# print(X)
model.predict(X, steps=1)