Code example #1
File: apps.py Project: andriyandrushko0/djn
 def _load_model(cls):
     model = load_model(cls.model_path)
     model.compile(
         optimizer=tf.keras.optimizers.Adam(),
         loss=CategoricalCrossentropy(),
         metrics=["accuracy"],
     )
     cache.set(cls.cache_name, model, None)  # timeout=None: cache the model indefinitely (Django cache API)
     return model
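A natural counterpart to this helper is a getter that consults the cache before reloading. The sketch below is hypothetical (get_model is not part of the original project; it only reuses cls.cache_name and _load_model from above):

 @classmethod
 def get_model(cls):
     # Hypothetical sketch: return the cached model, rebuilding it on a cache miss.
     model = cache.get(cls.cache_name)
     if model is None:
         model = cls._load_model()
     return model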
Code example #2
File: agent.py Project: alomb/FlatlandChallenge
    def _build_compile_model(self):
        model = Sequential()
        model.add(
            Dense(10, input_shape=(self._state_size, ), activation='relu'))
        model.add(Dense(10, activation='relu'))
        model.add(Dense(self._action_size))

        # The last Dense layer has no activation, so the model outputs logits.
        model.compile(loss=CategoricalCrossentropy(from_logits=True),
                      optimizer=self._optimizer)
        return model
Code example #3
def make_classification_trainer(
    lr: float, logs_dir: str, name: str, verbose: int, batch_size: int, steps_per_epoch: Union[str, int]
) -> KerasTrainer:
    return KerasTrainer(
        compilation_parameters=KerasCompilationParameters(
            loss=CategoricalCrossentropy(from_logits=False), metrics=[categorical_accuracy], optimizer=Adam(lr)
        ),
        data_preparator=KerasDataPreparator(batch_size=batch_size, steps=steps_per_epoch),
        callbacks=make_classification_callbacks(join(logs_dir, name)),
        verbose=verbose,
    )
Code example #4
    def tr34_model_init(self):
        def build_model(net):
            return vggvox_resnet2d_icassp(
                input_dim=self.params["dim"],
                num_class=self.params["n_classes"],
                mode="pretrain",
                config=self.config,
                net=net,
            )

        model = build_model('resnet34s')
        pretrain_path = os.path.join(os.path.dirname(__file__),
                                     self.config["resume_pretrained"])
        if self.config["resume_pretrained"]:
            if os.path.isfile(pretrain_path):
                model.load_weights(pretrain_path,
                                   by_name=True,
                                   skip_mismatch=True)
                if self.class_num >= self.tr34_mconfig.CLASS_NUM_THS:
                    frz_layer_num = self.tr34_mconfig.INIT_BRZ_L_NUM
                else:
                    frz_layer_num = self.tr34_mconfig.INIT_BRZ_L_NUM_WILD
                for layer in model.layers[:frz_layer_num]:
                    layer.trainable = False
            pretrain_output = model.output
            weight_decay = self.tr34_mconfig.TR34_INIT_WD
            y = keras.layers.Dense(
                self.params["n_classes"],
                activation="softmax",
                kernel_initializer="orthogonal",
                use_bias=False,
                trainable=True,
                kernel_regularizer=keras.regularizers.l2(weight_decay),
                bias_regularizer=keras.regularizers.l2(weight_decay),  # no effect while use_bias=False
                name="prediction",
            )(pretrain_output)
            model = keras.models.Model(model.input,
                                       y,
                                       name="vggvox_resnet2D_{}_{}_new".format(
                                           "softmax", "gvlad"))
            opt = keras.optimizers.Adam(lr=3e-4)
            loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
            model.compile(optimizer=opt, loss=loss_fun, metrics=["acc"])
        #model.summary()
        callbacks = list()
        if self.tr34_mconfig.ENABLE_CB_ES:
            early_stopping = EarlyStopping(monitor="val_loss", patience=15)
            callbacks.append(early_stopping)
        if self.tr34_mconfig.ENABLE_CB_LRS:
            normal_lr = LearningRateScheduler(self.step_decay)
            callbacks.append(normal_lr)
        return model, callbacks
Code example #5
 def init_model(self, input_shape, num_classes, **kwargs):
     inputs = Input(shape=input_shape)
     sequence_len = input_shape[0]
     lstm_units_array = np.array([32, 64, 128, 256, 512])
     lstm_units = lstm_units_array[np.argmin(
         np.abs(lstm_units_array - sequence_len))]
     lstm_1 = CuDNNLSTM(lstm_units, return_sequences=True)(inputs)
     activation_1 = Activation('tanh')(lstm_1)
     if num_classes >= 20:
         if num_classes < 30:
             dropout1 = SpatialDropout1D(0.5)(activation_1)
             attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
         else:
             attention_1 = Attention(
                 8, 16)([activation_1, activation_1, activation_1])
         k_num = 10
         kmaxpool_l = Lambda(
             lambda x: tf.reshape(
                 tf.nn.top_k(tf.transpose(x, [0, 2, 1]), k=k_num, sorted=True)[0],
                 shape=[-1, k_num, 128]))(attention_1)
         flatten = Flatten()(kmaxpool_l)
         dropout2 = Dropout(rate=0.5)(flatten)
     else:
         dropout1 = SpatialDropout1D(0.5)(activation_1)
         attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
         pool_l = GlobalMaxPool1D()(attention_1)
         dropout2 = Dropout(rate=0.5)(pool_l)
     dense_1 = Dense(units=256, activation='relu')(dropout2)
     #         dense_1 = Dense(units=256, activation='softplus',kernel_regularizer=regularizers.l2(0.01),
     #                        activity_regularizer=regularizers.l1(0.01))(dropout2)
     #dense_1 = DropConnect(Dense(units=256, activation='softplus'), prob=0.5)(dropout2)
     outputs = Dense(units=num_classes, activation='softmax')(dense_1)
     loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
     model = TFModel(inputs=inputs, outputs=outputs)
     optimizer = optimizers.Nadam(lr=0.002,
                                  beta_1=0.9,
                                  beta_2=0.999,
                                  epsilon=None,
                                  schedule_decay=0.004)
     model.compile(
         optimizer=optimizer,
         loss=loss_fun,
         #loss="sparse_categorical_crossentropy",
         metrics=['accuracy'])
     model.summary()
     self._model = model
     self.is_init = True
Code example #6
    def mutate(self, offspring, shape=(2, 2)):
        for r in range(len(offspring)):
            for c in range(len(offspring[r])):
                if isinstance(offspring[r][c], np.float32):
                    if offspring[r][c] == 0:
                        offspring[r][c] = uniform(-1, 1)
                    else:
                        offspring[r][c] *= uniform(0, 1)
                else:
                    new_vals = [w * uniform(0, 1) for w in offspring[r][c]]
                    offspring[r][c] = new_vals

        new_weight = np.array(offspring).reshape(shape)
        return Solution(
            new_weight,
            calc_solution_fitness(new_weight, self.model,
                                  CategoricalCrossentropy(), self.X, self.y))
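The fitness call above implies that calc_solution_fitness scores a candidate weight set by loading it into the model and evaluating the loss on (X, y). The helper below is only a sketch of that idea; its body is an assumption, not the project's actual implementation:

def calc_solution_fitness(weights, model, loss_fn, X, y):
    # Assumed sketch: load the candidate weights, score the categorical
    # cross-entropy, and negate it so that lower loss means higher fitness.
    model.set_weights(weights)  # assumes `weights` matches the model's layer shapes
    predictions = model.predict(X, verbose=0)
    return -float(loss_fn(y, predictions))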
Code example #7
def create_sub_model(num_classes=29):
    model_input = Input(shape=(42,))

    x = model_input
    x = GaussianNoise(0.02)(x)

    x = block(512)(x)
    x = block(256)(x)
    x = block(128, 0)(x)

    x = Dense(num_classes, kernel_initializer="glorot_normal")(x)
    x = Softmax()(x)

    model = Model(model_input, x)
    model.compile(optimizer=Adam(learning_rate=5e-4),
                  loss=CategoricalCrossentropy(label_smoothing=0.1),
                  metrics=['accuracy'])
    return model
Code example #8
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        inputs = Input(shape=input_shape)
        # bnorm_1 = BatchNormalization(axis=2)(inputs)
        sequence_len = input_shape[0]
        lstm_units_array = np.array([32, 64, 128, 256, 512])
        lstm_units = lstm_units_array[np.argmin(np.abs(lstm_units_array-sequence_len))]
        lstm_1 = Bidirectional(CuDNNLSTM(lstm_units, name='blstm_1',
                                         return_sequences=True),
                               merge_mode='concat')(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        dropout1 = SpatialDropout1D(0.5)(activation_1)
        attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
        pool_1 = GlobalMaxPool1D()(attention_1)
        dropout2 = Dropout(rate=0.5)(pool_1)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
#         dense_1 = Dense(units=256, activation='relu',kernel_regularizer=regularizers.l2(0.01),
#                        activity_regularizer=regularizers.l1(0.01))(dropout2)
        #dense_1 = DropConnect(Dense(units=256, activation='relu'), prob=0.5)(dropout2)
        outputs = Dense(units=num_classes, activation='softmax')(dense_1)

        model = TFModel(inputs=inputs, outputs=outputs)
        loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(
            optimizer=optimizer,
            loss=loss_fun,
            #loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
Code example #9
    def get_ga_model(self):
        iteration = 0
        loss_metric = CategoricalCrossentropy()
        weights = get_trainable_weights(self.model, self.layers_to_optimize)

        individuals = self.initialize_population(self.population_size, weights,
                                                 self.model, loss_metric,
                                                 self.X, self.y)

        while iteration < self.iterations:
            individuals = self.reproduce_next_gen(individuals)

            print(' GA training for iteration {}'.format(iteration + 1) +
                  ' - Best fitness of {}'.format(individuals[0].fitness))
            iteration += 1
        best_weights = individuals[0].weights_arr

        return set_trainable_weights(self.model, best_weights,
                                     self.layers_to_optimize)
Code example #10
    def init_model(self,
                   input_shape,
                   num_classes,
                   max_layer_num=5,
                   **kwargs):
        model = Sequential()
        min_size = min(input_shape[:2])
        channels_array = np.array([32, 64, 128, 128])
        pool_array = [2, 3, 3, 4]
        max_channels_idx = np.argmin(np.abs(channels_array - min_size))
        input_shape = (input_shape[0], input_shape[1], 1)
        max_layer_num = min(max_channels_idx + 1, len(channels_array) - 1)
        for i in range(max_layer_num, -1, -1):
            if i == 0:
                model.add(Conv2D(channels_array[i], 3, padding='same'))
            else:
                model.add(Conv2D(channels_array[i], 3, input_shape=input_shape,
                                 padding='same'))
            model.add(Activation('relu'))
            model.add(BatchNormalization())
            model.add(MaxPooling2D(pool_size=(2, 2)))
            min_size //= 2
            if min_size < 2:
                break

        model.add(Flatten())
        model.add(Dense(channels_array[max_layer_num]))
        model.add(Dropout(rate=0.5))
        model.add(Activation('relu'))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

        # optimizer = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6)
        loss_fun = CategoricalCrossentropy(label_smoothing=0.1)
        optimizer = tf.keras.optimizers.Adam()
        # optimizer = optimizers.SGD(lr=1e-3, decay=2e-4, momentum=0.9, clipvalue=5)
        model.compile(loss=loss_fun,
                      optimizer=optimizer,
                      metrics=['accuracy'])
        model.summary()
        self.is_init = True
        self._model = model
Code example #11
    def get_pso_model(self):
        iteration = 0
        loss_metric = CategoricalCrossentropy()
        self.model.reset_metrics()
        weights = get_trainable_weights(self.model, self.layers_to_optimize)

        swarm = self.initialize_swarm(self.swarm_size, weights, self.model, loss_metric, self.X, self.y)

        best_particle = self.find_best_particle(swarm)
        self.set_gbest(swarm, best_particle)

        while iteration < self.iterations:
            self.update_positions(swarm, self.model, loss_metric, self.X, self.y)

            self.update_gbest(swarm)

            print(' PSO training for iteration {}'.format(iteration + 1) + ' - Best fitness of {}'.format(
                swarm[0].gbest_fitness))
            iteration += 1
        best_weights = swarm[0].gbest

        return set_trainable_weights(self.model, best_weights, self.layers_to_optimize)
Code example #12
    def two_point_crossover(self, individual1, individual2, shape):
        offspring = list()

        one = individual1.weights_flat.tolist()
        two = individual2.weights_flat.tolist()

        for r in range(len(one)):
            offspring.append(np.zeros_like(one[r]))
            for c in range(len(one[r])):
                if isinstance(offspring[r][c], np.float32):
                    offspring[r][c] = self.perform_element_level_crossover(
                        one[r][c], two[r][c])
                else:
                    for w in range(len(one[r][c])):
                        offspring[r][c][w] = self.perform_element_level_crossover(
                            one[r][c][w], two[r][c][w])

        new_weight = np.array(offspring).reshape(shape)

        return Solution(
            new_weight,
            calc_solution_fitness(new_weight, self.model,
                                  CategoricalCrossentropy(), self.X, self.y))
Code example #13
    def __init__(self,
                 E_output_dim,
                 E_intermediate_dim=40,
                 E_gauss_noise_wt=1.0,
                 E_gnoise_sd=0.05,
                 E_dropout=0.1,
                 latent_dim=3,
                 n_labels=50,
                 name='E_classifier',
                 **kwargs):

        super(Model_E_classifier, self).__init__(name=name, **kwargs)
        self.n_labels = n_labels
        E_gnoise_sd_weighted = E_gauss_noise_wt * E_gnoise_sd
        self.encoder_E = Encoder_E_classifier(
            gaussian_noise_sd=E_gnoise_sd_weighted,
            dropout_rate=E_dropout,
            latent_dim=latent_dim,
            intermediate_dim=E_intermediate_dim,
            name='E_encoder')
        self.softmax_E = tf.keras.layers.Dense(n_labels,
                                               activation="softmax",
                                               name='predictions')
        self.cce = CategoricalCrossentropy()
Code example #14
def prepare_metrics():
    train_acc_metric = CategoricalAccuracy()
    val_acc_metric = CategoricalAccuracy()
    train_loss_metric = CategoricalCrossentropy(from_logits=True)
    val_loss_metric = CategoricalCrossentropy(from_logits=True)
    return train_acc_metric, train_loss_metric, val_acc_metric, val_loss_metric
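These are the stateful metric variants (tf.keras.metrics.CategoricalAccuracy and tf.keras.metrics.CategoricalCrossentropy), which accumulate results across batches rather than returning a single value. A minimal sketch of how they would typically be driven in a custom training loop; `model` and `train_dataset` here are placeholders:

train_acc_metric, train_loss_metric, _, _ = prepare_metrics()

for x_batch, y_batch in train_dataset:             # assumed tf.data pipeline
    logits = model(x_batch, training=True)         # raw logits, matching from_logits=True
    train_acc_metric.update_state(y_batch, logits)
    train_loss_metric.update_state(y_batch, logits)

print("acc:", float(train_acc_metric.result()),
      "loss:", float(train_loss_metric.result()))
train_acc_metric.reset_states()                    # clear accumulated state between epochs
train_loss_metric.reset_states()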
Code example #15
else:
    input_shape = (img_width, img_height, 3)

#names = ['alexnet', 'VGG', 'Squeezenet', 'deepsqueeze']
#names = ['Squeezenet', 'deepsqueeze', 'son_of_deepsqueeze', 'the_big_squeeze','alexnet', 'inceptionV3']
names = ['alexnet']

for name in names:
    np.random.seed(333)
    model = initModel(name, input_shape)
    # model.compile(loss='categorical_crossentropy',
    #               optimizer='adam',
    #               metrics=['accuracy'])
    model.compile(
        optimizer=sgd,
        loss=CategoricalCrossentropy(),
        metrics=[categorical_accuracy, top_2_accuracy, top_3_accuracy])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical')
Code example #16
    def fit(self, X_train, y_train, X_validation=None, y_validation=None):
        """
        Fits the model to the training data. If a generator was specified via
        set_training_parameters, it will be used. By default, the loss function is the
        cross-entropy.
        :param X_train: training instances
        :param y_train: training labels
        :param X_validation: validation instances
        :param y_validation: validation labels
        """

        X_train = np.array(X_train)
        y_train = to_categorical(y_train)

        if X_validation is not None and y_validation is not None:

            X_validation, y_validation = self._ensure_validation_data_length(
                X_validation, y_validation)

            X_validation = np.array(X_validation)
            y_validation = to_categorical(y_validation)

            if self.generators["validation"] is not None:
                self.generators["validation"].set_data(X_validation,
                                                       y_validation)
                val_data = self.generators["validation"]
            else:
                val_data = (X_validation, y_validation)

        else:
            val_data = None

        if self.enable_per_example_loss:
            loss = CategoricalCrossentropy(from_logits=False,
                                           reduction=tf.losses.Reduction.NONE)
        else:
            loss = CategoricalCrossentropy(from_logits=False)

        self.model.compile(optimizer=self.optimizer,
                           loss=loss,
                           metrics=["categorical_accuracy"])

        if self.generators["train"] is None:
            res = self.model.fit(
                X_train,
                y_train,
                batch_size=self.batch_size,
                epochs=self.epochs,
                validation_data=val_data,
                shuffle=True,
                verbose=0,
                callbacks=self.callbacks,
            )
        else:
            self.generators["train"].set_data(X_train, y_train)
            res = self.model.fit_generator(
                self.generators["train"],
                epochs=self.epochs,
                validation_data=val_data,
                shuffle=True,
                workers=4,
                verbose=0,
                callbacks=self.callbacks,
            )

        return res
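The Reduction.NONE branch above makes the loss return one value per example instead of a single batch mean. A small self-contained illustration of the difference:

import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy, Reduction

y_true = tf.constant([[0., 1.], [1., 0.]])
y_pred = tf.constant([[0.2, 0.8], [0.6, 0.4]])

# Default reduction: a single scalar, averaged over the batch.
print(CategoricalCrossentropy()(y_true, y_pred).numpy())

# Reduction.NONE: one loss value per example, shape (2,).
print(CategoricalCrossentropy(reduction=Reduction.NONE)(y_true, y_pred).numpy())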
Code example #17
        tf_vals = {
            "weights": sess.run([W0, W1, W2, W3, W4]),
            "biases": sess.run([b0, b1, b2, b3, b4]),
        }

keras_inputs = Input(shape=(len(inputs[0]), ))

x = keras_inputs
for shape in [9, 7, 5, 3]:
    x = Dense(shape, activation="sigmoid")(x)
outputs = Dense(len(targets[0]), activation=None)(x)

model = Model(inputs=keras_inputs, outputs=outputs)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss=CategoricalCrossentropy(from_logits=True))

model.set_weights([
    layer for weight, bias in zip(deepcopy(initial_weights),
                                  deepcopy(initial_biases))
    for layer in (weight, bias.reshape(-1))
])

history = model.fit(
    np.array(deepcopy(inputs)),
    np.array(deepcopy(targets)),
    epochs=epochs,
    verbose="0",
)

keras_vals = {
Code example #18
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        freq_axis = 2
        channel_axis = 3
        channel_size = 128
        min_size = min(input_shape[:2])
        melgram_input = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)
        # x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        x = Reshape((input_shape[0], input_shape[1], 1))(melgram_input)
        # Conv block 1
        x = Convolution2D(64, 3, 1, padding='same', name='conv1')(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)
        #x = DropBlock2D(block_size=2, keep_prob=0.8, name='dropout1')(x) 

        # Conv block 2
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
        x = Dropout(0.1, name='dropout2')(x)
        #x = DropBlock2D(block_size=2, keep_prob=0.9, name='dropout2')(x)

        # Conv block 3
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv3')(x)
        x = BatchNormalization(axis=channel_axis, name='bn3')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
        x = Dropout(0.1, name='dropout3')(x)
        #x = DropBlock2D(block_size=2, keep_prob=0.9, name='dropout3')(x)
        sequence_len = input_shape[0]
        gru_units_array = np.array([32, 64, 128, 256, 512])
        gru_units = gru_units_array[np.argmin(np.abs(gru_units_array - sequence_len))]
        if gru_units >= 128:
            channel_size = 256
        if min_size // 24 >= 4:
            # Conv block 4
            x = Convolution2D(channel_size, 3, 1, padding='same', name='conv4')(x)
            x = BatchNormalization(axis=channel_axis, name='bn4')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
            x = Dropout(0.1, name='dropout4')(x)
            #x = DropBlock2D(block_size=2, keep_prob=0.9, name='dropout4')(x)

        x = Reshape((-1, channel_size))(x)
        # if num_classes > gru_units:
        #     gru_units = int(num_classes * 1.5)
        # GRU block 1, 2, output
        if gru_units < 128:
            x = CuDNNGRU(gru_units, return_sequences=False, name='gru1')(x)
        else:
            x = CuDNNGRU(gru_units, return_sequences=True, name='gru1')(x)
            x = CuDNNGRU(gru_units, return_sequences=False, name='gru2')(x)
        # x = Dense(max(int(num_classes*1.5), 128), activation='relu', name='dense1')(x)
        x = Dropout(0.3)(x)
        outputs = Dense(num_classes, activation='softmax', name='output')(x)
        loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
        model = TFModel(inputs=melgram_input, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=1e-4,
            amsgrad=True)
        model.compile(
            optimizer=optimizer,
            loss=loss_fun,
            #loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
Code example #19
mlp.add(Dense(1024, input_shape=(numsize, ), activation='sigmoid'))
#mlp.add(Dense(32, input_shape=(numsize,), activation='sigmoid'))
mlp.add(Dropout(0.5))

combinedInput = tensorflow.keras.layers.concatenate([mlp.output, cnn.output])
#x=Dense(516,activation="relu")(combinedInput)
x = Dense(num_classes,
          activation='relu',
          kernel_initializer=GlorotUniform(),
          kernel_regularizer=l2(0.1),
          bias_initializer=constant(0.1))(combinedInput)

model = tensorflow.keras.models.Model(inputs=[mlp.input, cnn.input], outputs=x)
opt = tensorflow.keras.optimizers.Adam(lr=0.0001, epsilon=0.05)

loss = CategoricalCrossentropy(from_logits=True)

model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
#model.load_weights('saved_weight')

model.summary()
history = model.fit([vtrain, x_train],
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=([vtest, x_test], y_test),
                    shuffle=True)
#model.save_weights('saved_weight350')
score = model.evaluate([vtest, x_test], y_test, verbose=0)
Code example #20
 def compile_model(self, model):
     loss = CategoricalCrossentropy()
     # No optimizer is given, so compile falls back to the Keras default ('rmsprop').
     model.compile(
         loss=loss,
         metrics=[TopKCategoricalAccuracy(k=1)],  # k=1 behaves like plain categorical accuracy
     )
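Taken together, the examples above exercise three constructor options of tf.keras.losses.CategoricalCrossentropy: from_logits, label_smoothing, and reduction. A short standalone sketch of the first two on a toy batch:

import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy

y_true = tf.constant([[0., 0., 1.]])
logits = tf.constant([[1.0, 2.0, 3.0]])

# from_logits=True applies softmax internally, so raw scores can be passed.
print(CategoricalCrossentropy(from_logits=True)(y_true, logits).numpy())

# label_smoothing=0.2 mixes the one-hot target toward the uniform distribution:
# y_smooth = y_true * (1 - 0.2) + 0.2 / num_classes
print(CategoricalCrossentropy(from_logits=True,
                              label_smoothing=0.2)(y_true, logits).numpy())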