Example #1
    def __init__(self, in_shape, regional_labels, batch_size=64, epochs=50):
        NeuralNet.__init__(self,
                           in_shape,
                           regional_labels,
                           epochs=epochs,
                           batch_size=batch_size)

        features = in_shape[1]

        inputs = Input(shape=(features, ), name="input")

        x = Dense(4096, activation="relu", name="dense1")(inputs)
        x = Dense(4096, activation="relu", name="dense2")(x)
        x = Dense(4096, activation="relu", name="dense3")(x)
        x = Dense(4096, activation="relu", name="dense4")(x)
        x = Dense(2048, activation="relu", name="dense5")(x)
        y = Dense(self.encoder.transform(self.labels).shape[1],
                  activation="softmax",
                  name="output")(x)

        self.model = kerasModel(inputs, y)
        self.model.compile(optimizer="adam",
                           loss='categorical_crossentropy',
                           metrics=["acc"])
        self.name = 'multiclassSimpleNN'
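# None of these examples show their imports. A minimal sketch of what they appear to
# assume, using the Keras 2 functional API; kerasModel looks like an alias for
# keras.models.Model, while NeuralNet, RoiPoolingConv, CyclicLR, se_block,
# get_keras_data and prauc are project-specific helpers.
from keras.models import Model as kerasModel
from keras.layers import (Input, Dense, Dropout, Flatten, Activation, Embedding,
                          Conv1D, Convolution1D, MaxPooling1D, AveragePooling1D,
                          SimpleRNN, GRU, LSTM, Bidirectional, TimeDistributed,
                          BatchNormalization, PReLU, LeakyReLU, Reshape,
                          SpatialDropout1D, GlobalMaxPool1D, concatenate)
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.metrics import top_k_categorical_accuracy
from keras.utils import plot_model
from keras.applications.vgg19 import VGG19
import pandas as pd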
Example #2
def get_roi_model(vgg_model):
    in_img = Input(shape=(None, None, 512))
    in_roi = Input(shape=(1, 4))

    #roi_model = Sequential()
    out_roi_pool = RoiPoolingConv(7, 1)([in_img, in_roi])
    model = kerasModel([in_img, in_roi], out_roi_pool)
    #     net = merge([in_img, features],mode=cross_input_asym,output_shape=cross_input_shape)(out_roi_pool, features)
    net = Flatten()(out_roi_pool)
    net = Dense(4096,
                kernel_initializer=lambda shape: K.random_normal(shape),
                input_shape=(25088, ))(net)
    net = Activation('relu')(net)
    net = Dropout(0.5)(net)
    net = Dense(4096,
                kernel_initializer=lambda shape: K.random_normal(shape))(net)
    net = Activation('relu')(net)
    net = Dropout(0.5)(net)
    net = Dense(1000,
                kernel_initializer=lambda shape: K.random_normal(shape))(net)
    net = Activation('softmax')(net)
    roi_model = kerasModel(inputs=model.input, outputs=net)
    roi_model.set_weights(vgg_model.get_weights()[-6:])
    return roi_model
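# Hedged usage sketch. RoiPoolingConv is a custom layer (not part of Keras), assumed
# here to pool each ROI to a 7x7 grid, so the flattened features have
# 7 * 7 * 512 = 25088 dimensions, matching the fc1 layer of the standard VGG16/VGG19
# classification top; the six weight arrays copied above would then be the kernels
# and biases of fc1, fc2 and predictions.
from keras.applications.vgg19 import VGG19

vgg = VGG19(weights='imagenet')      # must include the fully connected top
roi_model = get_roi_model(vgg)
roi_model.summary()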
Example #3
    def load_feature_map_model(self, t):
        if t == 'vgg':
            self.feature_map_extractor_model = VGG19(
                weights='imagenet'
            )  #, include_top=False, input_shape=(800,800,3))
        else:  # currently loads the same backbone as the 'vgg' branch
            self.feature_map_extractor_model = VGG19(
                weights='imagenet'
            )  #, include_top=False, input_shape=(800,800,3))

        self.feature_map_extractor_model = kerasModel(
            inputs=self.feature_map_extractor_model.input,
            outputs=self.feature_map_extractor_model.get_layer('fc2').output)
        self.feature_map_extractor_model.summary()
        self.rl_model = get_q_network(self.pretrained_model)
Example #4
    def build_model(self):
        main_input = Input(shape=(None, ), dtype='int32', name='main_input')
        x = Embedding(output_dim=self.embedding_dimension,
                      input_dim=n_vocab)(main_input)
        x = Convolution1D(64, 5, padding='same', activation='relu')(x)

        if self.dropout_parameter > 0.0:
            x = Dropout(self.dropout_parameter)(x)

        if self.rnn_type == 'GRU':
            rnn = GRU(self.rnn_units, return_sequences=True)
        elif self.rnn_type == 'LSTM':
            rnn = LSTM(self.rnn_units, return_sequences=True)
        else:
            # return_sequences=True so the TimeDistributed heads below get a 3D tensor
            rnn = SimpleRNN(self.rnn_units, return_sequences=True)

        if self.bidirectional:
            x = Bidirectional(rnn)(x)
        else:
            x = rnn(x)

        if self.maxPooling:
            x = MaxPooling1D(strides=1, padding='same')(x)
            print("Using MaxPooling")
        elif self.averagePooling:
            x = AveragePooling1D(strides=1, padding='same')(x)
            print("Using AveragePooling")
        slot_output = TimeDistributed(Dense(n_slots, activation='softmax'),
                                      name='slot_output')(x)
        intent_output = TimeDistributed(Dense(n_classes, activation='softmax'),
                                        name='intent_output')(x)
        model = kerasModel(inputs=[main_input],
                           outputs=[intent_output, slot_output])

        # rmsprop is recommended for RNNs https://stats.stackexchange.com/questions/315743/rmsprop-and-adam-vs-sgd
        model.compile(optimizer='rmsprop',
                      loss={
                          'intent_output': 'categorical_crossentropy',
                          'slot_output': 'categorical_crossentropy'
                      })
        plot_model(model, 'models/' + self.name + '.png')

        self.model = model

        return
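# Hedged usage sketch for build_model: both heads are TimeDistributed, so the targets
# are one-hot arrays of shape (batch, timesteps, n_slots) and (batch, timesteps,
# n_classes). "nlu" stands for a hypothetical instance of the class this method
# belongs to; the dummy arrays only illustrate the expected shapes.
import numpy as np

batch, timesteps = 8, 20
X = np.random.randint(0, n_vocab, size=(batch, timesteps))
y_slots = np.eye(n_slots)[np.random.randint(0, n_slots, size=(batch, timesteps))]
y_intents = np.eye(n_classes)[np.random.randint(0, n_classes, size=(batch, timesteps))]

nlu.build_model()
nlu.model.fit(X,
              {'slot_output': y_slots, 'intent_output': y_intents},
              batch_size=4,
              epochs=1)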
Example #5
    def __init__(self, in_shape, regional_labels, batch_size=64, epochs=1):
        NeuralNet.__init__(self,
                           in_shape,
                           regional_labels,
                           epochs=epochs,
                           batch_size=batch_size)

        features = in_shape[1]

        inputs = Input(shape=(1, features), name="input")

        x = Conv1D(16, 20, padding="same", activation="relu",
                   name="conv1")(inputs)
        x = MaxPooling1D(5, padding="same", name="pool1")(x)

        x = Conv1D(16, 10, padding="same", activation="relu", name="conv2")(x)
        x = MaxPooling1D(5, padding="same", name="pool2")(x)

        x = Flatten()(x)
        x = Dropout(0.2, name="drop1")(x)

        x = Dense(256, activation="relu", name="dense1")(x)
        x = Dropout(0.2, name="drop2")(x)

        x = Dense(128, activation="relu", name="dense2")(x)
        x = Dropout(0.2, name="drop3")(x)

        x = Dense(64, activation="relu", name="dense3")(x)
        x = Dropout(0.2, name="drop4")(x)

        y = Dense(self.encoder.transform(self.labels).shape[1],
                  activation="softmax",
                  name="output")(x)

        self.model = kerasModel(inputs, y)
        self.model.compile(optimizer="adam",
                           loss='categorical_crossentropy',
                           metrics=["acc"])
        self.name = 'multiclassSimpleCNN'
Example #6
    def __init__(self, in_shape, regional_labels, batch_size=64, epochs=50):
        NeuralNet.__init__(self,
                           in_shape,
                           regional_labels,
                           epochs=epochs,
                           batch_size=batch_size)

        features = in_shape[1]

        inputs = Input(shape=(1, features), name="input")

        x = SimpleRNN(4096, activation="relu", name="RNN1")(inputs)
        y = Dense(self.encoder.transform(self.labels).shape[1],
                  activation="softmax",
                  name="output")(x)

        self.model = kerasModel(inputs, y)
        self.model.compile(
            optimizer="adam",
            loss='categorical_crossentropy',
            metrics=["acc", top_3_accuracy, top_k_categorical_accuracy])
        self.name = 'multiclassNNScratch'
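# top_3_accuracy is not a Keras built-in. A common definition, assumed here, wraps
# top_k_categorical_accuracy with k=3:
from keras.metrics import top_k_categorical_accuracy

def top_3_accuracy(y_true, y_pred):
    # Fraction of samples whose true class is among the three highest-scoring classes.
    return top_k_categorical_accuracy(y_true, y_pred, k=3)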
Example #7
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):
        audio_features = [c for c in tr_x.columns if "spec" in c]

        # Set up and scale the data
        numerical_features = [
            c for c in tr_x.columns
            if (c not in self.categorical_features) and (
                c not in audio_features)
        ]
        validation = va_x is not None

        # Parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # Build the model
        inp_cats = []
        embs = []
        data = pd.concat([tr_x, va_x, te_x]).reset_index(drop=True)

        for c in self.categorical_features:
            inp_cat = Input(shape=[1], name=c)
            inp_cats.append(inp_cat)
            embs.append((Embedding(data[c].max() + 1, 4)(inp_cat)))
        cats = Flatten()(concatenate(embs))
        cats = Dense(10, activation="linear")(cats)
        cats = BatchNormalization()(cats)
        cats = PReLU()(cats)

        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        # https://www.kaggle.com/zerrxy/plasticc-rnn
        inp_audio = Input(shape=[512], name="audio")
        audio = Reshape((512, 1))(inp_audio)

        audio = TimeDistributed(Dense(40, activation='relu'))(audio)
        audio = Bidirectional(GRU(80, return_sequences=True))(audio)
        audio = SpatialDropout1D(0.2)(audio)

        audio = GlobalMaxPool1D()(audio)
        audio = Dropout(dropout)(audio)

        x = concatenate([nums, cats, audio])
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=inp_cats + [inp_numerical] + [inp_audio],
                           outputs=out)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[prauc])

        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 256

        tr_x = get_keras_data(tr_x, numerical_features,
                              self.categorical_features, audio_features)
        va_x = get_keras_data(va_x, numerical_features,
                              self.categorical_features, audio_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # Keep the model and scaler
        self.model = model
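# get_keras_data and prauc are project helpers that are not shown. The sketch below is
# an assumption based on the Input(name=...) layers and the call in this example only
# (the later examples call get_keras_data with different argument lists, so the real
# helper presumably differs per module). prauc is presumably a precision-recall AUC
# metric; with tf.keras one could instead pass tf.keras.metrics.AUC(curve="PR",
# name="prauc") to compile().
def get_keras_data(df, numerical_features, categorical_features, audio_features):
    # Pack a DataFrame into the dict of named inputs the model above expects:
    # one entry per categorical column, plus "numerical" and "audio".
    data = {c: df[c].values for c in categorical_features}
    data["numerical"] = df[numerical_features].values
    data["audio"] = df[audio_features].values
    return data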
Example #8
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):
        audio_features = [c for c in tr_x.columns if "spec" in c]

        # Set up and scale the data
        numerical_features = [
            c for c in tr_x.columns if (c not in audio_features)
        ]
        validation = va_x is not None

        # Parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # Build the model
        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        # https://www.kaggle.com/yuval6967/3rd-place-cnn
        inp_audio = Input(shape=[512], name="audio")
        audio = Reshape((512, 1))(inp_audio)
        audio = Conv1D(256, 32, padding='same', name='Conv1')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = Dropout(0.2)(audio)
        audio = Conv1D(256, 24, padding='same', name='Conv2')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = Dropout(0.2)(audio)
        audio = Conv1D(128, 16, padding='same', name='Conv3')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = GlobalMaxPool1D()(audio)
        audio = Dropout(dropout)(audio)

        x = concatenate([nums, audio])
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=[inp_numerical] + [inp_audio], outputs=out)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[prauc])

        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 512

        tr_x = get_keras_data(tr_x, numerical_features, audio_features)
        va_x = get_keras_data(va_x, numerical_features, audio_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # Keep the model and scaler
        self.model = model
Example #9
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):

        # Set up and scale the data
        numerical_features = [
            c for c in tr_x.columns if c not in self.categorical_features
        ]
        validation = va_x is not None

        # Parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # Build the model
        inp_cats = []
        embs = []
        data = pd.concat([tr_x, va_x, te_x]).reset_index(drop=True)

        for c in self.categorical_features:
            inp_cat = Input(shape=[1], name=c)
            inp_cats.append(inp_cat)
            embs.append((Embedding(data[c].max() + 1, 4)(inp_cat)))
        cats = Flatten()(concatenate(embs))
        cats = Dense(4, activation="linear")(cats)
        cats = BatchNormalization()(cats)
        cats = PReLU()(cats)

        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        x = concatenate([nums, cats])
        x = se_block(x, 32 + 4)
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        x = Dense(1000, activation="relu")(x)
        x = Dense(800, activation="relu")(x)
        x = Dense(300, activation="relu")(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=inp_cats + [inp_numerical], outputs=out)
        model.compile(loss='binary_crossentropy', optimizer='adam')
        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 256

        tr_x = get_keras_data(tr_x, numerical_features,
                              self.categorical_features)
        va_x = get_keras_data(va_x, numerical_features,
                              self.categorical_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # Keep the model and scaler
        self.model = model
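# se_block is a project helper that is not shown. Judging from se_block(x, 32 + 4)
# above, its second argument appears to be the width of the incoming feature vector;
# a minimal squeeze-and-excitation sketch under that assumption:
from keras.layers import Dense, multiply

def se_block(x, units, ratio=4):
    # Learn a per-feature gate in [0, 1] through a small bottleneck, then rescale
    # the input features with it (squeeze-and-excitation on a flat vector).
    s = Dense(units // ratio, activation="relu")(x)
    s = Dense(units, activation="sigmoid")(s)
    return multiply([x, s])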
Example #10
    def train(
        self,
        tr_x: pd.DataFrame,
        tr_y: pd.DataFrame,
        va_x: pd.DataFrame = None,
        va_y: pd.DataFrame = None,
        te_x: pd.DataFrame = None,
    ) -> None:
        # Set up and scale the data
        numerical_features = [
            c for c in tr_x.columns
            if c not in self.categorical_features  # type: ignore
        ]
        validation = va_x is not None

        # Parameters
        dropout = self.params["dropout"]
        nb_epoch = self.params["nb_epoch"]
        patience = self.params["patience"]

        # Build the model
        inp_cats = []
        embs = []
        data = pd.concat([tr_x, va_x, te_x]).reset_index(drop=True)

        for c in self.categorical_features:  # type: ignore
            inp_cat = Input(shape=[1], name=c)
            inp_cats.append(inp_cat)
            embs.append((Embedding(data[c].max() + 1, 4)(inp_cat)))

        cats = Flatten()(concatenate(embs))
        cats = Dense(4, activation="linear")(cats)
        cats = BatchNormalization()(cats)
        cats = PReLU()(cats)

        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(500, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        x = Dropout(dropout)(nums)

        # Note: both assignments below are immediately overwritten, so the dropout
        # output and the concatenated categorical branch never reach the head; only
        # se_block(nums, 32) feeds the Dense stack, which is consistent with the model
        # taking inputs=[inp_numerical] only. (se_block is also called with width 32
        # here although nums has 500 features, unlike the 32 + 4 call in Example #9.)
        x = concatenate([nums, cats])
        x = se_block(nums, 32)
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        x = Dense(250, activation="relu")(x)
        x = Dense(125, activation="relu")(x)
        x = Dense(64, activation="relu")(x)
        x = Dense(40, activation="relu")(x)
        out = Dense(31, activation="softmax", name="out1")(x)

        model = kerasModel(inputs=[inp_numerical], outputs=out)
        model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 256

        tr_x = get_keras_data(tr_x, numerical_features,
                              self.categorical_features)
        va_x = get_keras_data(va_x, numerical_features,
                              self.categorical_features)

        clr_tri = CyclicLR(
            base_lr=1e-5,
            max_lr=1e-2,
            step_size=n_train // batch_size_nn,
            mode="triangular2",
        )
        ckpt = ModelCheckpoint(
            f"../output/model/model_{self.run_fold_name}.hdf5",
            save_best_only=True,
            monitor="val_loss",
            mode="min",
        )
        if validation:
            early_stopping = EarlyStopping(
                monitor="val_loss",
                patience=patience,
                verbose=1,
                restore_best_weights=True,
            )
            model.fit(
                tr_x,
                tr_y,
                epochs=nb_epoch,
                batch_size=batch_size_nn,
                verbose=2,
                validation_data=(va_x, va_y),
                callbacks=[ckpt, clr_tri, early_stopping],
            )
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(
            f"../output/model/model_{self.run_fold_name}.hdf5")  # type: ignore

        # Keep the model and scaler
        self.model = model
Example #11
def get_merge_model():
    in_img = Input(shape=(None, None, 512))
    in_roi = Input(shape=(1, 4))

    out_roi_pool = RoiPoolingConv(7, 1)([in_img, in_roi])
    model = kerasModel([in_img, in_roi], out_roi_pool)