Code Example #1
 def fit(self, X, y, subpolicies=None):
     if subpolicies is not None:
         which = np.random.randint(len(subpolicies), size=len(X))
         for i, subpolicy in enumerate(subpolicies):
             X[which == i] = subpolicy(X[which == i])
     X = X.astype(np.float32) / 255
     callback = TqdmCallback(leave=False, file=sys.stdout, verbose=0)
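     # Compatibility shim: alias Keras' newer per-batch hooks to tqdm's legacy on_batch_* handlers so the bar updates each batch on older tqdm releases.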
     callback.on_train_batch_begin = callback.on_batch_begin
     callback.on_train_batch_end = callback.on_batch_end
     self.model.fit(X,
                    y,
                    CHILD_BATCH_SIZE,
                    CHILD_EPOCHS,
                    verbose=0,
                    callbacks=[callback])
     return self
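For comparison, a minimal sketch of the same progress-bar setup without the shim, assuming a current tqdm release (the aliasing above targets older ones); model, X, and y here are hypothetical stand-ins:

import sys
import numpy as np
import tensorflow as tf
from tqdm.keras import TqdmCallback

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
X = np.random.rand(256, 4).astype(np.float32)
y = np.random.rand(256, 1).astype(np.float32)

# verbose=0 silences Keras' own logging; TqdmCallback draws the bar instead.
model.fit(X, y, batch_size=32, epochs=5, verbose=0,
          callbacks=[TqdmCallback(verbose=0, leave=False, file=sys.stdout)])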
Code Example #2
    def run(self,
            numHidden1,
            numHidden2=0,
            numEpochs=20,
            batchSize=512,
            codeSuffix=''):
        x_train, y_train = self.loadData()
        x_train = np.array(x_train, np.float32)
        numClasses = max(y_train) + 1

        train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
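        # repeat() makes the stream infinite; steps_per_epoch=100 in fit() below defines the epoch length.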
        train_data = train_data.repeat().shuffle(5000).batch(
            batchSize).prefetch(1)

        layers = []
        if numHidden1 > 0:
            layers += [tf.keras.layers.Dense(numHidden1, activation='relu')]
        if numHidden2 > 0:
            layers += [tf.keras.layers.Dense(numHidden2, activation='relu')]

        layers += [tf.keras.layers.Dense(numClasses)]
        model = tf.keras.Sequential(layers)
        model.compile(optimizer='adam',
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(
                          from_logits=True),
                      metrics=['accuracy'])

        model.fit(train_data,
                  epochs=numEpochs,
                  steps_per_epoch=100,
                  verbose=0,
                  callbacks=[TqdmCallback(verbose=2)])

        self.writeCodeFile(model, codeSuffix)
Code Example #3
def main():
    df = pd.read_csv(args.input)
    df = df[['ID', 'FORM', 'XPOSTAG']]
    print(df.head())
    sentences = Preparing_tagged_data(df)
    print('Maximum sequence length:', MAX)
    word2idx, idx2word, tag2idx, idx2tag = preparedicts(df)
    X, y = prepareData(sentences, word2idx, tag2idx)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.05,
                                                        random_state=7)

    print("Dataset dimentions \n")
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)

    embedding_matrix = embeddings(word2idx)

    model = BUILD_MODEL(X, MAX, len(word2idx), len(tag2idx), embedding_matrix)

    history = model.fit(X_train,
                        y_train,
                        epochs=10,
                        batch_size=32,
                        validation_split=0.1,
                        verbose=0,
                        callbacks=[TqdmCallback(verbose=2)])

    #evaluate_model(history)
    print("Saving model at ", args.output)
    model.save(args.output)
    TestData(model, X_test, y_test, idx2tag)
Code Example #4
File: train.py  Project: rubentea16/dvc-versioning
def train_top_model():
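    # Classic transfer-learning recipe: train a small top classifier on bottleneck features
    # pre-computed by a frozen convolutional base; the first half of each array is class 0, the second half class 1.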
    train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
    train_labels = np.array(
        [0] * (int(nb_train_samples / 2)) + [1] * (int(nb_train_samples / 2)))

    validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))
    validation_labels = np.array(
        [0] * (int(nb_validation_samples / 2)) +
        [1] * (int(nb_validation_samples / 2)))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy', metrics=['accuracy'])

    model.fit(train_data, train_labels,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(validation_data, validation_labels),
              verbose=0,
              callbacks=[TqdmCallback(), CSVLogger("metrics.csv")])
    model.save_weights(top_model_weights_path)
Code Example #5
File: ML.py  Project: wolperting3r/Masterarbeit
def train_model(model, train_data, train_labels, regenerate=True):
    # Build tensorflow dataset
    dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels)).batch(128)
    if regenerate:
        # Train Model
        early_stopping_callback = keras.callbacks.EarlyStopping(monitor='loss',
                                                                min_delta=10e-8,
                                                                patience=50,
                                                                verbose=0,
                                                                mode='auto',
                                                                baseline=None)
        model.fit(dataset,
                  shuffle=True,
                  epochs=5000,  # was 10000
                  verbose=0,
                  callbacks=[TqdmCallback(verbose=1),
                             early_stopping_callback])

        # Save model
        model.save('model.h5')
    else:
        model = tf.keras.models.load_model('model.h5')
        print(model.summary())

    return model
Code Example #6
def get_callbacks():
    filepath1 = "./weights/weights-race-{epoch:02d}-{val_dense_5_accuracy:.2f}.h5"
    checkpoint1 = ModelCheckpoint(filepath1,
                                  monitor='val_dense_5_accuracy',
                                  verbose=1,
                                  mode='auto',
                                  save_best_only=True)

    filepath2 = "./weights/weights-age-{epoch:02d}-{val_dense_3_accuracy:.2f}.h5"
    checkpoint2 = ModelCheckpoint(filepath2,
                                  monitor='val_dense_3_accuracy',
                                  verbose=1,
                                  mode='auto',
                                  save_best_only=True)

    def lr_scheduler(epoch, lr):
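        # After epoch 10, multiply the learning rate by 0.1 every 50 epochs.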
        decay_rate = 0.1
        decay_step = 50
        if epoch % decay_step == 0 and epoch > 10:
            lr = decay_rate * lr
        return lr

    scheduler = LearningRateScheduler(lr_scheduler, verbose=1)

    es = EarlyStopping(monitor='val_loss',
                       patience=40,
                       verbose=1,
                       min_delta=1e-2)

    callbacks_list = [checkpoint1, checkpoint2, scheduler, TqdmCallback()]

    return callbacks_list
Code Example #7
File: models.py  Project: asavine/differential-ml
def train_model(model,
                train_id, 
                x_train, 
                y_train, 
                dydx_train=None,
                scaled_MSE=None, 
                epochs = EPOCHS,
                x_true = None,
                y_true = None,
                dydx_true = None):
   
    x_norm, y_norm, dydx_norm = get_norm_layers(x_train, y_train, dydx_train)
    if scaled_MSE is not None:
        scaled_MSE.adapt(dydx_train)
    
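    # Differential training: fit the model to both the normalized labels and their pathwise derivatives dydx.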
    history = model.fit(
        x_norm(x_train), [y_norm(y_train), dydx_norm(dydx_train)], 
        steps_per_epoch = STEPS_PER_EPOCH,
        epochs=epochs,
        callbacks=[
                   # tf.keras.callbacks.TensorBoard(log_dir = log_dir+train_id, histogram_freq=1),
                   tf.keras.callbacks.EarlyStopping(monitor='loss',patience=100),
                   TqdmCallback(verbose=1)
                   ],
        validation_data = (x_norm(x_true), [y_norm(y_true), dydx_norm(dydx_true)]),
        verbose=0
        )
    return history, x_norm, y_norm
Code Example #8
def train_top_model():
    train_data = np.load(train_feature_path)
    train_labels = np.array([0] * (nb_train_samples // 2) + [1] *
                            (nb_train_samples // 2))

    validation_data = np.load(valid_feature_path)
    validation_labels = np.array([0] * (nb_validation_samples // 2) + [1] *
                                 (nb_validation_samples // 2))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(train_data,
              train_labels,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(validation_data, validation_labels),
              verbose=0,
              callbacks=[TqdmCallback(),
                         CSVLogger(metrics_file)])
    model.save_weights(top_model_weights_path)

    model_json = model.to_json()

    with open(top_model_json_path, "w") as json_file:
        json_file.write(model_json)
Code Example #9
    def build_model(self, input_shape, nb_classes):
        input_layer = keras.layers.Input(input_shape)

        x = input_layer
        input_res = input_layer

        for d in range(self.depth):

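            # InceptionTime-style stack: a residual shortcut is added after every third inception module.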
            x = self._inception_module(x)

            if self.use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x

        gap_layer = keras.layers.GlobalAveragePooling1D()(x)

        output_layer = keras.layers.Dense(nb_classes, activation='softmax')(gap_layer)

        model = keras.models.Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(self.lr),
                      metrics=['accuracy'])

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=50,
                                                      min_lr=0.0001)

        file_path = self.output_directory + 'best_model.hdf5'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path, monitor='loss',
                                                           save_best_only=True)
        
        self.callbacks = [reduce_lr, model_checkpoint, TqdmCallback(verbose=0)]

        return model
Code Example #10
    def __poison__tensorflow(self, x_poison: np.ndarray,
                             y_poison: np.ndarray) -> Tuple[Any, Any]:
        """
        Optimize the poison by matching the gradient within the perturbation budget.

        :param x_poison: List of samples to poison.
        :param y_poison: List of the labels for x_poison.
        :return: A pair of poisoned samples, B-score (cosine similarity of the gradients).
        """
        self.backdoor_model.compile(loss=None, optimizer=self.optimizer)
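        # loss=None: the poisoning objective is presumably defined inside backdoor_model itself (e.g., via add_loss).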

        callbacks = [self.lr_schedule]
        if self.verbose > 0:
            from tqdm.keras import TqdmCallback

            callbacks.append(TqdmCallback(verbose=self.verbose - 1))

        # Train the noise.
        self.backdoor_model.fit(
            [x_poison, y_poison, np.arange(len(y_poison))],
            callbacks=callbacks,
            batch_size=self.batch_size,
            epochs=self.max_epochs,
            verbose=0,
        )
        [input_noised_, B_] = self.backdoor_model.predict(  # pylint: disable=C0103
            [x_poison, y_poison, np.arange(len(y_poison))],
            batch_size=self.batch_size)

        return input_noised_, B_
Code Example #11
    def _fit_transform(self,
                       graph: Graph,
                       return_dataframe: bool = True,
                       verbose: bool = True) -> EmbeddingResult:
        """Return node embedding"""
        try:
            from tqdm.keras import TqdmCallback
            traditional_verbose = False
        except AttributeError:
            traditional_verbose = True

        if has_gpus() and self._use_mirrored_strategy:
            strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
        else:
            strategy = tf.distribute.get_strategy()

        # Build the model
        with strategy.scope():
            model = self._build_model(graph)

        # Get the model input
        training_input = self._build_input(graph, verbose=verbose)

        if not isinstance(training_input, tuple):
            raise ValueError("The provided input data is not a tuple.")

        # Fit the model
        model.fit(
            *training_input,
            epochs=self._epochs,
            verbose=traditional_verbose and verbose > 0,
            batch_size=(self._batch_size if issubclass(
                training_input[0].__class__, Sequence) else None),
            steps_per_epoch=self._get_steps_per_epoch(graph),
            callbacks=[
                EarlyStopping(
                    monitor="loss",
                    min_delta=self._early_stopping_min_delta,
                    patience=self._early_stopping_patience,
                    mode="min",
                ),
                ReduceLROnPlateau(
                    monitor="loss",
                    min_delta=self._learning_rate_plateau_min_delta,
                    patience=self._learning_rate_plateau_patience,
                    factor=0.5,
                    mode="min",
                ),
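                # Splice in the tqdm bar only when tqdm.keras imported cleanly and interactive verbosity was requested.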
                *((TqdmCallback(verbose=1, leave=False), )
                  if not traditional_verbose and verbose > 0 else ()),
            ],
        )

        # Extract and return the embedding
        return self._extract_embeddings(graph,
                                        model,
                                        return_dataframe=return_dataframe)
Code Example #12
File: DNN_Softmax.py  Project: gklb/gklb
def DNN_Classify(
    train_data,
    train_size,
    #model_load,
    #learning_period,
    hist,  # time length of input data
    iterations
    #update_period, # gradient will be updated for every 10 iterations
    #save_direct # location of saving weights
):

    variables = train_data.iloc[:train_size].copy()
    labels = torch.as_tensor(variables.Label.iloc[hist:].copy().values)
    labels = torch.nn.functional.one_hot(labels.long())
    variables = variables.drop('Label', axis=1).copy()
    scaler.fit(variables)
    _, features = preprocFeatures(variables)
    #gainArr = gainArr.values.tolist()[hist:]

    features = flattenSeries(features, hist)

    inputdim = len(features[-1])
    #model_load = False
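    # NOTE: model_load, learning_period and save_direct are referenced below although their
    # parameters are commented out above; they are assumed to exist at module scope.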

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(128, input_dim=inputdim,
                                    activation='relu'))
    model.add(tf.keras.layers.Dense(52, activation='relu'))
    model.add(tf.keras.layers.Dense(2, activation='softmax'))
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy')

    early_stopping = EarlyStopping(monitor='loss',
                                   mode='min',
                                   verbose=0,
                                   patience=50)
    mc = ModelCheckpoint(save_direct + '/test_historic_' + str(train_size) +
                         '.h5',
                         monitor='loss',
                         mode='min',
                         save_best_only=True)

    if model_load:
        model.load_weights(save_direct + '/test_historic_' +
                           str(train_size - learning_period) + '.h5')

    model.fit(np.array(features),
              np.array(labels),
              epochs=iterations,
              verbose=0,
              callbacks=[TqdmCallback(verbose=0), early_stopping, mc])

    model.save_weights(save_direct + '/test_historic_' + str(train_size) +
                       '.h5')
Code Example #13
def train_model(model,
                train_data,
                train_labels,
                parameters,
                silent,
                regenerate=True):
    # Build tensorflow dataset
    dataset = tf.data.Dataset.from_tensor_slices(
        (train_data, train_labels)).batch(parameters['batch_size'])
    if regenerate:
        # Train Model
        # Early stopping callback
        early_stopping_callback = keras.callbacks.EarlyStopping(
            monitor='loss',
            min_delta=10e-8,
            patience=50,
            verbose=0,
            mode='auto',
            baseline=None)
        # History to csv callback
        path = os.path.dirname(os.path.abspath(__file__))
        param_str = parameters['filename']
        file_name = os.path.join(path, '..', 'models', 'history',
                                 'history' + param_str + '.csv')
        csv_logger = tf.keras.callbacks.CSVLogger(file_name,
                                                  separator=',',
                                                  append=False)

        # Train model
        model.fit(
            dataset,
            shuffle=True,
            epochs=parameters['epochs'],  # was 10000
            verbose=0,
            callbacks=[
                TqdmCallback(verbose=(0 if silent else 1)),
                early_stopping_callback, csv_logger
            ])

        # Save model
        path = os.path.dirname(os.path.abspath(__file__))
        param_str = parameters['filename']
        file_name = os.path.join(path, '..', 'models', 'models',
                                 'model' + param_str + '.h5')
        model.save(file_name)
    else:
        path = os.path.dirname(os.path.abspath(__file__))
        param_str = parameters['filename']
        file_name = os.path.join(path, '..', 'models', 'models',
                                 'model' + param_str + '.h5')
        model = tf.keras.models.load_model(file_name)
        print(model.summary())

    return model
Code Example #14
def train(DATA_URL, SAVE_URL):
    x_train, y_train, x_test = data_loader(DATA_URL)
    model = create_cnn_model(x_train)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Include the epoch number in the file name (`str.format`-style placeholders)
    checkpoint_path = os.path.join(SAVE_URL,"my_model.h5")
    # checkpoint_dir = SAVE_URL

    model.fit(x_train, y_train, epochs=20, verbose=0, callbacks=[TqdmCallback(verbose=2)])
    model.save(checkpoint_path)
    return model
Code Example #15
    def build_model(self, input_shape, nb_classes, len_series):
        # np.int was removed in NumPy 1.24; the builtin int behaves the same here
        nb_rows = [
            int(self.ratio[0] * len_series),
            int(self.ratio[1] * len_series)
        ]
        nb_cols = input_shape[2]

        input_layer = keras.layers.Input(input_shape)

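        # Two parallel convolutions with kernel heights set by self.ratio read the series at two different time scales.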
        x_layer_1 = keras.layers.Conv2D(
            self.num_filter, (nb_rows[0], nb_cols),
            kernel_initializer='lecun_uniform',
            activation='relu',
            padding='valid',
            strides=(1, 1),
            data_format='channels_first')(input_layer)
        x_layer_1 = keras.layers.GlobalMaxPooling2D(
            data_format='channels_first')(x_layer_1)

        y_layer_1 = keras.layers.Conv2D(
            self.num_filter, (nb_rows[1], nb_cols),
            kernel_initializer='lecun_uniform',
            activation='relu',
            padding='valid',
            strides=(1, 1),
            data_format='channels_first')(input_layer)
        y_layer_1 = keras.layers.GlobalMaxPooling2D(
            data_format='channels_first')(y_layer_1)

        concat_layer = keras.layers.concatenate([x_layer_1, y_layer_1])
        #concat_layer = keras.layers.Dense(128, kernel_initializer = 'lecun_uniform', activation = 'relu')(concat_layer)
        #concat_layer = keras.layers.Dense(128, kernel_initializer = 'lecun_uniform', activation = 'relu')(concat_layer)
        concat_layer = keras.layers.Dropout(0.25)(concat_layer)

        output_layer = keras.layers.Dense(nb_classes,
                                          kernel_initializer='lecun_uniform',
                                          activation='softmax')(concat_layer)

        model = keras.models.Model(input_layer, output_layer)

        model.compile(loss='categorical_crossentropy',
                      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                      metrics=['accuracy'])

        self.callbacks_ = []

        if self.verbose:
            self.callbacks_.append(TqdmCallback(verbose=0))

        return model
Code Example #16
def train_2(model_fn,
            data_microbioma,
            data_domain,
            latent_space=10,
            folds=5,
            epochs=20,
            batch_size=128,
            learning_rate_scheduler=ExpDecayScheluder(),
            random_seed=347,
            verbose=0):
    data_zeros_latent = np.zeros((data_microbioma.shape[0], latent_space),
                                 dtype=data_microbioma.dtype)
    results = []
    models = []
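    # patience exceeds epochs, so EarlyStopping never halts training; it is kept only for restore_best_weights.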
    train_callbacks = [
        callbacks.EarlyStopping(monitor='val_loss',
                                patience=epochs + 1,
                                restore_best_weights=True)
    ]
    if verbose >= 0:
        train_callbacks += [TqdmCallback(verbose=verbose)]
    if learning_rate_scheduler is not None:
        train_callbacks += [learning_rate_scheduler.make()]

    if folds <= 1:
        m_train, m_test = data_microbioma, data_microbioma
        d_train, d_test = data_domain, data_domain
        z_train, z_test = data_zeros_latent, data_zeros_latent
        tf.random.set_seed(random_seed)
        r, m = train_kfold(model_fn, m_train, d_train, z_train, m_test, d_test,
                           z_test, batch_size, epochs, train_callbacks)
        results.append(r)
        models.append(m)

    else:
        kf = KFold(n_splits=folds, random_state=random_seed, shuffle=True)
        tf.random.set_seed(random_seed)

        for train_index, test_index in kf.split(data_microbioma):
            m_train, m_test = data_microbioma[train_index], data_microbioma[
                test_index]
            d_train, d_test = data_domain[train_index], data_domain[test_index]
            z_train, z_test = data_zeros_latent[
                train_index], data_zeros_latent[test_index]
            r, m = train_kfold(model_fn, m_train, d_train, z_train, m_test,
                               d_test, z_test, batch_size, epochs,
                               train_callbacks)
            results.append(r)
            models.append(m)
    return results, models
Code Example #17
def train(training_samples, samples_labels, num_of_epochs=100, batch_size=1024, validation_split=0.0):
    model = prepare_network_model(samples_labels.shape[-1])
    conv_weights = []

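    # Record the first layer's convolution kernels after every epoch (rotated and reordered for later visualization).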
    get_weights = LambdaCallback(
        on_epoch_end=lambda batch, logs: conv_weights.append(
            np.array([
                np.rot90(x, 3)
                for x in model.layers[0].get_weights()[0].squeeze()[::-1].T
            ])))

    history_temp = model.fit(training_samples, samples_labels,
                             batch_size=batch_size,
                             epochs=num_of_epochs,
                             validation_split=validation_split,
                             callbacks=[get_weights, TqdmCallback(verbose=1)],
                             verbose=0)

    return np.array(conv_weights)
Code Example #18
    def train(
        self,
        X_train=None,
        y_train=None,
        validation_data=None,
        early_stopping: int = None,
        tqdm=True,
        **params,
    ):
        """Base method to train input model. Based on Keras hyperparameters

        Args:
            X_train ([pd.DataFrame, np.array], optional): the Keras `x` input. Defaults to None. If None, the learner will take the attribute `X_train` fitted in the class.
            y_train ([pd.DataFrame, np.array], optional): the Keras `y` input. Defaults to None. If None, the learner will take the attribute `y_train` fitted in the class.
            validation_data ([pd.DataFrame, np.array], optional): the Keras `validation_data` input. Defaults to None. If None, the learner will take the attribute `validation_data` fitted in the class.
            early_stopping (int, optional): Number of epochs before Early Stopping. Defaults to None. If None, the training will last `epochs`.
            tqdm (bool, optional): To create `tqdm` progress bar during training or not. Defaults to True.
        """
        if X_train is not None:
            self.X_train = X_train
        if y_train is not None:
            self.y_train = y_train
        if validation_data is not None:
            self.validation_data = validation_data

        if early_stopping:
            self.early_stopping_ = early_stopping
            es = keras.callbacks.EarlyStopping(monitor="val_loss",
                                               patience=self.early_stopping_)
            self.callbacks.append(es)
        if tqdm:
            tqdm_nb = TqdmCallback()
            self.callbacks.append(tqdm_nb)

        self.set_params(params)
        logging.info("Start training")
        history = self.model.model.fit(
            x=self.X_train.values,
            y=self.y_train,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            epochs=self.epochs,
            verbose=self.verbose,
            validation_data=self.validation_data,
            class_weight=self.class_weight_dict,
            callbacks=self.callbacks,
        )
        self.history = history.history
Code Example #19
File: training.py  Project: wolperting3r/Masterarbeit
def train_model(model, train_data, train_labels, val_data, val_labels,
                parameters, silent):
    # Build tensorflow dataset
    train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))\
                    .batch(parameters['batch_size'])\
                    .prefetch(parameters['batch_size']*4)

    val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))\
                    .batch(parameters['batch_size'])\
                    .prefetch(parameters['batch_size']*4)
    # Train Model
    # Early stopping callback
    early_stopping_callback = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                            min_delta=10e-8,
                                                            patience=5,
                                                            verbose=0,
                                                            mode='auto',
                                                            baseline=None)
    # History to csv callback
    path = os.path.dirname(os.path.abspath(sys.argv[0]))
    param_str = parameters['filename']
    file_name = os.path.join(path, 'models', 'history',
                             'history' + param_str + '.csv')
    csv_logger = tf.keras.callbacks.CSVLogger(file_name,
                                              separator=',',
                                              append=False)

    # Train model
    model.fit(
        train_dataset,
        validation_data=val_dataset,
        shuffle=True,
        epochs=parameters['epochs'],  # was 10000
        verbose=0,
        callbacks=[
            TqdmCallback(verbose=(0 if silent else 1)),
            early_stopping_callback, csv_logger
        ])
    # validation_steps=2,

    # Save model
    path = os.path.dirname(os.path.abspath(sys.argv[0]))
    param_str = parameters['filename']
    file_name = os.path.join(path, 'models', 'models',
                             'model' + param_str + '.h5')
    model.save(file_name)
    return model
Code Example #20
def fit():
    train_ds, val_ds, test_ds, feature_columns = read_data()
    all_inputs, encoded_features = prepare_data(
        columns=["mileage", "year", "power", "model_number", "consumption"],
        train_ds=train_ds)
    all_features = tf.keras.layers.concatenate(encoded_features)
    x = tf.keras.layers.Dense(12, activation="relu")(all_features)
    x = tf.keras.layers.Dropout(0.05)(x)
    x = tf.keras.layers.Dense(7, activation="relu")(x)
    output = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(all_inputs, output)

    model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                  loss=tf.keras.losses.MeanSquaredError(),
                  metrics=[
                      tfa.metrics.r_square.RSquare(y_shape=(1, )),
                      tf.metrics.MeanAbsolutePercentageError()
                  ])

    epochs = 100

    history = model.fit(train_ds,
                        validation_data=val_ds,
                        epochs=epochs,
                        verbose=0,
                        callbacks=[TqdmCallback(verbose=0, position=0)])

    plot_loss(history, epochs=range(epochs))
    plt.show()

    logger.info(model.summary())

    accuracy = model.evaluate(test_ds)
    logger.info(f"Accuracy : {accuracy}")

    y = tf.concat([y for x, y in test_ds], axis=0)
    logger.info(
        tf.concat([model.predict(test_ds),
                   tf.expand_dims(y, -1)], axis=1))

    model.save('neural_net_classifier', save_format='h5')

    tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
Code Example #21
def build_and_train_keras_sequential():
    train_ds, val_ds, test_ds, feature_columns = read_data()

    feature_layer = tf.keras.layers.DenseFeatures(feature_columns)

    model = tf.keras.Sequential([
        feature_layer,
        layers.Dense(24, activation='relu'),
        layers.Dropout(.1),
        layers.Dense(10, activation='relu'),
        layers.Dropout(.1),
        layers.Dense(1)
    ])

    model.compile(optimizer='adam',
                  loss=tf.keras.losses.MeanSquaredError(),
                  metrics=[
                      tfa.metrics.r_square.RSquare(y_shape=(1, )),
                      tf.metrics.MeanAbsolutePercentageError()
                  ])

    epochs = 100

    history = model.fit(train_ds,
                        validation_data=val_ds,
                        epochs=epochs,
                        verbose=0,
                        callbacks=[TqdmCallback(verbose=0, position=0)])

    plot_loss(history, epochs=range(epochs))
    plt.show()

    logger.info(model.summary())

    accuracy = model.evaluate(test_ds)
    logger.info(f"Accuracy : {accuracy}")

    y = tf.concat([y for x, y in test_ds], axis=0)
    logger.info(
        tf.concat([model.predict(test_ds),
                   tf.expand_dims(y, -1)], axis=1))

    model.save('neural_net_classifier', save_format='tf')
Code Example #22
    def build_model(self, input_shape, nb_classes, len_series, ratio, num_filter):
        #ratio = self.conv_config['ratio']
        # np.int was removed in NumPy 1.24; the builtin int behaves the same here
        nb_rows = [int(ratio[0] * len_series), int(ratio[1] * len_series)]
        nb_cols = input_shape[2]

        input_layer = keras.layers.Input(input_shape)

        x_layer_1 = keras.layers.Conv2D(num_filter, (nb_rows[0], nb_cols), kernel_initializer='lecun_uniform', activation='relu',
                                        padding='valid', strides=(1, 1), data_format='channels_last')(input_layer)
        # NB: the original passed data_format='channels_first' here, mismatching the channels_last convolution above
        x_layer_1 = keras.layers.GlobalMaxPooling2D(
            data_format='channels_last')(x_layer_1)

        y_layer_1 = keras.layers.Conv2D(num_filter, (nb_rows[1], nb_cols), kernel_initializer='lecun_uniform', activation='relu',
                                        padding='valid', strides=(1, 1), data_format='channels_last')(input_layer)
        y_layer_1 = keras.layers.GlobalMaxPooling2D(
            data_format='channels_last')(y_layer_1)

        concat_layer = keras.layers.concatenate([x_layer_1, y_layer_1])

        layer_2 = keras.layers.Dense(
            64, kernel_initializer='lecun_uniform', activation='relu')(concat_layer)

        layer_3 = keras.layers.Dense(
            128, kernel_initializer='lecun_uniform', activation='relu')(layer_2)
        layer_3 = keras.layers.Dropout(0.25)(layer_3)

        output_layer = keras.layers.Dense(
            nb_classes, kernel_initializer='lecun_uniform', activation='softmax')(layer_3)

        model = keras.models.Model(input_layer, output_layer)

        model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(
            learning_rate=0.001), metrics=['accuracy'])

        #factor = 1. / np.cbrt(2)

        #reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=factor, patience=100, min_lr=1e-4, cooldown=0, mode='auto')

        self.callbacks = [TqdmCallback(verbose=0)]

        return model
Code Example #23
File: mlp.py  Project: timkne/dl_time_series_class
    def build_model(self, input_shape, nb_classes):
        input_layer = keras.layers.Input(input_shape)

        # Flatten/reshape so that, for multivariate series, all variables end up on the same axis
        input_layer_flattened = keras.layers.Flatten()(input_layer)

        layer_1 = keras.layers.Dropout(0.1)(input_layer_flattened)
        layer_1 = keras.layers.Dense(500, activation='relu')(layer_1)

        layer_2 = keras.layers.Dropout(0.2)(layer_1)
        layer_2 = keras.layers.Dense(500, activation='relu')(layer_2)

        layer_3 = keras.layers.Dropout(0.2)(layer_2)
        layer_3 = keras.layers.Dense(500, activation='relu')(layer_3)

        output_layer = keras.layers.Dropout(0.3)(layer_3)
        output_layer = keras.layers.Dense(nb_classes,
                                          activation='softmax')(output_layer)

        model = keras.models.Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                      factor=0.5,
                                                      patience=200,
                                                      min_lr=0.0001)

        file_path = self.output_dir + 'best_model.hdf5'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [reduce_lr, model_checkpoint, TqdmCallback(verbose=0)]

        return model
Code Example #24
    def callback(self, model):
        call = []
        if os.path.exists(checkpoint_path):
            if os.listdir(checkpoint_path):
                logger.debug('load the model')
                model.load_weights(
                    os.path.join(checkpoint_path,
                                 self.calculate_the_best_weight()))
                logger.debug(
                    f'Loaded weights from {os.path.join(checkpoint_path, self.calculate_the_best_weight())}'
                )

        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_file_path,
            verbose=1,
            save_weights_only=True,
            save_best_only=True,
            period=1)
        call.append(cp_callback)
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=log_dir,
            histogram_freq=1,
            write_images=True,
            update_freq=UPDATE_FREQ)
        call.append(tensorboard_callback)

        lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
            factor=0.01, patience=LR_PATIENCE)
        call.append(lr_callback)

        csv_callback = tf.keras.callbacks.CSVLogger(filename=csv_path,
                                                    append=True)
        call.append(csv_callback)

        early_callback = tf.keras.callbacks.EarlyStopping(
            min_delta=0, verbose=1, patience=EARLY_PATIENCE)
        call.append(early_callback)
        call.append(TqdmCallback())
        return (model, call)
Code Example #25
def main():
    df = pd.read_csv(args.input)
    print(df.head())
    sentences = Preparing_tagged_data(df)
    print('Maximum sequence length:', MAX)
    word2idx, idx2word, tag2idx, idx2tag = preparedicts(df)

    X, y = prepareData(sentences, word2idx, tag2idx)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.05,
                                                        random_state=7)

    print("Dataset dimentions \n")
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)

    embedding_matrix = embeddings(word2idx)
    print(MAX)

    model = BUILD_MODEL(X, MAX, len(word2idx), len(tag2idx), embedding_matrix)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        epochs=10,
                        batch_size=64,
                        validation_split=0.1,
                        verbose=0,
                        callbacks=[TqdmCallback(verbose=1)])

    evaluate_model(history)
    TestData(model, X_test, y_test, idx2tag)
    print("Saving model at ", args.output)
    model.save(args.output)
Code Example #26
input_noise = np.expand_dims(input_noise,0) * NOISE_REDUCTION
target = np.expand_dims(ct_small,0)

# initialize
results_all = []
predicted_all = []
epochs_saved = [0]
previous_epochs = 0
model = get_architecture(target[0], archi, ch_init, g_noise, act_max_value, act_out_max_value)
opt = tf.keras.optimizers.Adam(lr_value) 
loss_masked, mask_used = choose_loss(mask_target, mask_target2, mask_target3, LOSS_USED=0)
model.compile(optimizer=opt, loss=loss_masked)

# Train model
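# Alternate the primary masked loss (LOSS_USED=0) with a short "sneak peek" fit under a
# second mask (LOSS_USED=2), decaying the learning rate after each outer iteration.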
for i in tqdm(range(LOOP_MASKS)):
    results = model.fit(input_noise, target, epochs=EPOCHS, verbose=0, callbacks=[TqdmCallback(verbose=0)])
    results_all.extend(results.history['loss'])
    predicted_all.append(model.predict(input_noise)[0,...])
    epochs_saved.append(epochs_saved[-1] + EPOCHS)
    # sneak peek
    loss_masked, mask_used = choose_loss(mask_target, mask_target2, mask_target3, LOSS_USED=2)
    results = model.fit(input_noise, target, epochs=EPOCHS_sneak_peek, verbose=0, callbacks=[TqdmCallback(verbose=0)])
    loss_masked, mask_used = choose_loss(mask_target, mask_target2, mask_target3, LOSS_USED=0)
    results_all.extend(results.history['loss'])
    predicted_all.append(model.predict(input_noise)[0,...])
    epochs_sneak_peek = epochs_saved[-1] + EPOCHS_sneak_peek
    epochs_saved.append(epochs_sneak_peek)

    lr_value = lr_value * LR_REDUCE
    K.set_value(model.optimizer.learning_rate, lr_value)
Code Example #27
    def fit(self, X, Y, output_slices=None, **kwargs):
        with get_device(self.config):
            checkpoint = self.model_path.joinpath('checkpoint')

            if checkpoint.exists() and not self.no_load:
                if self.verbose:
                    print(f'Restoring model weights from {checkpoint}')
                self.load()

            elif self.no_load and X is None:
                raise Exception(
                    'Model exists, but no_load is set and no training data was given.'
                )

            elif X is not None and Y is not None:
                self.scalerx.fit(ensure_format(X), ensure_format(Y))
                self.scalery.fit(ensure_format(Y))

                # Gather all data (train, validation, test, ...) into singular object
                datasets = kwargs['datasets'] = kwargs.get('datasets', {})
                datasets.update({'train': {'x': X, 'y': Y}})

                for key, data in datasets.items():
                    if data['x'] is not None:
                        datasets[key].update({
                            'x_t': self.scalerx.transform(ensure_format(data['x'])),
                            'y_t': self.scalery.transform(ensure_format(data['y'])),
                        })
                assert np.isfinite(datasets['train']['x_t']).all(), \
                    'NaN values found in X training data'

                self.update_config({
                    'output_slices': output_slices or {'': slice(None)},
                    'n_inputs': datasets['train']['x_t'].shape[1],
                    'n_targets': datasets['train']['y_t'].shape[1],
                })
                self.build()

                callbacks = []
                model_kws = {
                    'batch_size': self.batch,
                    'epochs': max(1, int(self.n_iter / max(1, len(X) / self.batch))),
                    'verbose': 0,
                    'callbacks': callbacks,
                }

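                # NOTE: this TqdmCallback takes (epochs, data_size, batch_size), so it appears to be a
                # project-local progress callback rather than tqdm.keras.TqdmCallback.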
                if self.verbose:
                    callbacks.append(
                        TqdmCallback(model_kws['epochs'],
                                     data_size=len(X),
                                     batch_size=self.batch))

                if self.debug:
                    callbacks.append(
                        tf.keras.callbacks.TensorBoard(histogram_freq=1,
                                                       profile_batch=(2, 60)))

                if 'args' in kwargs:

                    if getattr(kwargs['args'], 'plot_loss', False):
                        callbacks.append(
                            PlottingCallback(kwargs['args'], datasets, self))

                    if getattr(kwargs['args'], 'save_stats', False):
                        callbacks.append(
                            StatsCallback(kwargs['args'], datasets, self))

                    if getattr(kwargs['args'], 'best_epoch', False):
                        if 'valid' in datasets and 'x_t' in datasets['valid']:
                            model_kws['validation_data'] = (
                                datasets['valid']['x_t'],
                                datasets['valid']['y_t'])
                            callbacks.append(ModelCheckpoint(self.model_path))

                self.model.fit(datasets['train']['x_t'],
                               datasets['train']['y_t'], **model_kws)

                if not self.no_save:
                    self.save()

            else:
                raise Exception(
                    f"No trained model exists at: \n{self.model_path}")
            return self
Code Example #28
File: final.py  Project: NathanJGaul/EENG645A-final
def train_unet(config,
               train_dataloader=None,
               val_dataloader=None,
               loss=None,
               metrics=None,
               checkpoint_dir=None):

    epochs = 15
    batch_size = 1

    # 'vvg16' in the original is presumably a typo for the VGG16 backbone name
    model = get_model(backbone='vgg16',
                      encoder_freeze=config["encoder_freeze"],
                      n_classes=1,
                      activation='sigmoid',
                      dropout=config["dropout"])

    model.compile(optimizer=config["optimizer"](config["learning_rate"]),
                  loss=loss,
                  metrics=metrics)

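    # TuneReportCallback forwards the listed metrics to Ray Tune at the end of every epoch.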
    history = model.fit(train_dataloader,
                        steps_per_epoch=len(train_dataloader),
                        epochs=epochs,
                        verbose=0,
                        batch_size=batch_size,
                        validation_data=val_dataloader,
                        validation_steps=len(val_dataloader),
                        callbacks=[
                            TuneReportCallback(
                                {
                                    "loss": "loss",
                                    "iou_score": "iou_score",
                                    "val_loss": "val_loss",
                                    "val_iou_score": "val_iou_score",
                                },
                                on="epoch_end"),
                            TqdmCallback(verbose=2),
                        ])

    # save best model of the trial
    with tune.checkpoint_dir(step=1) as checkpoint_dir:
        checkpoint_dir = os.path.dirname(
            os.path.dirname(checkpoint_dir))  # go up two directories
        score_file_path = os.path.join(checkpoint_dir, 'score')
        score_file_exists = os.path.isfile(score_file_path)
        new_val_iou_score = history.history['val_iou_score'][0]
        best_model_file_path = os.path.join(checkpoint_dir, 'best_model.h5')

        if score_file_exists:
            old_val_iou_score = 0
            with open(score_file_path) as f:
                old_val_iou_score = float(f.read())
            if new_val_iou_score > old_val_iou_score:
                # we have a new best model
                with open(score_file_path, 'w') as f:
                    f.write(str(new_val_iou_score))
                model.save(best_model_file_path)
        else:
            # first model of the trial
            with open(score_file_path, 'w') as f:
                f.write(str(new_val_iou_score))
            model.save(best_model_file_path)

    print(history.history.keys())
Code Example #29
def run_training(
    model_f,
    lr_f,
    name,
    epochs,
    batch_size,
    train_data,
    val_data,
    img,
    img_size,
    mixed_float=True,
    notebook=True,
):
    """
    val_data : (X_val, Y_val) tuple
    """
    if mixed_float:
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)

    st = time.time()

    inputs = keras.Input((200, 200, 3))
    mymodel = AdiposeModel(inputs, model_f)
    loss = keras.losses.BinaryCrossentropy(from_logits=True)
    mymodel.compile(optimizer='adam',
                    loss=loss,
                    metrics=[
                        keras.metrics.BinaryAccuracy(threshold=0.5),
                    ])

    logdir = 'logs/fit/' + name
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir,
                                                          histogram_freq=1,
                                                          profile_batch='3,5',
                                                          update_freq='epoch')
    lr_callback = keras.callbacks.LearningRateScheduler(lr_f, verbose=1)

    savedir = 'savedmodels/' + name + '/{epoch}'
    save_callback = keras.callbacks.ModelCheckpoint(savedir,
                                                    save_weights_only=True,
                                                    verbose=1)

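    # TqdmNotebookCallback is presumably a notebook-friendly progress callback; the non-notebook branch uses tqdm.keras.TqdmCallback.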
    if notebook:
        tqdm_callback = TqdmNotebookCallback(
            metrics=['loss', 'binary_accuracy'], leave_inner=False)
    else:
        tqdm_callback = TqdmCallback()

    train_ds = create_train_dataset(img, train_data, img_size, batch_size)
    val_ds = create_train_dataset(img, val_data, img_size, batch_size, True)

    image_callback = ValFigCallback(val_ds, logdir)

    mymodel.fit(
        x=train_ds,
        epochs=epochs,
        steps_per_epoch=len(train_data) // batch_size,
        callbacks=[
            tensorboard_callback,
            lr_callback,
            save_callback,
            tqdm_callback,
            image_callback,
        ],
        verbose=0,
        validation_data=val_ds,
        validation_steps=10,
    )

    print('Took {} seconds'.format(time.time() - st))

    mymodel.evaluate(val_ds, steps=1000)
Code Example #30
File: train.py  Project: ninatubau/3D-RCAN
training_data = data_gen.flow(*list(zip(*training_data)))

if validation_data is not None:
    validation_data = data_gen.flow(*list(zip(*validation_data)))
    checkpoint_filepath = 'weights_{epoch:03d}_{val_loss:.8f}.hdf5'
else:
    checkpoint_filepath = 'weights_{epoch:03d}_{loss:.8f}.hdf5'

output_dir = pathlib.Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

print('Training RCAN model')
model.fit_generator(
    training_data,
    epochs=config['epochs'],
    steps_per_epoch=config['steps_per_epoch'],
    validation_data=validation_data,
    validation_steps=config['steps_per_epoch'],
    verbose=0,
    callbacks=[
        keras.callbacks.LearningRateScheduler(
            staircase_exponential_decay(config['epochs'] // 4)),
        keras.callbacks.ModelCheckpoint(
            str(output_dir / checkpoint_filepath),
            monitor='loss' if validation_data is None else 'val_loss',
            save_best_only=True),
        keras.callbacks.TensorBoard(log_dir=str(output_dir),
                                    write_graph=False),
        TqdmCallback(tqdm_class=tqdm)
    ])