Example #1
 def __init__(self, weight_path=None):
     self.model = apply_blstm()
     self.model.compile(loss='mean_squared_error',
                        optimizer='adam',
                        metrics=[Accuracy()])
     self.model_unet = apply_unet()
     self.model_unet.compile(loss='mean_squared_error',
                             optimizer='adam',
                             metrics=[Accuracy()])
     self.model_unetpp = Nest_Net()
     self.model_unetpp.compile(loss='mean_squared_error',
                               optimizer='adam',
                               metrics=[Accuracy()])
Example #2
def compiled_model(INPUT_SHAPE: list, QNT_CLASS: int) -> tf.keras.Model:
    """
    A função retorna o modelo compilado.

    Return a compiled model.
  """
    INPUT_SHAPE = tuple(INPUT_SHAPE)

    base_model = MobileNetV2(include_top=False,
                             weights='imagenet',
                             input_tensor=Input(shape=INPUT_SHAPE,
                                                name='inputs'))

    for layer in base_model.layers:
        layer.trainable = False

    mod = base_model.output
    mod = AveragePooling2D()(mod)
    mod = Flatten()(mod)
    mod = Dropout(0.5)(mod)
    mod = Dense(QNT_CLASS, activation='softmax')(mod)

    mod_retorno = Model(inputs=base_model.input, outputs=mod)

    mod_retorno.compile(
        loss=CategoricalCrossentropy(),
        optimizer=Adagrad(),
        metrics=[Accuracy(), Precision(),
                 AUC(), FalseNegatives()])
    return mod_retorno
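A minimal usage sketch (the input shape and class count below are illustrative, not taken from the original code): the function expects the shape as a list and returns a ready-to-train Keras model.

# Hypothetical call: 224x224 RGB inputs, 5 target classes.
model = compiled_model([224, 224, 3], QNT_CLASS=5)
model.summary()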
Example #3
 def run(self):
     model = load_model(self.input()['model'].path,
                        custom_objects=ak.CUSTOM_OBJECTS)
     if self.data_type == 'csv':
         dataset = np.loadtxt(self.input().path)
         X = dataset[:, :-1]
         y = dataset[:, -1]
     else:
         if self.data_type == 'image':
             dataset = keras.preprocessing.image_dataset_from_directory(
                 self.input().path,
                 batch_size=64,
                 image_size=(self.image_height, self.image_widht))
         else:
             dataset = keras.preprocessing.text_dataset_from_directory(
                 self.input().path, batch_size=64)
         # Collect the batches and stack once at the end; np.array() without
         # arguments is invalid and cannot be used as a vstack seed.
         X_batches, y_batches = [], []
         for data, labels in dataset:
             X_batches.append(data.numpy())
             y_batches.append(labels.numpy())
         X = np.concatenate(X_batches)
         y = np.concatenate(y_batches)
     prediction = model.predict(X)
     if self.metric == 'accuracy':
         metric = Accuracy()
     else:
         metric = MeanSquaredError()
     result = metric(y, prediction)
     print(f'{self.metric}:', result)
Example #4
def do_training(initial_learning_rate=0.001):
    gids = get_gids_from_database("densenet")
    training_gen, validation_gen = initialize_train_and_validation_generators("densenet", gids, batch_size=4, label_target_size=224)
    steps_per_epoch = next(training_gen)
    validation_steps = next(validation_gen)

    model = DenseNet()
    metrics = [Accuracy(), CategoricalAccuracy(),
               CategoricalCrossentropy(), ArgmaxMeanIoU(num_classes=6, name="mean_iou")]
    optimizer = RMSprop(learning_rate=initial_learning_rate)
    model.compile(optimizer=optimizer, loss=categorical_crossentropy, metrics=metrics)

    start_time = int(time.time())
    model_path = f"weights/{start_time}_{model.name}/"
    os.mkdir(model_path)

    metrics_to_log = [metric.name for metric in metrics]
    callbacks = [
        save_model_on_epoch_end(model.name, model, model_path),
        metrics_to_csv_logger(model_path + "/batch.csv", ["loss"] + metrics_to_log),
        CSVLogger(model_path + "/epoch.csv", separator=";"),
        LearningRateScheduler(lr_schedule(initial_lr=initial_learning_rate)),
    ]
    model.fit(training_gen, epochs=50, steps_per_epoch=steps_per_epoch,
              validation_data=validation_gen, validation_steps=validation_steps,
              callbacks=callbacks)
Example #5
def SegNet3D(shape, weights=None):
    inputs = Input(shape)
    conv, pool = inputs, inputs

    # encoder
    for numOfFilters in [4, 8, 16, 32]:
        conv = SegNet3DBlock(pool, layers=2, filters=numOfFilters)
        pool = MaxPooling3D((2, 2, 2))(conv)

    conv = SegNet3DBlock(pool, layers=3, filters=128)

    # decoder
    for numOfFilters in [64, 32, 16, 8]:
        upsam = UpSampling3D((2, 2, 2))(conv)
        conv = SegNet3DBlock(upsam, layers=2, filters=numOfFilters)

    conv = SegNet3DBlock(conv, layers=2, filters=4)  # refine the previous block's output, not the raw upsampled tensor

    outputs = Conv3D(1, 1, activation='sigmoid')(conv)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy',
                  metrics=[Precision(), Recall(), AUC(), Accuracy()])
    model.summary()

    return model
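A construction sketch under stated assumptions: SegNet3DBlock and the imports used above are available, and the spatial dimensions are divisible by 16, since the encoder applies four 2x2x2 poolings.

# Hypothetical 64^3 single-channel volume.
model = SegNet3D((64, 64, 64, 1))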
Example #6
def _eval_metrics_fn():
    return {
        "acc": Accuracy(),
        "mse": MeanSquaredError(),
        "acc_fn": lambda labels, outputs: tf.equal(
            tf.cast(outputs, tf.int32), tf.cast(labels, tf.int32)
        ),
    }
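A small sketch of how the returned dictionary might be consumed (toy tensors, assuming tensorflow is imported as tf as in the lambda above): the stateful metric objects are driven through update_state()/result(), while "acc_fn" is an ordinary function.

metrics = _eval_metrics_fn()
labels = tf.constant([1, 0, 1])
outputs = tf.constant([1, 1, 1])

metrics["acc"].update_state(labels, outputs)
print(metrics["acc"].result().numpy())             # 0.6666667
print(metrics["acc_fn"](labels, outputs).numpy())  # [ True False  True]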
Example #7
def calc_accuracy():
    m = Accuracy()

    def _f(y_true, y_pred):
        m.update_state(y_true, y_pred)
        return m.result().numpy()

    _f.__name__ = 'keras_metrics_{}'.format('accuracy')

    return _f
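A quick sanity check with made-up labels: because the closure captures a single Accuracy object, its state accumulates across calls rather than resetting.

acc_fn = calc_accuracy()
print(acc_fn([1, 0, 1, 1], [1, 1, 1, 1]))  # 0.75
print(acc_fn([0, 0], [0, 0]))              # 0.8333... (5 of the 6 samples seen so far match)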
Example #8
 def accuracy_fn(target, output):
     """
     function: 获取准确值
     :param target: label
     :param output: prediction
     :return: accuracy
     """
     accuracy = Accuracy()
     output = tf.round(output)  # elementwise rounding; the builtin round() fails on arrays/tensors
     accuracy.update_state(y_true=target, y_pred=output)
     accuracy_value = accuracy.result().numpy()
     return accuracy_value
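A toy check with invented values, calling accuracy_fn as a plain function for illustration: predictions are rounded to {0, 1} before the exact-match comparison that Accuracy performs.

print(accuracy_fn(tf.constant([1., 0., 1., 1.]),
                  tf.constant([0.9, 0.2, 0.4, 0.7])))  # 0.75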
Example #9
 def __init__(self,
              lstm_units=100,
              char_vocab=26,
              char_embed_size=100,
              cnn_filters=300,
              cnn_kernel_size=5,
              dropout_rate=0.2,
              max_char_len=10,
              lr=1e-4):
     super().__init__()
     self.char_embedding = Embedding(char_vocab + 1, char_embed_size)
     self.word_embedding = tf.Variable(
         np.load('../data/embedding.npy'),
         dtype=tf.float32,
         name='pretrained_embedding',
         trainable=False,
     )
     self.char_cnn = Conv1D(filters=cnn_filters,
                            kernel_size=cnn_kernel_size,
                            activation='elu',
                            padding='same')
     self.embed_drop = Dropout(dropout_rate)
     self.embed_fc = Dense(cnn_filters, 'elu', name='embed_fc')
     self.word_cnn = Conv1D(filters=cnn_filters,
                            kernel_size=cnn_kernel_size,
                            activation='elu',
                            padding='same')
     self.word_drop = Dropout(dropout_rate)
     self.max_char_len = max_char_len
     self.char_embed_size = char_embed_size
     self.cnn_filters = cnn_filters
     self.dropout_l1 = Dropout(dropout_rate)
     self.dropout_l2 = Dropout(dropout_rate)
     self.dropout_l3 = Dropout(dropout_rate)
     self.blstm_l1 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.blstm_l2 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.blstm_l3 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.dropout_op = Dropout(dropout_rate)
     self.units = 2 * lstm_units
     self.fc = Dense(units=self.units, activation='elu')
     self.out_linear = Dense(2)
     self.optimizer = Adam(lr)
     self.accuracy = Accuracy()
     self.decay_lr = tf.optimizers.schedules.ExponentialDecay(
         lr, 1000, 0.90)
     self.logger = logging.getLogger('tensorflow')
     self.logger.setLevel(logging.INFO)
Example #10
    def define_encoder_classifier(self, weight_directory=None, alpha=0):
        """
        Define the encoder-classifier. If weight_directory is None, then it is assumed that the current working
        directory corresponds to the model, whose weights don't yet exist. Hence there is a call to self.train().
        :param weight_directory: The hyper-parameter string corresponding to the directory in which the encoder's
        weights are found.
        :param alpha: label smoothing factor passed to the categorical cross-entropy loss.
        :return: A Keras Model instance.
        """
        # Network layers
        if weight_directory is None:
            _, encoder_output_layer = self.define_encoder()
            _, encoder, _ = self.train()
        else:
            encoder, encoder_output_layer = self.get_pretrained_encoder(
                weight_directory)
        projection_head = self.define_projection_head(encoder_output_layer)

        # Network tensors
        ec_gaussian_input_layer = Input(shape=self.gaussian_shape,
                                        name='ec_gaussian_input')
        ec_mnist_input_layer = Input(shape=self.mnist_shape,
                                     name='ec_mnist_input')
        encoder_classifier_input_layer = [
            ec_gaussian_input_layer, ec_mnist_input_layer
        ]
        projection_head_input_layer = encoder(
            encoder_classifier_input_layer)[1]
        class_probabilities = projection_head(projection_head_input_layer)
        ec_output_layer = class_probabilities  # [ec_gaussian_input_layer, class_probabilities]

        # Model definition
        encoder_classifier = Model(encoder_classifier_input_layer,
                                   ec_output_layer,
                                   name='encoder_classifier')
        encoder_classifier.summary()
        plot_model(encoder_classifier,
                   to_file=os.path.join(self.image_directory,
                                        'encoder_classifier.png'),
                   show_shapes=True)

        encoder_classifier.compile(optimizers.Adam(lr=self.learning_rate),
                                   loss=CategoricalCrossentropy(
                                       name='categorical_cross_entropy',
                                       label_smoothing=alpha),
                                   metrics=[Accuracy()])
        return encoder_classifier
Example #11
def do_training(initial_learning_rate=0.1):
    gids = get_gids_from_database("unet")
    training_gen, validation_gen = initialize_train_and_validation_generators(
        "unet", gids, batch_size=4, label_target_size=388)
    steps_per_epoch = next(training_gen)
    validation_steps = next(validation_gen)

    model = UNet(input_size=(572, 572, 3))
    metrics = [
        Accuracy(),
        CategoricalAccuracy(),
        CategoricalCrossentropy(),
        ArgmaxMeanIoU(num_classes=6, name="mean_iou")
    ]
    optimizer = SGD(learning_rate=initial_learning_rate,
                    momentum=0.99,
                    nesterov=True)
    model.compile(optimizer=optimizer,
                  loss=categorical_crossentropy,
                  metrics=metrics)

    start_time = int(time.time())
    os.mkdir(f"weights/{start_time}_{model.name}/")

    metrics_to_log = [
        "loss", "accuracy", "categorical_accuracy", "mean_iou",
        "categorical_crossentropy"
    ]
    model_path = f"weights/{start_time}_{model.name}/"
    callbacks = [
        save_model_on_epoch_end(model.name, model, model_path),
        metrics_to_csv_logger(model_path + "/batch.csv", metrics_to_log),
        CSVLogger(model_path + "/epoch.csv", separator=";"),
        LearningRateScheduler(lr_schedule(initial_lr=initial_learning_rate)),
    ]

    model.fit(training_gen,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_data=validation_gen,
              validation_steps=validation_steps,
              callbacks=callbacks)
Example #12
 def __init__(self, lstm_units=100, lr=1e-4, dropout_rate=0.2):
     super().__init__()
     self.embedding = tf.Variable(np.load('../data/embedding.npy'),
                                  dtype=tf.float32,
                                  name='pretrained_embedding',
                                  trainable=False)
     self.dropout_l1 = Dropout(dropout_rate)
     self.dropout_l2 = Dropout(dropout_rate)
     self.dropout_l3 = Dropout(dropout_rate)
     self.blstm_l1 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.blstm_l2 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.blstm_l3 = Bidirectional(LSTM(lstm_units, return_sequences=True))
     self.dropout_op = Dropout(dropout_rate)
     self.units = 2 * lstm_units
     self.fc = Dense(units=self.units, activation='elu')
     self.out_linear = Dense(2)
     self.optimizer = Adam(lr)
     self.decay_lr = ExponentialDecay(lr, 1000, 0.90)
     self.accuracy = Accuracy()
     self.logger = logging.getLogger('tensorflow')
     self.logger.setLevel(logging.INFO)
Example #13
 def __init__(self,
              char_vocab=26,
              char_embed_size=100,
              cnn_filters=300,
              cnn_kernel_size=5,
              dropout_rate=0.2,
              max_char_len=10,
              lr=1e-4):
     super().__init__()
     self.char_embedding = Embedding(char_vocab + 1, char_embed_size)
     self.word_embedding = tf.Variable(
         np.load('../data/embedding.npy'),
         dtype=tf.float32,
         name='pretrained_embedding',
         trainable=False,
     )
     self.char_cnn = Conv1D(filters=cnn_filters,
                            kernel_size=cnn_kernel_size,
                            activation='elu',
                            padding='same')
     self.embed_drop = Dropout(dropout_rate)
     self.embed_fc = Dense(cnn_filters, 'elu', name='embed_fc')
     self.word_cnn = Conv1D(filters=cnn_filters,
                            kernel_size=cnn_kernel_size,
                            activation='elu',
                            padding='same')
     self.word_drop = Dropout(dropout_rate)
     self.attentive_pooling = KernelAttentivePooling(dropout_rate)
     self.out_linear = Dense(2)
     self.max_char_len = max_char_len
     self.char_embed_size = char_embed_size
     self.cnn_filters = cnn_filters
     self.optimizer = Adam(lr)
     self.accuracy = Accuracy()
     self.decay_lr = tf.optimizers.schedules.ExponentialDecay(
         lr, 1000, 0.95)
     self.logger = logging.getLogger('tensorflow')
     self.logger.setLevel(logging.INFO)
Example #14
def do_training(initial_learning_rate=0.001):
    gids = get_gids_from_database("wnet")
    training_gen, validation_gen = initialize_train_and_validation_generators(
        "wnet",
        gids,
        batch_size=5,
        label_target_size=256,
        use_image_as_label=True)
    steps_per_epoch = next(training_gen)
    validation_steps = next(validation_gen)

    full_model, encoder_model = WNet(nb_classes=6)
    metrics = [Accuracy(), CategoricalAccuracy(), CategoricalCrossentropy()]
    optimizer = Adam(lr=initial_learning_rate)
    full_model.compile(optimizer=optimizer,
                       loss=categorical_crossentropy,
                       metrics=metrics)

    start_time = int(time.time())
    model_path = f"weights/{start_time}_{full_model.name}/"
    os.mkdir(model_path)

    metrics_to_log = [metric.name for metric in metrics]
    callbacks = [
        save_model_on_epoch_end(full_model.name, full_model, model_path),
        save_model_on_epoch_end(encoder_model.name, encoder_model, model_path),
        metrics_to_csv_logger(model_path + "/batch.csv",
                              ["loss"] + metrics_to_log),
        CSVLogger(model_path + "/epoch.csv", separator=";"),
    ]
    full_model.fit(training_gen,
                   epochs=1,
                   steps_per_epoch=steps_per_epoch,
                   validation_data=validation_gen,
                   validation_steps=validation_steps,
                   callbacks=callbacks)
Example #15
def report(model, ds_test_images, ds_test_labels, threshold=0.5):
    """Compute overall accuracy and per-class precision/recall on a test set
    and return them as a pandas DataFrame."""

    true_iter = ds_test_labels.as_numpy_iterator()
    y_true = np.hstack(list(true_iter))
    y_pred = model.predict(ds_test_images).flatten()

    class_labels = np.unique(y_true)
    depth = class_labels.shape[0]

    y_true_oh = tf.one_hot(y_true, depth=depth)
    y_pred_oh = tf.one_hot(np.where(y_pred < threshold, 0, 1), depth=depth)

    results = {'Accuracy': [], 'Precision': [], 'Recall': []}

    m = Accuracy()
    _ = m.update_state(y_true, np.around(y_pred).astype(int))
    results['Accuracy'].append(m.result().numpy())
    results['Precision'].append(" ")
    results['Recall'].append(" ")

    prec = [Precision(class_id=n) for n in class_labels]
    rec = [Recall(class_id=n) for n in class_labels]

    for p, r in zip(prec, rec):
        p.update_state(y_true_oh, y_pred_oh)
        r.update_state(y_true_oh, y_pred_oh)
        results['Accuracy'].append(" ")
        results['Precision'].append(p.result().numpy())
        results['Recall'].append(r.result().numpy())

    row_labels = [
        'All' if i == 0 else f'Class {i-1}' for i in range(depth + 1)
    ]

    return pd.DataFrame(data=results, index=row_labels)
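A hypothetical smoke test (all names and data below are invented; pandas is assumed to be imported as pd in the source module): the labels and images are wrapped in tf.data pipelines, as the function expects, and a tiny untrained sigmoid model stands in for a real classifier.

import numpy as np
import tensorflow as tf

images = np.random.rand(8, 4).astype("float32")
labels = np.array([0, 1, 0, 1, 0, 1, 0, 1])
ds_imgs = tf.data.Dataset.from_tensor_slices(images).batch(4)
ds_lbls = tf.data.Dataset.from_tensor_slices(labels).batch(4)

clf = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="sigmoid")])
print(report(clf, ds_imgs, ds_lbls))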
Example #16
def cnn_1d_pipeline(train_data, train_labels, val_data, val_labels, test_data,
                    test_labels, classes, weights):
    """This function is the main pipeline for the 1D CNN.
    
    Parameters :
    ------------
    xxx_data : np.array
    Data of the train, validation and test datasets

    xxx_labels : np.array
    Labels of the train, validation and test datasets

    classes : np.array
    Classes of the dataset

    weights : np.array
    Weights applied for each class during training

    Returns :
    -----------
    Accuracy : float
    Precision : float 
    Recall : float
    F1-Score : float
    Metrics evaluated on the test set
    """

    # Reshaping the data and labels
    train_data = train_data.reshape(
        train_data.shape[0] * train_data.shape[1] * train_data.shape[2],
        train_data.shape[3])
    train_labels = train_labels.reshape(
        train_labels.shape[0] * train_labels.shape[1] * train_labels.shape[2])

    val_data = val_data.reshape(
        val_data.shape[0] * val_data.shape[1] * val_data.shape[2],
        val_data.shape[3])
    val_labels = val_labels.reshape(val_labels.shape[0] * val_labels.shape[1] *
                                    val_labels.shape[2])

    test_shape = test_labels.shape
    test_data = test_data.reshape(
        test_data.shape[0] * test_data.shape[1] * test_data.shape[2],
        test_data.shape[3])
    test_labels = test_labels.reshape(
        test_labels.shape[0] * test_labels.shape[1] * test_labels.shape[2])

    #Calculating the weights of each pixel
    train_weights = np.ones((train_labels.shape[0]))
    for i in range(len(classes)):
        train_weights[train_labels == classes[i]] = weights[i]

    val_weights = np.ones((val_labels.shape[0]))
    for i in range(len(classes)):
        val_weights[val_labels == classes[i]] = weights[i]

    test_weights = np.ones((test_labels.shape[0]))
    for i in range(len(classes)):
        test_weights[test_labels == classes[i]] = weights[i]

    # One-Hot Encoding the labels

    train_labels = tf.one_hot(train_labels, depth=17, dtype=tf.int8).numpy()
    val_labels = tf.one_hot(val_labels, depth=17, dtype=tf.int8).numpy()

    # Creating a data generator class

    class DataGenerator(tf.keras.utils.Sequence):
        def __init__(self,
                     data,
                     labels,
                     weights,
                     batch_size=32,
                     n_classes=10,
                     shuffle=True):
            'Initialization'
            self.data = data
            self.labels = labels
            self.weights = weights
            self.batch_size = batch_size
            self.shuffle = shuffle
            self.on_epoch_end()

        def __len__(self):
            'Denotes the number of batches per epoch'
            return int(np.floor(len(self.data) / self.batch_size))

        def __getitem__(self, index):
            'Generate one batch of data'
            # Generate indexes of the batch
            indexes = self.indexes[index * self.batch_size:(index + 1) *
                                   self.batch_size]

            # Get data and labels
            data_yield = self.data[indexes]
            labels_yield = self.labels[indexes]
            weights_yield = self.weights[indexes]

            return data_yield, labels_yield, weights_yield

        def on_epoch_end(self):
            'Updates indexes after each epoch'
            self.indexes = np.arange(len(self.data))
            if self.shuffle == True:
                np.random.shuffle(self.indexes)

    ## -- Training the model --

    batch_size = 3000
    steps_per_epoch = 20
    train_generator = DataGenerator(train_data,
                                    train_labels,
                                    train_weights,
                                    batch_size,
                                    shuffle=True)
    val_generator = DataGenerator(val_data,
                                  val_labels,
                                  val_weights,
                                  batch_size,
                                  shuffle=True)

    checkpoint_filepath = "tmp/checkpoint_salinas_convnet"
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        save_weights_only=True,
        monitor='val_loss',
        mode='min',
        save_best_only=True)

    model = cnn_1D(shape=(train_data.shape[1], 1),
                   kernel_size=7,
                   nb_filters_0=64,
                   nb_dense_neurons=500,
                   output_channels=17,
                   kernel_reg=0.0009875,
                   dense_reg=0.0012559,
                   dropout=0)
    model.compile(loss="categorical_crossentropy",
                  optimizer=Adam(learning_rate=0.00074582),
                  metrics=["accuracy"])

    print(model.summary())

    # Training the model
    history = model.fit(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=500,
                        validation_data=val_generator,
                        callbacks=[
                            model_checkpoint_callback,
                            tf.keras.callbacks.EarlyStopping(
                                monitor='val_accuracy', patience=30)
                        ])

    # Making a prediction on the test data
    y_pred = model.predict(test_data, batch_size=batch_size)
    y_pred = np.argmax(y_pred[:, 1:], axis=1) + 1

    # Calculating accuracy
    accuracy = Accuracy()
    accuracy.update_state(test_labels, y_pred)
    pred_accuracy = accuracy.result()
    print(pred_accuracy)

    # Computing the confusion matrix
    conf = confusion_matrix(test_labels, y_pred)
    print(conf)

    y_pred = y_pred.reshape(test_shape)
    y_pred[test_labels.reshape(test_shape) == 0] = 0
    y_pred = y_pred.reshape(test_shape[0] * test_shape[1] * test_shape[2])

    # Calculating metrics
    accuracy = Accuracy()
    accuracy.update_state(test_labels, y_pred, sample_weight=test_weights)

    recall = tf.keras.metrics.Recall()
    recall.update_state(
        tf.one_hot(test_labels, depth=17).numpy().flatten(),
        tf.one_hot(y_pred, depth=17).numpy().flatten())

    precision = tf.keras.metrics.Precision()
    precision.update_state(
        tf.one_hot(test_labels, depth=17).numpy().flatten(),
        tf.one_hot(y_pred, depth=17).numpy().flatten())

    return (accuracy.result().numpy(), precision.result().numpy(),
            recall.result().numpy(),
            2 * precision.result().numpy() * recall.result().numpy() /
            (precision.result().numpy() + recall.result().numpy()))
Example #17
def main():
    # data
    fashion_mnist = keras.datasets.fashion_mnist
    (train_images, train_labels), \
        (test_images, test_labels) = fashion_mnist.load_data()
    num_classes = np.max(train_labels) + 1  # 10

    padded = True
    if padded:
        train_images = np.load("./mnist_train_images_padded.npy")
    train_labels = np.eye(num_classes)[train_labels]

    # model
    selected_model = "ResNet20v2"
    # selected_model = "keras.applications.ResNet50V2"
    n = 2  # order of ResNetv2, 2 or 6
    version = 2
    depth = model_depth(n, version)
    model_type = 'ResNet%dv%d' % (depth, version)

    metrics = [Accuracy()]

    train_images = np.expand_dims(train_images, -1)
    input_shape = train_images.shape[1:]

    if selected_model == "ResNet20v2":
        model = resnet_v2(input_shape=input_shape,
                          depth=depth,
                          num_classes=num_classes)
    elif selected_model == "keras.applications.ResNet50V2":
        model = tf.keras.applications.ResNet50V2(include_top=True,
                                                 weights=None,
                                                 input_shape=input_shape,
                                                 classes=num_classes)
    else:
        return

    # plot model
    plot_model(model, to_file=f"{selected_model}.png", show_shapes=True)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(),  # learning_rate=lr_schedule(0)
        metrics=metrics)

    # checkpoint = ModelCheckpoint(filepath=filepath, monitor="acc",verbose=1)
    logdir = os.path.join("logs",
                          datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.makedirs(logdir, exist_ok=True)  # CSVLogger needs the directory to exist before training starts
    csv_logger = CSVLogger(os.path.join(logdir, "training.log.csv"),
                           append=True)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir,
                                                          histogram_freq=1)
    callbacks = [csv_logger, tensorboard_callback]
    # makedir_exist_ok()

    # fit
    epochs = 100
    batch_size = 32
    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        batch_size=batch_size,
                        callbacks=callbacks)
Example #18
    model = Model(inputs=img_input, outputs=output)

    return model


model = Nest_Net()

tf.keras.utils.plot_model(model, to_file='blstm.png', show_shapes=True)
"""# 训练"""

m_unet = Nest_Net()
from tensorflow.keras.metrics import Accuracy
m_unet.compile(loss='mean_squared_error',
               optimizer='adam',
               metrics=[Accuracy()])
m_unet.fit(X[:20, :, :, :], M['vocals'][:20, :, :, :], batch_size=2, epochs=20)
"""# 预测"""

track = [mus[-1]]

X, M = dataset(track)

X_origin = stft(track[0].audio.T, nperseg=4096, noverlap=3072)[-1]

print(X.shape, len(M), M['vocals'].shape, X_origin.shape)

M_predict = m_unet.predict(X)
print(M_predict.shape)

MM_predict = {
Example #19
x = keras.layers.Dense(256, activation='relu')(x)
#################################################
x = keras.layers.Dropout(0.3)(x)
##################################################
# FC 2
x = keras.layers.Dense(256, activation='relu')(x)
##################################################
x = keras.layers.Dropout(0.3)(x)
##################################################
outputLayer = keras.layers.Dense(7, activation='softmax')(x)

# Define
model = keras.Model(inputs=inputLayer, outputs=outputLayer)

# Define metrics
metrics_list = [Accuracy()]

opt = keras.optimizers.Adam(learning_rate=0.0005)
# Compile model
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=metrics_list)

# Summarize Model
model.summary()

# Train Model validation_data=test
history = model.fit(train, validation_data=test, shuffle=True, epochs=10)

model.save('emotion_model')

# test
model.evaluate(test, verbose=2)
Example #20
    def __init__(self,
                 n_classes,
                 n_hidden_layers=3,
                 n_units=96,
                 consistency_loss='mse',
                 consistency_scale=10,
                 stabilization_scale=100,
                 xi=0.6,
                 padding_value=0.,
                 sigma=0.01,
                 schedule='rampup',
                 schedule_length=5,
                 version='mono_directional'):
        """
        Constructs a Dual Student model.

        :param n_classes: number of classes (i.e. number of units in the last layer of each student)
        :param n_hidden_layers: number of hidden layers in each student (i.e. LSTM layers)
        :param n_units: number of units for each hidden layer
        :param consistency_loss: one of 'mse', 'kl'
        :param consistency_scale: maximum value of weight for consistency constraint
        :param stabilization_scale: maximum value of weight for stabilization constraint
        :param xi: threshold for stable sample
        :param padding_value: value used to pad input sequences (used as mask_value for Masking layer)
        :param sigma: standard deviation for noisy augmentation
        :param schedule: type of schedule for lambdas, one of 'rampup', 'triangular_cycling', 'sinusoidal_cycling'
        :param schedule_length:
        :param version: one of:
            - 'mono_directional': both students have mono-directional LSTM layers
            - 'bidirectional': both students have bidirectional LSTM layers
            - 'imbalanced': one student has mono-directional LSTM layers, the other one bidirectional
        """
        super(DualStudent, self).__init__()

        # store parameters
        self.n_classes = n_classes
        self.padding_value = padding_value
        self.n_units = n_units
        self.n_hidden_layers = n_hidden_layers
        self.xi = xi
        self.consistency_scale = consistency_scale
        self.stabilization_scale = stabilization_scale
        self.sigma = sigma
        self.version = version
        self.schedule = schedule
        self.schedule_length = schedule_length
        self._lambda1 = None
        self._lambda2 = None

        # schedule for lambdas
        if schedule == 'rampup':
            self.schedule_fn = sigmoid_rampup
        elif schedule == 'triangular_cycling':
            self.schedule_fn = triangular_cycling
        elif schedule == 'sinusoidal_cycling':
            self.schedule_fn = sinusoidal_cycling
        else:
            raise ValueError('Invalid schedule')

        # loss
        self._loss_cls = SparseCategoricalCrossentropy()  # classification loss
        self._loss_sta = MeanSquaredError()  # stabilization loss
        if consistency_loss == 'mse':
            self._loss_con = MeanSquaredError()  # consistency loss
        elif consistency_loss == 'kl':
            self._loss_con = KLDivergence()
        else:
            raise ValueError('Invalid consistency metric')

        # metrics for training
        self._loss1 = Mean(
            name='loss1')  # we want to average the loss for each batch
        self._loss2 = Mean(name='loss2')
        self._loss1_cls = Mean(name='loss1_cls')
        self._loss2_cls = Mean(name='loss2_cls')
        self._loss1_con = Mean(name='loss1_con')
        self._loss2_con = Mean(name='loss2_con')
        self._loss1_sta = Mean(name='loss1_sta')
        self._loss2_sta = Mean(name='loss2_sta')
        self._acc1 = SparseCategoricalAccuracy(name='acc1')
        self._acc2 = SparseCategoricalAccuracy(name='acc2')

        # metrics for testing
        self._test_loss1 = Mean(name='test_loss1')
        self._test_loss2 = Mean(name='test_loss2')
        self._test_acc1_train_phones = SparseCategoricalAccuracy(
            name='test_acc1_train_phones')
        self._test_acc2_train_phones = SparseCategoricalAccuracy(
            name='test_acc2_train_phones')
        self._test_acc1 = Accuracy(name='test_acc1')
        self._test_acc2 = Accuracy(name='test_acc2')
        self._test_per1 = PhoneErrorRate(name='test_per1')
        self._test_per2 = PhoneErrorRate(name='test_per2')

        # compose students
        if version == 'mono_directional':
            lstm_types = ['mono_directional', 'mono_directional']
        elif version == 'bidirectional':
            lstm_types = ['bidirectional', 'bidirectional']
        elif version == 'imbalanced':
            lstm_types = ['mono_directional', 'bidirectional']
        else:
            raise ValueError('Invalid student version')
        self.student1 = self._get_student('student1', lstm_types[0])
        self.student2 = self._get_student('student2', lstm_types[1])

        # masking layer (just to use compute_mask and remove padding)
        self.mask = Masking(mask_value=self.padding_value)
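A minimal construction sketch (parameter values are illustrative; it assumes the schedule helpers and the PhoneErrorRate metric referenced in the constructor are importable from the same project):

# Hypothetical phone-recognition setup: 48 classes, bidirectional students.
dual_student = DualStudent(n_classes=48,
                           n_hidden_layers=2,
                           consistency_loss='kl',
                           version='bidirectional')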
Example #21
 def compile(self):
     super(DogCNN, self).compile()
     self.optimizer = Adam()
     self.loss_fn = CategoricalCrossentropy()
     self.accuracy_fn = Accuracy()
Example #22
def price_direction_accuracy(targets_directs, predicts_directs):
    accur = Accuracy()
    accur.update_state(targets_directs, predicts_directs)

    return accur.result().numpy()
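A toy call with made-up up/down labels (1 = price moved up, 0 = down):

import numpy as np

targets = np.array([1, 0, 1, 1])
predicts = np.array([1, 0, 0, 1])
print(price_direction_accuracy(targets, predicts))  # 0.75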
Example #23
def cnn_1d_pipeline(train_data, train_labels, val_data, val_labels, test_data, test_labels, classes, weights):

    """This function is the main pipeline for the 1D CNN.
    
    Parameters :
    ------------
    xxx_data : np.array
    Data of the train, validation and test datasets

    xxx_labels : np.array
    Labels of the train, validation and test datasets

    classes : np.array
    Classes of the dataset

    weights : np.array
    Weights applied for each class during training

    Returns :
    -----------
    Accuracy : float
    Precision : float 
    Recall : float
    F1-Score : float
    Metrics evaluated on the test set
    """

    # Reshaping the data

    train_data = train_data.reshape(train_data.shape[0]*train_data.shape[1]*train_data.shape[2], train_data.shape[3])
    train_labels = train_labels.reshape(train_labels.shape[0]*train_labels.shape[1]*train_labels.shape[2])

    val_data = val_data.reshape(val_data.shape[0]*val_data.shape[1]*val_data.shape[2], val_data.shape[3])
    val_labels = val_labels.reshape(val_labels.shape[0]*val_labels.shape[1]*val_labels.shape[2])
    
    test_shape = test_labels.shape
    test_data = test_data.reshape(test_data.shape[0]*test_data.shape[1]*test_data.shape[2], test_data.shape[3])
    test_labels = test_labels.reshape(test_labels.shape[0]*test_labels.shape[1]*test_labels.shape[2])


    # Computing the weights of each pixel
    train_weights = np.ones((train_labels.shape[0])) 
    for i in range(len(classes)):
        train_weights[train_labels == classes[i]] = weights[i]

    val_weights = np.ones((val_labels.shape[0])) 
    for i in range(len(classes)):
        val_weights[val_labels == classes[i]] = weights[i]

    test_weights = np.ones((test_labels.shape[0])) 
    for i in range(len(classes)):
        test_weights[test_labels == classes[i]] = weights[i]

    # One-Hot Encoding the labels
    train_labels = tf.one_hot(train_labels, depth=6, dtype=tf.int8).numpy()
    val_labels = tf.one_hot(val_labels, depth=6, dtype=tf.int8).numpy()



    # Creating a data generator class

    class DataGenerator(tf.keras.utils.Sequence):
        def __init__(self, data, labels, weights, batch_size=32, n_classes=10, shuffle=True):
            'Initialization'
            self.data = data
            self.labels = labels
            self.weights = weights
            self.batch_size = batch_size
            self.shuffle = shuffle
            self.on_epoch_end()

        def __len__(self):
            'Denotes the number of batches per epoch'
            return int(np.floor(len(self.data) / self.batch_size))

        def __getitem__(self, index):
            'Generate one batch of data'
            # Generate indexes of the batch
            indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

            # Get data and labels
            data_yield = self.data[indexes]
            labels_yield = self.labels[indexes]
            weights_yield = self.weights[indexes]

            return data_yield, labels_yield, weights_yield

        def on_epoch_end(self):
            'Updates indexes after each epoch'
            self.indexes = np.arange(len(self.data))
            if self.shuffle == True:
                np.random.shuffle(self.indexes)
    
    
    ## -- Training the model --

    batch_size = 30000 
    steps_per_epoch = 100
    train_generator = DataGenerator(train_data, train_labels, train_weights, batch_size, shuffle=True)
    val_generator = DataGenerator(val_data, val_labels, val_weights, batch_size, shuffle=True)

    checkpoint_filepath = "tmp/checkpoint_SPARCS_cnn_1D"
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        save_weights_only=True,
        monitor='val_loss',
        mode='min',
        save_best_only=True)

    model = cnn_1D(shape=(10, 1),
                   kernel_size=3,
                   nb_filters_0=128,
                   nb_dense_neurons=180,
                   kernel_reg=0.0083,
                   dense_reg=0.054,
                   output_channels=6,
                   dropout=0.0,
                   learning_rate=0.000322)
    print(model.summary())

    # Training the model
    history = model.fit(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=500,
                        validation_data=val_generator,
                        validation_steps=5,
                        callbacks=[EarlyStopping(monitor='val_accuracy', patience=40)])


    # Making a prediction
    y_pred = model.predict(test_data, batch_size=30000)
    y_pred = np.argmax(y_pred, axis=1)

    # Computing accuracy
    accuracy = Accuracy()
    accuracy.update_state(test_labels, y_pred, sample_weight=test_weights)
    pred_accuracy = accuracy.result()
    print(pred_accuracy)

    # Computing the confusion matrix
    print("Computing confusion matrix")
    conf = confusion_matrix(test_labels, y_pred)
    print(conf)
    np.save("logs/metrics/SPARCS/cnn_1D/confusion_matrix", conf)


    y_pred = y_pred.reshape(test_shape)
    print(y_pred.shape)

    # Plotting result images on the test dataset
    c = 0
    cmap = plt.get_cmap('viridis', 6)
    for image in y_pred[:30]:
        plt.imshow(image+1e-5, vmin=0, vmax=6, cmap=cmap)
        plt.colorbar()
        plt.savefig(f"images/SPARCS/cnn_1D/pred{c}_raw")
        plt.clf()

        plt.imshow(test_labels.reshape(test_shape)[c]+1e-5, vmin=0, vmax=6, cmap=cmap)
        plt.colorbar()
        plt.savefig(f"images/SPARCS/cnn_1D/GT{c}")
        plt.clf()

        c+=1

    # Applying a median filter on the results
    for i in range(len(y_pred)):
        y_pred[i] = medfilt(y_pred[i], kernel_size = 3)



    # Plotting the median filtered result images
    c = 0
    for image in y_pred[:30]:
        plt.imshow(image+1e-5, vmin=0, vmax=6, cmap=cmap)
        plt.colorbar()
        plt.savefig(f"images/SPARCS/cnn_1D/pred{c}_filtered")
        plt.clf()

        c+=1
    
    # Computing metrics on the test dataset
    y_pred = y_pred.reshape(test_shape[0]*test_shape[1]*test_shape[2])
    accuracy = Accuracy()
    accuracy.update_state(test_labels, y_pred, sample_weight=test_weights)


    test_labels = tf.one_hot(test_labels, depth=6).numpy().flatten()
    y_pred = tf.one_hot(y_pred, depth=6).numpy().flatten()
    recall = tf.keras.metrics.Recall()
    recall.update_state(test_labels, y_pred)

    precision = tf.keras.metrics.Precision()
    precision.update_state(test_labels, y_pred)


    # Printing metrics
    test_accuracy = accuracy.result().numpy()
    test_recall = recall.result().numpy()
    test_precision = precision.result().numpy()
    test_f1 = 2/(1/test_recall + 1/test_precision)
    
    print("Test accuracy = ", test_accuracy)
    print("Test recall =", test_recall)
    print("Test precision=", test_precision)
    print("Test f1 =", test_f1)
Example #24
            cur_y = i
            merged_y += [cur_y]
    return merged_y


def plot_confusion_matrix(y_true, y_pred):
    cm = confusion_matrix(y_true, y_pred, normalize='true')
    plt.imshow(cm, cmap='Blues')
    plt.xlabel('predictions')
    plt.ylabel('ground truth')


# predict
y_pred = np.argmax(dnn.predict(x=test_x, batch_size=256), axis=1)
y_true = np.argmax(test_y, axis=1)
accuracy = Accuracy()

# frame-by-frame at the state level
accuracy.update_state(y_true, y_pred)
print('Frame-by-frame accuracy at the state level: {:.2f}%'.format(
    accuracy.result().numpy() * 100))
plt.figure()
plot_confusion_matrix(y_true, y_pred)
plt.title('Frame-by-frame confusion matrix at the state level')

# frame-by-frame at the phoneme level
y_pred_phones = states2phones(y_pred, phones, stateList)
y_true_phones = states2phones(y_true, phones, stateList)
accuracy.reset_states()
accuracy.update_state(y_true_phones, y_pred_phones)
print('Frame-by-frame accuracy at the phoneme level: {:.2f}%'.format(
Example #25
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    # MaxPooling2D(),
    # Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    # Dropout(0.2),
    Flatten(),
    # Dense(512, activation='relu'),
    Dense(64, activation='relu'),
    # Dense(128, activation='relu'),
    Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[Accuracy(), Precision(),
                       Recall()])

model.summary()

print("Start time:", datetime.now())
print()
history = model.fit(train_data_gen,
                    steps_per_epoch=total_train // batch_size,
                    epochs=epochs,
                    validation_data=test_data_gen,
                    validation_steps=total_val // batch_size)
print()
print("End time:", datetime.now())

model.save('model_weights/model.h5')