Example #1
def test_evaluate(mod, tfrecords):
    #Read data with a validation split
    mod.read_data(validation_split=True)

    metric_list = [keras_metrics.CategoricalAccuracy(name="acc")]

    mod.model.compile(loss="categorical_crossentropy",
                      optimizer=tf.keras.optimizers.Adam(
                          learning_rate=float(mod.config['train']['learning_rate'])),
                      metrics=metric_list)

    #Method 1, class eval method
    print("Before evaluation")
    y_pred, y_true = mod.evaluate(mod.val_split)

    print("evaluated")
    test_acc = keras_metrics.CategoricalAccuracy()
    test_acc.update_state(y_true=y_true, y_pred=y_pred)
    method1_eval_accuracy = test_acc.result().numpy()

    assert y_pred.shape == y_true.shape

    #Method 2, keras eval method
    metric_list = mod.model.evaluate(mod.val_split)
    metric_dict = {}
    for index, value in enumerate(metric_list):
        metric_dict[mod.model.metrics_names[index]] = value

    # compare floats with a tolerance; the two code paths may differ in rounding
    assert method1_eval_accuracy == pytest.approx(metric_dict["acc"])
Example #2
    def compile(self):
        if not self.model:
            raise RuntimeError("You have to build the model before compiling it.")

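        # weighted_metrics are averaged using the per-sample weights passed at
        # fit/evaluate time; sample_weight_mode="temporal" expects 2-D
        # (per-timestep) sample weights.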
        self.model.compile(
            optimizer=optimizers.Adam(learning_rate=self.config.model.learning_rate),
            loss=losses.CategoricalCrossentropy(from_logits=True),
            metrics=[metrics.CategoricalAccuracy(name="accuracy")],
            weighted_metrics=[metrics.CategoricalAccuracy("weighted_accuracy")],
            sample_weight_mode="temporal",
        )
Example #3
def train(model_file='./mnist/trained_model', num_epochs=5, init=None):

    train_ds, test_ds = load_data()

    model = MNISTModel()

    optimizer = optimizers.Adam()

    train_loss = metrics.Mean(name='train_loss')
    train_accuracy = metrics.CategoricalAccuracy(name='train_accuracy')

    test_loss = metrics.Mean(name='test_loss')
    test_accuracy = metrics.CategoricalAccuracy(name='test_accuracy')

    if init is not None:
        model.load_weights(model_file)

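    # @tf.function traces each step into a TensorFlow graph for faster execution.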
    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            logits = model(images)
            loss_value = loss_object(labels, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss(loss_value)
        train_accuracy(labels, logits)

    @tf.function
    def test_step(images, labels):
        logits = model(images)
        loss_value = loss_object(labels, logits)
        test_loss(loss_value)
        test_accuracy(labels, logits)

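    # Referenced by the tf.function steps above; Python resolves the name at
    # call time, so defining it afterwards is legal.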
    def loss_object(labels, logits):
        return tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                       logits=logits)

    for epoch in range(num_epochs):
        for images, labels in train_ds:
            train_step(images, labels)

        for test_images, test_labels in test_ds:
            test_step(test_images, test_labels)

        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(
            template.format(epoch + 1, train_loss.result(),
                            train_accuracy.result() * 100, test_loss.result(),
                            test_accuracy.result() * 100))

    model.save_weights(model_file)
Example #4
def model_vgg16_cifar(n_clasif, xshape):
    input_shape = xshape[1:]

    model = Sequential()
    # 2 x Conv
    model.add(
        Conv2D(64, (3, 3),
               input_shape=input_shape,
               padding='same',
               activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 2 x Conv
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(
        Dense(1000,
              activation='relu',
              activity_regularizer=regularizers.l2(0.001)))
    model.add(
        Dense(1000,
              activation='relu',
              activity_regularizer=regularizers.l2(0.001)))
    #model.add(Dense(4096 , activation='relu'))
    model.add(
        Dense(n_clasif,
              activation='linear',
              activity_regularizer=regularizers.l2(0.001)))

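    # The final Dense layer is linear, so the network outputs raw logits,
    # matching from_logits=True in the loss below.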
    model.summary()

    # Compile the model
    model.compile(
        loss=losses.CategoricalCrossentropy(from_logits=True),
        optimizer=optimizers.Adam(
            learning_rate=0.001),  #optimizers.SGD(lr=0.03)
        metrics=[metrics.CategoricalAccuracy('acc')])
    return model
Example #5
    def build_model(self,
                    embedding_size=EMBEDDING_SIZE,
                    input_length=MAX_DOCUMENT_LENGTH,
                    link_embedding_size=LINK_EMBEDDING_SIZE,
                    link_input_length=LINK_INPUT_LENGTH):
        he_inputs = keras.Input(shape=(input_length, ), name="hebrew")
        en_inputs = keras.Input(shape=(input_length, ), name="english")
        link_inputs = keras.Input(shape=(link_input_length, ), name="links")

        assert getattr(
            self, 'he_embed_model', None
        ) is not None, "CnnClfEnsemble.load_all_embedding_models() needs to be called before calling build_model()"
        he_embed = self.he_embed_model(he_inputs)
        en_embed = self.en_embed_model(en_inputs)
        link_embed = self.link_embed_model(link_inputs)

        self.model_head = self.get_model_head()
        outputs = self.model_head([he_embed, en_embed, link_embed])

        self.model = keras.Model(inputs=[he_inputs, en_inputs, link_inputs],
                                 outputs=outputs)
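        # class_id=0 restricts Recall and Precision to the first class.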
        self.model.compile(
            optimizer=optimizers.Adam(),  # learning_rate=0.001 (the default)
            loss=losses.CategoricalCrossentropy(from_logits=False),
            metrics=[
                metrics.CategoricalAccuracy(),
                metrics.Recall(class_id=0),
                metrics.Precision(class_id=0)
            ])
Example #6
def test_evaluate(test_config):
    #Create class
    mod = main.AttentionModel()

    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value

    #Create
    mod.create()
    mod.read_data(validation_split=True)

    #Method 1, class eval method
    print("Before evaluation")
    y_pred, y_true = mod.evaluate(mod.val_split)

    print("evaluated")

    test_acc = keras_metrics.CategoricalAccuracy()
    test_acc.update_state(y_true=y_true, y_pred=y_pred)
    method1_eval_accuracy = test_acc.result().numpy()

    assert y_pred.shape == y_true.shape

    #Method 2, keras eval method
    metric_list = mod.model.evaluate(mod.val_split)
    metric_dict = {}
    for index, value in enumerate(metric_list):
        metric_dict[mod.model.metrics_names[index]] = value

    # compare floats with a tolerance; the two code paths may differ in rounding
    assert method1_eval_accuracy == pytest.approx(metric_dict["acc"])

    #F1 requires integer class labels, not softmax probabilities
    f1s = metrics.f1_scores(y_true, y_pred)
Example #7
def create_models(height, width, channels, classes, learning_rate, weighted_sum=True):
    #Define model structure
    sensor_inputs, sensor_outputs, spatial_attention_outputs, spectral_attention_outputs = define_model(
        height=height,
        width=width,
        channels=channels,
        classes=classes,
        weighted_sum=weighted_sum,
        softmax=True)

    #Full model compile
    model = tf.keras.Model(inputs=sensor_inputs,
                           outputs=sensor_outputs,
                           name="DeepTreeAttention")

    #compile full model
    metric_list = [metrics.CategoricalAccuracy(name="acc")]
    model.compile(loss="categorical_crossentropy",
                  optimizer=tf.keras.optimizers.Adam(
                      learning_rate=float(learning_rate)),
                  metrics=metric_list)
    #losses for the three spatial attention outputs
    loss_dict = {
        "spatial_attention_1": "categorical_crossentropy",
        "spatial_attention_2": "categorical_crossentropy",
        "spatial_attention_3": "categorical_crossentropy"
    }
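    # loss_weights pair with the outputs in order, so later attention stages
    # contribute more to the total loss.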

    # Spatial Attention softmax model
    spatial_model = tf.keras.Model(inputs=sensor_inputs,
                                   outputs=spatial_attention_outputs,
                                   name="DeepTreeAttention")

    spatial_model.compile(
        loss=loss_dict,
        loss_weights=[0.01, 0.1, 1],
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=float(learning_rate)),
        metrics=metric_list)

    # Spectral Attention softmax model
    spectral_model = tf.keras.Model(inputs=sensor_inputs,
                                    outputs=spectral_attention_outputs,
                                    name="DeepTreeAttention")

    #losses for the three spectral attention outputs
    loss_dict = {
        "spectral_attention_1": "categorical_crossentropy",
        "spectral_attention_2": "categorical_crossentropy",
        "spectral_attention_3": "categorical_crossentropy"
    }

    spectral_model.compile(
        loss=loss_dict,
        loss_weights=[0.01, 0.1, 1],
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=float(learning_rate)),
        metrics=metric_list)

    return model, spatial_model, spectral_model
Example #8
def eval_use_model(model_name, path_model_file, test_file, class_num):
    """
    evaluating model by using entire model (weights, architecture, optimizers, etc.)

    Arguments:\n
    model_name --> String, Resnet50/Resnet18/VGG16/VGG19
    path_model_file --> String, path which store .hdf5 of model's weight\n
    test_file --> String, path to which store .h5 file of test dataset
    class_num --> Int, number of class/label\n

    Returns:\n
    none
    """
    # load_model restores the full model, so no empty Model() is needed first
    new_model = load_model(path_model_file)
    new_model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=[
                          metrics.AUC(),
                          metrics.CategoricalAccuracy(),
                          metrics.TruePositives(),
                          metrics.TrueNegatives(),
                          metrics.FalsePositives(),
                          metrics.FalseNegatives()
                      ])

    # retrieve X_test, Y_test
    X_test, Y_test = retrieve_test_dataset(test_file, int(class_num))

    for _ in range(4):
        hasil = new_model.evaluate(X_test, Y_test)
        print(new_model.metrics_names)
        print(hasil)
Example #9
def train(opts, model):

    task_opts = opts['task']
    sess_opts = opts['session']

    # Optimizer and criterion.
    opt = optimizers.SGD(learning_rate=0.1)
    crit = losses.BinaryCrossentropy()

    # Accuracy metric.
    train_acc = metrics.CategoricalAccuracy(name='train_acc')

    # Generate batch for selected task.
    src, tgt = Task.generate_batch(task_opts, sess_opts.batch_size)

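    # Note: the single batch generated above is reused for every epoch.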
    for epoch in range(sess_opts.epochs):
        train_acc.reset_states()

        p, loss = train_step(model, src, tgt, crit, opt)
        train_acc(tgt, p)

        if epoch % 500 == 0:
            fig, ax = plt.subplots(2, 1)
            ax[0].imshow(src[0].T)
            ax[1].imshow(p.numpy()[0].T)
            plt.show()

        print(f'Epoch {epoch + 1}, '
              f'Loss: {loss}, '
              f'Accuracy: {train_acc.result() * 100} ')
Example #10
def test_eval(model, test_df, y_test):
    test_data = BertPreprocessing(
        test_df[["sentence1", "sentence2"]].values.astype("str"),
        y_test,
        batch_size=config.batch_size,
        shuffle=False,
    )

    y_pred = model.predict(test_data)

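    # The preprocessing Sequence may drop a trailing partial batch, so trim the
    # labels to the number of predictions actually returned.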
    size = y_pred.shape[0]
    y_test = y_test[:size, :]

    accuracy = metrics.CategoricalAccuracy()
    accuracy.update_state(y_test, y_pred)

    precision = metrics.Precision()
    precision.update_state(y_test, y_pred)

    recall = metrics.Recall()
    recall.update_state(y_test, y_pred)

    f1 = tfa.metrics.F1Score(num_classes=3, average="macro")
    f1.update_state(y_test, y_pred)

    auc = metrics.AUC()
    auc.update_state(y_test, y_pred)

	print(f"""
	Accuracy: {accuracy.result().numpy()}
	Precision: {precision.result().numpy()}
	Recall: {recall.result().numpy()}
	F1 score: {f1.result().numpy()}
	AUC: {auc.result().numpy()}
	""")
Example #11
def train_network(training_set, training_labels, save_path='network.keras'):
    global model
    print('----TRAINING----')
    model = Sequential([
        Flatten(input_shape=training_set[0].shape),
        Dense(256, activation='relu'),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),
    ])
    model_metrics = [
        metrics.CategoricalAccuracy(),
        metrics.Recall(),
        metrics.AUC(),
        metrics.SensitivityAtSpecificity(.8),
        metrics.SpecificityAtSensitivity(.8), f1_score, fbeta_score
    ]
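    # f1_score and fbeta_score are assumed to be custom metric callables
    # defined elsewhere in this module.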
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=model_metrics)
    model.summary()  # summary() already prints; print() would add a stray "None"

    train_time = time.time_ns()
    history = model.fit(training_set,
                        training_labels,
                        epochs=2,
                        batch_size=32,
                        validation_split=0)
    print((time.time_ns() - train_time) / 1000000)
    print(str(history.history['loss'])[1:-1].replace(',', ''))
    print(str(history.history['categorical_accuracy'])[1:-1].replace(',', ''))

    model.save(save_path)
Example #12
def ejer3_imagenet(n_epochs):
    input_image = Input(shape=(32, 32, 3))
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train, y_train = preprocessing(x_train, y_train)
    x_test, y_test = preprocessing(x_test, y_test)

    model_keras = MobileNet(include_top=False,
                            weights="imagenet",
                            input_tensor=input_image)
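    # include_top=False drops the ImageNet classification head so a new
    # 10-class head can be attached below.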
    #model_keras.trainable = False

    model = Sequential()
    model.add(model_keras)
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(BatchNormalization())
    model.add(Dense(10, activation='softmax'))

    model.compile(loss=losses.CategoricalCrossentropy(),
                  optimizer=optimizers.Adam(learning_rate=0.00001),
                  metrics=[metrics.CategoricalAccuracy('acc')])

    model.summary()

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=50,
                        epochs=n_epochs,
                        verbose=1)

    acc, val_acc, loss, val_loss = plot_ejercicio(history)

    np.savetxt("ejer3{}epochs{}_mobilenet.txt".format("_imagenet_", n_epochs),
               np.array([acc, val_acc, loss, val_loss]).T)
Example #13
def run_gradient_tape():
    model = Sequential([
        layers.Reshape(target_shape=(28 * 28, ), input_shape=(28, 28)),
        layers.Dense(256, activation='relu'),
        layers.Dense(256, activation='relu'),
        layers.Dense(256, activation='relu'),
        layers.Dense(10)
    ])
    # model.build(input_shape=(None, 28*28))
    model.summary()

    optim = optimizers.Adam(learning_rate=0.001)
    accu = metrics.CategoricalAccuracy()

    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:
            out = model(x)
            loss = tf.square(out - y)
            loss = tf.reduce_sum(loss) / 32
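            # sum of squared errors divided by the hard-coded batch size of 32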

        accu.update_state(y, out)

        grads = tape.gradient(loss, model.trainable_variables)
        optim.apply_gradients(zip(grads, model.trainable_variables))

        if step % 200 == 0:
            print(step, 'Loss:', float(loss), '- Accu:', accu.result().numpy())
            accu.reset_states()
Example #14
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metrics_tracking.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(),
                 metrics.MeanSquaredError()])
    assert tracker.names == ['categorical_accuracy', 'mean_squared_error']
    assert tracker.directions['categorical_accuracy'] == 'max'
    assert tracker.directions['mean_squared_error'] == 'min'
Example #15
def evaluate(x_test, y_test):
    model = keras.models.load_model(WEIGHTS_PATH)
    model.compile(loss='categorical_crossentropy',
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    loss, accuracy, precision, recall = model.evaluate(x_test, y_test, verbose=1)
    F1_Score = 2 * (precision * recall) / (precision + recall)
    print('loss:%.4f accuracy:%.4f precision:%.4f recall:%.4f F1_Score:%.4f'
          % (loss, accuracy, precision, recall, F1_Score))
Example #16
def test_evaluate(test_config):
    #Create class
    mod = Houston2018.AttentionModel(config="conf/houston_config.yml")

    #Replace config for testing env
    for key, value in test_config.items():
        for nested_key, nested_value in value.items():
            mod.config[key][nested_key] = nested_value

    #Create
    mod.create()
    mod.read_data(validation_split=True)

    metric_list = [
        keras_metrics.TopKCategoricalAccuracy(k=2, name="top_k"),
        keras_metrics.CategoricalAccuracy(name="acc")
    ]

    mod.model.compile(loss="categorical_crossentropy",
                      optimizer=tf.keras.optimizers.Adam(
                          learning_rate=float(mod.config['train']['learning_rate'])),
                      metrics=metric_list)

    #Method 1, class eval method
    print("Before evaluation")
    y_pred, y_true = mod.evaluate(mod.val_split)

    print("evaluated")
    test_acc = keras_metrics.CategoricalAccuracy()
    test_acc.update_state(y_true=y_true, y_pred=y_pred)
    method1_eval_accuracy = test_acc.result().numpy()

    assert y_pred.shape == y_true.shape

    #Method 2, keras eval method
    metric_list = mod.model.evaluate(mod.val_split)
    metric_dict = {}
    for index, value in enumerate(metric_list):
        metric_dict[mod.model.metrics_names[index]] = value

    # compare floats with a tolerance; the two code paths may differ in rounding
    assert method1_eval_accuracy == pytest.approx(metric_dict["acc"])

    #F1 requires integer class labels, not softmax probabilities
    f1s = metrics.f1_scores(y_true, y_pred)
Example #17
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metric.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(),
                 metrics.MeanSquaredError()])
    assert set(tracker.metrics.keys()) == {
        'categorical_accuracy', 'mean_squared_error'
    }
    assert tracker.metrics['categorical_accuracy'].direction == 'max'
    assert tracker.metrics['mean_squared_error'].direction == 'min'
Example #18
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metrics_tracking.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(), metrics.MeanSquaredError()]
    )
    assert set(tracker.metrics.keys()) == {
        "categorical_accuracy",
        "mean_squared_error",
    }
    assert tracker.metrics["categorical_accuracy"].direction == "max"
    assert tracker.metrics["mean_squared_error"].direction == "min"
Example #19
 def initialize_model(self):
     inputs = layers.Input(shape=(self.timestep_num, self.channel_num))
     L1 = layers.GRU(self.__RNN1_UnitNum, return_sequences=True)(inputs)
     L2 = layers.Dense(self.__Dense_UnitNum, activation='tanh')(L1)
     L3 = layers.GRU(self.__RNN2_UnitNum, return_sequences=True)(L2)
     output = layers.Dense(self.class_num, activation='softmax')(L3)
     self.model = models.Model(inputs, output)
     self.model.compile(
         optimizer=optimizers.Adam(),
         loss=losses.CategoricalCrossentropy(),
         metrics=[metrics.CategoricalAccuracy()],
     )
Example #20
def classifier_train(classifier_model, train_images_code, train_labels, hyperparams):
    classifier_model.compile(loss=losses.CategoricalCrossentropy(),
                             optimizer="RMSprop",
                             metrics=[metrics.CategoricalAccuracy()])

    train_X, test_X, train_ground, test_ground = train_test_split(
        train_images_code, train_labels, test_size=0.2)

    train_hist = classifier_model.fit(train_X, train_ground,
                                      batch_size=hyperparams.batch_size,
                                      verbose=1,
                                      epochs=hyperparams.epochs,
                                      validation_data=(test_X, test_ground))

    return classifier_model, train_hist
Example #21
def build_simple_model(dataset='Fashion Mnist',
                       opt='sgd',
                       hidden=None,
                       funcs=None,
                       loss=None,
                       metrics_list=None):
    model = models.Sequential()
    if dataset == 'CIFAR-10':
        model.add(layers.Flatten(input_shape=[32, 32, 3]))
    elif dataset == 'Fashion Mnist':
        model.add(layers.Flatten(input_shape=[28, 28]))
    for i in hidden.keys():
        model.add(layers.Dense(hidden[i], activation=funcs[i].lower()))
    model.add(layers.Dense(10, activation="softmax"))

    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss and loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
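    # Unknown names fall through metrics_dict.get(m, m) unchanged, so plain
    # Keras metric strings still work.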
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
Example #22
def main():
    raw_dataset = tf.data.TFRecordDataset(train_filenames)
    raw_dataset = raw_dataset.take(720000)

    feat_desc = {
        'bcateid': tf.io.FixedLenFeature([], tf.int64),
        'mcateid': tf.io.FixedLenFeature([], tf.int64),
        'scateid': tf.io.FixedLenFeature([], tf.int64),
        'dcateid': tf.io.FixedLenFeature([], tf.int64),
        'pieces': tf.io.FixedLenFeature([20], tf.int64)
    }

    def _parse_function(example_proto):
        parsed = tf.io.parse_single_example(example_proto, feat_desc)
        output = {}
        output['bcateid'] = parsed.pop('bcateid')
        output['mcateid'] = parsed.pop('mcateid')
        output['scateid'] = parsed.pop('scateid')
        output['dcateid'] = parsed.pop('dcateid')
        return (parsed, output)

    dataset = raw_dataset.map(_parse_function)
    dataset = dataset.shuffle(1024)
    dataset = dataset.batch(40, drop_remainder=True)
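    # Each element is now a (features, labels) pair; the label dict keys match
    # the model's named outputs.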

    encoder = get_encoder(700, 8000)

    keras.utils.plot_model(encoder, 'text_clf_gru.png', show_shapes=True)

    encoder.compile(
        optimizer=optimizers.Adam(),
        loss={
            'bcateid': loss_fn,
            'mcateid': loss_fn,
            'scateid': loss_fn_many,
            'dcateid': loss_fn_many
        },
        metrics=[metrics.CategoricalAccuracy()])

    callbacks = [
        keras.callbacks.ModelCheckpoint(
            filepath='train10/spmodel_{epoch}.ckpt',
            save_weights_only=True,
            verbose=1
        )
    ]

    encoder.fit(dataset, epochs=3, callbacks=callbacks)
Example #23
 def __get_metric(self, metric):
     if metric == "auc":
         return m.AUC()
     elif metric == "accuracy":
         return m.Accuracy()
     elif metric == "binary_accuracy":
         return m.BinaryAccuracy()
     elif metric == "categorical_accuracy":
         return m.CategoricalAccuracy()
     elif metric == "binary_crossentropy":
         return m.BinaryCrossentropy()
     elif metric == "categorical_crossentropy":
         return m.CategoricalCrossentropy()
     elif metric == "sparse_categorical_crossentropy":
         return m.SparseCategoricalCrossentropy()
     elif metric == "kl_divergence":
         return m.KLDivergence()
     elif metric == "poisson":
         return m.Poission()
     elif metric == "mse":
         return m.MeanSquaredError()
     elif metric == "rmse":
         return m.RootMeanSquaredError()
     elif metric == "mae":
         return m.MeanAbsoluteError()
     elif metric == "mean_absolute_percentage_error":
         return m.MeanAbsolutePercentageError()
     elif metric == "mean_squared_logarithm_error":
         return m.MeanSquaredLogarithmError()
     elif metric == "cosine_similarity":
         return m.CosineSimilarity()
     elif metric == "log_cosh_error":
         return m.LogCoshError()
     elif metric == "precision":
         return m.Precision()
     elif metric == "recall":
         return m.Recall()
     elif metric == "true_positive":
         return m.TruePositives()
     elif metric == "true_negative":
         return m.TrueNegatives()
     elif metric == "false_positive":
         return m.FalsePositives()
     elif metric == "false_negative":
         return m.FalseNegatives()
     else:
         raise ValueError(f"specified metric not defined: {metric}")
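The if/elif chain maps metric names to fresh metric instances; a flat registry of classes expresses the same mapping more compactly. A minimal sketch, assuming `m` is `tensorflow.keras.metrics` as above (abridged to a few entries):

 def __get_metric(self, metric):
     # Hypothetical registry variant; classes are stored and instantiated per
     # call so each lookup returns a fresh, stateful metric object.
     registry = {
         "auc": m.AUC,
         "accuracy": m.Accuracy,
         "categorical_accuracy": m.CategoricalAccuracy,
         "precision": m.Precision,
         "recall": m.Recall,
         "mse": m.MeanSquaredError,
     }
     try:
         return registry[metric]()
     except KeyError:
         raise ValueError(f"specified metric not defined: {metric}")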
Example #24
def merged_modeltrain(encoder, classifier_model, train_images, train_labels):
    merged_model = merge_models(encoder, classifier_model)
    merged_model.compile(loss=losses.CategoricalCrossentropy(),
                         optimizer="RMSprop",
                         metrics=[metrics.CategoricalAccuracy()])

    #print(merged_model.summary())
    train_X, test_X, train_ground, test_ground = train_test_split(
        train_images, train_labels, test_size=0.2)

    print("For merged model:")
    epochs = input_fns.input_epochs()
    batch_size = input_fns.input_batch_size()

    train_hist = merged_model.fit(train_X, train_ground,
                                  batch_size=batch_size,
                                  verbose=1,
                                  epochs=epochs,
                                  validation_data=(test_X, test_ground))

    return merged_model, train_hist
Example #25
def train(model, x_train, y_train):
    """
    Train the target model and save the weight of the model
    :param model: the model that will be trained
    :param x_train: the image as numpy format
    :param y_train: the label for x_train
    :param weights_path: path to save the model file
    :return: None
    """
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(learning_rate=5e-5),
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCHS)
    model.save(WEIGHTS_PATH)
Example #26
    def __init__(self,
                 epochs,
                 batch_size,
                 n_classes,
                 create_dir_bool=True,
                 verbose=2):
        print("########## Init NN ##########")
        self.metrics = [tkm.CategoricalAccuracy(name='ACC')]

        self.n_classes = n_classes
        self.exp_desc = ""
        self.verbose = verbose
        self.epochs = epochs
        self.batch_size = batch_size

        self.date = datetime.now().strftime("%d-%m_%H%M%S")
        if create_dir_bool:
            create_dir(self.date)
Example #27
def load_simple_model(model_path='',
                      weights_path='',
                      opt='sgd',
                      loss=None,
                      metrics_list=None):
    model = models.load_model(model_path)
    model.load_weights(weights_path)
    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss and loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
Example #28
    def create(self, name="Hang2020", weights=None, submodel=None):
        """Load a model
        Args:
            weights: saved model weights from a previous run
            name: a model name from DeepTreeAttention.models
        """
        self.model = self.get_model(name)
        
        if weights:
            self.model.load_weights(weights)

        #metrics
        metric_list = [
            metrics.TopKCategoricalAccuracy(k=2, name="top_k"),
            metrics.CategoricalAccuracy(name="acc")]
        
        #compile
        self.model.compile(loss="categorical_crossentropy",
                           optimizer=tf.keras.optimizers.Adam(
                               learning_rate=float(self.config['train']['learning_rate'])),
                           metrics=metric_list)
Example #29
def create_compiled_keras_model():
    optimizer = SGD(learning_rate=0.02,
                    decay=1e-6,
                    momentum=0.9,
                    nesterov=True,
                    clipnorm=5)
    '''
    model = classification_model(input_dim = (99, 161),
                                filters = 256, 
                                kernel_size = 1,
                                strides = 1,
                                padding = 'valid',
                                output_dim = 4)
    '''
    model = createNaiveModel(input_dim=(99, 161, 1), strides=2, output_dim=4)
    #model = add_categorical_loss(model, 4)
    # the keras classification model

    #model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),optimizer=tf.keras.optimizers.SGD(learning_rate=0.02),metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    #model.compile(loss = {'categorical_crossentropy' : lambda y_true, y_pred : y_pred}, optimizer = optimizer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=[metrics.CategoricalAccuracy()])
    return model
Example #30
    x_train = (x_train.reshape(-1, 784) / 255).astype(np.float32)
    x_test = (x_test.reshape(-1, 784) / 255).astype(np.float32)
    t_train = np.eye(10)[t_train].astype(np.float32)
    t_test = np.eye(10)[t_test].astype(np.float32)
    '''
    2. Build the model
    '''
    model = DNN(200, 10)
    '''
    3. Train the model
    '''
    criterion = losses.CategoricalCrossentropy()
    optimizer = optimizers.SGD(learning_rate=0.01)
    train_loss = metrics.Mean()
    train_acc = metrics.CategoricalAccuracy()

    def compute_loss(t, y):
        return criterion(t, y)

    def train_step(x, t):
        with tf.GradientTape() as tape:
            preds = model(x)
            loss = compute_loss(t, preds)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss(loss)
        train_acc(t, preds)

        return loss
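The excerpt ends at train_step; a minimal driver-loop sketch, assuming mini-batches sliced from x_train/t_train (the epoch count and batch size are illustrative, not from the source):

    epochs = 30
    batch_size = 100
    n_batches = x_train.shape[0] // batch_size

    for epoch in range(epochs):
        for i in range(n_batches):
            start = i * batch_size
            train_step(x_train[start:start + batch_size],
                       t_train[start:start + batch_size])
        print(f'epoch: {epoch + 1}, '
              f'loss: {train_loss.result():.3f}, '
              f'acc: {train_acc.result():.3f}')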