Example no. 1
import numpy as np
import tensorflow as tf
from tensorflow.keras.metrics import CategoricalAccuracy

# ComplexCategoricalAccuracy is the project's own metric under test and is
# imported from the surrounding package.


def test_with_tf():
    classes = 3
    y_true = tf.cast(tf.random.uniform(shape=(34, 54, 12), maxval=classes),
                     dtype=tf.int32)
    y_pred = tf.cast(tf.random.uniform(shape=y_true.shape, maxval=classes),
                     dtype=tf.int32)
    y_pred_one_hot = tf.one_hot(y_pred, depth=classes)
    y_true_one_hot = tf.one_hot(y_true, depth=classes)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true_one_hot, y_pred_one_hot)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true_one_hot, y_pred_one_hot)
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 0.],  # all-zero: tf does NOT ignore unlabeled rows
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 0., 0.],  # unlabeled
        [0., 0., 1., 0.]  # 3
    ])
    y_pred = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.]  # 4
    ])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred,
                            ignore_unlabeled=False)  # match tf's behavior
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([[1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.],
                       [1., 0.], [1., 0.], [1., 0.], [1., 0.], [0., 1.]])
    y_pred = np.array([[1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.],
                       [1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.]])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred,
                            ignore_unlabeled=False)  # match tf's behavior
    assert own_metric.result().numpy() == tf_metric.result().numpy()
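ComplexCategoricalAccuracy itself is not shown on this page. As a rough sketch of the behavior the test exercises, a stateful metric that can skip all-zero (unlabeled) one-hot rows could look like the following; the class name and every implementation detail here are assumptions, and only the ignore_unlabeled flag comes from the test above.

import tensorflow as tf


class MaskedCategoricalAccuracy(tf.keras.metrics.Metric):
    """Sketch of a categorical accuracy that can skip unlabeled rows.

    Illustrative only; the real ComplexCategoricalAccuracy may differ.
    """

    def __init__(self, name='masked_categorical_accuracy', **kwargs):
        super().__init__(name=name, **kwargs)
        self.correct = self.add_weight(name='correct', initializer='zeros')
        self.total = self.add_weight(name='total', initializer='zeros')

    def update_state(self, y_true, y_pred, ignore_unlabeled=True):
        matches = tf.cast(
            tf.equal(tf.argmax(y_true, axis=-1), tf.argmax(y_pred, axis=-1)),
            tf.float32)
        if ignore_unlabeled:
            # Rows whose one-hot vector is all zeros carry no label.
            labeled = tf.cast(tf.reduce_sum(y_true, axis=-1) > 0, tf.float32)
            matches *= labeled
            count = tf.reduce_sum(labeled)
        else:
            count = tf.cast(tf.size(matches), tf.float32)
        self.correct.assign_add(tf.reduce_sum(matches))
        self.total.assign_add(count)

    def result(self):
        return tf.math.divide_no_nan(self.correct, self.total)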
Example no. 2
    # Assumed imports for this helper:
    #   from sklearn.metrics import confusion_matrix
    #   from tensorflow.keras.utils import to_categorical
    #   from tensorflow.keras.metrics import AUC, CategoricalAccuracy
    def get_metrics(y, y_pred):

        cnf_matrix = confusion_matrix(y, y_pred)

        false_positive = cnf_matrix.sum(
            axis=0) - np.diag(cnf_matrix).astype(float)
        false_negative = cnf_matrix.sum(
            axis=1) - np.diag(cnf_matrix).astype(float)
        true_positive = np.diag(cnf_matrix).astype(float)
        true_negative = (
            cnf_matrix.sum() -
            (false_positive + false_negative + true_positive)).astype(float)

        y = to_categorical(y, num_classes=5)
        y_pred = to_categorical(y_pred, num_classes=5)

        auc = AUC()
        _ = auc.update_state(y, y_pred)
        acc = CategoricalAccuracy()
        _ = acc.update_state(y, y_pred)

        return {
            'accuracy': acc.result().numpy(),
            'auc': auc.result().numpy(),
            'sensitivity': true_positive / (true_positive + false_negative),
            'specificity': true_negative / (true_negative + false_positive)
        }
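If get_metrics is reachable as a plain function (it takes no self), it can be exercised with synthetic integer labels in [0, 5); the data below is made up purely for illustration.

import numpy as np

rng = np.random.default_rng(0)
y = rng.integers(0, 5, size=200)       # ground-truth class ids
y_pred = rng.integers(0, 5, size=200)  # predicted class ids

metrics = get_metrics(y, y_pred)
print(metrics['accuracy'])     # scalar in [0, 1]
print(metrics['sensitivity'])  # one recall value per class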
# Assumed imports for this class: os, json,
# `from datetime import datetime as dt`, tensorflow as tf, and
# CategoricalAccuracy / CategoricalCrossentropy from tf.keras.
class ExperimentClassify:
    def __init__(self, model, optimizer, exptConfig):

        self.now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
        self.exptConfig = exptConfig
        self.model = model
        self.optimizer = optimizer
        self.loss = CategoricalCrossentropy(
            from_logits=exptConfig['LossParams']['fromLogits'])

        # ------------ metrics ----------------------
        self.catAccTest = CategoricalAccuracy()
        self.catAccTrain = CategoricalAccuracy()

        self.exptFolder = os.path.join(
            exptConfig['OtherParams']['exptBaseFolder'], self.now,
            exptConfig['ModelParams']['name'])
        self.modelFolder = os.path.join(self.exptFolder, 'model')
        self.chkptFolder = os.path.join(self.exptFolder, 'checkpoints')

        os.makedirs(self.modelFolder, exist_ok=True)
        os.makedirs(self.chkptFolder, exist_ok=True)

        self.stepNumber = 0
        self.evalNumber = 0
        self.epoch = 0

        # All the logs go here ...
        # ------------------------
        self.createMetaData()

        self.logDir = os.path.join(self.exptFolder, 'logs')
        self.scalarWriter = tf.summary.create_file_writer(
            os.path.join(self.logDir, 'scalars', 'metrics'))
        self.graphWriter = tf.summary.create_file_writer(
            os.path.join(self.logDir, 'graph'))

        return

    def step(self, x, y):

        with tf.GradientTape() as tape:
            yHat = self.model(x)  # call the model directly, not model.call
            loss = self.loss(y, yHat)

        # Compute and apply gradients outside the tape's context.
        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(
            zip(grads, self.model.trainable_weights))

        self.catAccTrain.update_state(y, yHat)

        with self.scalarWriter.as_default():
            tf.summary.scalar('training loss', data=loss, step=self.stepNumber)
            tf.summary.scalar('training accuracy',
                              data=self.catAccTrain.result().numpy(),
                              step=self.stepNumber)

        self.stepNumber += 1

        return loss.numpy()

    def eval(self, x, y):

        yHat = self.model.predict(x)
        self.catAccTest.update_state(y, yHat)

        with self.scalarWriter.as_default():
            tf.summary.scalar('testing accuracy',
                              data=self.catAccTest.result().numpy(),
                              step=self.evalNumber)

        self.evalNumber += 1

        return self.catAccTest.result().numpy()

    def createMetaData(self):

        if not os.path.exists(self.exptFolder):
            os.makedirs(self.exptFolder)

        with open(os.path.join(self.exptFolder, 'config.json'), 'w') as fOut:
            json.dump(self.exptConfig, fOut)

        return

    def createModelSummary(self, x):
        tf.summary.trace_on(graph=True)
        self.model.predict(x)
        with self.graphWriter.as_default():
            tf.summary.trace_export('name', step=0)
        tf.summary.trace_off()

    def saveModel(self):

        try:
            self.model.save(self.modelFolder)
        except Exception as e:
            print(f'Unable to save the model: {e}')

        return

    def checkPoint(self):
        try:
            epoch = self.epoch
            step = self.stepNumber
            self.model.save_weights(
                os.path.join(self.chkptFolder, f'{epoch:07d}-{step:07d}'))
        except Exception as e:
            print(f'Unable to checkpoint: {self.stepNumber}: {e}')
        return
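A minimal driver for ExperimentClassify might look like the following sketch; model, optimizer, train_ds and test_ds are hypothetical stand-ins, and the exptConfig keys mirror the constructor above.

# Hypothetical usage of ExperimentClassify; only the config keys read by
# the constructor are filled in.
exptConfig = {
    'LossParams': {'fromLogits': False},
    'OtherParams': {'exptBaseFolder': './experiments'},
    'ModelParams': {'name': 'demo'},
}
expt = ExperimentClassify(model, optimizer, exptConfig)
for epoch in range(10):
    expt.epoch = epoch
    for x, y in train_ds:  # train_ds / test_ds: assumed tf.data datasets
        expt.step(x, y)
    for x, y in test_ds:
        expt.eval(x, y)
    expt.checkPoint()
expt.saveModel()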
        # The start of this snippet was lost in extraction; the dangling line
        # below is presumably the tail of an apply_gradients call (the
        # `optimizer` and `grads` names are assumed):
        optimizer.apply_gradients(zip(grads,
                                      teacher_model.trainable_variables))
        loss_value_test = training.loss([x_val_main_, x_val_aux_], y_val_)

        epoch_loss_avg(loss_value)
        epoch_accuracy(
            y_train_,
            tf.nn.softmax(teacher_model([x_train_main_, x_train_aux_])))
        epoch_loss_avg_val(loss_value_test)
        epoch_accuracy_val(
            y_val_, tf.nn.softmax(teacher_model([x_val_main_, x_val_aux_])))

    # Display training progress
    print(
        'Epoch {}/{}: Loss: {:.3f}, Accuracy: {:.3%}, Validation Loss: {:.3f}, Validation Accuracy: {:.3%}'
        .format(epoch, EPOCHS_T, epoch_loss_avg.result(),
                epoch_accuracy.result(), epoch_loss_avg_val.result(),
                epoch_accuracy_val.result()))

# Define the Student model
student = KDModel.Students(NUM_CLASSES, T)
student_model = student.createModel(inputs_main)

# Train the Student model
student_model.summary()
# plot_model(student_soft_model, show_shapes=True, to_file='student_model.png')
kd = KDModel.KnowledgeDistillation(teacher_model, student_model, T, ALPHA)
history_student = LossAccHistory()
for epoch in range(1, EPOCHS_S + 1):
    epoch_loss_avg = Mean()
    epoch_loss_avg_val = Mean()
    epoch_accuracy = CategoricalAccuracy()
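KDModel is project-specific and not shown here. The usual knowledge-distillation loss that such a class computes blends the hard-label cross-entropy with a soft-target term using temperature T and mixing weight ALPHA; the following is a generic textbook sketch (Hinton et al., 2015), not KDModel's actual code.

import tensorflow as tf


def distillation_loss(y_true, student_logits, teacher_logits, T=5.0,
                      alpha=0.5):
    """Generic KD loss: alpha * hard CE + (1 - alpha) * T^2 * soft CE."""
    hard = tf.keras.losses.categorical_crossentropy(
        y_true, tf.nn.softmax(student_logits))
    soft = tf.keras.losses.categorical_crossentropy(
        tf.nn.softmax(teacher_logits / T),
        tf.nn.softmax(student_logits / T))
    # The T**2 factor rescales the soft-target gradients, as in the paper.
    return alpha * hard + (1.0 - alpha) * (T ** 2) * soft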
Example no. 5
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    for epoch in range(config['epochs']):
        for X, y in tqdm(train_ds):
            train_step(X, y)

        for X, y in tqdm(val_ds):
            val_step(X, y)

        template = 'Training epoch: {}, loss: {}, accuracy: {}, test loss: {}, test accuracy: {}'
        print(template.format(epoch + 1,
                              train_loss.result(),
                              train_accuracy.result() * 100,
                              val_loss.result(),
                              val_accuracy.result() * 100))

        # Reset metric state so the next epoch's numbers are not
        # accumulated on top of this one's.
        train_loss.reset_states()
        train_accuracy.reset_states()
        val_loss.reset_states()
        val_accuracy.reset_states()

        # save checkpoint
        save_path = manager.save()
        print(f'Saved checkpoint for epoch {int(ckpt.step)}: {save_path}\n')
        ckpt.step.assign_add(1)


    print('\nTraining complete!!!!\n')

    print('<Model summary>')
    model.summary()
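The ckpt and manager objects used above are defined earlier in this example, outside the visible fragment; the standard TF2 pattern for them (from the tf.train.Checkpoint guide) is sketched below, with config['checkpoint_dir'] as an assumed key.

# Standard tf.train.Checkpoint / CheckpointManager setup; the directory
# key is an assumption about this example's config.
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer,
                           model=model)
manager = tf.train.CheckpointManager(ckpt, config['checkpoint_dir'],
                                     max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)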
Example no. 6
        valid_accuracy.reset_states()

    for imgs, masks in train_data:
        trainStep(imgs, masks)

    for imgs, masks in valid_data:
        if not args.train:
            validStep(imgs, masks)
        else:
            trainStep(imgs, masks)

    template1 = "epoch[{0}/{1}] training: mean loss: {2}, accuracy: {3}"
    template2 = "validation: mean loss: {0}, accuracy: {1}"
    if not args.train:
        print(template1.format(epoch + 1, args.epochs, train_loss.result(),
                               train_accuracy.result()),
              end='\t')
        print(template2.format(valid_loss.result(), valid_accuracy.result()))
    else:
        print(
            template1.format(epoch + 1, args.epochs, train_loss.result(),
                             train_accuracy.result()))

    if not os.path.exists('./checkpoints'):
        os.mkdir('./checkpoints')

    if not args.train:
        if valid_loss.result() < bestloss:
            bestloss = valid_loss.result()
            model.save_weights('./checkpoints/mycheckpoint.h5')
    else:
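trainStep and validStep are not shown in this fragment (which is also cut off after the final else). For a segmentation model consuming (imgs, masks) batches, they would typically be tf.functions along these lines; model, loss_fn, optimizer, and the four metric objects are assumed names.

@tf.function
def trainStep(imgs, masks):
    with tf.GradientTape() as tape:
        logits = model(imgs, training=True)
        loss = loss_fn(masks, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(loss)
    train_accuracy(masks, logits)


@tf.function
def validStep(imgs, masks):
    logits = model(imgs, training=False)
    valid_loss(loss_fn(masks, logits))
    valid_accuracy(masks, logits)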
Example no. 7
def train(args):
    # config_tf2(args['configuration']['xla'])
    # Create log, checkpoint and export directories
    checkpoint_dir, log_dir, export_dir = create_env_directories(
        args, get_experiment_name(args))

    train_weight_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['train_list'],
        num_classes=args["num_classes"],
        split='train_weights')
    train_arch_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['train_list'],
        num_classes=args["num_classes"],
        split='train_arch')
    val_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['val_list'],
        num_classes=args["num_classes"],
        split='validation')

    setup_mp(args)

    # define model, optimizer and checkpoint callback
    model = model_name_to_class[args['model_name']](
        args['framework'],
        input_shape=args['input_size'],
        label_dim=args['num_classes']).model
    model.summary()
    alchemy_api.send_model_info(model, args['server'])
    weight_opt = get_optimizer(args['optimizer'])
    arch_opt = get_optimizer(args['arch_optimizer_param'])
    model_checkpoint_cb, latest_epoch = init_custom_checkpoint_callbacks(
        {'model': model}, checkpoint_dir)

    weights, arch_params = split_trainable_weights(model)
    temperature_decay_fn = exponential_decay(
        args['temperature']['init_value'], args['temperature']['decay_steps'],
        args['temperature']['decay_rate'])

    lr_decay_fn = CosineDecay(
        args['optimizer']['lr'],
        alpha=args["optimizer"]["lr_decay_strategy"]["lr_params"]["alpha"],
        total_epochs=args['num_epochs'])

    loss_fn = CategoricalCrossentropy()
    accuracy_metric = CategoricalAccuracy()
    loss_metric = Mean()
    val_accuracy_metric = CategoricalAccuracy()
    val_loss_metric = Mean()

    train_log_dir = os.path.join(args['log_dir'], 'train')
    val_log_dir = os.path.join(args['log_dir'], 'validation')
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    val_summary_writer = tf.summary.create_file_writer(val_log_dir)

    @tf.function
    def train_step(x_batch, y_batch):
        with tf.GradientTape() as tape:
            y_hat = model(x_batch, training=True)
            loss = loss_fn(y_batch, y_hat)

        accuracy_metric.update_state(y_batch, y_hat)
        loss_metric.update_state(loss)
        grads = tape.gradient(loss, weights)
        weight_opt.apply_gradients(zip(grads, weights))

    @tf.function
    def train_step_arch(x_batch, y_batch):
        with tf.GradientTape() as tape:
            y_hat = model(x_batch, training=False)
            loss = loss_fn(y_batch, y_hat)

        accuracy_metric.update_state(y_batch, y_hat)
        loss_metric.update_state(loss)
        grads = tape.gradient(loss, arch_params)
        arch_opt.apply_gradients(zip(grads, arch_params))

    @tf.function
    def evaluation_step(x_batch, y_batch):
        y_hat = model(x_batch, training=False)
        loss = loss_fn(y_batch, y_hat)

        val_accuracy_metric.update_state(y_batch, y_hat)
        val_loss_metric.update_state(loss)

    for epoch in range(latest_epoch, args['num_epochs']):
        print(f'Epoch: {epoch}/{args["num_epochs"]}')

        weight_opt.learning_rate = lr_decay_fn(epoch)

        # Updating the weight parameters using a subset of the training data
        for step, (x_batch, y_batch) in tqdm.tqdm(
                enumerate(train_weight_dataset, start=1)):
            train_step(x_batch, y_batch)

        # Evaluate the model on validation subset
        for x_batch, y_batch in val_dataset:
            evaluation_step(x_batch, y_batch)

        train_accuracy = accuracy_metric.result()
        train_loss = loss_metric.result()
        val_accuracy = val_accuracy_metric.result()
        val_loss = val_loss_metric.result()

        template = f'Weights updated, Epoch {epoch}, Train Loss: {float(train_loss)}, Train Accuracy: ' \
            f'{float(train_accuracy)}, Val Loss: {float(val_loss)}, Val Accuracy: {float(val_accuracy)}, ' \
            f'lr: {float(weight_opt.learning_rate)}'
        print(template)

        new_temperature = temperature_decay_fn(epoch)

        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss, step=epoch)
            tf.summary.scalar('accuracy', train_accuracy, step=epoch)
            tf.summary.scalar('temperature', new_temperature, step=epoch)

        with val_summary_writer.as_default():
            tf.summary.scalar('loss', val_loss, step=epoch)
            tf.summary.scalar('accuracy', val_accuracy, step=epoch)

        # Reset metrics for reuse
        accuracy_metric.reset_states()
        loss_metric.reset_states()
        val_accuracy_metric.reset_states()
        val_loss_metric.reset_states()

        if epoch >= 10:
            # Updating the architectural parameters on another subset
            for step, (x_batch, y_batch) in tqdm.tqdm(
                    enumerate(train_arch_dataset, start=1)):
                train_step_arch(x_batch, y_batch)

            # Evaluate the model on validation subset
            for x_batch, y_batch in val_dataset:
                evaluation_step(x_batch, y_batch)

            train_accuracy = accuracy_metric.result()
            train_loss = loss_metric.result()
            val_accuracy = val_accuracy_metric.result()
            val_loss = val_loss_metric.result()

            template = f'Arch params updated, Epoch {epoch}, Train Loss: {float(train_loss)}, Train Accuracy: ' \
                f'{float(train_accuracy)}, Val Loss: {float(val_loss)}, Val Accuracy: {float(val_accuracy)}'
            print(template)
            with train_summary_writer.as_default():
                tf.summary.scalar('loss_after_arch_params_update',
                                  train_loss,
                                  step=epoch)
                tf.summary.scalar('accuracy_after_arch_params_update',
                                  train_accuracy,
                                  step=epoch)

            with val_summary_writer.as_default():
                tf.summary.scalar('loss_after_arch_params_update',
                                  val_loss,
                                  step=epoch)
                tf.summary.scalar('accuracy_after_arch_params_update',
                                  val_accuracy,
                                  step=epoch)

            # Reset metrics for reuse
            accuracy_metric.reset_states()
            loss_metric.reset_states()
            val_accuracy_metric.reset_states()
            val_loss_metric.reset_states()

        define_temperature(new_temperature)

    print("Training Completed!!")

    print("Architecture params: ")
    print(arch_params)
    post_training_analysis(model, args['exported_architecture'])
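exponential_decay and define_temperature are helpers from the surrounding project. A plausible reading of exponential_decay(init_value, decay_steps, decay_rate), by analogy with tf.keras.optimizers.schedules.ExponentialDecay, is the sketch below; the project's actual helper may differ (e.g. staircase behavior).

def exponential_decay(init_value, decay_steps, decay_rate):
    """Assumed shape of the helper: f(step) = init * rate ** (step / steps)."""
    def fn(step):
        return init_value * decay_rate ** (step / decay_steps)
    return fn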