Example #1
    def _train_disc(self, images, labels, disc_network, disc_loss_fn):
        with tf.GradientTape() as tape:
            logits = disc_network(images)
            disc_loss = disc_loss_fn(labels, logits)

        grads = tape.gradient(disc_loss, disc_network.trainable_weights)

        return disc_loss, grads
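
The helper computes the discriminator loss and gradients but leaves applying them to the caller. A minimal sketch of the call site, assuming a Keras discriminator from disc_network() and a hypothetical trainer object exposing _train_disc (see Example #8 for the real loop):

import tensorflow as tf

disc, _ = disc_network()
disc_loss_fn = tf.keras.losses.BinaryCrossentropy()
d_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)

# combined_images / combined_labels come from the surrounding training loop
disc_loss, grads = trainer._train_disc(combined_images, combined_labels,
                                       disc, disc_loss_fn)
d_optimizer.apply_gradients(zip(grads, disc.trainable_weights))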
Example #2
    def _train_gen(self, random_latent_vectors, labels, disc_network,
                   generator, gan_loss_fn):
        with tf.GradientTape() as tape:
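            # The tape records ops in both the generator and the
            # discriminator, but only the generator's weights are
            # differentiated below, so the discriminator stays fixed here.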
            logits = disc_network(generator(random_latent_vectors))
            g_loss = gan_loss_fn(labels, logits)

        grads = tape.gradient(g_loss, generator.trainable_weights)

        return g_loss, grads
Example #3
def init():
    global model
    # AZUREML_MODEL_DIR is an environment variable created during deployment.
    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
    # For multiple models, it points to the folder containing all deployed models (./azureml-models)

    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                              'classifier.hdf5')
    #model = joblib.load(model_path)
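    # disc_network() returns a (discriminator, classifier) pair with shared
    # weights; only the classifier head is needed for scoring.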
    _, model = disc_network()
    model.load_weights(model_path)
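
In an Azure ML scoring script, init() is paired with a run() entry point invoked for each request. A minimal sketch, assuming a JSON payload with a 'data' field holding image arrays (the payload format is an assumption, not taken from the original code):

import json
import numpy as np

def run(raw_data):
    # Azure ML calls run() per scoring request; init() has already loaded
    # the global model.
    data = np.array(json.loads(raw_data)['data'])  # assumed input shape
    predictions = model.predict(data)
    return predictions.tolist()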
Example #4
def test_if_LowConfUnlabeledSampler___sample___is_running_properly():
    unlabeled_path = os.path.join(
        os.path.dirname(__file__),
        "data/low_conf_unlabeled_sampler/sample/unlabeled_data/data")
    unlabeled_images_list = glob.glob(os.path.join(unlabeled_path, '*.png'))
    classifier_file = os.path.join(
        os.path.dirname(__file__),
        "data/low_conf_unlabeled_sampler/sample/model_register/classifier.hdf5"
    )
    assert os.path.isfile(classifier_file)
    _, classifier = disc_network()
    classifier.load_weights(classifier_file)
    low_conf_sampler = LowConfUnlabeledSampler()
    sampled_images = low_conf_sampler.sample(classifier, unlabeled_images_list,
                                             30)
    assert len(sampled_images) == 30
Example #5
    def register(self, validated_model_folder, registered_model_folder,
                 azure_ml_logs_provider, web_service_deployer):

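        # prep_data.py tags its run when the training step is skipped; read
        # that tag back from the sibling run in the same pipeline.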
        IGNORE_TRAIN_STEP = azure_ml_logs_provider.get_tag_from_brother_run(
            "prep_data.py", "IGNORE_TRAIN_STEP")
        if IGNORE_TRAIN_STEP is True:
            print("Ignore register step")
            self._execute_sampling_pipeline()
            print("launch sampling state")
            return

        _, classifier = disc_network()
        classifier_name = "classifier.hdf5"
        validated_model_file = os.path.join(validated_model_folder,
                                            classifier_name)
        classifier.load_weights(validated_model_file)

        self.run.upload_file(name=self.config.MODEL_NAME,
                             path_or_stream=validated_model_file)

        #_ = self.run.register_model(model_name=self.config.MODEL_NAME,
        #                        tags={'Training context':'Pipeline'},
        #                        model_path=validated_model_file)

        Model.register(workspace=self.run.experiment.workspace,
                       model_path=validated_model_file,
                       model_name=self.config.MODEL_NAME,
                       tags={'Training context': 'Pipeline'})

        acc = azure_ml_logs_provider.get_log_from_brother_run(
            "eval_model.py", "acc")
        print("acc :", acc)
        #deploy model
        if web_service_deployer.to_deploy(acc):
            print("deploying...")
            web_service_deployer.deploy()
            print("model deployed")

        # not that important, except for the test
        registered_model_file = os.path.join(registered_model_folder,
                                             classifier_name)
        os.makedirs(registered_model_folder, exist_ok=True)
        _ = shutil.copy(validated_model_file, registered_model_file)
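
web_service_deployer.to_deploy(acc) gates deployment on the evaluation accuracy, but its implementation is not shown here. A hypothetical minimal version, assuming a fixed accuracy threshold (the 0.85 value is illustrative only):

class WebServiceDeployer:

    ACC_THRESHOLD = 0.85  # illustrative threshold, not from the original code

    def to_deploy(self, acc):
        # Deploy only when the evaluated accuracy clears the threshold.
        return acc is not None and float(acc) >= self.ACC_THRESHOLD

    def deploy(self):
        # The actual Azure ML web-service deployment would go here.
        ...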
Example #6
    def evaluate(self, input_data, model_candidate_folder,
                 validated_model_folder):

        IGNORE_TRAIN_STEP = self.azure_ml_logs_provider.get_tag_from_brother_run(
            "prep_data.py", "IGNORE_TRAIN_STEP")
        if IGNORE_TRAIN_STEP is True:
            print("Ignore evaluate step")
            return

        test_datagen = ImageDataGenerator(rescale=1. / 255)
        test_generator = test_datagen.flow_from_directory(
            os.path.join(input_data, "eval/"),
            target_size=(self.IMAGE_RESIZE, self.IMAGE_RESIZE),
            batch_size=self.BATCH_SIZE_TRAINING_LABELED_SUBSET,
            class_mode='categorical')  # evaluation data

        # Round up so the final partial batch is evaluated as well.
        steps = (len(test_generator.filenames) +
                 self.BATCH_SIZE_TRAINING_LABELED_SUBSET - 1
                 ) // self.BATCH_SIZE_TRAINING_LABELED_SUBSET

        _, classifier = disc_network()
        classifier_name = "classifier.hdf5"
        model_candidate_file = os.path.join(model_candidate_folder,
                                            classifier_name)
        classifier.load_weights(model_candidate_file)

        classifier.compile(loss="categorical_crossentropy",
                           metrics=["accuracy"],
                           optimizer="adam")
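        # Note: evaluate_generator is deprecated in newer TF releases;
        # classifier.evaluate(test_generator, ...) is the replacement.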
        loss, acc = classifier.evaluate_generator(test_generator,
                                                  steps=steps,
                                                  verbose=0)
        self.run.log("acc", round(acc, 5))

        validated_model_file = os.path.join(validated_model_folder,
                                            classifier_name)

        os.makedirs(validated_model_folder, exist_ok=True)

        _ = shutil.copy(model_candidate_file, validated_model_file)
Example #7
    def process(self, input_data, register_model_folder, sampled_data,
                random_sampler, lowfonc_sampler, imagepath_list_uploader):

        unlabeled_path = os.path.join(input_data, "unlabeled_data/data")
        unlabeled_images_list = glob.glob(
            os.path.join(unlabeled_path, '*.png'))
        sampled_images = random_sampler.sample(unlabeled_images_list, 200)

        classifier_name = "classifier.hdf5"
        classifier_file = os.path.join(register_model_folder, classifier_name)
        if os.path.isfile(classifier_file):
            _, classifier = disc_network()
            classifier.load_weights(classifier_file)

            lowconf_sampled_images = lowfonc_sampler.sample(
                classifier, unlabeled_images_list, 180)
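            # Keep 20 of the 200 random picks and top them up with 180
            # low-confidence images, so 200 images are forwarded either way.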
            sampled_images = sampled_images[:20] + lowconf_sampled_images

        os.makedirs(sampled_data, exist_ok=True)
        for image_path in sampled_images:
            image_path_dest = os.path.join(sampled_data,
                                           os.path.basename(image_path))
            shutil.copy(image_path, image_path_dest)

        imagepath_list_uploader.upload(sampled_images, "sampled_data/current")
Example #8
    def train(self, input_data, model_candidate_folder):
        #Prepare data
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            featurewise_center=True,
            featurewise_std_normalization=True,
            zca_whitening=True,
            rotation_range=90,
            width_shift_range=0.2,
            height_shift_range=0.2,
            validation_split=0.2)  # set validation split
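        # Note: featurewise_center, featurewise_std_normalization and
        # zca_whitening require calling train_datagen.fit() on sample data
        # first; Keras warns if the generator has never been fit.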

        labeled_subset = train_datagen.flow_from_directory(
            os.path.join(input_data, "train/"),
            target_size=(self.IMAGE_RESIZE, self.IMAGE_RESIZE),
            batch_size=self.BATCH_SIZE_TRAINING_LABELED_SUBSET,
            class_mode='categorical',
            subset='training')  # set as training data

        unlabeled_dataset = train_datagen.flow_from_directory(
            os.path.join(input_data, "unlabeled/"),
            target_size=(self.IMAGE_RESIZE, self.IMAGE_RESIZE),
            batch_size=self.BATCH_SIZE_TRAINING_UNLABELED_SUBSET,
            class_mode=None,
            subset='training')

        x_batch, y_batch = next(labeled_subset)

        #instance models
        generator = generator_network(latent_dim=128)
        disc, classifier = disc_network()

        assert classifier(np.expand_dims(x_batch[0], 0)).shape == (1, 2)
        assert disc(np.expand_dims(x_batch[0], 0)).shape == (1, 1)
        assert generator(np.random.normal(size=(1, 128))).shape == (1, 50, 50,
                                                                    3)
        """Define the optimizers
        """
        c_optimizer = tf.keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
        d_optimizer = tf.keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
        g_optimizer = tf.keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
        """Define the loss functions
        """
        classifier_loss_fn = tf.keras.losses.CategoricalCrossentropy()
        disc_loss_fn = tf.keras.losses.BinaryCrossentropy()
        gan_loss_fn = tf.keras.losses.BinaryCrossentropy()
        """The shared weights of the classifier and the discriminator would be updated on a set of 32 images:

            16 images from the set of only hundred labeled examples.
            8 images from the unlabeled examples.
            8 fake images generated by the generator.
        """

        ################## Training ##################
        ##############################################
        for epoch in tqdm(range(2)):  # the full run uses range(7500)
            # Objects that track the mean losses across each epoch.
            c_loss_mean = tf.keras.metrics.Mean()
            d_loss_mean = tf.keras.metrics.Mean()
            g_loss_mean = tf.keras.metrics.Mean()
            """ Train the classifier
            for (images, labels) in labeled_subset:
            """
            images, labels = next(iter(labeled_subset))  # 16 images
            classification_loss, grads = self._train_classifier(
                images, labels, classifier, classifier_loss_fn)
            c_optimizer.apply_gradients(
                zip(grads, classifier.trainable_weights)
            )  # updates the weights shared with the discriminator
            c_loss_mean.update_state(classification_loss)
            """
            ## Train discriminator and generator ##
            #######################################
            Train discriminator
            """
            real_images = next(iter(unlabeled_dataset))  # 8 real images
            batch_size = tf.shape(real_images)[0]
            random_latent_vectors = tf.random.normal(shape=(batch_size, 128))

            generated_images = generator(
                random_latent_vectors)  # 8 fake images
            combined_images = tf.concat([generated_images, real_images],
                                        axis=0)  # 16 total images
            combined_labels = tf.concat(
                [tf.zeros((batch_size, 1)),
                 tf.ones((batch_size, 1))],
                axis=0)  # 0 -> Fake images, 1 -> Real images

            disc_loss, grads = self._train_disc(combined_images,
                                                combined_labels, disc,
                                                disc_loss_fn)
            d_optimizer.apply_gradients(
                zip(grads, disc.trainable_weights)
            )  # updates the weights shared with the classifier
            d_loss_mean.update_state(disc_loss)

            # Train the generator via signals from the discriminator
            random_latent_vectors = tf.random.normal(shape=(batch_size * 4,
                                                            128))  # 32 images
            misleading_labels = tf.ones((batch_size * 4, 1))

            g_loss, grads = self._train_gen(random_latent_vectors,
                                            misleading_labels, disc, generator,
                                            gan_loss_fn)
            g_optimizer.apply_gradients(zip(grads,
                                            generator.trainable_weights))
            g_loss_mean.update_state(g_loss)

            if epoch % 100 == 0:
                print(
                    "epoch: {} classification loss: {:.3f} dicriminator loss: {:.3f} gan loss:{:.3f}"
                    .format(epoch, c_loss_mean.result(), d_loss_mean.result(),
                            g_loss_mean.result()))
        """ Save classifier and generator
        """
        os.makedirs(model_candidate_folder, exist_ok=True)
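        # The models are written as full HDF5 files; later pipeline steps
        # reload them with load_weights() into a fresh disc_network().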
        classifier_file = os.path.join(model_candidate_folder,
                                       "classifier.hdf5")
        classifier.save(classifier_file)
        self.run.upload_file(name="diagnoz_classifier",
                             path_or_stream=classifier_file)
        generator_file = os.path.join(model_candidate_folder, "generator.hdf5")
        generator.save(generator_file)
        self.run.upload_file(name="diagnoz_generator",
                             path_or_stream=generator_file)