Example #1
    def ret(input_img):
        # Only inpaint frames that actually contain the green corner marker.
        if not cutter.is_green(input_img):
            return input_img
        img = input_img.copy()
        if len(img.shape) == 3:
            # Add a batch dimension for the generator.
            img = np.expand_dims(img, 0)
        y1, y2, x1, x2 = self.dims
        img2, _, _ = ms.mask_green_corner(img.copy(), val=-1)
        predicted = np.squeeze(self.generator.predict(img2), 0)
        img = np.squeeze(img, 0)
        # Paste the predicted corner patch back into the original frame.
        img[y1:y2, x1:x2] = predicted
        return np.expand_dims(img, 0)
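
A minimal usage sketch for example #1, assuming `ret` is in scope (it references `self.generator` and `self.dims`, so in the original it presumably lives inside a method of the inpainting class) and that `frames` holds images already normalized to [-1, 1]; the variable names below are illustrative, not part of the original code.

# Hypothetical usage: ret() only adds a batch dimension for frames it
# inpaints, so squeeze each result before stacking them back into a batch.
inpainted = np.stack([np.squeeze(ret(f)) for f in frames], axis=0)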
Example #2
    def save_img(self, epoch):
        # Fixed test image used to track generator progress across epochs.
        test = plotload.load_one_img(
            (256, 256),
            dest='/home/mathias/Documents/kvasir-dataset-v2/med/stool-plenty/1.jpg',
            extra_dim=True)
        test2, _, _ = ms.mask_green_corner(test.copy(), val=-1)
        test2 = self.generator.predict(test2)

        # Images are in [-1, 1]; rescale to [0, 1] for display.
        plt.subplot(121)
        plt.imshow(test[0] * 0.5 + 0.5)
        plt.subplot(122)
        plt.imshow(test2[0] * 0.5 + 0.5)
        plt.savefig(f"epoch_{epoch}.jpg")
        plt.savefig("tmp.jpg")
Example #3
    def train_model(self):
        def t(m, bol):
            # Toggle layer trainability (used to freeze/unfreeze the discriminator).
            for layer in m.layers:
                layer.trainable = bol

        if self.info is None:
            print("Warning: no info found, prompting for info")
            self.set_training_info()
        # Pull the training hyperparameters (batch_size, epochs, corner, soft, ...)
        # from self.info into the global namespace.
        globals().update(self.info)
        if self.combined is None:
            print("Error: no model loaded")
            return
        if self.pretrained:
            print("Warning: model has pretrained weights")
        half_batch = batch_size // 2
        for epoch in tqdm(range(epochs)):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            X_train = plotload.load_polyp_batch(self.img_shape,
                                                batch_size,
                                                data_type='none',
                                                crop=False)
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            if corner:
                masked_imgs, missing, _ = ms.mask_green_corner(imgs)
            else:
                masked_imgs, missing, _ = ms.mask_randomly_square(
                    imgs, self.mask_height, self.mask_width)

            # Generate a half batch of new images
            gen_missing = self.generator.predict(masked_imgs)

            if soft:
                # Soft labels: valid targets in [0.9, 1.1), fake targets in [0.0, 0.1).
                valid = 0.2 * np.random.random_sample((half_batch, 1)) + 0.9
                fake = 0.1 * np.random.random_sample((half_batch, 1))
            else:
                valid = np.ones((half_batch, 1))
                fake = np.zeros((half_batch, 1))

            if epoch % 120 == 0:
                # Small shakeup to get out of local minima: flip the labels.
                valid, fake = fake, valid

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(missing, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_missing, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------
            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            if corner:
                masked_imgs, missing_parts, _ = ms.mask_green_corner(imgs,
                                                                     val=-1)
            else:
                masked_imgs, missing_parts, _ = ms.mask_randomly_square(
                    imgs, self.mask_height, self.mask_width)

            # Generator wants the discriminator to label the generated images as valid
            valid = np.ones((batch_size, 1))

            # Train the generator
            t(self.discriminator, False)
            g_loss = self.combined.train_on_batch(masked_imgs,
                                                  [missing_parts, valid])
            t(self.discriminator, True)

            # Plot the progress
            if epoch % 10 == 0:
                print(
                    "%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" %
                    (epoch, d_loss[0], 100 * d_loss[1], g_loss[0], g_loss[1]))
                self.save_img(epoch)

            if g_loss[1] < self.threshold:
                self.threshold = g_loss[1]
                self.generator.save(
                    f"models/CE-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.discriminator.save(
                    f"models/CE-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.combined.save(
                    f"models/CE-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.combined.save_weights(
                    f"models/CE-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w.h5"
                )
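
For context, a hedged sketch of how this training loop might be driven. The class name `CCgan` and its constructor arguments are assumptions (the examples only show method bodies); the dictionary keys are exactly the names that `globals().update(self.info)` is expected to provide (batch_size, epochs, corner, soft).

# Hypothetical driver, assuming a CCgan-like class exposing train_model() and
# set_training_info() as shown above; values here are illustrative only.
gan = CCgan(img_shape=(256, 256, 3))
gan.info = {
    "batch_size": 16,  # example #3 derives half_batch from this
    "epochs": 10000,
    "corner": True,    # mask the fixed green corner, not a random square
    "soft": True,      # use soft/noisy discriminator labels
}
gan.train_model()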
Example #4
    def train_model(self):
        def t(m, bol):
            # Toggle layer trainability (used to freeze/unfreeze the discriminator).
            for layer in m.layers:
                layer.trainable = bol

        if self.info is None:
            print("Warning: no info found, prompting for info")
            self.set_training_info()
        # Pull the training hyperparameters (batch_size, epochs, corner, soft, ...)
        # from self.info into the global namespace.
        globals().update(self.info)
        if self.combined is None:
            print("Error: no model loaded")
            return
        if self.pretrained:
            print("Warning: model has pretrained weights")
        half_batch = batch_size
        for epoch in tqdm(range(epochs)):
            X_train = plotload.load_polyp_batch(self.img_shape,
                                                batch_size,
                                                data_type='med/none',
                                                crop=False)

            if corner:
                masked_imgs, missing, mask = ms.mask_green_corner(X_train)
                # Embed the cut-out corner patches back into frames of the
                # same shape as X_train.
                m = np.zeros(shape=X_train.shape)
                for i in range(X_train.shape[0]):
                    m[i, mask[0]:mask[1], mask[2]:mask[3]] = missing[i]
                missing = m
            else:
                masked_imgs, missing, mask = ms.mask_from_template(X_train)

            if soft:
                # Soft labels: valid targets in [0.9, 1.1), fake targets in [0.0, 0.1).
                valid = 0.2 * np.random.random_sample((half_batch, 1)) + 0.9
                fake = 0.1 * np.random.random_sample((half_batch, 1))
            else:
                valid = np.ones((half_batch, 1))
                fake = np.zeros((half_batch, 1))

            # ---------------------
            #  Train Generator
            # ---------------------

            valid = np.ones((batch_size, 1))

            # Train the generator
            t(self.discriminator, False)
            g_loss = self.combined.train_on_batch(masked_imgs,
                                                  [X_train, valid])
            t(self.discriminator, True)

            # ---------------------
            #  Train discriminator
            # ---------------------

            gen_fake = self.generator.predict(masked_imgs)
            gen_fake = ms.combine_imgs_with_mask(gen_fake, X_train, mask)

            if epoch % 120 == 0 and epoch != 0:
                # Small shakeup to get out of local minima: flip the labels.
                fake, valid = valid, fake

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(X_train, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # Plot the progress
            print("[D: %f  G: %f, mse: %f]" %
                  (d_loss[0], g_loss[0], g_loss[1]))
            if g_loss[1] < self.threshold:
                self.threshold = g_loss[1]
                self.generator.save(
                    f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.discriminator.save(
                    f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.combined.save(
                    f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5"
                )
                self.combined.save_weights(
                    f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com.h5"
                )
                self.discriminator.save_weights(
                    f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis.h5"
                )
                self.generator.save_weights(
                    f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen.h5"
                )
        if g_loss[1] < self.threshold:
            self.threshold = g_loss[1]
            self.generator.save(
                f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5"
            )
            self.discriminator.save(
                f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5"
            )
            self.combined.save(
                f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5"
            )
            self.combined.save_weights(
                f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com_fin.h5"
            )
            self.discriminator.save_weights(
                f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis_fin.h5"
            )
            self.generator.save_weights(
                f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen_fin.h5"
            )