Example #1
def main():
    img_width = 32
    img_height = 32
    img_channels = 3

    image_path = './dataset/images'
    caption_path = './dataset/captions'
    weights_path = './weights'

    image_caption_pair = load_image_and_text(img_width, img_height, image_path,
                                             caption_path)
    shuffle(image_caption_pair)

    gan = Gan()
    gan.img_width = img_width
    gan.img_height = img_height
    gan.img_channels = img_channels
    gan.random_input_dim = 20
    gan.glove_path = './glove'

    batch_size = 20
    epoch = 10000

    gan.fit(image_caption_pair, epoch, batch_size,
            './output/intermediate_output', 50, weights_path)
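The snippet defines main() but does not show how it is invoked. As a small addition (not part of the original example), a standard entry-point guard would run it when the file is executed directly:

if __name__ == '__main__':
    main()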
Example #2
def main():

    gan = Gan()
    gan.load_model()

    for i in range(10):
        ip = input('Enter image description\n-')
        for j in range(10):
            gen_img = gan.generate_image_from_text(ip)
            gen_img.save('./output/' + 'result' + str(i) + '-' + str(j) +
                         '.jpg')
Example #3
    def __init__(self, rows, cols, channels, dataFolder):
        self.img_rows = rows
        self.img_cols = cols
        self.channels = channels
        self.dataFolder = dataFolder
        # TODO : create a data generator and read files from dataFolder in order to generate train set and validation set
        # maybe use one hot encoding for the class
        self.posters = None

        self.gan = Gan(rows, cols, channels)
        self.discriminator = self.gan.discriminator_model()
        self.adversarial = self.gan.adversarial_model()
        self.generator = self.gan.generator()
Example #4
def run_game():
    pygame.init()
    screen = pygame.display.set_mode(
        (setting.screen_width, setting.screen_height))
    icon = pygame.image.load('image/icon.png')
    pygame.display.set_caption("汉诺塔")  # window title: "Tower of Hanoi"
    pygame.display.set_icon(icon)

    f1 = pygame.freetype.Font(r'C:\Windows\Fonts\msyh.ttc', 36)
    # move counter (kept in a one-element list so the hanoi thread can update it in place)
    n_step = [0]

    # create the rod objects ("Gan" here is pinyin for 杆, a rod, not a GAN)
    gans = []
    gans.append(Gan(setting, 200, 10, 350, screen))
    gans.append(Gan(setting, 500, 10, 350, screen))
    gans.append(Gan(setting, 800, 10, 350, screen))

    # create the group of disks
    screen_x = screen.get_rect().centerx
    screen_bottom = screen.get_rect().bottom
    # dish_widthes = [setting.dish_width_0- for i in range(setting.dishes_n)]  # the widths of all disks could be precomputed into an array first
    for i in range(setting.dishes_n):
        new_dish = Dish(screen, setting, gans[0],
                        setting.dish_width_0 - 20 * i)
        gans[0].dishes.append(new_dish)

    # hanoi thread: sleeps once after every move
    t1 = threading.Thread(target=gf.hanoi,
                          args=(gans[0], gans[1], gans[2], setting.dishes_n,
                                n_step))
    # start the main game loop
    while True:
        # listen for keyboard and mouse events
        gf.check_events(setting, gans, t1)

        # refresh the disk state
        # dishes[-1].move(3)
        # dishes[-2].move(2)

        # redraw the screen on every pass so the most recent drawing is visible
        gf.update_screen(setting, screen, gans, n_step, f1)
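gf.hanoi itself is not shown in this example. As a hedged illustration only, the recursion it presumably wraps looks roughly like the sketch below; hanoi_sketch, the argument order, and the 0.5 s delay are assumptions for illustration, not the project's actual code (the real routine also moves Dish sprites between the rods rather than plain list entries).

import time

def hanoi_sketch(src, aux, dst, n, n_step):
    # Move n disks from rod src to rod dst, using aux as the spare rod.
    if n == 0:
        return
    hanoi_sketch(src, dst, aux, n - 1, n_step)   # clear n-1 disks onto the spare rod
    dst.dishes.append(src.dishes.pop())          # move the largest remaining disk
    n_step[0] += 1                               # count the move
    time.sleep(0.5)                              # pause so the move is visible on screen
    hanoi_sketch(aux, src, dst, n - 1, n_step)   # bring the n-1 disks back on top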
Example #5
def main(argv):
    if len(argv) != 2:
        print("Usage of this program:\npython main.py <path to images folder>")
        return
    folder = argv[1]

    training_data = None

    if not os.path.isfile(folder + ".pickle"):
        training_data = load_images_from_folder(folder)
        with open(folder + ".pickle", "wb") as f:
            pickle.dump(training_data, f)
    else:
        with open(folder + ".pickle", "rb") as f:
            training_data = pickle.load(f)
    print(f"loaded {len(training_data)} images as numpy array.")
    gan = None

    if os.path.isfile("gan.model"):
        gan = pickle.load(open("gan.model", "rb"))
    else:
        gan = Gan()
    gan.train(training_data, 1000)
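load_images_from_folder is not shown in this example. A minimal sketch of what such a helper might look like (hypothetical: the PIL dependency, the 64x64 resize, and the [0, 1] scaling are assumptions, not taken from the source):

import os
import numpy as np
from PIL import Image

def load_images_from_folder(folder, size=(64, 64)):
    images = []
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        try:
            with Image.open(path) as img:
                # resize and scale each image into a float array in [0, 1]
                images.append(np.asarray(img.convert('RGB').resize(size),
                                         dtype=np.float32) / 255.0)
        except OSError:
            continue  # skip files that are not readable images
    return np.stack(images)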
Example #6
def mkmodel():
    gan = Gan()
    gan.compile()
    return gan
Example #7
    plt.show()


X = get_normal_shaped_arrays(60000, (1, 784))

X_train, y_train, X_test, y_test = discriminator_train_test_set(
    X, X_train, params.DISCRIMINATOR_TRAIN_TEST_SPLIT)

discriminator = Discriminator(params.DISCRIMINATOR_BATCH_SIZE,
                              params.DISCRIMINATOR_EPOCHS)
discriminator.train(X_train, y_train)
print(discriminator.eval(X_test, y_test))

generator = Generator()

gan = Gan(generator, discriminator)
gan.set_discriminator_trainability(False)
gan.show_trainable()

X = get_normal_shaped_arrays(100000, (1, 16))
y = np.tile([0, 1], (100000, 1))

generator = gan.train_generator(X, y)

generator.summary()

pred1 = generator.predict(X[0].reshape(1, 16))
Example #8
class filmPosterGan():
    def __init__(self, rows, cols, channels, dataFolder):
        self.img_rows = rows
        self.img_cols = cols
        self.channels = channels
        self.dataFolder = dataFolder
        # TODO : create a data generator and read files from dataFolder in order to generate train set and validation set
        # maybe use one hot encoding for the class
        self.posters = None

        self.gan = Gan(rows, cols, channels)
        self.discriminator = self.gan.discriminator_model()
        self.adversarial = self.gan.adversarial_model()
        self.generator = self.gan.generator()

    def generate_image(self,
                       nb_images=1,
                       folder='./',
                       model_name='./default.h5'):
        self.generator = load_model(model_name)
        for k in range(nb_images):
            noise = self.generate_noise(1, 100)
            img_gen = self.generator.predict(noise)
            img = np.reshape(img_gen, (32, 32, 3))
            plt.imshow(img)
            plt.axis('off')
            filename = folder + '/poster_' + str(k)
            plt.savefig(filename)
            plt.close('all')

    def load_data(self, grayscale=True):

        dataGen = ImageDataGenerator(
            #preprocessing_function=changeColorSpace
        )
        self.posters = dataGen.flow_from_directory(
            self.dataFolder,
            target_size=(self.img_rows, self.img_cols),
            batch_size=16,
            class_mode='categorical',
            color_mode='grayscale' if grayscale else 'rgb')
        return self.posters

    def generate_noise(self, n_samples, noise_dim):
        X = np.random.normal(0, 1, size=(n_samples, noise_dim))
        return X

    def show_imgs(self, batchidx):
        noise = self.generate_noise(9, 100)
        gen_imgs = self.generator.predict(noise)

        fig, axs = plt.subplots(3, 3)
        count = 0
        for i in range(3):
            for j in range(3):
                # Don't scale the images back; let Keras handle it
                img = image.array_to_img(gen_imgs[count], scale=True)
                axs[i, j].imshow(img)
                axs[i, j].axis('off')
                count += 1
        plt.show()

    def train(self,
              N_EPOCHS=100,
              batch_size=16,
              save_interval=200,
              NB_DATA=3866,
              model_name='./default.h5'):

        num_batches = int(NB_DATA / batch_size)
        for epoch in range(N_EPOCHS):
            print("Epoch ", epoch)
            cum_d_loss = 0.
            cum_g_loss = 0.

            for batch_idx in tqdm(range(num_batches), file=sys.stdout):
                # Get the next set of real images to be used in this iteration
                #images = X_train[batch_idx*BATCH_SIZE : (batch_idx+1)*BATCH_SIZE]
                (images, labels_real) = self.posters.next()
                if (images.shape[0] != batch_size):
                    print('reset')
                    self.posters.reset()
                    (images, labels_real) = self.posters.next()
                noise_data = self.generate_noise(batch_size, 100)
                generated_images = self.generator.predict(noise_data)

                # Train on soft labels (add noise to labels as well)
                noise_prop = 0.05  # Randomly flip 5% of labels

                # Prepare labels for real data
                true_labels = np.zeros((batch_size, 1)) + np.random.uniform(
                    low=0.0, high=0.1, size=(batch_size, 1))
                flipped_idx = np.random.choice(np.arange(len(true_labels)),
                                               size=int(noise_prop *
                                                        len(true_labels)))
                true_labels[flipped_idx] = 1 - true_labels[flipped_idx]

                # Train discriminator on real data
                d_loss_true = self.discriminator.train_on_batch(
                    images, true_labels)

                # Prepare labels for generated data
                gene_labels = np.ones((batch_size, 1)) - np.random.uniform(
                    low=0.0, high=0.1, size=(batch_size, 1))
                flipped_idx = np.random.choice(np.arange(len(gene_labels)),
                                               size=int(noise_prop *
                                                        len(gene_labels)))
                gene_labels[flipped_idx] = 1 - gene_labels[flipped_idx]

                # Train discriminator on generated data
                d_loss_gene = self.discriminator.train_on_batch(
                    generated_images, gene_labels)

                d_loss = 0.5 * np.add(d_loss_true, d_loss_gene)
                cum_d_loss += float(np.asarray(d_loss).ravel()[0])

                # Train the generator: target label 0 ("real" in this labelling
                # scheme) so it learns to fool the discriminator
                noise_data = self.generate_noise(batch_size, 100)
                g_loss = self.adversarial.train_on_batch(
                    noise_data, np.zeros((batch_size, 1)))
                cum_g_loss += float(np.asarray(g_loss).ravel()[0])

            print('  Epoch: {}, Generator Loss: {}, Discriminator Loss: {}'.format(
                epoch + 1, cum_g_loss / num_batches, cum_d_loss / num_batches))
            self.show_imgs("epoch" + str(epoch))
        self.generator.save(model_name)

    def plot_images(self, save2file=False, samples=16, noise=None, step=0):
        filename = 'posters.png'
        if noise is None:
            noise = np.random.uniform(-1.0, 1.0, size=[samples, 100])
        else:
            filename = "posters_%d.png" % step
        images = self.generator.predict(noise)

        plt.figure(figsize=(10, 10))
        print("images[0] shape")
        print(images.shape[0])
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i + 1)
            image = images[i, :, :, :]
            if (self.channels == 1):
                image = np.reshape(image, [self.img_rows, self.img_cols])
                plt.imshow(image, cmap='gray')
            else:
                image = 255 * np.reshape(
                    image, [self.img_rows, self.img_cols, self.channels])
                plt.imshow(image.astype('uint8'))
            plt.axis('off')
        plt.tight_layout()
        if save2file:
            plt.savefig(filename)
            plt.close('all')
        else:
            plt.show()
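A hedged usage sketch for the class above; the folder path, the 32x32 RGB size (chosen to match the hard-coded reshape in generate_image), and the file names are placeholders, not values from the source:

if __name__ == '__main__':
    poster_gan = filmPosterGan(rows=32, cols=32, channels=3, dataFolder='./posters')
    poster_gan.load_data(grayscale=False)   # builds the directory iterator consumed by train()
    poster_gan.train(N_EPOCHS=100, batch_size=16, model_name='./poster_gan.h5')
    poster_gan.generate_image(nb_images=5, folder='./output', model_name='./poster_gan.h5')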
Example #9
from datagen import load_dataset

# Global
import numpy as np
import random as rd
import matplotlib.pyplot as plt
from tensorflow import keras as ks
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Flatten, Dropout

## Parameters and dataset
Ldim = 100
P = 10
Shape = (28, 28, 1)

X, Y = load_dataset()

## Gan
gan = Gan(ldim=Ldim, p=P, shape=Shape)
gan.load('C:/Users/meri2/Documents/Projects/MNSIT_GAN/Attempt_0')
gan.make_gan()

# epochs=0: presumably no further training here, just reusing the loaded weights
losses, accuracies, times = gan.train(
    X,
    Y,
    epochs=0,
    batch_size=256,
)

gan.samples(7)