Example #1
    def test__LPPLLayer(self):
        """given"""
        model = Sequential([LPPLLayer()])
        model.compile(loss='mse', optimizer=SGD(0.2, 0.01))  # SGD(learning_rate, momentum)
        # model.compile(loss='mse', optimizer='adam')

        x = np.log(df["Close"].values)
        x = ReScaler((x.min(), x.max()), (1, 2))(x)
        x = x.reshape(1, -1)

        x2 = np.vstack([x, x])
        """when"""
        model.fit(x2,
                  x2,
                  epochs=5000,
                  verbose=0,
                  callbacks=[EarlyStopping('loss')])
        """then"""
        print(model.predict_on_batch(x))
        res = pd.DataFrame({
            "close": x[0],
            "lppl": model.predict_on_batch(x)
        }, index=df.index)
        res.to_csv('/tmp/lppl.csv')
        print(res.head())
        print(model.layers[0].get_weights())
Example #2
 def glove(self, embedding_index, vtr_dim):  # return GloVe embedding texts
     embedding_matrix = np.zeros((self.vocab_size + 1, vtr_dim))
     for word, i in self.tokenizer.word_index.items():
         # words not found in the embedding index stay all-zeros
         embedding_vector = embedding_index.get(word)
         if embedding_vector is not None:
             embedding_matrix[i] = embedding_vector
     model = Sequential()
     model.add(
         Embedding(self.vocab_size + 1,
                   output_dim=vtr_dim,
                   weights=[embedding_matrix],
                   trainable=False))
     model.compile('rmsprop', 'mse')
     try:
         encoded_tweets = np.asarray(
             pad_sequences(self.tokenizer.texts_to_sequences(
                 self.processed_texts),
                           maxlen=140,
                           padding='post'))
     except Exception:  # fall back to the raw tweets if processed texts are unavailable
         encoded_tweets = np.asarray(
             pad_sequences(self.tokenizer.texts_to_sequences(self.df.tweet),
                           maxlen=140,
                           padding='post'))
     embedding_texts = model.predict_on_batch(encoded_tweets)
     return embedding_texts
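The embedding_index argument is built elsewhere; a minimal sketch of the usual way to load it from a GloVe text file (the file name, vector dimension, and the load_glove_index helper below are assumptions, not part of the original class) could look like this:

import numpy as np

def load_glove_index(path='glove.6B.100d.txt'):
    # map each word to its pretrained GloVe vector
    embedding_index = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            embedding_index[parts[0]] = np.asarray(parts[1:], dtype='float32')
    return embedding_index

# embedding_texts = preprocessor.glove(load_glove_index(), vtr_dim=100)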
Example #3
    def __test__LinearRegressionLayer(self):
        """given"""
        df = DF_TEST.copy()
        model = Sequential()
        model.add(LinearRegressionLayer())
        model.compile(loss='mse', optimizer='nadam')

        x = df["Close"].values.reshape(1, -1)

        """when"""
        model.fit(x, x, epochs=500, verbose=0)

        "then"
        res = pd.DataFrame({"close": df["Close"], "reg": model.predict_on_batch(x)}, index=df.index)
        print(res.head())
Example #4
class Discriminator:
    def __init__(self, expert_data: np.ndarray):
        self.expert_data = expert_data
        self.num_samples = expert_data.shape[0]

        val_std = np.std(self.expert_data)
        val_avg = np.average(self.expert_data)
        self.normalize = lambda trajectories: (trajectories - val_avg) / val_std

        self.discriminator = Sequential()
        self.discriminator.add(
            layers.Flatten(input_shape=expert_data[0].shape))
        self.discriminator.add(layers.Dense(32))
        self.discriminator.add(layers.LeakyReLU(alpha=0.4))
        self.discriminator.add(layers.Dense(32))
        self.discriminator.add(layers.LeakyReLU(alpha=0.2))
        self.discriminator.add(layers.Dense(1, activation='sigmoid'))
        self.discriminator.summary()
        # self.discriminator.compile(
        #     optimizer=tf.train.AdamOptimizer(),
        #     loss='sparse_categorical_crossentropy',
        #     metrics=['accuracy'],
        # )

        optimizer = Adam(0.0002, 0.5)
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

    def pretrain_discriminator(self):
        # the discriminator should at least be able to distinguish random noise
        batch_size = 32

        for i in range(4):
            expert_trajectories = self.sample_expert_data(batch_size)
            noise = np.random.normal(0.0, 1.0, size=expert_trajectories.shape)

            x_train = np.concatenate((noise, expert_trajectories))
            y_train = np.concatenate(
                (np.zeros(batch_size), np.ones(batch_size)))

            loss = self.discriminator.train_on_batch(x_train, y_train)
            if i % 8 == 0:
                print("pretrain loss: {}".format(loss))

    def predict(self, trajectory: np.ndarray):
        inputs = self.normalize(np.array([trajectory]))
        return self.discriminator.predict_on_batch(inputs)[0][0]

    def sample_expert_data(self, n: int):
        ids = np.random.randint(0, self.num_samples, size=n)
        return self.normalize(self.expert_data[ids])

    def train(self,
              trajectories: np.ndarray,
              expert_trajectories: tp.Optional[np.ndarray] = None):
        trajectories = self.normalize(trajectories)

        n_trajectories = len(trajectories)
        if expert_trajectories is None:
            expert_trajectories = self.sample_expert_data(n_trajectories)
        else:
            assert n_trajectories == len(expert_trajectories)

        x_train = np.concatenate((trajectories, expert_trajectories))
        y_train = np.concatenate(
            (np.zeros(n_trajectories), np.ones(n_trajectories)))

        return self.discriminator.train_on_batch(x_train, y_train)
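A minimal usage sketch for the class above (the synthetic expert data, its shape, and the import are assumptions; in practice the trajectories would come from an expert policy):

import numpy as np

# hypothetical expert data: 1000 trajectories of 50 timesteps x 4 features
expert_data = np.random.randn(1000, 50, 4).astype(np.float32)

disc = Discriminator(expert_data)
disc.pretrain_discriminator()                 # warm up against random noise
score = disc.predict(np.random.randn(50, 4))  # sigmoid score; values near 1 look expert-like
print(score)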
Example #5
import numpy as np

from keras import Sequential
from keras.layers import BatchNormalization

bn = BatchNormalization(input_shape=(28, 28, 3), epsilon=0)
model = Sequential([bn])
model.compile(optimizer='rmsprop', loss='mse')

n_filters = 3
# BatchNormalization weights are [gamma, beta, moving_mean, moving_variance];
# gamma=1, beta=0, mean=0, variance=1 make the layer an identity map at inference time
new_weights = [
    np.ones(n_filters, dtype=np.float32),   # gamma
    np.zeros(n_filters, dtype=np.float32),  # beta
    np.zeros(n_filters, dtype=np.float32),  # moving_mean
    np.ones(n_filters, dtype=np.float32)    # moving_variance
]
bn.set_weights(new_weights)

x_train = np.random.rand(2, 28, 28, 3)
output = model.predict_on_batch(x_train)

print(x_train.shape)
print(output.shape)
print(np.sum(np.abs(x_train - output)))
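With epsilon=0 and the weights above, inference-mode batch normalization reduces to gamma * (x - moving_mean) / sqrt(moving_variance + epsilon) + beta = x, so the printed sum of absolute differences should be close to zero. A quick NumPy check of that formula, under the same assumed shapes:

import numpy as np

gamma, beta, moving_mean, moving_var, eps = 1.0, 0.0, 0.0, 1.0, 0.0
x = np.random.rand(2, 28, 28, 3)
y = gamma * (x - moving_mean) / np.sqrt(moving_var + eps) + beta
print(np.abs(x - y).sum())  # 0.0: the layer acts as an identity map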
Example #6
class GAN:
    def __init__(self):

        self.batch_size = 32
        self.log_step = 50
        self.scaler = MinMaxScaler((-1, 1))
        self.data = self.get_data_banknotes()
        self.init_model()

        # Logging loss
        self.logs_loss = pd.DataFrame(columns=['d_train_r',  # real data from discriminator training
                                               'd_train_f',  # fake data from discriminator training
                                               'd_test_r',  # real data from discriminator testing
                                               'd_test_f',  # fake data from discriminator testing
                                               'a'  # data from GAN(adversarial) training
                                               ])

        # Logging accuracy
        self.logs_acc = pd.DataFrame(columns=['d_train_r', 'd_train_f', 'd_test_r', 'd_test_f', 'a'])

        # Logging generated rows
        self.results = pd.DataFrame(columns=['iteration', 'variance', 'skewness', 'curtosis', 'entropy', 'prediction'])

    def get_data_banknotes(self):
        """
        Get data from file
        :return:
        """
        names = ['variance', 'skewness', 'curtosis', 'entropy', 'class']
        dataset = pd.read_csv('data/data_banknotes.csv', names=names)
        dataset = dataset.loc[dataset['class'] == 0].values  # only real banknotes, because fake ones will be generated
        X = dataset[:, :4]  # omitting last column, we already know it will be 0
        data = self.structure_data(X)
        return data

    def scale(self, X):
        return self.scaler.fit_transform(X)

    def descale(self, X):
        return self.scaler.inverse_transform(X)

    def structure_data(self, X):
        """
        Split the raw and scaled data into train and test subsets.
        :param X: feature matrix of real banknotes
        :return: dict with 'normal' and 'scaled' subsets, each holding 'train' and 'test' arrays
        """
        data_subsets = {'normal': X, 'scaled': self.scale(X)}
        for subset, data in data_subsets.items():  # splitting each subset on train and test
            splited_data = train_test_split(data, test_size=0.3, shuffle=True)
            data_subsets.update({
                subset: {
                    'train': splited_data[0],
                    'test': splited_data[1]}
            })

        return data_subsets

    def init_discriminator(self):
        """
        Init the trainable discriminator model. It is trained and tested on its own, outside the combined GAN model.
        LeakyReLU activations, the Adam optimizer and Dropout are recommended in GAN papers.
        """
        self.D = Sequential()
        self.D.add(Dense(16, input_dim=4))
        self.D.add(LeakyReLU())
        self.D.add(Dropout(0.3))
        self.D.add(Dense(16))
        self.D.add(LeakyReLU())
        self.D.add(Dense(16))
        self.D.add(LeakyReLU())
        self.D.add(Dense(1, activation='sigmoid'))
        self.D.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    def init_discriminator_G(self):
        """
        Init the non-trainable copy of the discriminator. It is used to train the generator inside the combined GAN model.
        LeakyReLU activations, the Adam optimizer and Dropout are recommended in GAN papers.
        """
        self.Dg = Sequential()
        self.Dg.add(Dense(16, input_dim=4))  # activation function: ganhacks
        self.Dg.add(LeakyReLU())
        self.Dg.add(Dropout(0.3))
        self.Dg.add(Dense(16))
        self.Dg.add(LeakyReLU())
        self.Dg.add(Dense(16))
        self.Dg.add(LeakyReLU())
        # activation function: ganhacks
        self.Dg.add(Dense(1, activation='sigmoid'))
        self.Dg.trainable = False
        self.Dg.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    def init_generator(self):
        """
        LeakyReLU activation function, Adam optimizer and Dropout are recommended in GAN papers for BOTH D and G
        """
        self.G = Sequential()
        self.G.add(Dense(16, input_dim=64))
        self.G.add(LeakyReLU())
        self.G.add(Dropout(0.3))
        self.G.add(Dense(16))
        self.G.add(LeakyReLU())
        self.G.add(GaussianNoise(0.1))
        self.G.add(Dense(16))
        self.G.add(LeakyReLU())
        self.G.add(Dense(4, activation='tanh'))
        self.G.compile(loss='binary_crossentropy', optimizer='adam')

    def init_model(self):
        """
        Initialize D, Dg and G, then connect the generator to the non-trainable discriminator to form the GAN.
        :return:
        """
        self.init_discriminator()
        self.init_discriminator_G()
        self.init_generator()
        self.GAN = Sequential()
        self.GAN.add(self.G)
        self.GAN.add(self.Dg)
        self.GAN.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    def get_adversarial_data(self, mode='train'):
        """
        Get data for adversarial training.
        """
        data = self.data['scaled'][mode].copy()
        np.random.shuffle(data)
        features_real = data[:int(self.batch_size / 2)]  # random rows with real data

        noise = np.random.uniform(-1.0, 1.0, size=[int(self.batch_size / 2), 64])  # random noise for generator
        features_fake = self.G.predict(noise)  # fake data
        y_real = np.zeros([int(self.batch_size / 2), 1])  # array of zeros for real rows labels
        y_fake = np.ones([int(self.batch_size / 2), 1])  # array of ones for fake rows labels
        return features_real, y_real, features_fake, y_fake

    def train(self, train_steps):
        try:
            for i in range(train_steps):
                # Training D
                xr, yr, xf, yf = self.get_adversarial_data()  # train D separately from G
                d_loss_r = self.D.train_on_batch(xr, yr)  # separating real and fake data is recommended
                d_loss_f = self.D.train_on_batch(xf, yf)

                # Training G
                # flipping the label before prediction will
                # not influence D prediction as here D is not trainable and is getting weights from trainable D
                y = np.zeros([int(self.batch_size / 2), 1])  # flipping labels is recommended
                self.Dg.set_weights(self.D.get_weights())  # Copying weights from trainable D
                noise = np.random.uniform(-1.0, 1.0, size=[int(self.batch_size / 2), 64])  # getting input noise for G
                a_loss = self.GAN.train_on_batch(noise, y)

                # Testing
                xr_t, yr_t, xf_t, yf_t = self.get_adversarial_data(mode='test')
                d_pred_r = self.D.predict_on_batch(xr_t)  # getting example predictions
                d_pred_f = self.D.predict_on_batch(xf_t)
                d_loss_r_t = self.D.test_on_batch(xr_t, yr_t)  # getting loss and acc
                d_loss_f_t = self.D.test_on_batch(xf_t, yf_t)

                # Logging important data
                self.log(locals())
        finally:
            """
            Plot and save data when finished.
            """
            self.plot()
            self.results.to_csv('results/results.csv', index=False)

    def plot(self):
        """
        Preparing for plotting, plotting and saving plots.
        """
        import matplotlib.pyplot as plt

        ax_loss = self.logs_loss.plot(linewidth=0.75, figsize=(20, 10))
        ax_loss.set_xlabel('iteration')
        ax_loss.set_ylabel('loss')
        fig = plt.gcf()
        fig.set_dpi(200)
        plt.legend(loc='upper right', framealpha=0, prop={'size': 'large'})
        fig.savefig('results/loss.png', dpi=200)

        ax_acc = self.logs_acc.plot(linewidth=0.75, figsize=(20, 10))
        ax_acc.set_xlabel('iteration')
        ax_acc.set_ylabel('accuracy')
        fig = plt.gcf()
        fig.set_dpi(200)
        plt.legend(loc='upper right', framealpha=0, prop={'size': 'large'})
        fig.savefig('results/acc.png', dpi=200)

        plt.show()

    def log(self, variables):
        """
        Logging and printing all the necessary data
        """
        r_rows = pd.DataFrame(self.descale(variables['xr_t']), columns=['variance', 'skewness', 'curtosis', 'entropy'])
        r_rows['prediction'] = variables['d_pred_r']
        f_rows = pd.DataFrame(self.descale(variables['xf_t']), columns=['variance', 'skewness', 'curtosis', 'entropy'])
        f_rows['prediction'] = variables['d_pred_f']
        f_rows['iteration'] = variables['i']
        self.logs_loss = self.logs_loss.append(pd.Series(  # logging loss
                [variables['d_loss_r'][0],
                 variables['d_loss_f'][0],
                 variables['d_loss_r_t'][0],
                 variables['d_loss_f_t'][0],
                 variables['a_loss'][0]], index=self.logs_loss.columns), ignore_index=True)
        self.logs_acc = self.logs_acc.append(pd.Series(  # logging acc
                [variables['d_loss_r'][1],
                 variables['d_loss_f'][1],
                 variables['d_loss_r_t'][1],
                 variables['d_loss_f_t'][1],
                 variables['a_loss'][1]], index=self.logs_acc.columns), ignore_index=True)
        self.results = self.results.append(f_rows, ignore_index=True, sort=False)  # logging generated data
        if self.log_step and variables['i'] % self.log_step == 0:  # print metrics every 'log_step' iteration
            # preparing strings for printing
            log_msg = f""" 
Batch {variables['i']}:
    D(training):  
        loss:
            real : {variables['d_loss_r'][0]:.4f}
            fake : {variables['d_loss_f'][0]:.4f}
        acc: 
            real: {variables['d_loss_r'][1]:.4f}
            fake: {variables['d_loss_f'][1]:.4f}

    D(testing):  
        loss:
            real : {variables['d_loss_r_t'][0]:.4f}
            fake : {variables['d_loss_f_t'][0]:.4f}
        acc: 
            real: {variables['d_loss_r_t'][1]:.4f}
            fake: {variables['d_loss_f_t'][1]:.4f}
            
    GAN:
        loss: {variables['a_loss'][0]:.4f}
        acc: {variables['a_loss'][1]:.4f}
                        """
            print(log_msg)
            np.set_printoptions(precision=5, linewidth=140, suppress=True)  # set how np.array will be printed
            predictions = f"""
Example results:
    Real rows:

{r_rows}

    Fake rows:

{f_rows}
"""
            print(predictions)
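A minimal driver for the class above, assuming the imports it relies on (pandas, numpy, the Keras layers, sklearn's MinMaxScaler and train_test_split) and that the data/ and results/ paths exist:

if __name__ == '__main__':
    gan = GAN()       # loads data/data_banknotes.csv and builds D, Dg, G and the combined GAN
    gan.train(2000)   # alternates D and G updates, then plots and writes results/results.csv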
Example #7
def create_model(_rows, data_dict, max_flow):
    def rmse(y_true, y_pred, axis=0):
        return np.sqrt(((y_pred - y_true)**2).mean(axis=axis))

    def create_dataset_from_dict(ddata, lookback=1, steps=1):
        dataX = []
        dataY = []
        for dt, data in ddata.items():
            timestep = []
            yval = ddata.get(dt + timedelta(minutes=5 * steps))
            # check the future value exists and is not an error
            if yval is not None:
                for j in range(lookback):
                    offset = dt - timedelta(minutes=5 * steps * j)
                    # make sure we have all previous values in the lookback
                    if ddata.get(offset) is not None:
                        timestep.append(ddata[offset])
                if len(timestep) == lookback:
                    dataX.append(timestep)
                    dataY.append(yval[0])
        fields = len(dataX[0][0])
        return (np.array(dataX, dtype=np.double),
                np.array(dataY, dtype=np.double), fields)

    def fit_to_batch(arr, b_size):
        lim = len(arr) - (len(arr) % b_size)
        return arr[:lim]

    class TerminateOnNaN(Callback):
        """Callback that terminates training when a NaN loss is encountered.
        """
        def __init__(self):
            super(TerminateOnNaN, self).__init__()
            self.terminated = False

        def on_batch_end(self, batch, logs=None):
            logs = logs or {}
            loss = logs.get('loss')
            if loss is not None:
                if np.isnan(loss) or np.isinf(loss):
                    print('Batch %d: Invalid loss, terminating training' %
                          (batch))
                    self.model.stop_training = True
                    self.terminated = True

    # input fields are:
    """
    Input is: 
    [
        flow
        dayOfWeek
        MinuteOfDay
        month
        week
        isWeekend
    ]
    for `lookback` records
    """
    lookback = int({{quniform(1, 40, 1)}})
    scaler = MinMaxScaler((0, 1))
    # rows = scaler.fit_transform(_rows)
    # dataX, dataY, fields = create_dataset(rows, lookback)
    scaled = scaler.fit_transform(list(data_dict.values()))
    scaled_data_dict = dict(zip(data_dict.keys(), scaled))
    dataX, dataY, fields = create_dataset_from_dict(scaled_data_dict, lookback)

    test_train_split = 0.60  ## 60% training 40% test
    split_idx = int(len(dataX) * test_train_split)
    train_x = dataX[:split_idx]
    train_y = dataY[:split_idx]
    test_x = dataX[split_idx:]
    test_y = dataY[split_idx:]
    batch_size = int({{quniform(1, 5, 1)}})

    train_x = fit_to_batch(train_x, batch_size)
    train_y = fit_to_batch(train_y, batch_size)
    test_x = fit_to_batch(test_x, batch_size)
    test_y = fit_to_batch(test_y, batch_size)

    nb_epoch = 1
    lstm_size_1 = {{quniform(96, 300, 4)}}
    lstm_size_2 = {{quniform(96, 300, 4)}}
    lstm_size_3 = {{quniform(69, 300, 4)}}
    optimizer = {{choice(['adam', 'rmsprop'])}}  # 'nadam', 'adamax', 'adadelta', 'adagrad' were other candidates
    l1_dropout = {{uniform(0.001, 0.7)}}
    l2_dropout = {{uniform(0.001, 0.7)}}
    l3_dropout = {{uniform(0.001, 0.7)}}
    output_activation = {{choice(['relu', 'tanh', 'linear'])}}
    # reset_interval = int({{quniform(1, 100, 1)}})
    # layer_count = {{choice([1, 2, 3])}}
    l1_reg = {{uniform(0.0001, 0.1)}}
    l2_reg = {{uniform(0.0001, 0.1)}}
    params = {
        'batch_size': batch_size,
        'lookback': lookback,
        'lstm_size_1': lstm_size_1,
        'lstm_size_2': lstm_size_2,
        'lstm_size_3': lstm_size_3,
        'l1_dropout': l1_dropout,
        'l2_dropout': l2_dropout,
        'l3_dropout': l3_dropout,
        'l1_reg': l1_reg,
        'l2_reg': l2_reg,
        'optimizer': optimizer,
        'output_activation': output_activation,
        # 'state_reset': reset_interval,
        # 'layer_count': layer_count,
        # 'use_embedding': use_embedding
    }
    print("PARAMS=", json.dumps(params, indent=4))

    def krmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

    def geh(y_true, y_pred):
        return K.mean(K.sqrt(2 * K.pow(y_pred - y_true, 2) /
                             (y_pred + y_true)), axis=-1)

    reg = L1L2(l1_reg, l2_reg)
    start = datetime.now()
    model = Sequential()
    # if conditional(use_embedding):
    #     model.add(Embedding())
    model.add(
        LSTM(int(lstm_size_1),
             batch_input_shape=(batch_size, lookback, fields),
             return_sequences=True,
             stateful=True,
             activity_regularizer=reg,
             bias_initializer='ones'))
    model.add(Dropout(l1_dropout))
    model.add(Activation('relu'))
    model.add(
        LSTM(int(lstm_size_2),
             return_sequences=True,
             bias_initializer='ones',
             stateful=True,
             activity_regularizer=reg))
    model.add(Dropout(l2_dropout))
    model.add(Activation('relu'))
    model.add(
        LSTM(int(lstm_size_3),
             bias_initializer='ones',
             stateful=True,
             activity_regularizer=reg))
    model.add(Dropout(l3_dropout))
    model.add(Activation('relu'))
    model.add(Dense(1, activation='relu'))

    terminate_cb = TerminateOnNaN()
    model.compile(loss='mse', optimizer=optimizer)
    try:
        model.fit(
            train_x,
            train_y,
            epochs=1,
            verbose=1,
            batch_size=batch_size,
            shuffle=False,
            callbacks=[terminate_cb],
        )
    except Exception as e:
        print(e)
        return {'status': STATUS_FAIL, 'msg': e}
    if terminate_cb.terminated:
        return {'status': STATUS_FAIL, 'msg': "Invalid loss"}
    # have it continue learning during this phase
    # split the test_x,test_y
    preds = []

    def group(iterable, n):
        it = iter(iterable)
        while True:
            chunk = tuple(itertools.islice(it, n))
            if not chunk:
                return
            yield chunk

    test_y_it = iter(group(test_y, batch_size))
    test_batch_idx = 0
    prog = tqdm(range(len(test_y) // batch_size), desc='Train ')
    for batch in group(test_x, batch_size):

        batch = np.array(batch)
        test_y_batch = np.array(next(test_y_it))
        model.train_on_batch(batch, test_y_batch)
        batch_preds = model.predict_on_batch(batch)[:, 0]
        preds.extend(batch_preds)
        test_batch_idx += 1
        prog.update()
        # if test_batch_idx % reset_interval == 0:
        #     model.reset_states()
    preds = np.array(preds)
    finish = datetime.now()
    preds_pad = np.zeros((preds.shape[0], fields))
    preds_pad[:, 0] = preds.flatten()
    test_y_pad = np.zeros((preds.shape[0], fields))
    test_y_pad[:, 0] = test_y.flatten()
    unscaled_pred = scaler.inverse_transform(preds_pad)
    unscaled_test_y = scaler.inverse_transform(test_y_pad)
    rmse_result = rmse(unscaled_pred, unscaled_test_y)[0]

    plot_x = np.arange(test_x.shape[0])
    dpi = 80
    width = 1920 / dpi
    height = 1080 / dpi
    plt.figure(figsize=(width, height), dpi=dpi)
    plt.plot(plot_x, unscaled_test_y[:, 0], color='b', label='Actual')
    plt.plot(plot_x, unscaled_pred[:, 0], color='r', label='Predictions')
    plt.legend()

    plt.title("LSTM Discrete Predictions at 115, SI 2\nRMSE:{}".format(
        round(rmse_result, 3)))
    plt.xlabel('Time')
    plt.ylabel('Flow')
    fig_name = 'model_{}.png'.format(time())
    plt.savefig(fig_name)
    plt.show()
    with open(fig_name, 'rb') as img_file:
        fig_b64 = base64.b64encode(img_file.read()).decode('ascii')

    return {
        'loss': rmse_result,
        'status': STATUS_OK,
        'model': model._updated_config(),
        'metrics': {
            'rmse': rmse_result,
            # 'geh': geh(unscaled_pred, unscaled_test_y)[0],
            'duration': (finish - start).total_seconds()
        },
        'figure': fig_b64,
        'params': params
    }
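create_model is written as a hyperas objective: the {{...}} expressions are template placeholders that hyperas expands before running the function. A rough, hypothetical driver (the data() loader and the load_traffic_data helper are assumptions; hyperas passes whatever data() returns as the arguments of create_model):

from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # must return (_rows, data_dict, max_flow), the arguments create_model expects
    return load_traffic_data()  # assumed helper

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=50,
                                      trials=Trials())
print("Best hyperparameters:", best_run)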
Example #8
    for i in range(32):
        plt.subplot(4, 8, i + 1)
        # map the generator output from [-1, 1] back to [0, 255] grayscale
        im = (np.reshape(images[i], [28, 28]) + 1) * 127.5
        im = np.clip(im, 0, 255)
        im = np.uint8(im)
        plt.imshow(im, cmap='gray')
    plt.show()


epochs = 100
batch_size = 100
xtrain = np.reshape(xtrain, [xtrain.shape[0], -1])
xtest = np.reshape(xtest, [xtest.shape[0], -1])
xtrain = (xtrain.astype(np.float32) - 127.5) / 127.5
plt.imshow(np.reshape(xtrain[0], [28, 28]), cmap="gray")
plt.show()

for e in range(epochs):
    for i in tqdm(range(int(xtrain.shape[0] / batch_size))):
        xreal = xtrain[(i) * batch_size:(i + 1) * batch_size]
        noise = generateRandomData(batch_size, input_size)
        xfake = generator.predict_on_batch(noise)
        discriminator.trainable = True
        discriminator.train_on_batch(xreal, np.array([[0.9]] * batch_size))  # one-sided label smoothing for real images
        discriminator.train_on_batch(xfake, np.array([[0.]] * batch_size))
        discriminator.trainable = False
        gan.train_on_batch(noise, np.array([[1.]] * batch_size))

    showResults(generator)
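This excerpt starts inside a plotting helper and calls generateRandomData, showResults and input_size without defining them; a plausible sketch of the missing latent-noise pieces (hypothetical, but consistent with how they are called above):

import numpy as np

input_size = 100  # assumed latent dimension fed to the generator

def generateRandomData(batch_size, input_size):
    # standard-normal latent noise, one row per generated sample
    return np.random.normal(0.0, 1.0, size=(batch_size, input_size))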