Example #1
# Assumed imports for this snippet:
from keras import backend as K
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam


class DQTModel:
    def __init__(self, input_size, lr, hidden_layers, seed=None):
        # Fully-connected Q-network with 3 linear outputs (one per action).
        self.ann = Sequential()
        self.ann.add(
            Dense(hidden_layers[0], activation='relu', input_dim=input_size))
        for units in hidden_layers[1:]:
            self.ann.add(Dense(units, activation='relu'))
        self.ann.add(Dense(3, activation='linear'))
        self.ann.compile(optimizer=Adam(lr=lr), loss='mse')

    def _huber_loss(self, target, prediction):
        # Pseudo-Huber loss (delta = 1): quadratic near zero, linear in the tails.
        error = prediction - target
        return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)

    def train(self, batch):
        self.ann.fit(batch["inputs"],
                     batch["targets"],
                     steps_per_epoch=1,
                     epochs=1,
                     verbose=0)

    def predict(self, state):
        # Reshape a single observation into a batch of one.
        state = state.reshape([1, len(state)])
        return self.ann.predict(state, steps=1, verbose=0)[0]

    def interpolate(self, model, factor):
        # Blend another model's weights into this one:
        # w <- factor * w_other + (1 - factor) * w_self (soft target update).
        mine = self.ann.get_weights()
        other = model.ann.get_weights()
        for i, o in enumerate(other):
            mine[i] = o * factor + (1 - factor) * mine[i]

        self.ann.set_weights(mine)
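Note that _huber_loss is defined but never wired in, since compile above passes loss='mse'. A minimal sketch of using it instead (an illustration, not the original behavior; the constructor arguments are arbitrary):

# Compile with the custom pseudo-Huber loss rather than 'mse'.
model = DQTModel(input_size=4, lr=1e-3, hidden_layers=[32, 32])
model.ann.compile(optimizer=Adam(lr=1e-3), loss=model._huber_loss)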
Example #2
    def regression_model(model, nr_classes, nr_features, nr_hidden, x_train,
                         x_val, y_train, y_val, epochs, tb, path):
        tot_features = nr_classes * nr_features

        reg_mod = Sequential()
        # Layer 1: flatten the (nr_features, nr_classes) input into a vector
        reg_mod.add(
            Reshape((tot_features, ), input_shape=(nr_features, nr_classes)))
        reg_mod.add(Dense(nr_hidden, activation="linear"))
        # Copy the weights of the source model's third layer into the Dense
        # just added (the Reshape layer has no weights of its own).
        reg_mod.set_weights(model.layers[2].get_weights())

        # First hidden layer
        reg_mod.add(Dense(nr_hidden, activation="linear"))
        reg_mod.add(advanced_activations.LeakyReLU())

        # Final output layer, sized to the number of labels
        nr_of_labels = y_train.shape[1]
        print("nr_of_labels", nr_of_labels)
        reg_mod.add(Dense(nr_of_labels, activation="sigmoid"))

        reg_mod.compile(optimizer="adam",
                        loss="mean_squared_error",
                        metrics=['mse', 'mae'])

        reg_mod.fit(x_train,
                    y_train,
                    epochs=epochs,
                    batch_size=32,
                    shuffle=True,
                    verbose=2,
                    validation_data=(x_val, y_val),
                    callbacks=[tb])
        reg_mod.save(path + "/regression/regression_model.h5")
        return reg_mod
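A hypothetical call, assuming the function is accessible as a plain function (it is defined without self), with a trained source model, pre-split arrays, and a TensorBoard callback; every name and value below is illustrative, not from the original:

reg_mod = regression_model(model, nr_classes=10, nr_features=64, nr_hidden=128,
                           x_train=x_train, x_val=x_val, y_train=y_train,
                           y_val=y_val, epochs=20, tb=tb, path='output')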
Example #3
def create_model(w1, b1, w2, b2):
    model = Sequential()
    model.add(Dense(68, input_dim=34, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    # set_weights only works once the layers exist; calling it on an empty
    # Sequential (as the original did) raises an error.
    model.set_weights([w1, b1, w2, b2])
    return model
Example #4
def copy_model(old_model, X, n_neurons, batch_size):
    # Rebuild the architecture with the requested fixed batch size and copy
    # the trained weights across; for stateful LSTMs the batch size is part
    # of the input shape, so changing it requires a fresh model.
    new_model = Sequential()
    new_model.add(
        LSTM(n_neurons,
             batch_input_shape=(batch_size, X.shape[1], X.shape[2]),
             stateful=True))
    new_model.add(Dense(1))
    old_weights = old_model.get_weights()
    new_model.set_weights(old_weights)
    return new_model
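A common use of this pattern is to train a stateful LSTM with a large batch and then clone it with batch_size=1 for step-by-step forecasting. A sketch, where train_model and X are assumed to exist (X shaped (samples, timesteps, features)):

pred_model = copy_model(train_model, X, n_neurons=50, batch_size=1)
yhat = pred_model.predict(X[:1], batch_size=1)  # one sample at a time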
Example #5
def create_model(weight1, bias1, weight2, bias2):
    model = Sequential()
    model.add(Dense(68, input_dim=34, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    # As in Example #3: build the layers first, then install the weights.
    model.set_weights([weight1, bias1, weight2, bias2])
    return model
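Both Example #3 and Example #5 require weight arrays shaped to match the two Dense layers (34 -> 68 and 68 -> 1). A minimal sketch of compatible inputs:

import numpy as np

w1, b1 = np.random.randn(34, 68), np.zeros(68)   # kernel and bias of layer 1
w2, b2 = np.random.randn(68, 1), np.zeros(1)     # kernel and bias of layer 2
model = create_model(w1, b1, w2, b2)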
Example #6
# Assumed imports for this snippet:
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Dense


def build_model() -> Model:
    model = Sequential([
        Dense(2,
              use_bias=False,
              activation="relu",
              name="Dense1",
              input_shape=(2, )),
        Dense(2, use_bias=False, activation="softmax", name="Dense2")
    ])
    model.summary()
    model.set_weights([
        np.array([[1, 2], [3, 4]]),  # Dense1 weights
        np.array([[6, 5], [7, 8]]),  # Dense2 weights
    ])
    return model
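A quick check that the hard-coded weights behave as expected (the input values are arbitrary):

import numpy as np

model = build_model()
x = np.array([[1.0, 1.0]])
# Dense1: relu(x @ [[1, 2], [3, 4]]) = [4, 6]; Dense2 then applies softmax.
print(model.predict(x))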
Example #7
class DeepNeuralClassifier(BaseClassifier):
    # Binarizes labels; unused as written, since labels_to_categorical below
    # relies on numpy.unique + to_categorical instead.
    encoder = LabelBinarizer()

    def __init__(self, feature_length, num_classes):
        super().__init__(feature_length, num_classes)
        self.num_classes = num_classes

        # From Keras examples (https://keras.io/getting-started/sequential-model-guide/)
        self.model = Sequential()
        self.model.add(Dense(64, activation='relu', input_dim=feature_length))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(num_classes, activation='softmax'))

        self.model.compile(loss='categorical_crossentropy',
                           optimizer='sgd',
                           metrics=['accuracy'])

        self.initial_weights = self.model.get_weights()

    def train(self, features, labels):
        """
        Using a set of features and labels, trains the classifier and returns the training accuracy.
        :param features: An MxN matrix of features to use in prediction
        :param labels: An M row list of labels to train to predict
        :return: Prediction accuracy, as a float between 0 and 1
        """
        labels = self.labels_to_categorical(labels)
        result = self.model.fit(features, labels, epochs=16, verbose=0)
        # Older Keras logs training accuracy under 'acc'; newer releases use 'accuracy'.
        return result.history['acc'][-1]

        # make sure you save model using the same library as we used in machine learning price-predictor

    def predict(self, features, labels):
        """
        Using a set of features and labels, predicts the labels from the features,
        and returns the accuracy of predicted vs actual labels.
        :param features: An MxN matrix of features to use in prediction
        :param labels: An M row list of labels to test prediction accuracy on
        :return: Prediction accuracy, as a float between 0 and 1
        """
        labels = self.labels_to_categorical(labels)
        accuracy = self.model.evaluate(features, labels, verbose=0)[1]
        return accuracy

    def get_prediction(self, features):
        '''
        Gets the model's prediction for each sample.
        :param features: samples to predict
        :return: list of predicted class indices, one per sample
        '''
        probabilities_list = self.model.predict(features)
        return [
            list(probabilities).index(max(probabilities))
            for probabilities in probabilities_list
        ]

    def reset(self):
        """
        Resets the trained weights / parameters to their initial state.
        """
        self.model.set_weights(self.initial_weights)

    def labels_to_categorical(self, labels):
        '''
        Converts string labels to one-hot encoded vectors.
        :param labels: list of string labels
        :return: one-hot encoded label matrix
        '''
        _, IDs = unique(labels, return_inverse=True)
        return to_categorical(IDs, num_classes=self.num_classes)
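labels_to_categorical maps arbitrary label values to integer IDs, then one-hot encodes them. The same two steps in isolation:

from numpy import unique
from keras.utils import to_categorical

labels = ['cat', 'dog', 'cat', 'bird']
_, ids = unique(labels, return_inverse=True)  # ids == [1, 2, 1, 0]
print(to_categorical(ids, num_classes=3))     # one one-hot row per label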
Example #8
class DQN():
    GAMMA = 0.9
    LEARNING_RATE = 0.5
    BATCH_SIZE = 15
    NUM_EPISODES = 50000

    def __init__(self, observation_space):

        self.replay_buffer = Replay_buffer()
        self.explorationRate = 0.5
        self.decay = 0.0002
        self.minExplorationRate = 0.0001
        self.observation_space = observation_space
        self.model = Sequential([
            Dense(15, input_shape=(observation_space, )),
            Activation("relu"),
            Dense(6),
            Activation("relu"),
            Dense(2)
        ])
        self.model.compile(loss="mse", optimizer=Adam(lr=DQN.LEARNING_RATE))
        # Target network: same architecture as the online model; its weights
        # are synced from the online model via make_equal().
        self.critic = Sequential([
            Dense(15, input_shape=(observation_space, )),
            Activation("relu"),
            Dense(6),
            Activation("relu"),
            Dense(2)
        ])
        self.critic.compile(loss="mse", optimizer=Adam(lr=DQN.LEARNING_RATE))

    def decay_epsilon(self, episodes):
        if (episodes < 0.6 * DQN.NUM_EPISODES):
            self.decay = 1 / (2 * DQN.NUM_EPISODES)
        else:
            self.decay = 10 / DQN.NUM_EPISODES
        self.explorationRate = self.minExplorationRate + (
            self.explorationRate -
            self.minExplorationRate) * (np.exp(-self.decay))

    def policy(self, state):
        x = random.random()
        if (x < self.explorationRate):
            return random.randint(0, 1)
        # state2 = np.asarray(state)
        # state2 = state2.reshape([1, self.observation_space])
        # print(state.shape)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def train(self, num_ep):
        batch = self.replay_buffer.sample(DQN.BATCH_SIZE)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                q_update += DQN.GAMMA * np.amax(
                    self.critic.predict(state_next)[0])
            q_values = self.model.predict(state)
            # print(q_values)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.decay_epsilon(num_ep)

    def make_equal(self):
        # Sync the target network (critic) with the online model.
        self.critic.set_weights(self.model.get_weights())
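A sketch of the driving loop this class implies; the state size, the environment interaction, the buffer filling, and the sync period are all assumptions, not part of the original:

agent = DQN(observation_space=4)                 # state size is illustrative
for episode in range(DQN.NUM_EPISODES):
    # ... interact with the environment here, storing (state, action,
    # reward, next_state, terminal) tuples in agent.replay_buffer ...
    agent.train(episode)
    if episode % 100 == 0:                       # sync period chosen arbitrarily
        agent.make_equal()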
Example #9
class Model():
    def __init__(self, logging=False):
        self.logging = logging

        # Initial standard deviation of the kernel
        self.kernel_init_std = 1
        self.kernel_weights = None
        self.k_size = 8

        self.rnn_timesteps = 4
        self.rnn_layers = [128, 128]
        self.rnn_activation = 'relu'

        self.with_zoom = False
        self.control_output = 3 if self.with_zoom else 2

        self.batch_size = 64
        self.path_to_images = "data/mnist/mnist.pkl"
        self.image_size = 28
        self.nr_of_classes = 10

        # gpu_options = K.tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
        config = K.tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = K.tf.Session(config=config)
        with self.sess.as_default():
            self.glimpse = K.variable(np.zeros((1, 1, self.k_size**2)))
            self.init_image_loader()
            self.init_networks()
            self.init_weight_sizes()

    def init_weight_sizes(self):
        # 3 values per kernel point: x and y coordinates plus the std
        self.kernel_weight_size = 3 * self.k_size**2
        self.rnn_weight_size = sum(
            [w.size for w in self.rnn_model.get_weights()])
        self.control_weight_size = sum(
            [w.size for w in self.control_model.get_weights()])
        self.classifier_weight_size = sum(
            [w.size for w in self.classifier_model.get_weights()])
        self.weights_size = self.kernel_weight_size + self.rnn_weight_size + self.control_weight_size + self.classifier_weight_size

    def init_networks(self):
        self.rnn_model = Sequential()
        self.rnn_model.add(
            SimpleRNN(self.rnn_layers[0],
                      activation=self.rnn_activation,
                      input_shape=(self.rnn_timesteps, self.k_size**2),
                      return_sequences=True))
        self.rnn_model.add(
            SimpleRNN(self.rnn_layers[1], activation=self.rnn_activation))
        # self.rnn_model.summary()

        self.control_model = Sequential(
            [Dense(units=self.control_output, input_dim=self.rnn_layers[-1])])
        # self.control_model.summary()
        self.classifier_model = Sequential(
            [Dense(units=self.nr_of_classes, input_dim=self.rnn_layers[-1])])
        # self.classifier_model.summary()
        self.sess.run(K.tf.global_variables_initializer())

    def init_image_loader(self):
        self.train_x, self.train_y, self.test_x, self.test_y = mnist_loader.load(
            self.path_to_images)
        middle = math.sqrt(len(self.train_x[0])) / 2
        self.lattice = [middle, middle]

    # Unpack a flat weight vector into kernel, RNN, control and classifier
    # weights, consumed in that order.
    def set_weights(self, weights):
        with self.sess.as_default():
            k, r, co, cl = self.kernel_weight_size, self.rnn_weight_size, self.control_weight_size, self.classifier_weight_size
            # 3 rows: x, y, std per kernel point
            self.kernel_weights = np.reshape(weights[:k], (3, -1))
            w1 = k

            self.rnn_weights = []
            for w in self.rnn_model.get_weights():
                self.rnn_weights.append(
                    np.reshape(weights[w1:w1 + w.size], w.shape))
                w1 += w.size

            self.control_weights = []
            for w in self.control_model.get_weights():
                self.control_weights.append(
                    np.reshape(weights[w1:w1 + w.size], w.shape))
                w1 += w.size
            self.classifier_weights = []
            for w in self.classifier_model.get_weights():
                self.classifier_weights.append(
                    np.reshape(weights[w1:w1 + w.size], w.shape))
                w1 += w.size
            self.rnn_model.set_weights(self.rnn_weights)
            self.control_model.set_weights(self.control_weights)
            self.classifier_model.set_weights(self.classifier_weights)

    def set_batch_size(self, batch_size):
        self.batch_size = batch_size

    def get_weights_size(self):
        return self.weights_size

    def set_logging(self, logging):
        self.logging = logging

    def classify(self, features):
        pass

    def get_score(self):
        return self.accuracy

    def train(self, epoch=None):
        true_positives = 0
        with self.sess.as_default():
            # NOTE: the modulo wrap-around means the end index can be smaller
            # than the start index near the end of the dataset, yielding an
            # empty range for that epoch.
            indices = range(
                (epoch * self.batch_size) % len(self.train_x),
                ((epoch + 1) * self.batch_size) % len(self.train_x))
            for i in indices:
                img = self.train_x[i]
                for n in range(self.rnn_timesteps):
                    k = self.kernel_weights

                    glimpse_ = GlimpseGenerator().get_glimpse(
                        img, self.lattice[0], self.lattice[1], k[0], k[1],
                        k[2])
                    # print("Glimpse:")
                    # print(glimpse_)
                    K.set_value(self.glimpse,
                                glimpse_.reshape((1, 1, self.k_size**2)))
                    # Get the RNN params to feed to control or classifier network
                    rnn_out = self.rnn_model.call(self.glimpse)
                    # print("RNN weights:")
                    # print(rnn_out.eval())
                    control_out = self.control_model.call(rnn_out)
                    # print(type(control_out))
                    control_out = control_out.eval()
                    class_out = self.classifier_model.call(rnn_out).eval()
                    self.lattice[0] = control_out[0][0]
                    self.lattice[1] = control_out[0][1]
                    # print(class_out)
                    # print(control_out)
                    true_positives += np.argmax(class_out) == self.train_y[i]
        # K.clear_session()
        # TODO - simplest scoring right now - we probably want to change this to reward guessing quicker
        self.accuracy = true_positives / (self.batch_size * self.rnn_timesteps)
        # print("acc: {}".format(self.accuracy))

    def test(self):
        pass

    def visualize(self, epoch, res_directory=None, filename=None):
        scale = 20
        img = np.zeros((scale * self.image_size, scale * self.image_size, 3),
                       np.uint8)
        for i in self.kernel_weights.T:
            img = cv2.circle(img,
                             (int((self.image_size / 2 - int(i[0])) * scale),
                              int((self.image_size / 2 - int(i[1])) * scale)),
                             abs(int(i[2] * scale)), (0, 0, 255), -1)
        if filename is None:
            filename = res_directory + "lattice-epoch_{}-{}.png".format(
                epoch,
                str(time.time())[-5:])
        cv2.imwrite(filename, img)
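The class is driven from outside through a flat weight vector (e.g., by an evolutionary optimizer). A minimal sketch of that contract, assuming the MNIST pickle and the module's external helpers (mnist_loader, GlimpseGenerator) are available:

import numpy as np

m = Model()
flat = np.random.randn(m.get_weights_size())  # one candidate parameter vector
m.set_weights(flat)                           # unpacked into all four sub-networks
m.train(epoch=0)
print(m.get_score())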
Example #10
# Experiment on linear model with policy to maximize momentum
from matplotlib import pyplot as plt
import gym

import numpy as np
from keras import Sequential
from keras.layers import Dense

env = gym.make('MountainCar-v0')

ns = env.observation_space.shape[0]
na = env.action_space.n

model = Sequential([Dense(na, input_shape=(ns, ))])

# Hand-set linear policy: the position row is zeroed, so each action's
# Q-value depends only on velocity.
model.set_weights([np.array([[0, 0, 0], [1, 2, 3]]), np.array([0, 0, 0])])

test_size = 200

rewards = 0
x = []
y = []
for _ in range(test_size):
    s = env.reset()
    x.append(s[0])
    while True:
        # env.render()
        q_values = model.predict(np.array([s]))
        s, r, done, _ = env.step(np.argmax(q_values))
        rewards += r
        if done:
            break
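Why the fixed weights work: with W = [[0, 0, 0], [1, 2, 3]], the Q-values are q = position * 0 + velocity * [1, 2, 3], so the policy simply pushes in the direction of current motion. A quick check:

W = np.array([[0, 0, 0], [1, 2, 3]])
for v in (0.05, -0.05):
    q = np.array([0.3, v]) @ W   # the position term (0.3 here) contributes nothing
    print(v, np.argmax(q))       # +v -> action 2 (push right), -v -> action 0 (push left)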
Example #11
    outfile.write('+++++++++++++++++++++++++++++++++++++++\n')
    outfile.write(label)
    outfile.write('\n+++++++++++++++++++++++++++++++++++++++\n')

loss = []
# Iterate through each training duration
for ind, epochs in enumerate(epochs_list):
    # Fit the model for the current number of epochs
    history = model.fit(X, y, epochs=epochs, batch_size=batch_size)
    loss.extend(history.history['loss'])
    # print(history.history['loss'])

    # Extract the weights from the training model to set the weights for the
    # character generation model
    weights = model.get_weights()
    trained_model.set_weights(weights)

    # Select a sample from X to be the seed
    seed = np.array(X[100])

    # Generate the results and output to file
    with open(file, 'a') as outfile:
        # Write the header line with number of epochs, seed, etc.
        outfile.write('Using seed of length {} generated {} characters after {} epochs.\nSeed: \'{}\'\n'.format(
            seed.shape[0],
            num_to_generate,
            sum(epochs_list[:ind + 1]),
            ''.join(onehot_to_char(seed))))
        # Generate the characters from the current iteration of the trained model
        chars = generate_text(trained_model, num_to_generate, seed)
        # Convert from a one-hot array to a list of characters
Example #12
'''
    Here we reshape the data into a 4-dimensional container. Think of it as
    an array of tensors (a tensor here being a 3-dimensional array), laid
    out as [number of samples, rows, columns, channels]. In this trivial
    case we have a single sample and a single, shallow channel.
'''
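# NOTE (added): `data` is not defined in this fragment. The imports and the
# sample input below are assumptions so the example runs end-to-end; the 8x8
# image has a vertical line down its middle two columns.
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D

data = np.asarray([[0, 0, 0, 1, 1, 0, 0, 0] for _ in range(8)])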
data = data.reshape(1, 8, 8, 1)
print(data)

# Create a Sequential keras model, we'll only have one layer here
model = Sequential()
# https://keras.io/layers/convolutional/
# Conv2D(number of filters, tuple specifying the dimension of the convolution window, input_shape)
model.add(Conv2D(1, (3, 3), input_shape=(8, 8, 1)))

# Define a vertical line detector: a 3x3 kernel whose middle column is ones,
# shaped (rows, cols, input channels, output channels) = (3, 3, 1, 1)
detector = [[[[0]], [[1]], [[0]]], [[[0]], [[1]], [[0]]], [[[0]], [[1]],
                                                           [[0]]]]
weights = [np.asarray(detector), np.asarray([0.0])]
# store the weights in the model
model.set_weights(weights)
# confirm they were stored
print(model.get_weights())

# apply filter to input data
yhat = model.predict(data)

for r in range(yhat.shape[1]):
    # print each column in the row
    print([yhat[0, r, c, 0] for c in range(yhat.shape[2])])
Example #13
class GAN:
    def __init__(self):

        self.batch_size = 32
        self.log_step = 50
        self.scaler = MinMaxScaler((-1, 1))
        self.data = self.get_data_banknotes()
        self.init_model()

        # Logging loss
        self.logs_loss = pd.DataFrame(columns=['d_train_r',  # real data from discriminator training
                                               'd_train_f',  # fake data from discriminator training
                                               'd_test_r',  # real data from discriminator testing
                                               'd_test_f',  # fake data from discriminator testing
                                               'a'  # data from GAN(adversarial) training
                                               ])

        # Logging accuracy
        self.logs_acc = pd.DataFrame(columns=['d_train_r', 'd_train_f', 'd_test_r', 'd_test_f', 'a'])

        # Logging generated rows
        self.results = pd.DataFrame(columns=['iteration','variance', 'skewness', 'curtosis', 'entropy', 'prediction'])

    def get_data_banknotes(self):
        """
        Load the banknote dataset from file.
        :return: structured data dict (see structure_data)
        """
        names = ['variance', 'skewness', 'curtosis', 'entropy', 'class']
        dataset = pd.read_csv('data/data_banknotes.csv', names=names)
        dataset = dataset.loc[dataset['class'] == 0].values  # only real banknotes, because fake ones will be generated
        X = dataset[:, :4]  # omitting last column, we already know it will be 0
        data = self.structure_data(X)
        return data

    def scale(self, X):
        return self.scaler.fit_transform(X)

    def descale(self, X):
        return self.scaler.inverse_transform(X)

    def structure_data(self, X):
        """
        Build 'normal' and 'scaled' views of X, each split into train
        and test subsets.
        """
        data_subsets = {'normal': X, 'scaled': self.scale(X)}
        for subset, data in data_subsets.items():  # split each subset into train and test
            split_data = train_test_split(data, test_size=0.3, shuffle=True)
            data_subsets.update({
                subset: {
                    'train': split_data[0],
                    'test': split_data[1]}
            })

        return data_subsets

    def init_discriminator(self):
        """
        Initialize the trainable discriminator, trained and tested on its own,
        outside the combined GAN model. LeakyReLU activations, the Adam
        optimizer and Dropout are recommended in GAN papers.
        """
        self.D = Sequential()
        self.D.add(Dense(16, input_dim=4))
        self.D.add(LeakyReLU())
        self.D.add(Dropout(0.3))
        self.D.add(Dense(16))
        self.D.add(LeakyReLU())
        self.D.add(Dense(16))
        self.D.add(LeakyReLU())
        self.D.add(Dense(1, activation='sigmoid'))
        self.D.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    def init_discriminator_G(self):
        """
        Initialize a non-trainable copy of the discriminator, used inside the
        combined GAN model to train the generator; its weights are synced from
        the trainable D on every step. Same LeakyReLU/Adam/Dropout choices.
        """
        self.Dg = Sequential()
        self.Dg.add(Dense(16, input_dim=4))  # activation function: ganhacks
        self.Dg.add(LeakyReLU())
        self.Dg.add(Dropout(0.3))
        self.Dg.add(Dense(16))
        self.Dg.add(LeakyReLU())
        self.Dg.add(Dense(16))
        self.Dg.add(LeakyReLU())
        # activation function: ganhacks
        self.Dg.add(Dense(1, activation='sigmoid'))
        self.Dg.trainable = False
        self.Dg.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    def init_generator(self):
        """
        Generator: maps 64-dimensional noise to a 4-feature row. LeakyReLU,
        Adam and Dropout are recommended in GAN papers for BOTH D and G.
        """
        self.G = Sequential()
        self.G.add(Dense(16, input_dim=64))
        self.G.add(LeakyReLU())
        self.G.add(Dropout(0.3))
        self.G.add(Dense(16))
        self.G.add(LeakyReLU())
        self.G.add(GaussianNoise(0.1))
        self.G.add(Dense(16))
        self.G.add(LeakyReLU())
        self.G.add(Dense(4, activation='tanh'))
        self.G.compile(loss='binary_crossentropy', optimizer='adam')

    def init_model(self):
        """
        Initialize D, G and the non-trainable discriminator copy, then chain
        G -> Dg into the combined GAN model used to train the generator.
        """
        self.init_discriminator()
        self.init_discriminator_G()
        self.init_generator()
        self.GAN = Sequential()
        self.GAN.add(self.G)
        self.GAN.add(self.Dg)
        self.GAN.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    def get_adversarial_data(self, mode='train'):
        """
        Assemble a half-batch of real rows and a half-batch of generated rows,
        with flipped labels: zeros for real, ones for fake.
        """
        data = self.data['scaled'][mode].copy()
        np.random.shuffle(data)
        features_real = data[:int(self.batch_size / 2)]  # random rows with real data

        noise = np.random.uniform(-1.0, 1.0, size=[int(self.batch_size / 2), 64])  # random noise for generator
        features_fake = self.G.predict(noise)  # fake data
        y_real = np.zeros([int(self.batch_size / 2), 1])  # array of zeros for real rows labels
        y_fake = np.ones([int(self.batch_size / 2), 1])  # array of ones for fake rows labels
        return features_real, y_real, features_fake, y_fake

    def train(self, train_steps):
        try:
            for i in range(train_steps):
                # Training D
                xr, yr, xf, yf = self.get_adversarial_data()  # train D separately from G
                d_loss_r = self.D.train_on_batch(xr, yr)  # separating real and fake data is recommended
                d_loss_f = self.D.train_on_batch(xf, yf)

                # Training G
                # flipping the label before prediction will
                # not influence D prediction as here D is not trainable and is getting weights from trainable D
                y = np.zeros([int(self.batch_size / 2), 1])  # flipping labels is recommended
                self.Dg.set_weights(self.D.get_weights())  # Copying weights from trainable D
                noise = np.random.uniform(-1.0, 1.0, size=[int(self.batch_size / 2), 64])  # getting input noise for G
                a_loss = self.GAN.train_on_batch(noise, y)

                # Testing
                xr_t, yr_t, xf_t, yf_t = self.get_adversarial_data(mode='test')
                d_pred_r = self.D.predict_on_batch(xr_t)  # getting example predictions
                d_pred_f = self.D.predict_on_batch(xf_t)
                d_loss_r_t = self.D.test_on_batch(xr_t, yr_t)  # getting loss and acc
                d_loss_f_t = self.D.test_on_batch(xf_t, yf_t)

                # Logging important data
                self.log(locals())
        finally:
            # Plot and save the collected data when finished (even on interrupt).
            self.plot()
            self.results.to_csv('results/results.csv', index=False)

    def plot(self):
        """
        Plot and save the loss and accuracy curves.
        """
        import matplotlib.pyplot as plt

        ax_loss = self.logs_loss.plot(linewidth=0.75, figsize=(20, 10))
        ax_loss.set_xlabel('iteration')
        ax_loss.set_ylabel('loss')
        fig = plt.gcf()
        fig.set_dpi(200)
        plt.legend(loc='upper right', framealpha=0, prop={'size': 'large'})
        fig.savefig('results/loss.png', dpi=200)

        ax_acc = self.logs_acc.plot(linewidth=0.75, figsize=(20, 10))
        ax_acc.set_xlabel('iteration')
        ax_acc.set_ylabel('accuracy')
        fig = plt.gcf()
        fig.set_dpi(200)
        plt.legend(loc='upper right', framealpha=0, prop={'size': 'large'})
        fig.savefig('results/acc.png', dpi=200)

        plt.show()

    def log(self, variables):
        """
        Logging and printing all the necessary data
        """
        r_rows = pd.DataFrame(self.descale(variables['xr_t']), columns=['variance', 'skewness', 'curtosis', 'entropy'])
        r_rows['prediction'] = variables['d_pred_r']
        f_rows = pd.DataFrame(self.descale(variables['xf_t']), columns=['variance', 'skewness', 'curtosis', 'entropy'])
        f_rows['prediction'] = variables['d_pred_f']
        f_rows['iteration'] = variables['i']
        self.logs_loss = self.logs_loss.append(pd.Series(  # logging loss
                [variables['d_loss_r'][0],
                 variables['d_loss_f'][0],
                 variables['d_loss_r_t'][0],
                 variables['d_loss_f_t'][0],
                 variables['a_loss'][0]], index=self.logs_loss.columns), ignore_index=True)
        self.logs_acc = self.logs_acc.append(pd.Series(  # logging acc
                [variables['d_loss_r'][1],
                 variables['d_loss_f'][1],
                 variables['d_loss_r_t'][1],
                 variables['d_loss_f_t'][1],
                 variables['a_loss'][1]], index=self.logs_acc.columns), ignore_index=True)
        self.results = self.results.append(f_rows, ignore_index=True, sort=False)  # logging generated data
        if self.log_step and variables['i'] % self.log_step == 0:  # print metrics every 'log_step' iteration
            # preparing strings for printing
            log_msg = f""" 
Batch {variables['i']}:
    D(training):  
        loss:
            real : {variables['d_loss_r'][0]:.4f}
            fake : {variables['d_loss_f'][0]:.4f}
        acc: 
            real: {variables['d_loss_r'][1]:.4f}
            fake: {variables['d_loss_f'][1]:.4f}

    D(testing):  
        loss:
            real : {variables['d_loss_r_t'][0]:.4f}
            fake : {variables['d_loss_f_t'][0]:.4f}
        acc: 
            real: {variables['d_loss_r_t'][1]:.4f}
            fake: {variables['d_loss_f_t'][1]:.4f}
            
    GAN:
        loss: {variables['a_loss'][0]:.4f}
        acc: {variables['a_loss'][1]:.4f}
                        """
            print(log_msg)
            np.set_printoptions(precision=5, linewidth=140, suppress=True)  # set how np.array will be printed
            predictions = f"""
Example results:
    Real rows:

{r_rows}

    Fake rows:

{f_rows}
"""
            print(predictions)
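A minimal way to drive the class, assuming the banknote CSV and a results/ directory exist at the paths coded above:

if __name__ == '__main__':
    gan = GAN()
    gan.train(train_steps=1000)  # step count is arbitrary for illustration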