Example #1
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train an n-layer simple network for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for param in params:
        model.add(Dense(param))
        # ReLU activation
        model.add(Activation('relu'))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
Example #2
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=256,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9,
          activation="relu",
          optimizer_name="sgd"):
    """
    Train an n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for n, param in enumerate(params, start=1):
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # configurable activation (Keras has no built-in arctan, so use a Lambda)
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)
    
    return {'model':model, 'history':history}
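A minimal usage sketch for the train() variants above, assuming the MNIST data wrapper used in Example #5 (exposing train_data, train_labels, validation_data and validation_labels) and a hypothetical output path:

result = train(MNIST(), 'models/mnist_mlp.h5', params=[128, 64],
               num_epochs=10, activation='relu', optimizer_name='adam')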
Example #3
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a 2-layer simple network for MNIST and CIFAR
    """
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # first dense layer (the hidden layer)
    model.add(Dense(params[0]))
    # sharpened softplus with \alpha = 10: scale the input by 10,
    model.add(Lambda(lambda x: x * 10))
    # apply softplus (Keras's softplus activation has no \alpha parameter),
    model.add(Activation('softplus'))
    # then scale back by 1/10, giving f(x) = softplus(10x) / 10
    model.add(Lambda(lambda x: x * 0.1))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    # run training with given dataset, and print progress
    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return model
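The pair of Lambda layers above implements a sharpened softplus, f(x) = softplus(10x) / 10, which approaches ReLU as the scale factor grows. A standalone numpy sketch of the identity:

import numpy as np

def scaled_softplus(x, alpha=10.0):
    # softplus(alpha * x) / alpha -- a smooth approximation of ReLU
    return np.log1p(np.exp(alpha * x)) / alpha

x = np.linspace(-1.0, 1.0, 5)
print(scaled_softplus(x))   # close to ReLU away from zero
print(np.maximum(x, 0.0))   # exact ReLU for comparison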
Example #4
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None):
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation('relu'))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
Example #5
def convert(file_name, new_name, cifar=False):
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:
            # some layers (e.g. the Lambda activations) carry no weights to print
            pass
        model.layers[i].set_weights(eq_weights[i])

    # the original snippet relies on a loss `fn` defined elsewhere; assume the
    # same softmax cross-entropy used by the training examples above
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
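A hedged note on the parsing above: it assumes model file names of the form <dataset>_cnn_<n>layer_<filters>_<kernel>. For a hypothetical name:

name = 'mnist_cnn_5layer_16_3'
print(name.split('_')[-3][0])  # '5'  (nlayer)
print(name.split('_')[-2])     # '16' (filters)
print(name.split('_')[-1])     # '3'  (kernel size)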
Example #6

def fizzbuzz(i):
    if i % 15 == 0: return np.array([0, 0, 0, 1])
    elif i % 5 == 0: return np.array([0, 0, 1, 0])
    elif i % 3 == 0: return np.array([0, 1, 0, 0])
    else: return np.array([1, 0, 0, 0])


def bin(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])


NUM_DIGITS = 7
trX = np.array([bin(i, NUM_DIGITS) for i in range(1, 101)])
trY = np.array([fizzbuzz(i) for i in range(1, 101)])
model = Sequential()
model.add(Dense(64, input_dim=7))
model.add(Activation('tanh'))
model.add(Dense(4))  # input_dim is only meaningful on the first layer
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(trX, trY, epochs=3600, batch_size=64)
model.save('fizzbuzz_model.h5')

converter = lite.TFLiteConverter.from_keras_model_file('fizzbuzz_model.h5')
tflite_model = converter.convert()
open('fizzbuzz_model.tflite', 'wb').write(tflite_model)
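A sketch of running the converted model with the TFLite Python interpreter (tf.lite.Interpreter here; on the 1.x contrib path used above it lives at tf.contrib.lite.Interpreter). The label order follows fizzbuzz() above:

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='fizzbuzz_model.tflite')
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

i = 15
x = np.array([[i >> d & 1 for d in range(NUM_DIGITS)]], dtype=np.float32)
interpreter.set_tensor(inp['index'], x)
interpreter.invoke()
probs = interpreter.get_tensor(out['index'])[0]
print(['number', 'fizz', 'buzz', 'fizzbuzz'][int(np.argmax(probs))])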
Example #7
predict_1 = classifier.predict(test_image_1)
predict_2 = classifier.predict(test_image_2)

# to interpret the 0/1 output (see training_set.class_indices below): cats are 0, dogs are 1

if predict_1[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
print("The image 1 is a ", prediction)

if predict_2[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
print("The image 2 is a ", prediction)

# Save model
model_backup_path = script_directory + '/cats_or_dogs_model.h5'
classifier.save(model_backup_path)
print('Model saved to: ', model_backup_path)

# Save loss history
loss_history_path = script_directory + '/loss_history.log'
with open(loss_history_path, 'w') as myFile:
    myFile.write(str(history.losses))  # losses is a list; serialize it before writing

backend.clear_session()
print('The model class indices are: ', training_set.class_indices)
Example #8
import numpy as np 
from tensorflow.contrib.keras.api.keras.models import Sequential, model_from_json
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.api.keras.optimizers import SGD
import tensorflow.contrib.lite as lite

model = Sequential()
model.add(Dense(8, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))
model.fit(
    np.array([[0, 0], [0, 1.0], [1.0, 0], [1.0, 1.0]]),
    np.array([[0.0], [1.0], [1.0], [0.0]]),
    batch_size=1, epochs=300)
model.save('xor_model.h5')

converter = lite.TFLiteConverter.from_keras_model_file("xor_model.h5")
tflite_model = converter.convert()
open("xor_model.tflite", "wb").write(tflite_model)
Example #9
import numpy as np
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Activation
from tensorflow.contrib.keras.api.keras.optimizers import SGD, Adam
import matplotlib.pyplot as plt  # needed for the plot below

data = np.loadtxt('sin.csv', delimiter=',', unpack=True)
x = data[0]
y = data[1]
model = Sequential()
model.add(Dense(30, input_shape=(1, )))
model.add(Activation('sigmoid'))
model.add(Dense(40))
model.add(Activation('sigmoid'))
model.add(Dense(1))
optimizer = Adam(lr=0.1)
model.compile(loss='mean_squared_error', optimizer=optimizer)
model.fit(x, y, epochs=1000, batch_size=20, verbose=0)
print('save model')
model.save('sin_model.h5')
predictions = model.predict(x).flatten()  # flatten the (N, 1) output to match y
print(np.mean(np.square(predictions - y)))  # training MSE
plt.plot(x, y, 'b', x, predictions, 'r--')
plt.show()
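The snippet assumes a two-column sin.csv (x in the first column, y in the second, matching the unpack=True load). A hypothetical generator:

import numpy as np
xs = np.linspace(0, 2 * np.pi, 200)
np.savetxt('sin.csv', np.column_stack([xs, np.sin(xs)]), delimiter=',')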
Example #10
def train_cnn_7layer(data,
                     file_name,
                     params,
                     num_epochs=50,
                     batch_size=256,
                     train_temp=1,
                     init=None,
                     lr=0.01,
                     decay=1e-5,
                     momentum=0.9,
                     activation="relu",
                     optimizer_name="sgd"):
    """
    Train a 7-layer CNN for MNIST and CIFAR (same as the CNN model in Clever)
    mnist: 32 32 64 64 200 200 
    cifar: 64 64 128 128 256 256
    """

    # create a Keras sequential model
    model = Sequential()

    print("training data shape = {}".format(data.train_data.shape))

    # define model structure
    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation(activation))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation(activation))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation(activation))
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         decay=decay,
                         amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=optimizer, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)

    return {'model': model, 'history': history}
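A minimal usage sketch with a hypothetical file name, the MNIST data wrapper from Example #5, and the MNIST channel configuration listed in the docstring:

train_cnn_7layer(MNIST(), 'models/mnist_cnn_7layer.h5',
                 params=[32, 32, 64, 64, 200, 200],
                 num_epochs=10, optimizer_name='adam')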
Example #11
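The snippet below assumes a small mnist helper plus preloaded, preprocessed arrays; a hypothetical sketch of that setup:

import keras
from keras.datasets import mnist as mnist_data

class mnist:  # stand-in for the helper referenced below
    img_rows, img_cols, n_classes = 28, 28, 10

(x_train, y_train), (x_test, y_test) = mnist_data.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, mnist.n_classes)
y_test = keras.utils.to_categorical(y_test, mnist.n_classes)
batch_size, epochs = 128, 12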
# Define model
input_shape = (mnist.img_rows, mnist.img_cols, 1)
model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(mnist.n_classes, activation='softmax'))

# Fit model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# Evaluate model
score = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: {:0.2f}%'.format(score[1] * 100))

# Store model
model.save('mnist_tfkeras.h5')
Example #13
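The classifier, the training_set/test_set generators and the history callback are assumed to exist already. A hypothetical sketch of the loss-recording callback used below (a standard Keras pattern):

from keras.callbacks import Callback

class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses = []
    def on_batch_end(self, batch, logs=None):
        self.losses.append(logs.get('loss'))

history = LossHistory()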
classifier.fit_generator(training_set,
                         steps_per_epoch=8000 // batch_size,  # batches to complete before an epoch is declared finished
                         epochs=90,
                         validation_data=test_set,
                         validation_steps=2000 // batch_size,
                         workers=12,  # workers and max_queue_size tuned for this GPU
                         max_queue_size=100,
                         callbacks=[history])  # record training stats in the history object
     


# PART 3 - MAKING PREDICTIONS, SAVING MODEL, SAVING LOSS HISTORY TO FILE.

# Saving model :
model_path = 'dataset/cat_or_dog_model.h5'
classifier.save(model_path)
print("Model saved to", model_path)
     
# Saving loss history to file:
lossLog_path = 'dataset/loss_history.log'
with open(lossLog_path, 'w') as myFile:
    myFile.write(str(history.losses))  # losses is a list; serialize it before writing

# Clearing session :
backend.clear_session()

# Confirming class indices:
print("The model class indices are:", training_set.class_indices)

# Predicting a new image :
Example #14
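The arrays below are assumed to be MNIST images flattened to 784 features, with Sequential/Dense/Adam/to_categorical and the lite converter imported as in the other examples. A hypothetical loading step:

from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)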
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = Sequential()
model.add(Dense(512, input_shape=(784, )))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(),
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          batch_size=128,
          epochs=20,
          verbose=1,
          validation_data=(x_test, y_test))
model.save('mnist_model.h5')

converter = lite.TFLiteConverter.from_keras_model_file('mnist_model.h5')
tflite_model = converter.convert()
open('mnist_model.tflite', 'wb').write(tflite_model)
Example #15
def train(data,
          file_name,
          filters,
          kernels,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          activation=tf.nn.relu,
          bn=False):
    """
    Train an n-layer CNN for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    model.add(
        Conv2D(filters[0], kernels[0], input_shape=data.train_data.shape[1:]))
    if bn:
        model.add(BatchNormalization())
    model.add(Lambda(activation))
    for f, k in zip(filters[1:], kernels[1:]):
        model.add(Conv2D(f, k))
        if bn:
            model.add(BatchNormalization())
        # configurable activation, applied via Lambda
        model.add(Lambda(activation))
    # the output layer, with 10 classes
    model.add(Flatten())
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the Adam optimizer
    adam = Adam()

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=adam, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(filters) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
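A minimal usage sketch with a hypothetical file name and the MNIST data wrapper from Example #5; filters and kernels are per-layer lists consumed pairwise by the loop above:

train(MNIST(), 'models/mnist_cnn_4layer.h5',
      filters=[16, 16, 32], kernels=[3, 3, 3],
      activation=tf.nn.relu, bn=True)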
Example #16
class TinyYoloV2:
    """
    This class handles the building and the loss of the
    tiny yolo v2 network.
    """
    def __init__(self, config):
        """
        Initializes class variables.
        :param config: Contains the networks hyperparameters
        """
        #super.__init__(self)
        self.config = config
        self.network = None
        self.loss = None
        self.input_shape = config.input_shape


    def build(self):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(Convolution2D(16, (3, 3), input_shape=(416, 416, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(32, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(64, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(128, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(256, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(512, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1), padding='valid'))

        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())

        self.model.add(Convolution2D(125, (1, 1), activation=None))

        if self.config.optimizer == 'adam':
            opt = Adam()
        elif self.config.optimizer == 'sgd':
            opt = SGD()
        else:
            raise ValueError('unknown optimizer: {}'.format(self.config.optimizer))

        if self.config.loss == 'categorical_crossentropy':
            loss = 'categorical_crossentropy'
        elif self.config.loss == 'yolov2_loss':
            raise NotImplementedError('the yolo v2 loss is not implemented yet')
        else:
            raise ValueError('unknown loss: {}'.format(self.config.loss))

        self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
        self.model.summary()
        return self.model

    def convertToBoxParams(self, out):
        """
        Convert the final layer features to bounding box parameters.
        :return: box_xy: tensor
                    x, y box predictions adjusted by spatial location in conv layer
                box_wh: tensor
                    w, h box predictions adjusted by anchors and conv spatial resolution
                box_conf: tensor
                    Probability estimate for whether each box contains any object
                box_class_pred : tensor
                    Probability distribution estimate for each box over class labels
        """
        feats = out

        num_anchors = len(self.config.anchors)
        # Reshape to batch, height, width, num_anchors, box_params
        anchors_tensor = K.reshape(K.variable(self.config.anchors), [1, 1, 1, num_anchors, 2])

        conv_dims = K.shape(feats)[1:3]  # assuming channels last
        # In YOLO the height index is the inner most iteration
        conv_height_index = K.arange(0, stop=conv_dims[0])
        conv_width_index = K.arange(0, stop=conv_dims[1])
        conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

        conv_width_index = K.tile(
            K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
        conv_width_index = K.flatten(K.transpose(conv_width_index))
        conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
        conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
        conv_index = K.cast(conv_index, K.dtype(feats))

        feats = K.reshape(
            feats, [-1, conv_dims[0], conv_dims[1], num_anchors, self.config.classes + 5])
        conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

        # Static generation of conv_index:
        # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
        # conv_index = conv_index[:, [1, 0]]  # swap columns for YOLO ordering.
        # conv_index = K.variable(
        #     conv_index.reshape(1, conv_height, conv_width, 1, 2))
        # feats = Reshape(
        #     (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)

        box_xy = K.sigmoid(feats[..., :2])
        box_wh = K.exp(feats[..., 2:4])
        box_confidence = K.sigmoid(feats[..., 4:5])
        box_class_probs = K.softmax(feats[..., 5:])

        # Adjust predictions to each spatial grid point and anchor size.
        # Note: YOLO iterates over height index before width index.
        box_xy = (box_xy + conv_index) / conv_dims
        box_wh = box_wh * anchors_tensor / conv_dims

        return box_xy, box_wh, box_confidence, box_class_probs
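For reference, the decode above follows the YOLOv2 box parameterization: with grid offset c and anchor prior p for a cell in a W x H conv grid,

    b_xy = (sigmoid(t_xy) + c_xy) / (W, H)
    b_wh = p_wh * exp(t_wh) / (W, H)

plus an objectness score sigmoid(t_o) and a softmax over the class channels, matching box_confidence and box_class_probs above.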


    def save_model(self, name):
        """
        Saves model to path.
        :return:
        """
        path = "C:\ObjectDetection\Data\ModelCheckpoints" + name + ".h5"
        self.model.save(path)

    def save_weights(self, name):
        """
        Saves the model weights to path.
        :param name:
        :return:
        """
        path = "C:\ObjectDetection\Data\ModelCheckpoints" + name + ".h5"
        self.model.save_weights(path)

    def load_weights(self):
        print("About to load weights.")
        utils.load_weights(self.model, 'C:\\ObjectDetection\\Data\\ModelCheckpoints\\tiny-yolo-voc.weights')
        print("Loaded weights.")

    def _load_pretrained_network(self):
        """
        Loads the pretrained network's weights
        into the new network.
        :return:
        """
        raise NotImplementedError