def main():
    try:
        main_dir = '../'

        gConfig = getConfig(main_dir + 'config/metavision.ini')  # get configuration

        dataset = 'food'
        corpus_dir = main_dir + gConfig['corpus_dir'] + "/"
        data_dir = main_dir + gConfig['data_dir'] + "/"

        food_data_preparation(corpus_dir + dataset + "/", data_dir + dataset + "/", dataset)

    except Exception as ex:
        print("main function failed - " + str(ex))
        raise


def main():
    try:
        main_dir = '../'

        gConfig = getConfig(main_dir +
                            'config/metavision.ini')  # get configuration

        dataset = 'caltech'
        corpus_dir = main_dir + gConfig['corpus_dir'] + "/"
        data_dir = main_dir + gConfig['data_dir'] + "/"
        testset_proportion = gConfig['testset_proportion']

        caltech_data_preparation(corpus_dir + dataset + "/",
                                 data_dir + dataset + "/", dataset,
                                 testset_proportion)

    except Exception as ex:
        print("main function failed - " + str(ex))
        raise
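

# ---------------------------------------------------------------------------
# getConfig() and checkPathExists() are project helpers that are not shown in
# these examples.  The definitions below are only an illustrative sketch of
# what they might look like (configparser-based .ini reading plus directory
# creation); the repository's actual implementations may differ.
# ---------------------------------------------------------------------------
import os
import configparser


def getConfig(config_file):
    """Read an .ini file into a flat {key: typed value} dict (sketch)."""
    parser = configparser.ConfigParser()
    parser.read(config_file)
    config = {}
    for section in parser.sections():
        for key, value in parser.items(section):
            # best-effort type coercion: int, then float, otherwise keep the string
            try:
                config[key] = int(value)
            except ValueError:
                try:
                    config[key] = float(value)
                except ValueError:
                    config[key] = value
    return config


def checkPathExists(paths):
    """Create every directory in `paths` that does not exist yet (sketch)."""
    for path in paths:
        os.makedirs(path, exist_ok=True)

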
def main():
    try:
        gConfig = getConfig('config/meta_rl.ini')  # get configuration
        # site = gConfig['site']

        mode = gConfig['mode']
        dataset_name = gConfig['dataset']

        data_dir = gConfig['data_dir']
        # features_dir = gConfig['features_dir']
        # infer_dir = gConfig['infer_dir']
        model_dir = gConfig['model_dir']
        output_dir = gConfig['output_dir']
        log_dir = gConfig['log_dir']

        train_num_epochs = gConfig['train_num_epochs']
        num_layers = gConfig['num_layers']
        num_hidden = gConfig['num_hidden']
        learning_rate = gConfig['learning_rate']
        learning_rate_decay_factor = gConfig['learning_rate_decay_factor']
        num_steps_per_decay = gConfig['num_steps_per_decay']
        num_episodes = gConfig['num_episodes']
        train_batch_size = gConfig['train_batch_size']
        exploration = gConfig['exploration']
        discount_factor = gConfig['discount_factor']
        num_child_steps_per_cycle = gConfig['num_child_steps_per_cycle']
        # max_depth = gConfig['max_depth']
        initial_filters = gConfig['initial_filters']
        # num_classes = gConfig['num_classes']

        optimizer = gConfig['optimizer']
        # dropout_keep_prob = gConfig['dropout_keep_prob']

        # port = gConfig['port']
        # certificate = gConfig['certificate']
        # resource_dir = gConfig['resources']

        if ('train' in mode):
            # get the available CPU/GPU devices and specify which GPU(s) to use
            cpu_devices = get_cpu_devices()
            gpu_devices = get_gpu_devices()
            if (len(gpu_devices) > 1):
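                # pin TensorFlow to the GPU index selected in the config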
                os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
                os.environ["CUDA_VISIBLE_DEVICES"] = str(gConfig["gpu_to_use"])

                print("The available GPU devices: " + str(gpu_devices))

                # devices, device_category = (gpu_devices, DeviceCategory.GPU) if len(gpu_devices) > 1 else (cpu_devices, DeviceCategory.CPU)

                # desc = "A Meta-Reinforcement Learning Approach to Optimise Parameters and Hyper-parameters Simultaneously"
                # parser = argparse.ArgumentParser(description=desc)
                #
                # parser.add_argument('--max_layers', default=2)
                #
                # args = parser.parse_args()
                # args.max_layers = int(args.max_layers)

            # create the logger once; adding handlers inside the dataset loop
            # would duplicate console output for every additional dataset
            _log.basicConfig(filename=log_dir + "/" + "log.txt",
                             level=_log.DEBUG,
                             format='%(asctime)s %(message)s',
                             datefmt='%m/%d/%Y %I:%M:%S %p')
            logger = _log.getLogger("VoiceNet")
            logger.setLevel(_log.DEBUG)
            console = _log.StreamHandler()
            console.setLevel(_log.DEBUG)

            formatter = _log.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )  # create formatter
            console.setFormatter(formatter)
            logger.addHandler(console)

            for dataset_ in dataset_name.split(','):  # datasets
                checkPathExists([
                    model_dir + '/' + dataset_ + '/', data_dir,
                    log_dir + '/' + dataset_ + '/',
                    output_dir + '/' + dataset_ + '/'
                ])

                data_format = ('channels_first'
                               if tf.test.is_built_with_cuda() else
                               'channels_last')
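                # NCHW ('channels_first') is generally faster on CUDA builds of TF,
                # while CPU-only builds work with NHWC ('channels_last')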

                # dataset preprocessing
                if 'mnist' == dataset_:
                    input_dimensions = '28x28x1'
                    num_classes = 10
                    max_depth = 9
                    train_batch_size = 32
                    # train_num_epochs = 20

                    # dataset = mnist_dataset.read_data_sets(data_dirs + '/', one_hot=True)
                    # train_x, train_y, test_x, test_y = np.reshape(mnist_dataset.train_images(data_dirs), [-1, 784]), mnist_dataset.train_labels(data_dirs), \
                    #                                    np.reshape(mnist_dataset.test_images(data_dirs), [-1, 784]), mnist_dataset.test_labels(data_dirs)
                    (train_x, train_y), (test_x, test_y) = mnist.load_data()

                    train_x = np.expand_dims(train_x, axis=-1)
                    test_x = np.expand_dims(test_x, axis=-1)

                    train_x, test_x = normalize(train_x, test_x)

                    train_y = to_categorical(train_y, num_classes)
                    test_y = to_categorical(test_y, num_classes)

                    if ('channels_first' in data_format):
                        train_x = train_x.transpose(0, 3, 1, 2)
                        test_x = test_x.transpose(0, 3, 1, 2)

                    num_episodes = (
                        len(train_x) // train_batch_size
                    ) * 100  # episodes = num_steps * num_epochs

                elif 'fashion_mst' == dataset_:
                    input_dimensions = '28x28x1'
                    num_classes = 10
                    max_depth = 9
                    # num_episodes = 30000
                    train_batch_size = 32
                    # train_num_epochs = 25

                    (train_x, train_y), (test_x,
                                         test_y) = fashion_mnist.load_data()

                    train_x = np.expand_dims(train_x, axis=-1)
                    test_x = np.expand_dims(test_x, axis=-1)

                    train_x, test_x = normalize(train_x, test_x)

                    train_y = to_categorical(train_y, num_classes)
                    test_y = to_categorical(test_y, num_classes)

                    if ('channels_first' in data_format):
                        train_x = train_x.transpose(0, 3, 1, 2)
                        test_x = test_x.transpose(0, 3, 1, 2)

                    num_episodes = (
                        len(train_x) // train_batch_size
                    ) * 100  # episodes = num_steps * num_epochs
                elif 'cifar10' == dataset_:
                    input_dimensions = '32x32x3'
                    num_classes = 10
                    max_depth = 9
                    # num_episodes = 35000
                    train_batch_size = 32
                    # train_num_epochs = 25

                    (train_x, train_y), (test_x, test_y) = cifar10.load_data()

                    train_x, test_x = normalize(train_x, test_x)

                    train_y = to_categorical(train_y, num_classes)
                    test_y = to_categorical(test_y, num_classes)

                    if ('channels_last' in data_format):
                        train_x = train_x.transpose(0, 2, 3, 1)
                        test_x = test_x.transpose(0, 2, 3, 1)

                    num_episodes = (
                        len(train_x) // train_batch_size
                    ) * 120  # episodes = num_steps * num_epochs
                elif 'cifar100' == dataset_:
                    input_dimensions = '32x32x3'
                    num_classes = 100
                    max_depth = 18
                    train_batch_size = 32
                    # num_episodes = 60000
                    # train_num_epochs = 35

                    (train_x, train_y), (test_x, test_y) = cifar100.load_data()

                    train_x, test_x = normalize(train_x, test_x)

                    train_y = to_categorical(train_y, num_classes)
                    test_y = to_categorical(test_y, num_classes)

                    if ('channels_last' in data_format):
                        train_x = train_x.transpose(0, 2, 3, 1)
                        test_x = test_x.transpose(0, 2, 3, 1)

                    num_episodes = (
                        len(train_x) // train_batch_size
                    ) * 150  # episodes = num_steps * num_epochs
                elif 'tiny_imagenet' == dataset_:
                    input_dimensions = '64x64x3'
                    num_classes = 200
                    max_depth = 18
                    train_batch_size = 32
                    # num_episodes = 80000
                    # train_num_epochs = 30

                    (train_x,
                     train_y), (test_x,
                                test_y) = tiny_imagenet.load_data(data_dir +
                                                                  '/' +
                                                                  dataset_)

                    train_x, test_x = normalize(train_x, test_x)

                    train_y = to_categorical(train_y, num_classes)
                    test_y = to_categorical(test_y, num_classes)

                    if ('channels_last' in data_format):
                        train_x = train_x.transpose(0, 2, 3, 1)
                        test_x = test_x.transpose(0, 2, 3, 1)

                    num_episodes = (
                        len(train_x) // train_batch_size
                    ) * 180  # episodes = num_steps * num_epochs

                # shuffle the images and labels with the same seed so they stay aligned
                np.random.seed(777)
                np.random.shuffle(train_x)
                np.random.seed(777)
                np.random.shuffle(train_y)

                dataset = [train_x, train_y, test_x,
                           test_y]  # pack the dataset for the Network Manager

                train(dataset,
                      dataset_name=dataset_,
                      model_dir=model_dir + '/' + dataset_ + '/',
                      num_episodes=num_episodes,
                      max_depth=max_depth,
                      initial_filters=initial_filters,
                      num_layers=num_layers,
                      num_hidden=num_hidden,
                      initial_learning_rate=learning_rate,
                      learning_rate_decay_factor=learning_rate_decay_factor,
                      train_batch_size=train_batch_size,
                      test_batch_size=1,
                      train_num_epochs=train_num_epochs,
                      input_dimensions=input_dimensions,
                      num_classes=num_classes,
                      optimizer=optimizer,
                      num_steps_per_decay=num_steps_per_decay,
                      num_child_steps_per_cycle=num_child_steps_per_cycle,
                      exploration=exploration,
                      discount_factor=discount_factor,
                      log_dir=log_dir + '/' + dataset_ + '/',
                      output_dir=output_dir + '/' + dataset_ + '/',
                      logger=logger)

        # elif ('test' in mode):
        #     # 61, 24, 60,  5, 57, 55, 59, 3
        #     evaluate("5, 32, 2,  5, 3, 64, 2, 3", "model", data_dirs)
        elif ('analysis' in mode):
            plt.figure(figsize=(10, 10))

            plt.rcParams.update({'font.size': 6})

            count = 1
            for dataset_ in dataset_name.split(','):  # datasets
                checkPathExists(['plots/' + dataset_ + '/'])
                dataset = load_dataset(output_dir + '/' + dataset_ + '/' +
                                       dataset_ + '_results.csv')
                plot(dataset['policy_episode'], dataset['policy_loss'],
                     dataset['reward'], dataset['network_accuracy'], 0,
                     10000, "Episodes", "Policy Loss",
                     dataset_.replace('_', ' '), count,
                     "plots/" + dataset_ + "/episod_accuracy.png")
                count += 1

            # plt.savefig("plots/results.png")
            # plt.gca().yaxis.set_minor_formatter(NullFormatter())
            # Adjust the subplot layout, because the logit one may take more space
            # than usual, due to y-tick labels like "1 - 10^{-3}"
            plt.subplots_adjust(top=0.92,
                                bottom=0.22,
                                left=0.1,
                                right=0.6,
                                hspace=0.25,
                                wspace=0.35)

            plt.savefig("plots/results.pdf", bbox_inches='tight')
            plt.close()

            plt.figure()
            plt.rcParams.update({'font.size': 8})
            dataset = load_dataset(output_dir + '/cifar10/cifar10_results.csv')
            plot_cifar10(dataset['time_taken'], dataset['network_accuracy'], 0,
                         720, "Time (minutes)",
                         "Network validation accuracy (%)", '',
                         "plots/cifar10_time_accuracy.pdf")

    except Exception as ex:
        print("main function failed - " + str(ex))
        raise
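

# ---------------------------------------------------------------------------
# normalize(), get_cpu_devices() and get_gpu_devices() used above are project
# helpers that are not shown here.  The sketches below illustrate one common
# way to implement them (mean/std standardisation and TF device listing);
# the repository's own versions may differ.
# ---------------------------------------------------------------------------
import numpy as np
from tensorflow.python.client import device_lib


def normalize(train_x, test_x):
    """Standardise images with the training-set mean and std (sketch)."""
    train_x = train_x.astype(np.float32)
    test_x = test_x.astype(np.float32)
    mean = np.mean(train_x)
    std = np.std(train_x) + 1e-7
    return (train_x - mean) / std, (test_x - mean) / std


def get_cpu_devices():
    """Return the names of the visible CPU devices (sketch)."""
    return [d.name for d in device_lib.list_local_devices()
            if d.device_type == 'CPU']


def get_gpu_devices():
    """Return the names of the visible GPU devices (sketch)."""
    return [d.name for d in device_lib.list_local_devices()
            if d.device_type == 'GPU']

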
def main(argv):
    try:
        mode = ''
        pretrained_models = ''

        if (len(argv) > 1):
            try:
                opts, args = getopt.getopt(argv[1:], "ho:m:",
                                           ["operation=", "model="])
                for opt, arg in opts:
                    opt = opt.lower()
                    arg = arg.lower()
                    if opt == '-h':
                        print(
                            'voicenet.py -o <train|test|infer|analysis|serve> -m <inceptionV3,inception_resnetV2,vgg_19>'
                        )
                        return
                    elif opt in ("-o", "--operation"):
                        mode = arg
                    elif opt in ("-m", "--model"):
                        pretrained_models = arg

            except getopt.GetoptError:
                print(
                    'voicenet.py -o <train|test|infer|analysis|serve> -m <inceptionV3,inception_resnetV2,vgg_19>'
                )  # -o <data_prep|train_test|freeze_model|infer|serve|regress_infer|analysis>')
                return

            # normalise the (lower-cased) model aliases to their canonical names
            if pretrained_models in ('inceptionv3', 'inception'):
                pretrained_models = 'inceptionV3'
            elif pretrained_models in ('inception_resnetv2', 'resnet'):
                pretrained_models = 'inception_resnetV2'
            elif pretrained_models in ('vgg_19', 'vgg'):
                pretrained_models = 'vgg_19'

        # if len(argv):
        #     gConfig = getConfig(main_dir + 'config/' + getConfig(argv[1]).lower() + '.ini')   # get configuration
        # else:
        gConfig = getConfig('config/metavision.ini')  # get configuration

        site = gConfig['site']
        if (not len(mode)):
            mode = gConfig['mode']

        if (not len(pretrained_models)):
            pretrained_models = gConfig['pretrained_model_dir']

        datasets = gConfig['datasets']
        data_dirs = gConfig['data_dir']
        infer_dir = gConfig['infer_dir'] + "/" + datasets + "/"
        train_num_epochs = gConfig['train_num_epochs']
        test_num_epochs = gConfig['test_num_epochs']
        layer_start = gConfig['layer_start']
        infer_layer = gConfig['infer_layer']
        learning_rate = gConfig['learning_rate']
        learning_rate_decay_factor = gConfig['learning_rate_decay_factor']
        num_epochs_per_decay = gConfig['num_epochs_per_decay']
        train_batch_size = gConfig['train_batch_size']
        test_batch_size = gConfig['test_batch_size']
        optimizer = gConfig['optimizer']
        dropout_keep_prob = gConfig['dropout_keep_prob']
        extract_features_only = gConfig['extract_features_only']
        log_dir = gConfig['log_dir']
        port = gConfig['port']
        gpu_to_use = gConfig['gpu_to_use']
        certificate = gConfig['certificate']
        resource_dir = gConfig['resources']

        # init_inception = False
        # init_inception_resnet = False
        # init_vgg = False
        #
        # logits = None

        # create logger
        _log.basicConfig(filename=log_dir + "/" + "log.txt",
                         level=_log.DEBUG,
                         format='%(asctime)s %(message)s',
                         datefmt='%m/%d/%Y %I:%M:%S %p')
        logger = _log.getLogger("VoiceNet")
        logger.setLevel(_log.DEBUG)
        console = _log.StreamHandler()
        console.setLevel(_log.DEBUG)

        formatter = _log.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )  # create formatter
        console.setFormatter(formatter)
        logger.addHandler(console)

        if ('train' in mode or 'test' in mode):
            # get the available CPU/GPU devices and specify which GPU(s) to use
            cpu_devices = get_cpu_devices()
            gpu_devices = get_gpu_devices()
            if (len(gpu_devices) > 1):
                os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
                os.environ["CUDA_VISIBLE_DEVICES"] = str(gConfig["gpu_to_use"])

                print("The available GPU devices: " + str(gpu_devices))

                # devices, device_category = (gpu_devices, DeviceCategory.GPU) if len(gpu_devices) > 1 else (cpu_devices, DeviceCategory.CPU)

            for pretrained_model in pretrained_models.split(
                    ','):  # pre-trained architecture
                pretrained_model_dir = "pretrained_models/" + pretrained_model + "/"
                for dataset in datasets.split(','):  # datasets
                    data_dir = data_dirs + "/" + dataset + "/"
                    features_dir = gConfig[
                        'features_dir'] + "/" + pretrained_model + "/" + dataset + "/"
                    model_dir = gConfig[
                        'model_dir'] + "/" + pretrained_model + "/" + dataset + "/"
                    log_dir = gConfig[
                        'log_dir'] + "/" + pretrained_model + "/"  # + dataset + "/"
                    output_dir = gConfig[
                        'output_dir'] + "/" + pretrained_model + "/" + dataset + "/"

                    checkPathExists(
                        [model_dir, features_dir, log_dir, output_dir])

                    if (pretrained_model == 'inceptionV3'):  # inception v3
                        architecture_layers = inceptionV3_layers
                    elif (pretrained_model == 'inception_resnetV2'
                          ):  # inception_resnet v2
                        architecture_layers = inceptionResnetV2_layers
                    elif (pretrained_model == 'vgg_19'):  # vgg 19
                        architecture_layers = vgg_19_layers

                    logging(
                        pretrained_model + " Transfer learning on " + dataset +
                        " dataset", logger, 'info')

                    if (pretrained_model == 'vgg_19'):
                        layer_count_len = len(architecture_layers)
                    else:
                        layer_count_len = len(architecture_layers) - 1

                    if (extract_features_only):
                        layer_count_len = 2

                    for layer_count in range(layer_start, layer_count_len):
                        # learning_rate_ = round((learning_rate / (1.05 + ((layer_count - 1) / 10))), 8)
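                        # fine-tune longer as more layers are unfrozen: +10 epochs per additional layer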
                        train_num_epochs_ = train_num_epochs + (
                            10 * (layer_count - 1))

                        if ("train" in mode):  # training
                            with tf.Graph().as_default() as graph:
                                tf.logging.set_verbosity(
                                    tf.logging.ERROR
                                )  # only report ERROR-level TensorFlow messages

                                train_dataset, train_images, train_labels = dataPreprocessing(
                                    'train', data_dir, features_dir,
                                    train_batch_size, pretrained_model, logger)

                                if (not extract_features_only):
                                    train_accuracy = train(
                                        'train', train_dataset, train_images,
                                        train_labels, train_batch_size,
                                        train_num_epochs_, optimizer,
                                        learning_rate,
                                        learning_rate_decay_factor,
                                        num_epochs_per_decay,
                                        dropout_keep_prob, pretrained_model,
                                        model_dir, pretrained_model_dir,
                                        layer_count, logger)

                                graph.finalize()

                        if ("test" in mode):  # testing
                            with tf.Graph().as_default() as graph:
                                tf.logging.set_verbosity(
                                    tf.logging.ERROR
                                )  # only report ERROR-level TensorFlow messages

                                test_dataset, test_images, test_labels = dataPreprocessing(
                                    'test', data_dir, features_dir,
                                    test_batch_size, pretrained_model, logger)

                                if (not extract_features_only):
                                    test_accuracy = test(
                                        'test', test_dataset, test_images,
                                        test_labels, test_batch_size,
                                        test_num_epochs, pretrained_model,
                                        model_dir, layer_count, logger)
                                    # test_accuracy = test('test', test_dataset, data_dir, pretrained_model, model_dir, layer_count, logger)

                                graph.finalize()

                    if (not extract_features_only):
                        # save the results file
                        logging(
                            pretrained_model + " Writing results of " +
                            dataset + " dataset", logger, 'info')
                        if ("train" in mode):  # training
                            with open(output_dir + 'accuracy.txt',
                                      'w') as writeresultsDict:
                                writeresultsDict.write(
                                    'Architecture\tDataset\tLayer\tEpochs\tLearning\tTrain_Loss\tTrain_Accuracy\tMax_Train_Accuracy\n'
                                )
                                # for train_k, train_v in train_accuracy.items():
                                for layer_count in range(1, layer_count_len):
                                    print(train_accuracy[str(layer_count)])
                                    # accuracy = train_accuracy[layer_count].split('\t')
                                    writeresultsDict.write(
                                        pretrained_model + '\t' + dataset +
                                        '\t' + str(layer_count) + '\t' +
                                        train_accuracy[str(layer_count)] +
                                        '\n')

                        if ("test" in mode):  # testing
                            with open(output_dir + 'test_accuracy.txt',
                                      'w') as writeresultsDict:
                                writeresultsDict.write(
                                    'Architecture\tDataset\tLayer\tTest_Accuracy\n'
                                )
                                # for test_k, test_v in test_accuracy.items():
                                for layer_count in range(1, layer_count_len):
                                    # accuracy = test_accuracy[layer_count].split('\t')
                                    writeresultsDict.write(
                                        pretrained_model + '\t' + dataset +
                                        '\t' + str(layer_count) + '\t' +
                                        str(test_accuracy[str(layer_count)]) +
                                        '\n')

        elif 'infer' in mode:
            output_dir = gConfig[
                'output_dir'] + "/" + pretrained_models + "/" + datasets + "/"
            model_dir = gConfig[
                'model_dir'] + "/" + pretrained_models + "/" + datasets + "/" + str(
                    infer_layer) + "/"
            checkPathExists([output_dir, model_dir])

            inferenceResults = open(gConfig['output_dir'] + '/inference.txt',
                                    'w')
            inferenceResults.write(
                'Architecture\tFile\tActual Class\tPredicted Class\tAccuracy\tProbability\n')
            # test image
            # image = "willy_wonka_new.jpg"

            # preprocessing
            # input_tensor, _, _ = dataPreprocessing('infer', infer_dir, dataset, train_batch_size, pretrained_model, logger)

            # inference
            # predictions = inference(mode, pretrained_model, pretrained_model_dir, infer_dir + imagefile, channels = 3, return_top_predictions=5)
            # PlotResizedImage(sess, image_path=image_path)
            # ineption_prediction = ClassifyInception(sess, image_path, return_top_predictions=5)

            # print(pretrained_model + ' - network prediction: ' + str(predictions) + '\n')

            classes = []
            # each labels.txt line is expected in the form 'index: class_name'
            with open(resource_dir + '/' + datasets + '/labels.txt',
                      'r') as readfile:
                for line in readfile:
                    classes.append(line.split(':')[1].strip())

            sess_transfer_learner, end_points, logits, input_tensor = InitializeTransferLearner(
                model_dir, pretrained_models, classes)
            init_model = True

            # # initialization
            # if (pretrained_model == 'inceptionV3' and not init_inception):  # inception v3
            #     sess_inception = InitializeInception(pretrained_model_dir)
            #     init_inception = True
            # elif (pretrained_model == 'inception_resnetV2' and not init_inception_resnet):  # inception_resnet v2
            #     sess_inception_resnet, end_points, logits, input_tensor, imagenet_classes = InitializeInceptionResnet(model_dir)
            #     init_inception_resnet = True
            # elif (pretrained_model == 'vgg_19' and not init_vgg):  # vgg 19
            #     sess_vgg, prediction, input_tensor = InitializeVGG(pretrained_model_dir)
            #     init_vgg = True

            # print('Inception - Resnet network prediction: ' + str(ineption_resnet_prediction[0]) + '\n')

            logging(
                datasets + " inference on " + pretrained_models + " network",
                logger, 'info')

            # inference
            count = 0
            probability = 0.0
            accuracy_ = 0.0
            # entropy = 0.0
            for subdir, dirs, files in os.walk(infer_dir):
                for file in files:
                    if file.endswith('.png') or file.endswith('.jpg'):
                        # if (pretrained_model == 'inceptionV3' and init_inception):  # inception v3
                        #     probabilities, entropies = ClassifyInception(sess_inception, subdir + "/" + file)
                        # elif (pretrained_model == 'inception_resnetV2' and init_inception_resnet):  # inception_resnet v2
                        #     probabilities, entropies = ClassifyInceptionResnet(sess_inception_resnet, end_points, logits, input_tensor, subdir + "/" + file)
                        # elif (pretrained_model == 'vgg_19' and init_vgg):  # vgg 19
                        #     probabilities, entropies = ClassifyVGG(sess_vgg, prediction, input_tensor, subdir + "/" + file)

                        # if(init_model):
                        probabilities, actual_class, pred_class, accuracy, processed_image = ClassifyTransferLearner(
                            sess_transfer_learner,
                            end_points,
                            logits,
                            input_tensor,
                            subdir + "/" + file,
                            is_inference=True)

                        # save a Grad-CAM heat-map for the predicted class next
                        # to the input image as '<name>_cam.jpg'
                        grad_cam(subdir + "/" + file,
                                 processed_image,
                                 input_tensor,
                                 end_points,
                                 sess_transfer_learner,
                                 classes.index(pred_class),
                                 num_classes=len(classes),
                                 output_path=subdir + "/" +
                                 file.split('.')[0] + '_cam.jpg')

                        probability += probabilities
                        accuracy_ += accuracy

                        inferenceResults.write(pretrained_models + '\t' +
                                               str(file) + '\t' +
                                               actual_class + '\t' +
                                               pred_class + '\t' +
                                               str(accuracy) + '\t' +
                                               str(round(probabilities, 2)) +
                                               '\n')

                        count += 1

            if (count):
                probability = (probability * 100) / count
                accuracy_ = (accuracy_ * 100) / count

            inferenceResults.write(pretrained_models + '\t'
                                   "Accuracy: " + str(accuracy_) + '\t' +
                                   "Probability: " +
                                   str(round(probability, 2)) + '\n')

            print(pretrained_models + ' network predictions on ' + datasets +
                  " - Probability: " + str(probability) + " - Accuracy: " +
                  str(accuracy_))

            # if (pretrained_model == 'inceptionV3' and init_inception):  # inception v3
            #     CloseInceptionResnet(sess_inception)
            #     init_inception = False
            # elif (pretrained_model == 'inception_resnetV2' and init_inception_resnet):  # inception_resnet v2
            #     CloseInceptionResnet(sess_inception_resnet)
            #     init_inception_resnet = False
            # elif (pretrained_model == 'vgg_19' and init_vgg):  # vgg 19
            #     CloseVGG(sess_vgg)
            #     init_vgg = False

            CloseTransferLearner(sess_transfer_learner)
            inferenceResults.close()
        elif (mode == "serve"):  # serve
            output_dir = gConfig[
                'output_dir'] + "/" + pretrained_models + "/" + datasets + "/"
            model_dir = gConfig[
                'model_dir'] + "/" + pretrained_models + "/" + datasets + "/" + str(
                    infer_layer) + "/"
            checkPathExists([output_dir, model_dir])

            classes = []
            with open(resource_dir + '/' + datasets + '/labels.txt',
                      'r') as readfile:
                for line in readfile.readlines():
                    classes.append(line.split(':')[1].strip())

            model_server = Serving(site,
                                   port,
                                   model_dir,
                                   pretrained_models,
                                   infer_dir,
                                   output_dir,
                                   log_dir,
                                   gpu_to_use,
                                   classes,
                                   certificate=certificate,
                                   logger=logger)

            model_server.run()

    except Exception as ex:
        print("main function failed - " + str(ex))
        raise