Example No. 1
    def batch(self):
        batch_imgnames = list()
        lineidx_upper = self.lineidx + self.batch_size
        if lineidx_upper > self.sample_num:
            lineidx_upper = self.sample_num
        for idx in range(self.lineidx, lineidx_upper):
            batch_imgnames.append(self.imgnames[idx])
        batch_labels = self.labels[self.lineidx:lineidx_upper]
        self.lineidx = lineidx_upper

        if self.lineidx >= self.sample_num:
            self.lineidx = 0

        img_list = list()
        for imgname in batch_imgnames:
            img_path = self.imgs_path + imgname[0] + ".jpg"
            img = image.load_img(img_path, target_size=(299, 299))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            img_list.append(x)
        
        batch_imgs = np.reshape(np.stack(img_list), [-1, 299, 299, 3])
        batch_labels = np.reshape(batch_labels, [-1, self.labels.shape[1]])
        return batch_imgs, batch_labels
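For reference, a minimal driver sketch for the sequential batcher above, assuming loader is an instance of the class this method belongs to (with the batch_size and sample_num attributes the snippet uses); since lineidx wraps back to 0 after the last batch, one epoch is ceil(sample_num / batch_size) calls:

import numpy as np

steps_per_epoch = int(np.ceil(loader.sample_num / loader.batch_size))
for step in range(steps_per_epoch):
    batch_imgs, batch_labels = loader.batch()  # (N, 299, 299, 3) and (N, n_labels)
    # feed batch_imgs / batch_labels to the training step here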
Example No. 2
def extract_features(directory):
    base_model = InceptionV3(include_top=True, weights=None)
    weights_path = 'data/image_net.h5'

    base_model.load_weights(weights_path)

    new_input = base_model.layers[0].input
    hidden_layer = base_model.get_layer('avg_pool').output

    image_model = Model(new_input, hidden_layer)

    img_id = []
    img_matrices = []
    for img_file in os.listdir(directory):
        img_path = os.path.join(directory, img_file)
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = preprocess_input(x)

        img_id.append(os.path.splitext(img_file)[0])
        img_matrices.append(x)

    img_matrices = np.array(img_matrices)
    assert img_matrices.ndim == 4

    img_features = image_model.predict(img_matrices, verbose=1)

    return {'ids': img_id, 'features': img_features}
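A usage sketch for extract_features, assuming a directory of .jpg images and the data/image_net.h5 weights file referenced above; the directory and output paths are illustrative:

import pickle

result = extract_features('data/images')      # directory path is an assumption
with open('image_features.pkl', 'wb') as f:   # illustrative output file
    pickle.dump(result, f)
print(len(result['ids']), result['features'].shape)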
Example No. 3
def make_predictions(model, img_path):

    #img_path = '/home/cmpt726/sport3/validation/hockey/img_2997.jpg'
    img = image.load_img(img_path, target_size=IMSIZE)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)

    x = inception.preprocess_input(x)

    preds = model.predict(x)
    print('Predicted:', preds)
    return preds
Example No. 4
def predict(model, img_path):

    # img_path = 'sport3/validation/hockey/img_2997.jpg'
    img = image.load_img(img_path, target_size=IMSIZE)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = inception.preprocess_input(x)
    # preds = model.predict_classes(x, verbose=1)
    preds = model.predict(x, batch_size=1, verbose=1)
    print('Predicted:', preds)

    return preds
Example No. 5
def predict_from_file(model, img_file):
    """Run model prediction on image
    Args:
      model: keras model
      img_file: image path
    Returns:
      array of class probabilities for the image
    """
    target_size = (IM_WIDTH, IM_HEIGHT)
    img = image.load_img(img_file, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)[0]
    return preds
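A usage sketch for predict_from_file, assuming IM_WIDTH and IM_HEIGHT are defined; the model and image paths are illustrative:

import numpy as np
from keras.models import load_model

model = load_model('model.h5')                # illustrative model file
probs = predict_from_file(model, 'test.jpg')  # illustrative image path
print('top class:', int(np.argmax(probs)), 'p = %.3f' % float(np.max(probs)))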
Example No. 6
def get_feature_mat(mymodel, input_data, batch_size=128):
    idx = 0
    preds_mat = None
    while idx < len(input_data):
        x = input_data[idx:idx + batch_size]
        x = preprocess_input(x)
        preds = mymodel.predict(x)

        if preds_mat is None:
            preds_mat = preds
        else:
            preds_mat = np.concatenate([preds_mat, preds])
        idx += batch_size

    return preds_mat.astype(np.float32)
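Note that np.concatenate inside the loop copies the accumulated matrix on every iteration; a sketch of the same computation that collects the batches in a list and concatenates once at the end (get_feature_mat_fast is a hypothetical name):

def get_feature_mat_fast(mymodel, input_data, batch_size=128):
    preds_list = []
    for idx in range(0, len(input_data), batch_size):
        # Copy the slice: preprocess_input may modify its argument in place
        x = preprocess_input(np.array(input_data[idx:idx + batch_size]))
        preds_list.append(mymodel.predict(x))
    return np.concatenate(preds_list).astype(np.float32)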
Example No. 7
def extract_feature_from_image(file_dir):

    img = image.load_img(file_dir, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    base_model = InceptionV3(include_top=True, weights=None)
    weights_path = 'data/image_net.h5'

    base_model.load_weights(weights_path)

    new_input = base_model.layers[0].input
    hidden_layer = base_model.get_layer('avg_pool').output

    image_model = Model(new_input, hidden_layer)

    return image_model.predict(x)
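extract_feature_from_image rebuilds InceptionV3 and reloads its weights on every call; when featurizing many images, a sketch that constructs the model once and reuses it (same layers and weights path as the snippet above):

base_model = InceptionV3(include_top=True, weights=None)
base_model.load_weights('data/image_net.h5')
image_model = Model(base_model.layers[0].input,
                    base_model.get_layer('avg_pool').output)

def extract_feature(file_dir):
    img = image.load_img(file_dir, target_size=(299, 299))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    return image_model.predict(preprocess_input(x))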
Example No. 8
    def random_batch(self):
        rand = list()
        for i in range(self.batch_size):  # xrange is Python 2 only
            rand.append(self.get_one_random_balance_index()[0])
        batch_imgnames = list()
        for idx in rand:
            batch_imgnames.append(self.imgnames[idx])
        batch_labels = self.labels[rand]

        img_list = list()
        for imgname in batch_imgnames:
            img_path = self.imgs_path + imgname[0] + ".jpg"
            img = image.load_img(img_path, target_size=(299, 299))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            img_list.append(x)

        batch_imgs = np.reshape(np.stack(img_list), [-1, 299, 299, 3])
        batch_labels = np.reshape(batch_labels, [-1, self.labels.shape[1]])
        return batch_imgs, batch_labels
Example No. 9
def predImg(model, file_path, IMSIZE):
    try:
        img = image.load_img(file_path, target_size=IMSIZE)
        ig = image.img_to_array(img)
        ig = np.expand_dims(ig, axis=0)
        ig = inception.preprocess_input(ig)
        preds = model.predict(ig)
        # Pick the highest-scoring of the four classes
        max_index = int(np.argmax(preds[0]))
        mapping = {
            0: 'cross',
            1: 'left',
            2: 'right',
            3: 'straight'
        }
        return mapping.get(max_index, "error")
    except Exception as err:  # BaseException would also swallow KeyboardInterrupt
        print(err)
Example No. 10
    def predict_single(self, img_path, is_print_pred=True):
        """
        Predicts single image to it's chances
        :param img_path: Path of the image to classify
        :param model: Pre-compiled model
        :return:
        """
        img = image.load_img(img_path, target_size=self.image_size)
        resized_img = image.img_to_array(img)
        resized_img = np.expand_dims(resized_img, axis=0)
        resized_img = inception.preprocess_input(resized_img)
        predicted_data = self.model.predict(resized_img)

        # Formatting the result
        occupied = np.array(predicted_data[:, 0])
        vacant = np.array(predicted_data[:, 1])

        predicted_data = {'Occupied': occupied[0], 'Vacant': vacant[0]}

        # Print prediction after each prediction
        if is_print_pred:
            print(predicted_data)
        return predicted_data
Example No. 11
def testing(weights_path="weights_gender_and_age/weights.h5", dataset_base_dir="sorted_gender_and_age"):
    dir_list = next(os.walk(dataset_base_dir + '/valid'))[1]

    classes = np.sort(dir_list)
    nb_classes = len(classes)
    # Setup the inceptionV3 model, pretrained on ImageNet dataset, without the fully connected part.
    base_model = InceptionV3(weights='imagenet', include_top=False)  # include_top=False excludes final FC layer
    # Add a new fully connected layer at the top of the base model. The weights of this FC layer are random
    # so they need to be trained
    model = add_new_last_layer(base_model, nb_classes)
    # We have already trained our model, so we just need to load it
    model.load_weights(weights_path)
    # Walk the validation set, classifying each image and accumulating
    # gender and age statistics as we go.

    file_processed = 0
    f_count = 0  # tested
    success_f_count = 0  # successfully classified
    m_count = 0  # tested
    success_m_count = 0  # successfully classified
    one_off = 0  # almost got the age category right
    two_off = 0
    three_off = 0
    four_off = 0
    more_off = 0
    success_age_count = 0
    fully_correct = 0  # both age and gender are correct

    file_count = sum([len(files) for r, d, files in os.walk(dataset_base_dir + "/valid/")])

    offsets = dict()  # stores how close to the actual age the prediction was

    for combined_class in dir_list:
        for root, dirs, files in os.walk(dataset_base_dir + "/valid/" + combined_class):
            print("Number of items in " + combined_class + ": " + str(len(files)))
            for file in files:
                file_processed = file_processed + 1
                if file.lower().endswith('.jpg'):
                    img_path = dataset_base_dir + "/valid/" + combined_class + "/" + file
                    if os.path.isfile(img_path):
                        img = image.load_img(img_path, target_size=(299, 299))
                        x = image.img_to_array(img)
                        x = np.expand_dims(x, axis=0)
                        x = preprocess_input(x)

                        preds = model.predict(x)
                        # decode the results into a list of tuples (class, description, probability)
                        # (one such list for each sample in the batch)
                        label = classes[np.argmax(preds)]
                        p = preds[0][np.argmax(preds)] * 100
                        gender_ok = False
                        age_ok = False

                        if 'f' in label:  # classified as female
                            f_count = f_count + 1
                            if 'f' in combined_class:  # actually a female
                                success_f_count = success_f_count + 1
                                gender_ok = True

                        elif 'm' in label:
                            m_count = m_count + 1
                            if 'm' in combined_class:
                                success_m_count = success_m_count + 1
                                gender_ok = True

                        expected = 100  # dummy values that should never be used
                        predicted = -100

                        # As the age ranges are ordered, we can use the indices to determine
                        # how close to the expected value the prediction was
                        for index, cat in enumerate(age_categories):
                            if cat in combined_class:
                                expected = index
                            if cat in label:
                                predicted = index

                        offset = expected - predicted

                        offsets[offset] = offsets.get(offset, 0) + 1

                        if offset == 0:
                            age_ok = True
                            success_age_count = success_age_count + 1
                        elif abs(offset) == 1:
                            one_off = one_off + 1
                        elif abs(offset) == 2:
                            two_off = two_off + 1
                        elif abs(offset) == 3:
                            three_off = three_off + 1
                        elif abs(offset) == 4:
                            four_off = four_off + 1
                        else:
                            print("worse than 4-off:" + str(offset))
                            more_off = more_off + 1

                        if not gender_ok or not age_ok:
                            print("[class-err] Exp: " + combined_class + ", Got: " + label + " (p=" + (
                                "%.2f" % p) + "%, img=" + file + ")")
                        else:
                            fully_correct = fully_correct + 1

                    else:
                        print("Error")

                # Prints current progress in case we're dealing with a large dataset
                if file_processed % 50 == 0:
                    print("..." + "%.2f" % (100 * file_processed / file_count) + " %")

    total_age_classifications = success_age_count + one_off + two_off + three_off + four_off + more_off

    print()
    print("=> Female Accuracy: " + str(100 * success_f_count / f_count) + " %")
    print("=> Male Accuracy: " + str(100 * success_m_count / m_count) + " %")
    print("=> Gender global accuracy: " + "%.2f" % (
        100 * (success_m_count + success_f_count) / (m_count + f_count)) + " %")
    print("=> Gender average accuracy (in case test sets aren't equally distributed): " + "%.2f" % (
        (100 * success_f_count / f_count + 100 * success_m_count / m_count) / 2) + " %")
    print()
    print("====================================")
    print()
    print("=> Age Accuracy: " + str(100 * success_age_count / total_age_classifications) + " %")
    print("=> 1-off: " + str(100 * one_off / total_age_classifications) + " %")
    print("=> 2-off: " + str(100 * two_off / total_age_classifications) + " %")
    print("=> 3-off: " + str(100 * three_off / total_age_classifications) + " %")
    print("=> 4-off: " + str(100 * four_off / total_age_classifications) + " %")
    print("=> worse: " + str(100 * more_off / total_age_classifications) + " %")
    print()
    # Rough text histogram of the age-offset distribution
    for key in sorted(offsets):
        to_print = str(key) + ":\t"
        for i in range(0, offsets[key] // 2):
            to_print = to_print + Back.GREEN + '_' + Back.RESET
        to_print = to_print + ' (' + str(offsets[key]) + ')'
        print(to_print)
    print()
    print("====================================")
    print()
    print("=> Full classification accuracy: " + str(100 * fully_correct / total_age_classifications) + " %")
Example No. 12
def main(args):
    """Use transfer learning and fine-tuning to train a network on a new dataset"""

    n_classes = len(glob.glob(args.image_dir + "/*"))
    # nb_val_samples = get_nb_files(args.val_dir)
    nb_epoch = int(args.nb_epoch)
    batch_size = int(args.batch_size)

    # model_file = os.path.join(
    #     args.model_dir, 'retrain_incep_v3_model_config.json')
    # if not os.path.exists(model_file):
    base_model = InceptionV3(weights='imagenet', include_top=False)
    base_model = add_pooling_layer(base_model)
    iv3_model = InceptionV3(weights='imagenet', include_top=True)
    iv3_base_model = Model(inputs=iv3_model.input,
                           outputs=iv3_model.layers[311].output)
    # Debugging block: sanity-check predictions from the two base models on a
    # few sample images before building the training pipeline.
    img_paths = [
        'OBOG5055.JPG', 'wallhaven-220382.jpg', 'wallhaven-295153.jpg',
        'wallhaven-605824.jpg'
    ]
    target_size = (IM_WIDTH, IM_HEIGHT)
    for img_path in img_paths:
        print("img_path: {}".format(img_path))
        img = image.load_img(img_path, target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        print("model: iv3_base_model")
        preds = iv3_base_model.predict(x)
        print(preds)
        print("model: base_model")
        preds = base_model.predict(x)
        print(preds)

    model = add_final_layer(base_model.input, base_model.output, n_classes)
    # model = add_new_last_layer(base_model, n_classes)
    # with open(model_file, 'w') as f:
    #     f.write(model.to_json())
    # else:
    #     with open(model_file) as f:
    #         model = model_from_json(f.read())
    #         print('reloading model...')

    image_lists, n_classes = create_or_load_training_data(args)
    nb_train_samples = get_nb_files(args.image_dir)
    print('total no. samples: {}'.format(nb_train_samples))

    if args.transfer_learning:
        # use bottleneck, here the model must be identical to the original top layer
        retrain_input_tensor = Input(shape=base_model.output_shape[1:])  # drop the batch dimension
        retrain_model = add_final_layer(retrain_input_tensor,
                                        retrain_input_tensor, n_classes)
        check_point_file = os.path.join(args.model_dir,
                                        "retrain_weights_IV3.hdf5")
        if os.path.exists(check_point_file):
            print('loading checkpoint {}'.format(check_point_file))
            retrain_model.load_weights(check_point_file)

        retrain_model.compile(optimizer='rmsprop',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])
        bottleneck_dir = os.path.join(args.model_dir,
                                      'bottleneck_retrain_keras/')

        def bottle_pred_func(file):
            return predict_from_file(base_model, file)

        if not os.path.exists(bottleneck_dir):
            cache_bottlenecks(image_lists, args.image_dir, bottleneck_dir,
                              bottle_pred_func)
        train_sequence = cached_bottlenecks_sequence(
            image_lists, args.batch_size, 'training', bottleneck_dir,
            args.image_dir, bottle_pred_func)
        validation_data = cached_bottlenecks_sequence(
            image_lists,
            args.validation_batch_size,
            'validation',
            bottleneck_dir,
            args.image_dir,
            bottle_pred_func,
            sequence=False)
        # args.model_dir, "weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5")
        checkpoint = ModelCheckpoint(check_point_file,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max',
                                     save_weights_only=True)
        tb_callback = TensorBoard(log_dir=args.model_dir,
                                  histogram_freq=2,
                                  write_graph=True)
        callbacks_list = [checkpoint, tb_callback]
        history_tl = retrain_model.fit_generator(
            train_sequence,
            epochs=nb_epoch,
            steps_per_epoch=nb_train_samples // batch_size,
            validation_data=validation_data,
            validation_steps=nb_train_samples // batch_size * 5,
            class_weight='auto',
            callbacks=callbacks_list)

        if not args.no_plot:
            plot_training(history_tl)

    if args.fine_tune:
        assert model.layers[
            FINE_TUNE_FINAL_LAYER_INDEX].name == FINE_TUNE_FINAL_LAYER_NAME
        set_trainable_layers(
            trainable_layer_list=model.layers[:FINE_TUNE_FINAL_LAYER_INDEX +
                                              1],
            frozen_layer_list=model.layers[FINE_TUNE_FINAL_LAYER_INDEX + 1:])
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        # NOTE: train_sequence and validation_data are created in the
        # transfer-learning branch above, so fine-tuning assumes that branch ran first.
        history_ft = model.fit_generator(
            train_sequence,
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=nb_epoch,
            validation_data=validation_data,
            validation_steps=nb_train_samples // batch_size,
            class_weight='auto')
        if not args.no_plot:
            plot_training(history_ft)

    model.save(os.path.join(args.model_dir, 'inceptionv3-ft.model'))
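For context, the transfer-learning branch trains only the new head on cached bottleneck features (the frozen base model's outputs), so InceptionV3 never has to be re-run during head training. A minimal sketch of the idea, independent of the helper functions used above (all_images, labels_one_hot, and the cache filename are assumptions):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# 1. Run the frozen base once over the preprocessed dataset and cache the
#    pooled outputs (all_images is an assumed (N, 299, 299, 3) array).
features = base_model.predict(all_images)
np.save('bottlenecks.npy', features)            # illustrative cache file

# 2. Train only a small softmax head on the cached features.
head = Sequential([Dense(n_classes, activation='softmax',
                         input_shape=features.shape[1:])])
head.compile(optimizer='rmsprop', loss='categorical_crossentropy',
             metrics=['accuracy'])
head.fit(features, labels_one_hot, epochs=10)   # labels_one_hot is assumed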
Example No. 13
def generator_img_batch(data_list,
                        nbr_classes=1,
                        batch_size=32,
                        return_label=True,
                        hsv=False,
                        crop_method=center_crop_PIL,
                        preprocess=False,
                        img_width=299,
                        img_height=299,
                        random_shuffle=True,
                        save_to_dir=None,
                        augment=False,
                        call_counts='',
                        normalize=True,
                        crop=True,
                        mirror=False):
    '''
    A generator that yields a batch of (data, label).

    Input:
        data_list      : a tuple containing two lists of paired left/right eye
                         data, e.g.
                         ("/data/workspace/dataset/Cervical_Cancer/train/10_left.jpg 0",
                          "/data/workspace/dataset/Cervical_Cancer/train/10_right.jpg 0")
        random_shuffle : whether to shuffle rows in the data_list
        batch_size     : batch size

    Output:
        (X_batch, Y_batch)
    '''

    randnum = int(100 * random.random())  # np.int is deprecated; use int

    left_data_list, right_data_list = data_list

    N = len(left_data_list)

    if random_shuffle:
        random.seed(randnum)
        random.shuffle(left_data_list)
        random.seed(randnum)
        random.shuffle(right_data_list)

    batch_index = 0
    while True:
        # Make the augmentation sequence deterministic per batch so the left
        # and right images of a pair receive identical transforms.
        seq_fixed = seq.to_deterministic()
        current_index = (batch_index * batch_size) % N
        # If this is the last batch, shrink it to the remaining samples
        if N >= (current_index + batch_size):
            current_batch_size = batch_size
            batch_index += 1
        else:
            current_batch_size = N - current_index
            batch_index = 0

        X_left_batch = np.zeros((current_batch_size, img_width, img_height, 3))
        X_right_batch = np.zeros(
            (current_batch_size, img_width, img_height, 3))
        Y_left_batch = np.zeros((current_batch_size, nbr_classes))
        Y_right_batch = np.zeros((current_batch_size, nbr_classes))

        for i in range(current_index, current_index + current_batch_size):
            line_left = left_data_list[i].strip().split(' ')
            label_left = int(line_left[-1])
            img_path_left = line_left[0]
            line_right = right_data_list[i].strip().split(' ')
            label_right = int(line_right[-1])
            img_path_right = line_right[0]

            # Scale and crop the image so all samples stack into one batch
            if crop:
                left_img = scale_byRatio(img_path_left,
                                         return_width=img_width,
                                         crop_method=crop_method)
                right_img = scale_byRatio(img_path_right,
                                          return_width=img_width,
                                          crop_method=crop_method)
            else:
                left_img = load_img(img_path_left)
                right_img = load_img(img_path_right)
            left_img = img_to_array(left_img)
            right_img = img_to_array(right_img)

            if hsv:
                # Convert from RGB (PIL channel order) to HSV space
                left_img = cv2.cvtColor(left_img, cv2.COLOR_RGB2HSV)
                right_img = cv2.cvtColor(right_img, cv2.COLOR_RGB2HSV)
                # Mapping to [0, 255]
                left_img = np.interp(
                    left_img, [left_img.min(), left_img.max()], [0, 255])
                right_img = np.interp(
                    right_img,
                    [right_img.min(), right_img.max()], [0, 255])

            # Place the current image into the batch
            X_left_batch[i - current_index] = left_img
            X_right_batch[i - current_index] = right_img
            # To one-hot encode the labels instead (multi-class case):
            #Y_left_batch[i - current_index, label_left] = 1
            #Y_right_batch[i - current_index, label_right] = 1
            Y_left_batch[i - current_index] = label_left
            Y_right_batch[i - current_index] = label_right

        if mirror:
            if random.random() < 0.5:
                # Randomly swap the eyes and flip both images horizontally
                X_left_batch, X_right_batch = X_right_batch, X_left_batch
                Y_left_batch, Y_right_batch = Y_right_batch, Y_left_batch
                X_left_batch = iaa.Fliplr(1).augment_images(X_left_batch)
                X_right_batch = iaa.Fliplr(1).augment_images(X_right_batch)

        if augment:
            X_left_batch = seq_fixed.augment_images(X_left_batch)
            # Flip the right eye, apply the same deterministic augmentation as
            # the left eye, then flip it back
            X_right_batch = iaa.Fliplr(1).augment_images(X_right_batch)
            X_right_batch = seq_fixed.augment_images(X_right_batch)
            X_right_batch = iaa.Fliplr(1).augment_images(X_right_batch)

        X_left_batch = X_left_batch.astype(np.uint8)
        X_right_batch = X_right_batch.astype(np.uint8)

        # Fundus (retinal) image preprocessing
        if preprocess:
            for i in range(current_batch_size):
                X_left_batch[i] = retinal_img_preprocessing(
                    X_left_batch[i],
                    return_image=True,
                    result_size=(img_width, img_height))
                X_right_batch[i] = retinal_img_preprocessing(
                    X_right_batch[i],
                    return_image=True,
                    result_size=(img_width, img_height))

        # Save the augmented images to disk
        if save_to_dir:
            for i in range(current_index, current_index + current_batch_size):
                tmp_path_left = left_data_list[i].strip().split(' ')[0]
                tmp_path_right = right_data_list[i].strip().split(' ')[0]
                image_name_left = call_counts + tmp_path_left.split(os.sep)[-1]
                image_name_right = call_counts + tmp_path_right.split(
                    os.sep)[-1]
                #                image_name = '_'.join(basedir)
                img_to_save_path_left = os.path.join(save_to_dir,
                                                     image_name_left)
                img_to_save_path_right = os.path.join(save_to_dir,
                                                      image_name_right)

                img_left = array_to_img(X_left_batch[i - current_index])
                img_right = array_to_img(X_right_batch[i - current_index])

                img_left.save(img_to_save_path_left)
                img_right.save(img_to_save_path_right)

        if normalize:
            X_left_batch = X_left_batch.astype(np.float64)
            X_right_batch = X_right_batch.astype(np.float64)
            X_left_batch = preprocess_input(X_left_batch)
            X_right_batch = preprocess_input(X_right_batch)

        X_batch = {'left_input': X_left_batch, 'right_input': X_right_batch}
        Y_batch = {'left_output': Y_left_batch, 'right_output': Y_right_batch}

        if return_label:
            yield (X_batch, Y_batch)
        else:
            yield (X_left_batch, X_right_batch)
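A usage sketch for the generator, assuming a two-input Keras model whose inputs and outputs match the 'left_input'/'right_input' and 'left_output'/'right_output' keys yielded above; the list files are illustrative:

left_list = open('train_left.txt').read().splitlines()    # "path label" rows
right_list = open('train_right.txt').read().splitlines()  # illustrative files

gen = generator_img_batch((left_list, right_list), nbr_classes=1,
                          batch_size=32, normalize=True, augment=True)
model.fit_generator(gen, steps_per_epoch=len(left_list) // 32, epochs=10)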
Example No. 14
def show_webcam(mirror=False, camSource=0):
    IMSIZE = (299, 299)
    model = load_model()

    #Set up Camera
    font = cv2.FONT_HERSHEY_SIMPLEX
    cam = cv2.VideoCapture(camSource)

    #emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"] #Emotion list
    #emotions = ["neutral", "anger", "disgust", "happy", "surprise"]
    #emotions = ["anger", "disgust", "fear", "happy", "sad", "surprise","neutral"]
    emotions = ["happy", "neutral", "surprise"]

    # Face Recognizer
    faceDet = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    #faceDet2 = cv2.CascadeClassifier(path+"haarcascade_frontalface_alt2.xml")
    #faceDet3 = cv2.CascadeClassifier(path+"haarcascade_frontalface_alt.xml")
    #faceDet4 = cv2.CascadeClassifier(path+"haarcascade_frontalface_alt_tree.xml")

    no_frames = 1
    while True:
        preds = [0] * len(emotions)
        #Capture image from camera
        for i in range(no_frames):
            ret_val, img = cam.read()
            if mirror:
                img = cv2.flip(img, 1)

            # Detect Face
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            clahe_image = clahe.apply(gray)
            face = faceDet.detectMultiScale(clahe_image,
                                            scaleFactor=1.1,
                                            minNeighbors=15,
                                            minSize=(10, 10),
                                            flags=cv2.CASCADE_SCALE_IMAGE)
            #face2 = faceDet2.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
            #face3 = faceDet3.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
            #face4 = faceDet4.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
            if len(face) == 1:
                facefeatures = face
            #elif len(face2) == 1:
            #    facefeatures = face2
            #elif len(face3) == 1:
            #    facefeatures = face3
            #elif len(face4) == 1:
            #    facefeatures = face4
            else:
                facefeatures = ""

            # If a face was found, predict the corresponding emotion
            for (x, y, w, h) in facefeatures:  # coordinates and size of the face rectangle
                out = clahe_image[y:y + h, x:x + w]  #Cut the frame to size
                cv2.imwrite('temp.jpg', out)

                img2 = image.load_img('temp.jpg', target_size=IMSIZE)
                x = image.img_to_array(img2)
                x = np.expand_dims(x, axis=0)
                x = inception.preprocess_input(x)
                pred = np.argmax(model.predict(x))
                #print pred
                preds[pred] = preds[pred] + 1

        #output predicted emotion on camera
        if np.max(preds) > 0:
            expression = emotions[np.argmax(preds)]
        else:
            expression = "No face Found"

        cv2.putText(img, expression, (10, 40), font, 1, (255, 255, 255), 2)
        cv2.imshow('my webcam', img)
        #time.sleep(0.15)

        if cv2.waitKey(1) == 27:  # esc to quit
            break

    cv2.destroyAllWindows()
    cam.release()
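The temp.jpg round-trip on every frame can be avoided by preprocessing the face crop in memory; a sketch of equivalent steps, replacing the imwrite/load_img pair inside the loop above:

face = cv2.cvtColor(out, cv2.COLOR_GRAY2RGB)   # 3 channels, like load_img output
face = cv2.resize(face, IMSIZE)
x = np.expand_dims(face.astype('float32'), axis=0)
x = inception.preprocess_input(x)
pred = np.argmax(model.predict(x))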
body = """ """

sport_list = ['basketball', 'hockey', 'soccer']
ranges = [(2000, 2999), (2000, 2999), (100, 999)]
i = 0
for sport in sport_list:
    for index in range(0, batch_size):
        number = random.randint(ranges[i][0], ranges[i][1])
        path = test_dir + sport + '/' + 'img_' + str(number) + '.jpg'

        img = image.load_img(path, target_size=IMSIZE)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)

        x = inception.preprocess_input(x)

        preds = model.predict(x)
        print(index, 'Predicted:', preds)

        predict = preds[0]
        prediction = ""
        for p in predict:
            prediction += str(p) + " \t "

        body += """<tr>	<td> <img src='""" + path + """' width="300" height="300"/> </td> <td>""" + str(
            prediction) + """ </td> </tr>\n"""

    i = i + 1
    html_file = open('a3_15_' + sport + '.html', 'w')
    message = head + body + tail