Example #1
def test_batch_generator(test_files, batch_size, img_width=224, img_height=224, model='vgg16'):
    N = len(test_files)
    batch_index = 0
    while True:
        current_index = (batch_index * batch_size) % N
        if N >= (current_index + batch_size):
            current_batch_size = batch_size
            batch_index += 1
        else:
            current_batch_size = N - current_index
            batch_index = 0

        X_batch = np.zeros((current_batch_size, img_width, img_height, 3))
        for i in range(current_index, current_index + current_batch_size):
            img_path = test_files[i]
            img = load_img(img_path, img_width)
            X_batch[i - current_index] = img

        X_batch = X_batch.astype(np.float16)
        if model == 'vgg16':
            X_batch = vgg16_preprocess_input(X_batch)
        elif model == 'inceptv3':
            X_batch = incept3_preprocess_input(X_batch)

        yield X_batch
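A hedged usage sketch for this generator, assuming model is an already-compiled Keras classifier and test_files is a list of image paths (neither is defined in the snippet above):

# hypothetical usage: one full pass over the test set
import math
batch_size = 32
steps = math.ceil(len(test_files) / batch_size)
test_gen = test_batch_generator(test_files, batch_size)
predictions = model.predict(test_gen, steps=steps)  # model.predict_generator() in older Keras versions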
Example #2
def generate_train_data(dataframe, nbr_classes, img_root_path, shuffle=True, 
                        augment=False, img_width=224, img_height=224, model='vgg16'):
    N = dataframe.shape[0]
    if shuffle:
        # reset the index so positional assignment below follows the shuffled order
        dataframe = sk_shuffle(dataframe).reset_index(drop=True)
    X_train = np.zeros((N, img_width, img_height, 3))
    Y_train = np.zeros((N, nbr_classes))
    for index, row in dataframe.iterrows():
        driver_id = row['subject']
        classname = row['classname']
        label = int(classname[-1])
        img_name = row['img']
        img_path = os.path.join(img_root_path, 'train', classname, img_name)

        img = load_img(img_path, img_width)
        X_train[index] = img
        Y_train[index, label] = 1
    
    X_train = X_train.astype(np.float16)
    if model == 'vgg16':
        X_train = vgg16_preprocess_input(X_train)
    elif model == 'inceptv3':
        X_train = incept3_preprocess_input(X_train)

    return X_train, Y_train
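A minimal usage sketch, assuming df_train is a pandas DataFrame with 'subject', 'classname' and 'img' columns and model is a compiled Keras classifier (both live outside this snippet; the path is hypothetical):

X_train, Y_train = generate_train_data(df_train, nbr_classes=10,
                                        img_root_path='data/imgs')  # hypothetical path
model.fit(X_train, Y_train, batch_size=32, epochs=10, validation_split=0.1)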
Example #3
def load_images_vgg16(files):
    # files is the list of image file names in one set (train, dev, or test)
    model = VGG16()
    images_dict = {}
    for name in files:
        img = image.load_img(flicker8k_dataset + name, target_size=(224, 224))
        img = image.img_to_array(img)
        img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
        img = vgg16_preprocess_input(img)
        images_dict[name.split('.')[0]] = img
    return images_dict
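The VGG16() instance created above is never used inside the function itself; presumably the preprocessed arrays are later pushed through VGG16 to get image features. A hedged sketch of that follow-up step, assuming the standard Keras feature-extraction pattern (the fc2 layer choice is an assumption):

from keras.applications.vgg16 import VGG16
from keras.models import Model

# hypothetical follow-up: extract 4096-d fc2 features for every preprocessed image
base = VGG16()
extractor = Model(inputs=base.input, outputs=base.get_layer('fc2').output)
features = {img_id: extractor.predict(arr) for img_id, arr in images_dict.items()}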
Example #4
def batch_generator(dataframe, nbr_classes, img_root_path, batch_size, shuffle=True, augment=False,
                     return_label=True, img_width=224, img_height=224, model='vgg16'):
    N = dataframe.shape[0]
    if shuffle:
        # reset the index so that .loc[i] below follows the shuffled order
        dataframe = sk_shuffle(dataframe).reset_index(drop=True)
    batch_index = 0
    while True:
        current_index = (batch_index * batch_size) % N
        if N >= (current_index + batch_size):
            current_batch_size = batch_size
            batch_index += 1
        else:
            current_batch_size = N - current_index
            batch_index = 0
            if shuffle:
                dataframe = sk_shuffle(dataframe).reset_index(drop=True)
        
        X_batch = np.zeros((current_batch_size, img_width, img_height, 3))
        Y_batch = np.zeros((current_batch_size, nbr_classes))
        
        for i in range(current_index, current_index + current_batch_size):
            row = dataframe.loc[i,:]
            driver_id = row['subject']
            classname = row['classname']
            label = int(classname[-1])
            img_name = row['img']
            img_path = os.path.join(img_root_path, 'train', classname, img_name)

            img = load_img(img_path, img_width)
            X_batch[i - current_index] = img
            if return_label:
                Y_batch[i - current_index, label] = 1

        if augment:
            X_batch = X_batch.astype(np.uint8)
            X_batch = seq.augment_images(X_batch)

        X_batch = X_batch.astype(np.float16)
        if model == 'vgg16':
            X_batch = vgg16_preprocess_input(X_batch)
        elif model == 'inceptv3':
            X_batch = incept3_preprocess_input(X_batch)

        if return_label:
            yield (X_batch, Y_batch)
        else:
            yield X_batch
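A hedged sketch of wiring this generator into Keras training; df_train, df_valid, the compiled model and the directory path are assumptions, not part of the original snippet:

import math

batch_size = 32
train_gen = batch_generator(df_train, nbr_classes=10, img_root_path='data/imgs',  # hypothetical path
                            batch_size=batch_size, shuffle=True, augment=True)
valid_gen = batch_generator(df_valid, nbr_classes=10, img_root_path='data/imgs',
                            batch_size=batch_size, shuffle=False, augment=False)
model.fit(train_gen,
          steps_per_epoch=math.ceil(df_train.shape[0] / batch_size),
          validation_data=valid_gen,
          validation_steps=math.ceil(df_valid.shape[0] / batch_size),
          epochs=10)  # model.fit_generator() in older Keras versions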
Example #5
def find_faces(data, model, show_boxes=False):
    global num_faces
    # Arrays to hold all cropped face images and their corresponding labels
    faces = []
    labels = []

    # Check that the model provided is one of the expected models
    assert model in ("MobileNetV2", "VGG16", "ResNet50")

    # Iterate through each image in the provided data
    for image in data:
        # Read the image using OpenCV
        bgr_image = cv2.imread(filename=data_directory_prefix + "images/" + image["name"])

        # For each face in the image (most images contain multiple faces) load the bounding box and label
        for bndbox, label in zip(image["bndboxes"], image["classes"]):
            # Obtain bounding box values
            xmin = bndbox[0]
            ymin = bndbox[1]
            xmax = bndbox[2]
            ymax = bndbox[3]

            # Many images have tiny labelled faces, exclude them here to reduce training time later
            limit = 30
            if (xmax - xmin) > limit and (ymax - ymin) > limit:
                # Crop the selected face out of the original image
                face_image = bgr_image[ymin:ymax, xmin:xmax]

                # Resize the face image with OpenCV to the selected image size
                face_image = cv2.resize(src=face_image, dsize=(IMG_SIZE, IMG_SIZE))

                if show_boxes:
                    cv2.imshow("Cropped", face_image)
                    cv2.waitKey(0)

                # Convert the cropped image to an array
                face_image = img_to_array(img=face_image)

                # Preprocess the image according to the selected model to be used
                if model == "ResNet50":
                    face_image = resnet50_preprocess_input(face_image)
                elif model == "VGG16":
                    face_image = vgg16_preprocess_input(face_image)
                elif model == "MobileNetV2":
                    face_image = mobilenet_preprocess_input(face_image)

                # Add the cropped face and its label to the corresponding arrays
                faces.append(face_image)
                labels.append(label)

            # Optionally show the bounding boxes around the selected faces on the original image
            if show_boxes:
                # Choose a color based on the class
                if label == 0:
                    color = (0, 255, 0)
                elif label == 1:
                    color = (0, 0, 255)
                else:
                    color = (0, 255, 255)

                # Draw a rectangle around the face
                bgr_image = cv2.rectangle(img=bgr_image,
                                          pt1=(xmin, ymin),
                                          pt2=(xmax, ymax),
                                          color=color,
                                          thickness=2)
        # Display the image
        if show_boxes:
            cv2.imshow("image", bgr_image)
            cv2.waitKey(0)

    # Convert the faces and labels arrays to np arrays
    faces = np.array(faces, dtype=np.float32)
    labels = np.array(labels)

    # Update and report the running count of cropped faces
    num_faces += len(faces)
    print(num_faces)
    # Return the new data arrays
    return faces, labels
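A short usage sketch; the parsed annotation list data and a later training step are assumed context, and sklearn's train_test_split is an assumption rather than part of the original code:

from sklearn.model_selection import train_test_split

# hypothetical: crop faces with VGG16 preprocessing, then hold out 20% for validation
faces, labels = find_faces(data, model="VGG16")
X_train, X_val, y_train, y_val = train_test_split(faces, labels, test_size=0.2,
                                                  random_state=42, stratify=labels)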
Example #6
def batch_generator_triplet(dataframe, nbr_classes, img_root_path, batch_size=16, mode='train',
                            return_label=True, img_width=224, img_height=224, shuffle=True, augment=False, model='vgg16'):
    N = dataframe.shape[0]
    if shuffle:
        # reset the index so that .loc[i] below follows the shuffled order
        dataframe = sk_shuffle(dataframe).reset_index(drop=True)
    batch_index = 0
    while True:
        current_index = (batch_index * batch_size) % N
        if N >= (current_index + batch_size):
            current_batch_size = batch_size
            batch_index += 1
        else:
            current_batch_size = N - current_index
            batch_index = 0
            if shuffle:
                dataframe = sk_shuffle(dataframe).reset_index(drop=True)

        X_anchor = np.zeros((current_batch_size, img_width, img_height, 3))
        X_positive = np.zeros((current_batch_size, img_width, img_height, 3))
        X_negative = np.zeros((current_batch_size, img_width, img_height, 3))
        Y_class = np.zeros((current_batch_size, nbr_classes))
        Y_pseudo_label = np.zeros((current_batch_size, 1))

        for i in range(current_index, current_index + current_batch_size):
            row = dataframe.loc[i, :]
            driver_id = row['subject']
            classname = row['classname']
            label = int(classname[-1])
            img_name = row['img']
            anchor_img_path = os.path.join(img_root_path, 'train', classname, img_name)
            anchor_img = load_img(anchor_img_path, img_width)

            if mode == 'train':
                same_driver_df = dataframe[dataframe['subject']==driver_id]
                classname_list = same_driver_df['classname'].unique().tolist()
                # find a positive sample: same subject and classname, but a different img name
                same_driver_same_class_df = same_driver_df[same_driver_df['classname']==classname]
                same_driver_same_class_diff_img_df = same_driver_same_class_df[same_driver_same_class_df['img']!=img_name]
                assert len(same_driver_same_class_diff_img_df) != 0, 'driver:{},classname:{},only has one img:{}'.format(driver_id,classname,img_name)
                positive_row = same_driver_same_class_diff_img_df.sample(1)
                same_driver_same_class_df = []
                same_driver_same_class_diff_img_df = []
                # find a negative sample: same subject but a different classname (hard negative)
                classname_list.remove(classname)
                assert classname_list != [], 'driver: {},only has one class: {}'.format(driver_id,classname)
                other_classname = random.choice(classname_list)
                hard_negative_df = same_driver_df[same_driver_df['classname']==other_classname]
                negative_row = hard_negative_df.sample(1)
                hard_negative_df = []
                same_driver_df = []

                positive_img_path = os.path.join(img_root_path, 'train', positive_row['classname'].values[0], positive_row['img'].values[0])
                negative_img_path = os.path.join(img_root_path, 'train', negative_row['classname'].values[0], negative_row['img'].values[0])
                positive_img = load_img(positive_img_path, img_width)
                negative_img = load_img(negative_img_path, img_width)

                X_anchor[i - current_index] = anchor_img
                X_positive[i - current_index] = positive_img
                X_negative[i - current_index] = negative_img
            else:
                # 'val' / 'feature_extraction' mode: reuse the anchor image for all three inputs
                X_anchor[i - current_index] = anchor_img
                X_positive[i - current_index] = anchor_img
                X_negative[i - current_index] = anchor_img
            
            if return_label:
                Y_class[i - current_index, label] = 1

        if augment:
            X_anchor = X_anchor.astype(np.uint8)
            X_anchor = seq.augment_images(X_anchor)
            X_positive = X_positive.astype(np.uint8)
            X_positive = seq.augment_images(X_positive)
            X_negative = X_negative.astype(np.uint8)
            X_negative = seq.augment_images(X_negative)

        X_anchor = X_anchor.astype(np.float16)
        X_positive = X_positive.astype(np.float16)
        X_negative = X_negative.astype(np.float16)

        if model == 'vgg16':
            X_anchor = vgg16_preprocess_input(X_anchor)
            X_positive = vgg16_preprocess_input(X_positive)
            X_negative = vgg16_preprocess_input(X_negative)
        elif model == 'inceptv3':
            X_anchor = incept3_preprocess_input(X_anchor)
            X_positive = incept3_preprocess_input(X_positive)
            X_negative = incept3_preprocess_input(X_negative)

        if return_label:
            yield ([X_anchor, X_positive, X_negative], [Y_class, Y_pseudo_label])
        else:
            if mode == 'feature_extraction':
                yield X_anchor
            else:
                yield [X_anchor, X_positive, X_negative]
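A hedged sketch of feeding the triplet generator to a three-input Keras model; triplet_model, df_train and the path are assumptions, and the model is expected to take [anchor, positive, negative] inputs with outputs matching (Y_class, Y_pseudo_label):

import math

batch_size = 16
triplet_gen = batch_generator_triplet(df_train, nbr_classes=10,
                                      img_root_path='data/imgs',  # hypothetical path
                                      batch_size=batch_size, mode='train',
                                      shuffle=True, augment=True)
triplet_model.fit(triplet_gen,
                  steps_per_epoch=math.ceil(df_train.shape[0] / batch_size),
                  epochs=10)  # fit_generator() in older Keras versions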
Example #7
def generate_train_data_triplet(dataframe, nbr_classes, img_root_path, shuffle=True, mode='train',
                        augment=False, img_width=224, img_height=224, model='vgg16'):
    N = dataframe.shape[0]
    if shuffle:
        # reset the index so positional assignment below follows the shuffled order
        dataframe = sk_shuffle(dataframe).reset_index(drop=True)
    X_anchor = np.zeros((N, img_width, img_height, 3))
    X_positive = np.zeros((N, img_width, img_height, 3))
    X_negative = np.zeros((N, img_width, img_height, 3))
    Y_train = np.zeros((N, nbr_classes))
    Y_pseudo_label = np.zeros((N, 1))
    for index, row in dataframe.iterrows():
        driver_id = row['subject']
        classname = row['classname']
        label = int(classname[-1])
        img_name = row['img']
        img_path = os.path.join(img_root_path, 'train', classname, img_name)

        img = load_img(img_path, img_width)

        if mode == 'train':
            same_driver_df = dataframe[dataframe['subject']==driver_id]
            classname_list = same_driver_df['classname'].unique().tolist()
            # find a positive sample: same subject and classname, but a different img name
            same_driver_same_class_df = same_driver_df[same_driver_df['classname']==classname]
            same_driver_same_class_diff_img_df = same_driver_same_class_df[same_driver_same_class_df['img']!=img_name]
            assert len(same_driver_same_class_diff_img_df) != 0, 'driver:{},classname:{},only has one img:{}'.format(driver_id,classname,img_name)
            positive_row = same_driver_same_class_diff_img_df.sample(1)
            same_driver_same_class_df = []
            same_driver_same_class_diff_img_df = []
            # find a negative sample: same subject but a different classname (hard negative)
            classname_list.remove(classname)
            assert classname_list != [], 'driver: {},only has one class: {}'.format(driver_id,classname)
            other_classname = random.choice(classname_list)
            hard_negative_df = same_driver_df[same_driver_df['classname']==other_classname]
            negative_row = hard_negative_df.sample(1)
            hard_negative_df = []
            same_driver_df = []

            positive_img_path = os.path.join(img_root_path, 'train', positive_row['classname'].values[0], positive_row['img'].values[0])
            negative_img_path = os.path.join(img_root_path, 'train', negative_row['classname'].values[0], negative_row['img'].values[0])
            positive_img = load_img(positive_img_path, img_width)
            negative_img = load_img(negative_img_path, img_width)
        else:
            # validation (or any non-train) mode: reuse the anchor as positive and negative
            positive_img = img
            negative_img = img
        
        X_anchor[index] = img
        X_positive[index] = positive_img
        X_negative[index] = negative_img
        Y_train[index, label] = 1

    X_anchor = X_anchor.astype(np.float16)
    X_positive = X_positive.astype(np.float16)
    X_negative = X_negative.astype(np.float16)
    if model == 'vgg16':
        X_anchor = vgg16_preprocess_input(X_anchor)
        X_positive = vgg16_preprocess_input(X_positive)
        X_negative = vgg16_preprocess_input(X_negative)
    elif model == 'inceptv3':
        X_anchor = incept3_preprocess_input(X_anchor)
        X_positive = incept3_preprocess_input(X_positive)
        X_negative = incept3_preprocess_input(X_negative)

    return ([X_anchor, X_positive, X_negative], [Y_train, Y_pseudo_label])
Example #8
def vgg16_preprocess_image(resized_image):
    preprocessed_image = vgg16_preprocess_input(resized_image)
    return preprocessed_image
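A small usage sketch; the image path is hypothetical and the resize mirrors the 224x224 input used throughout these examples:

from keras.preprocessing.image import load_img, img_to_array
import numpy as np

img = load_img('data/imgs/example.jpg', target_size=(224, 224))  # hypothetical file
img = np.expand_dims(img_to_array(img), axis=0)                  # shape (1, 224, 224, 3)
img = vgg16_preprocess_image(img)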