import numpy as np
from PIL import Image, ImageEnhance
from keras import backend as K

# N_SAMPLES, SIDE_VGG, preprocess_vgg_resnet and preprocess_inception are
# defined elsewhere in the project this snippet was taken from.


def process_batch(images, models):
    # Test-time augmentation: run every model over N_SAMPLES randomly
    # augmented copies of the batch and average all predictions.
    def zoom(x, z):
        # Zoom a square PIL image by factor z while keeping its original size.
        original_size = x.size[0]
        target_size = int(x.size[0] * z)
        x = x.resize((target_size, target_size), resample=Image.BILINEAR)
        step = abs(target_size - original_size) // 2
        if z > 1:
            return x.crop(
                (step, step, step + original_size, step + original_size))
        elif z < 1:
            new_image = Image.fromarray(np.zeros(
                (original_size, original_size, 3), dtype=np.uint8),
                                        mode="RGB")
            new_image.paste(x, (step, step))
            return new_image
        else:
            return x

    # Sample random augmentation parameters, one set per TTA pass.
    angles = np.random.random(N_SAMPLES) * 1  # rotation angle in [0, 1) degrees
    zooms = np.random.random(N_SAMPLES) * 0.1 + 1  # zoom factor in [1.0, 1.1)
    contrast = np.random.uniform(0.7, 1.3, size=N_SAMPLES)
    color = np.random.uniform(0.7, 1.3, N_SAMPLES)
    sharpness = np.random.uniform(0.6, 1.0, N_SAMPLES)
    preds = []
    for angle, zm, cont, colr, shrp in zip(angles, zooms, contrast, color,
                                           sharpness):
        batch = images.copy()
        batch = [zoom(x, zm) for x in batch]
        batch = [
            x.rotate(angle, resample=Image.BILINEAR, expand=False)
            for x in batch
        ]
        batch = [ImageEnhance.Contrast(x).enhance(cont) for x in batch]
        batch = [ImageEnhance.Color(x).enhance(colr) for x in batch]
        batch = [ImageEnhance.Sharpness(x).enhance(shrp) for x in batch]

        for model in models:
            batch_ = batch.copy()
            if model.name in {"resnet50", "vgg16"}:
                batch_ = [
                    x.resize((SIDE_VGG, SIDE_VGG), resample=Image.BILINEAR)
                    for x in batch_
                ]
            batch_ = np.stack([np.array(x) for x in batch_]).astype(K.floatx())
            if model.name in {"resnet50", "vgg16"}:
                batch_ = preprocess_vgg_resnet(batch_)
            else:
                batch_ = preprocess_inception(batch_)
            if model.name == "inception_v3":
                # adv_inception has an extra background class at index 0,
                # which is dropped to align with the other models' outputs.
                preds.append(model.predict(batch_)[:, 1:])
            else:
                preds.append(model.predict(batch_))
    preds = np.stack(preds).mean(axis=0)
    labels = np.argmax(preds, axis=1) + 1  # add background class
    return labels
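
# A usage sketch for process_batch, assuming the rest of the project is not
# available: the image paths, the N_SAMPLES / SIDE_VGG values and the
# preprocess_* aliases below are illustrative assumptions, not original code.
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras.applications.imagenet_utils import preprocess_input as preprocess_vgg_resnet
from keras.applications.inception_v3 import preprocess_input as preprocess_inception

N_SAMPLES = 4   # number of augmented TTA passes (assumed value)
SIDE_VGG = 224  # input side length expected by VGG16 / ResNet50 (assumed value)

# Keras names these models "resnet50" and "vgg16", which matches the
# model.name dispatch inside process_batch.
models = [ResNet50(weights="imagenet"), VGG16(weights="imagenet")]
images = [Image.open(p).convert("RGB").resize((299, 299))
          for p in ("cat.jpg", "dog.jpg")]  # placeholder file names

labels = process_batch(images, models)  # one label per input image
print(labels)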
Example #2
import os
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
# preprocess_inception is assumed to be the InceptionV3 preprocess_input, as
# in the other examples on this page.
from keras.applications.inception_v3 import preprocess_input as preprocess_inception

# ROOT_DIR and classes (the list of class-folder names) are defined earlier in
# the original script; X, y and the running class index c are initialised here
# for completeness.
X, y = [], []
c = 0
for filename in classes:
    # Load up to 100 images from each class folder.
    path = ROOT_DIR + 'fruit-recognition_reduced/' + filename
    list_images = os.listdir(path)[:100]
    print(f'{filename}: {len(list_images)}')
    for img in list_images:
        img_data = load_img(path + '/' + img, target_size=(224, 224))
        img_data = img_to_array(img_data)
        # Other preprocessing variants that were tried:
        #   img_data = img_data.astype(np.float32) / 255.0                       # scale to [0, 1]
        #   img_data = img_data.astype(np.float32) - [123.68, 116.779, 103.939]  # mean subtraction
        #   img_data = preprocess_vgg(img_data.astype(np.float32))
        #   img_data = preprocess_resnet(img_data.astype(np.float32))
        #   img_data = gaussian(img_data, sigma=1, multichannel=True)            # denoise
        img_data = preprocess_inception(img_data.astype(np.float32))
        # Store the loaded image and its class index.
        X.append(img_data)
        y.append(c)
    c += 1
X = np.array(X)
y = np.array(y)
print(set(y))

# Compute class weights
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(y), y=y)
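
# Keras expects class_weight as a dict mapping class index to weight. The
# classifier below is only a minimal placeholder to illustrate the call -- the
# real model, epochs and batch size are not shown in the original snippet.
from keras.models import Sequential
from keras.layers import Flatten, Dense

model = Sequential([Flatten(input_shape=(224, 224, 3)),
                    Dense(len(np.unique(y)), activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=10, batch_size=32,
          class_weight=dict(enumerate(class_weights)),
          validation_split=0.2)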
Example #3
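# Example #3 is the predict method of a model-wrapper class whose surrounding
# code is not shown. The prefix below is a sketch added so the snippet reads as
# a complete class -- the class name, constructor and the stock InceptionV3
# backbone are assumptions, not the original code.
from keras import backend as K
from keras.applications.inception_v3 import InceptionV3, preprocess_input as preprocess_inception


class InceptionPredictor:
    def __init__(self, batch_size=32):
        self.model = InceptionV3(weights="imagenet")  # assumed backbone
        self.batch_size = batch_size
        self.data_format = K.image_data_format()
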
    def predict(self, x):
        # Accepts NHWC input; transpose to NCHW if the backend is channels_first.
        if self.data_format == "channels_first":
            x = x.transpose(0, 3, 1, 2)
        x = preprocess_inception(x.astype(K.floatx()))
        return self.model.predict(x, batch_size=self.batch_size)
Example #4
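# Example #4 starts mid-constructor. The imports and constructor prefix below
# are a sketch added so the snippet reads as a complete file -- the class name,
# __init__ signature, the pretrained VGG16 backbone and the preprocess_vgg /
# preprocess_inception aliases are assumptions, not the original code.
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Concatenate
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input as preprocess_vgg
from keras.applications.inception_v3 import (InceptionV3, decode_predictions,
                                             preprocess_input as preprocess_inception)


class VGG16FeatureExtractor:
    def __init__(self, batch_size=32):
        # Pool four intermediate VGG16 blocks and concatenate them into one
        # 128 + 256 + 512 + 512 = 1408-dimensional descriptor.
        model = VGG16(weights="imagenet", include_top=False)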
        x2 = GlobalAveragePooling2D()(model.get_layer("block2_conv2").output)  # 128
        x3 = GlobalAveragePooling2D()(model.get_layer("block3_conv3").output)  # 256
        x4 = GlobalAveragePooling2D()(model.get_layer("block4_conv3").output)  # 512
        x5 = GlobalAveragePooling2D()(model.get_layer("block5_conv3").output)  # 512
        x = Concatenate()([x2, x3, x4, x5])
        self.model = Model(inputs=model.input, outputs=x)
        self.batch_size = batch_size
        self.data_format = K.image_data_format()

    def predict(self, x):
        if self.data_format == "channels_first":
            x = x.transpose(0, 3, 1, 2)
        x = preprocess_vgg(x.astype(K.floatx()))
        return self.model.predict(x, batch_size=self.batch_size)


if __name__ == "__main__":
    img_path = "data/elephant.jpg"
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_inception(x)

    model = InceptionV3()
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print("Predicted:", decode_predictions(preds, top=3)[0])
    # Predicted: [('n02504013', 'Indian_elephant', 0.78864819), ('n01871265', 'tusker', 0.029346621), ('n02504458', 'African_elephant', 0.01768155)]
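
    # The feature extractor defined above can be exercised similarly (a sketch;
    # VGG16FeatureExtractor is the class name assumed in the prefix added to
    # this example).
    raw = image.img_to_array(image.load_img(img_path, target_size=(224, 224)))
    extractor = VGG16FeatureExtractor(batch_size=16)
    features = extractor.predict(np.expand_dims(raw, axis=0))
    print("Feature shape:", features.shape)  # expected: (1, 1408)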