Example #1
from keras.preprocessing.image import ImageDataGenerator
import numpy as np


def tta(image, model):
    # Test-time augmentation: predict on the original image plus three
    # augmented copies (horizontal flip, vertical flip, 15-degree rotation)
    # and aggregate the class scores.
    datagen = ImageDataGenerator()
    all_images = np.expand_dims(image, 0)
    hori_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_horizontal": True}), axis=0)
    vert_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_vertical": True}), axis=0)
    rotated_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"theta": 15}), axis=0)
    all_images = np.append(all_images, hori_image, axis=0)
    all_images = np.append(all_images, vert_image, axis=0)
    all_images = np.append(all_images, rotated_image, axis=0)
    prediction = model.predict(all_images)
    # Sum the per-image class scores and return the winning class index.
    prediction = np.sum(prediction, axis=0)
    return np.argmax(prediction)
Example #2
from keras.preprocessing.image import ImageDataGenerator
import numpy as np


def tta(image, model, model_output='regression'):
    # Test-time augmentation for either a regression or a classification model.
    datagen = ImageDataGenerator()
    all_images = np.expand_dims(image, 0)
    hori_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_horizontal": True}),
                                axis=0)
    vert_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_vertical": True}), axis=0)
    rotated_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"theta": 15}), axis=0)
    all_images = np.append(all_images, hori_image, axis=0)
    all_images = np.append(all_images, vert_image, axis=0)
    all_images = np.append(all_images, rotated_image, axis=0)
    prediction = model.predict(all_images)
    if model_output == 'regression':
        # Average the predicted values over the augmented copies.
        return np.mean(prediction)
    else:
        # Sum the class scores over the augmented copies and pick the best class.
        prediction = np.sum(prediction, axis=0)
        return np.argmax(prediction)
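The two variants above differ only in how the per-copy predictions are aggregated. A minimal usage sketch is shown below; the names `model`, `x_test`, and `y_test` are assumptions for illustration and are not part of the original example.

# Hypothetical usage: `model` is a trained Keras classifier, and `x_test`,
# `y_test` are arrays of images and integer labels (names assumed here).
correct = 0
for image, label in zip(x_test, y_test):
    if tta(image, model, model_output='classification') == label:
        correct += 1
print("TTA accuracy:", correct / len(x_test))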
Example #3
import cv2
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical


# Method of a video batch generator class (likely a keras.utils.Sequence
# subclass; the rest of the class is not shown in this example).
def __getitem__(self, idx):
    # Build one batch of video clips: each item is `self.fpv` consecutive
    # frames read from disk, optionally augmented with a single random
    # transform applied consistently to every frame of the clip.
    batch_x = []
    batch_y = []
    video_idx = 0
    _iter = 0
    while _iter < self.v:
        _iter += 1
        if self.frame_counter >= self.dataset_size:
            # All frames consumed: reset the epoch state and retry this slot.
            self.on_epoch_end()
            print('*', end=",")
            _iter -= 1
            continue
        frames2read = self.x[str(
            self.video_iterator)][self.frame_iterator:self.frame_iterator +
                                  self.fpv]
        temp_frames = []
        if self.data_augmentation:
            img_gen = ImageDataGenerator(
                rotation_range=30,
                samplewise_center=True,
                samplewise_std_normalization=True,
                # width_shift_range=0.2,
                # height_shift_range=0.2,
                # rescale=1./255,
                brightness_range=[0.7, 1.0],
                channel_shift_range=50.0,
                # shear_range=0.1,
                zoom_range=0.2,
                horizontal_flip=True,
                # vertical_flip=True,
                fill_mode='nearest')
            # Draw one random transform and reuse it for every frame in the clip.
            transform_param = img_gen.get_random_transform(
                img_shape=(self.image_size, self.image_size, 3), seed=None)
        if len(frames2read) >= self.fpv:
            for frame in frames2read:
                _image = cv2.imread(frame)
                _image = cv2.cvtColor(_image,
                                      cv2.COLOR_BGR2RGB).astype('float64')
                _image = cv2.resize(_image,
                                    (self.image_size, self.image_size))
                if self.data_augmentation:
                    _image = img_gen.apply_transform(
                        _image, transform_param)
                _image /= 255
                temp_frames.append(np.asarray(_image))
            batch_x.append(temp_frames)
            batch_y.append(self.y[self.video_iterator])
            self.pre_classes.append(self.y[self.video_iterator])
        else:
            # Not enough frames left for a full clip; skip and retry this slot.
            _iter -= 1
        self.video_iterator += 1
        self.frame_counter += len(frames2read)
        if self.video_iterator % len(self.x) == 0:
            self.video_iterator = 0
            self.frame_iterator += len(frames2read)
        video_idx += 1
    if self.video_iterator % len(self.x) == 0:
        self.video_iterator = 0
    if len(batch_y) > 0:
        # One-hot encode the labels and shuffle the batch during training.
        batch_y = to_categorical(batch_y, 2)
        batch_x = np.array(batch_x)
        if self.training:
            indices = np.arange(batch_x.shape[0])
            np.random.shuffle(indices)
            batch_x = batch_x[indices]
            batch_y = batch_y[indices]
    return (batch_x, batch_y)
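A generator with a `__getitem__` like the one above is typically passed directly to `model.fit`. A minimal sketch under that assumption follows; the `VideoFrameGenerator` class name and its constructor arguments are hypothetical and only illustrate how such a Sequence might be wired up.

# Hypothetical usage, assuming the method above belongs to a
# keras.utils.Sequence subclass named VideoFrameGenerator (name assumed).
train_gen = VideoFrameGenerator(train_videos, train_labels,
                                data_augmentation=True, training=True)
val_gen = VideoFrameGenerator(val_videos, val_labels,
                              data_augmentation=False, training=False)
model.fit(train_gen, validation_data=val_gen, epochs=10)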