Example #1
class ClassificationSampleInference(Task, MixinMeta):
    task_type = 'classification'

    input_images = Option(name='Predicting images', type='uploader')
    output_activation = Option(name='activation', type='collection', default='None', collection=['None', 'sigmoid', 'softmax'])
    transform = DataflowModule(name='Transform', component_types=['Dataflow'], multiple=True, required=True)
    model = BasicModule(name='Model', component_types=['Model'])

    def __call__(self):
        input_images = [cv2.imread(input_image) for input_image in self.input_images]
        inputs = [self.transform([input_image])[0] for input_image in input_images]

        transform = get_transform_func(inputs[0])

        self.model.eval()
        logits = [self.model(transform(input)).squeeze() for input in inputs]

        if self.output_activation == 'softmax':
            outputs = [F.softmax(logit, dim=0) for logit in logits]
        elif self.output_activation == 'sigmoid':
            outputs = [torch.sigmoid(logit) for logit in logits]
        else:
            outputs = logits

        df = pd.DataFrame(columns=['Image Name', 'Image Preview (ImageBase64)', *[f'Class {klass} score' for klass in range(len(outputs[0]))]])
        for i, image_path in enumerate(self.input_images):
            image_name = image_path.split('/')[-1]
            base64encode = image_base64(image_path)
            row_data = [image_name, base64encode]
            for klass, klass_score in enumerate(outputs[i]):
                row_data.append(f'{klass_score.item():.4f}')
            df.loc[i] = row_data
        df.to_csv('./output.csv', index=False)
        self.env.rpc.add_file('./output.csv')
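
image_base64 is assumed to be a shared helper that turns an image file into a base64 string for the preview column. A hypothetical sketch of such a helper (not necessarily the project's actual implementation):

import base64

def image_base64(image_path):
    # Read the raw file bytes and return them as a base64-encoded string
    with open(image_path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')
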
Example #2
class SegmentationModel(Model):

    encoder_name = Option(default='resnet34',
                          type='collection',
                          collection=encoder_name_collection)
    encoder_weights = Option(default='imagenet',
                             type='collection',
                             collection=encoder_weights_collection)
    num_classes = Option(default=2, type='number', help='number of mask classes')
    model_architecture = Option(default='Unet',
                                type='collection',
                                collection=architecture_collection)

    def create_model(self):
        kwargs = {
            'encoder_name': self.encoder_name,
            'encoder_weights': self.encoder_weights,
            'classes': self.num_classes
        }
        if self.model_architecture == 'Unet':
            model = smp.Unet(**kwargs)
        elif self.model_architecture == 'FPN':
            model = smp.FPN(**kwargs)
        elif self.model_architecture == 'Linknet':
            model = smp.Linknet(**kwargs)
        elif self.model_architecture == 'PSPNet':
            model = smp.PSPNet(**kwargs)

        return model
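
For reference, with the default options the dispatch above reduces to a direct segmentation_models_pytorch call. A minimal sketch, assuming smp is the segmentation_models_pytorch package (as the smp.* calls above imply):

import segmentation_models_pytorch as smp

# The same keyword arguments that create_model() assembles from the options
model = smp.Unet(encoder_name='resnet34', encoder_weights='imagenet', classes=2)
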
Example #3
class TrainDataset(Dataset):
    """This is a segmentation datasettrain_test_split preparing data from annotations and data directory
    """
    #fold = Option(help='Absolute fold path to the dataset', required=True, default="~/.minetorch_dataset/torchvision_mnist")
    annotations = Option(type='uploader', help='You may upload a csv file with columns=["image_names", "class_1_labels", "class_2_labels", ..., "class_n_labels"]')
    upload = Option(help='Upload your training images', type='uploader', required=True)
    batch_size = Option(name='Batch Size', type='number')
    split_ratio = Option(name='Split Ratio', type='number', default=0.2, help='Split your datasets into trainset and valset')
    #k_fold = Option(name='K folds', type='number', default=1, help='Number of folds to split from original datasets', required=False)

    def __call__(self):
        #assert isinstance(k_fold, int) and kfold > 0, 'K fold must be an interger'
        #assert isinstance(batch_size, int), 'Batch Size must be an interger'
        #assert 0 <= split_ratio <= 1, 'Split Ratio must be between 0 to 1'
        
        fold = os.path.join(os.getcwd(), self.upload[0].split('.zip')[0].split('./')[1])
        with ZipFile(self.upload[0], 'r') as zip_object:
            zip_object.extractall(os.path.split(fold)[0])
        df = pd.read_csv(self.annotations[0])
        #train_dfs, val_dfs = Kfold(df, n_splits=5)  # TO DO: kfold for datasets
        train_df, val_df = train_test_split(df, test_size=self.split_ratio)

        if self.__task__.task_type == 'classification':
            dataloader_train = (
                DataLoader(dataset=ClassificationDataset(annotation=train_df, data_folder=fold, transforms=self.train_transform), batch_size=self.batch_size, pin_memory=True),
                DataLoader(dataset=ClassificationDataset(annotation=val_df, data_folder=fold, transforms=self.val_transform), batch_size=self.batch_size, pin_memory=True)
            )
        elif self.__task__.task_type == 'segmentation':
            dataloader_train = (
                DataLoader(dataset=SegmentationDataset(annotation=train_df, data_folder=fold, transforms=self.train_transform), batch_size=self.batch_size, pin_memory=True),
                DataLoader(dataset=SegmentationDataset(annotation=val_df, data_folder=fold, transforms=self.val_transform), batch_size=self.batch_size, pin_memory=True)
            )
        return dataloader_train
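
The annotations CSV consumed above is expected to have one row per image, following the column pattern described in the annotations help text. A minimal illustration (file name and label values are made up for the example):

import pandas as pd

# columns follow the pattern ["image_names", "class_1_labels", ..., "class_n_labels"]
annotations = pd.DataFrame({
    'image_names': ['img_001.png', 'img_002.png'],
    'class_1_labels': [0, 1],
    'class_2_labels': [1, 0],
})
annotations.to_csv('annotations.csv', index=False)
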
Example #4
class SegmentationSampleInference(Task, MixinMeta):

    input_images = Option(name='Predicting images', type='uploader')
    output_activation = Option(name='activation',
                               type='collection',
                               default='None',
                               collection=['None', 'sigmoid', 'softmax'])
    transform = DataflowModule(name='Transform',
                               component_types=['Dataflow'],
                               multiple=True,
                               required=True)
    model = BasicModule(name='Model', component_types=['Model'])
    pixel_threshold = Option(name='Pixel Threshold',
                             type='number',
                             default=0.5)

    def mask2rle(self, image_logits, pixel_threshold):
        img = image_logits > pixel_threshold
        pixels = img.T.flatten()
        pixels = np.concatenate([[0], pixels, [0]])
        runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
        runs[1::2] -= runs[::2]
        return ' '.join(str(x) for x in runs)

    def __call__(self):
        input_images = [
            cv2.imread(input_image) for input_image in self.input_images
        ]
        inputs = [
            self.transform([input_image])[0] for input_image in input_images
        ]
        transform = get_transform_func(inputs[0])
        self.model.eval()
        logits = [self.model(transform(input)).squeeze() for input in inputs]

        if self.output_activation == 'softmax':
            outputs = [F.softmax(logit, dim=0) for logit in logits]
        elif self.output_activation == 'sigmoid':
            outputs = [torch.sigmoid(logit) for logit in logits]
        else:
            outputs = logits

        df = pd.DataFrame(columns=[
            'Image Name',
            *[f'Class {klass} score' for klass in range(len(outputs[0]))]
        ])
        for i, image_path in enumerate(self.input_images):
            image_name = image_path.split('/')[-1]
            row_data = [image_name]
            for klass, output in enumerate(outputs[i]):
                row_data.append(self.mask2rle(output, self.pixel_threshold))
            df.loc[i] = row_data
        df.to_csv('./output.csv', index=False)
        self.env.rpc.add_file('./output.csv')
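
mask2rle above produces a column-major run-length encoding: the binarized mask is transposed, flattened, and each run of foreground pixels is written as "<1-based start> <length>". A standalone sketch of the same logic with a hand-checkable input:

import numpy as np

def mask2rle(mask, pixel_threshold=0.5):
    # Same steps as the method above, applied to a plain NumPy array
    img = mask > pixel_threshold
    pixels = img.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

mask = np.zeros((3, 3))
mask[1, :] = 1           # middle row is foreground
print(mask2rle(mask))    # -> '2 1 5 1 8 1': three length-1 runs in column-major order
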
Example #5
class ClassificationModel(Model):

    encoder_name = Option(default='efficientnet-b0',
                          type='collection',
                          collection=efficientnet_collection)
    num_classes = Option(default=2, type='number', help='number of output classes')

    def create_model(self):
        kwargs = {
            'model_name': self.encoder_name,
            'num_classes': self.num_classes,
        }
        model = EfficientNet.from_pretrained(**kwargs)

        return model
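
With the defaults, create_model here amounts to a single efficientnet_pytorch call. A minimal sketch, assuming EfficientNet is imported from the efficientnet_pytorch package (as from_pretrained above implies):

from efficientnet_pytorch import EfficientNet

# Loads ImageNet weights and replaces the classifier head with num_classes outputs
model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=2)
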
Example #6
class RAdam(Optimizer):

    learning_rate = Option(name='Learning Rate', type='number', default=0.001)

    def __call__(self):
        return TorchRAdam(self.model.parameters(),
                          lr=float(self.learning_rate))
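
TorchRAdam is presumably an imported external RAdam implementation; recent PyTorch releases ship an equivalent optimizer as torch.optim.RAdam. A self-contained sketch of the same call pattern (the Linear model is only a placeholder):

import torch

model = torch.nn.Linear(10, 2)                        # placeholder model
optimizer = torch.optim.RAdam(model.parameters(), lr=0.001)
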
Example #7
class MNISTDataset(Dataset):
    """This is a simple wrap for torchvision.datasets.MNIST
    """
    fold = Option(help='Absolute folder path to the dataset',
                  required=True,
                  default="~/.minetorch_dataset/torchvision_mnist")

    def __call__(self):
        train_loader = DataLoader(dataset=DelegateMNIST(root=self.fold,
                                                        download=True,
                                                        train=True,
                                                        transform=self.train_transform),
                                  batch_size=128)
        val_loader = DataLoader(dataset=DelegateMNIST(root=self.fold,
                                                      download=True,
                                                      train=False,
                                                      transform=self.val_transform),
                                batch_size=128)
        return train_loader, val_loader
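
Since DelegateMNIST is described as a wrapper around torchvision.datasets.MNIST, the train loader is roughly equivalent to the plain torchvision version below (the ToTensor transform is a placeholder for self.train_transform):

import os
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

root = os.path.expanduser('~/.minetorch_dataset/torchvision_mnist')
train_loader = DataLoader(
    datasets.MNIST(root=root, train=True, download=True, transform=transforms.ToTensor()),
    batch_size=128)
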
Example #8
class Visualize(Task, MixinMeta):

    input_images = Option(name='Predicting images', type='uploader')
    output_activation = Option(name='activation',
                               type='collection',
                               default='None',
                               collection=['None', 'sigmoid', 'softmax'])
    transform = DataflowModule(name='Transform',
                               component_types=['Dataflow'],
                               multiple=True,
                               required=True)
    model = BasicModule(name='Model', component_types=['Model'])
    pixel_threshold = Option(name='Pixel Threshold',
                             type='number',
                             default=0.5)

    def __call__(self):
        input_images = [
            cv2.imread(input_image) for input_image in self.input_images
        ]
        inputs = [
            self.transform([input_image])[0] for input_image in input_images
        ]

        shape = input_images[0].shape

        transform = get_transform_func(inputs[0])

        self.model.eval()
        logits = [self.model(transform(input)).squeeze() for input in inputs]

        transform_shape = logits[0].shape

        if self.output_activation == 'softmax':
            outputs = [F.softmax(logit, dim=0) for logit in logits]
        elif self.output_activation == 'sigmoid':
            outputs = [torch.sigmoid(logit) for logit in logits]
        else:
            outputs = logits

        df = pd.DataFrame(columns=[
            'Image Name', 'Image Preview (ImageBase64)',
            'Image With Masks (ImageBase64)'
        ])
        for idx, image_path in enumerate(self.input_images):
            # DISPLAY IMAGES WITH DEFECTS
            image_name = image_path.split('/')[-1]
            plt.figure(figsize=(0.01 * shape[1], 0.01 * shape[0]))
            img = Image.open(image_path)
            img_array = np.array(img)
            patches = []

            for classes in range(len(outputs[idx])):
                try:
                    msk = cv2.threshold(outputs[idx][classes].detach().numpy(),
                                        self.pixel_threshold, 1,
                                        cv2.THRESH_BINARY)[1]

                    if msk.shape != shape[0:2]:
                        msk = cv2.resize(msk,
                                         dsize=(shape[1], shape[0]),
                                         interpolation=cv2.INTER_LINEAR)
                except Exception:
                    msk = np.zeros(shape[0:2])
                msk = mask2contour(msk, width=5)

                img_array[msk == 1, 0] = colors[classes][0]
                img_array[msk == 1, 1] = colors[classes][1]
                img_array[msk == 1, 2] = colors[classes][2]
                patches.append(
                    mpatches.Patch(color=matplotlib.colors.to_rgba(
                        np.array(colors[classes]) / 255),
                                   label=f'Class {classes+1}'))

            plt.legend(handles=patches)
            plt.axis('off')
            plt.imshow(img_array)
            #plt.subplots_adjust(wspace=0.001)
            plt.savefig(os.path.join('/tmp', image_name),
                        bbox_inches='tight',
                        pad_inches=0.0)
            self.env.rpc.add_file(os.path.join('/tmp', image_name))

            base64encode_origin = image_base64(image_path)
            base64encode_processed = image_base64(
                os.path.join('/tmp', image_name))

            row_data = [
                image_name, base64encode_origin, base64encode_processed
            ]
            df.loc[idx] = row_data
        df.to_csv('./output.csv', index=False)
        self.env.rpc.add_file('./output.csv')
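
mask2contour and colors are assumed to come from a shared helper module: colors as a per-class RGB list, and mask2contour as a function that keeps only a thin band around the boundary of a binary mask. A hypothetical sketch of such a helper (not necessarily the project's actual implementation):

import cv2
import numpy as np

def mask2contour(mask, width=5):
    # Keep a `width`-pixel band around the mask boundary: mask minus its erosion
    mask = mask.astype(np.uint8)
    kernel = np.ones((width, width), np.uint8)
    return mask - cv2.erode(mask, kernel, iterations=1)
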