Code Example #1
def create_inference_model(checkpoint: str = None, model: str = 'resnet34', path: str = '.'):
    # Map the architecture name to the corresponding constructor
    if model == 'resnet34':
        model = resnet34
    elif model == 'resnet18':
        model = resnet18
    elif model == 'mobilenet_v2':
        model = mobilenet_v2
    else:
        raise ValueError(f"Unsupported model: {model!r}")

    # Create an inference model instance and load the requested checkpoint
    inf_db = DataBlock(blocks=[ImageBlock, CategoryBlock],
                       get_x=ItemGetter(0),
                       get_y=ItemGetter(1))

    dummy_img = PILImage.create(np.zeros((415, 415, 3), dtype=np.uint8))
    source = [(dummy_img, False), (dummy_img, True)]

    inf_dls = inf_db.dataloaders(source)

    if model == mobilenet_v2:
        learner = cnn_learner(inf_dls,
                              model,
                              cut=-1,
                              splitter=_mobilenetv2_split,
                              pretrained=False)
    else:
        learner = cnn_learner(inf_dls, model, pretrained=False)
    learner.path = Path(path)

    if checkpoint is not None:
        learner.load(checkpoint, with_opt=False, device='cpu')

    return learner
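A hedged usage sketch for the factory above; the checkpoint name and test image are placeholders, not part of the original. Because the DataBlock's getters index into (image, label) tuples, predict is given the same tuple shape:

learn = create_inference_model(checkpoint='stage-1', model='resnet18')
pred, pred_idx, probs = learn.predict((PILImage.create('test.jpg'), False))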
Code Example #2
File: models.py Project: schwobr/siim-pneumothorax
def multi_task_unet_learner(*args, log_vars=None, **kwargs):
    """
    Creates a learner suited for classification+segmentation multii-task
    learning problem

    args: positional arguments for cnn_learner and unet_learner
    kwargs: keayword arguments for cnn_learner and unet_learner

    return: learner that contains MultiTaskModel
    """
    unet_learn = unet_learner(*args, **kwargs)
    sfs_idxs = unet_learn.model.sfs_idxs
    cnn_learn = cnn_learner(*args, **kwargs)
    base = unet_learn.model[0]
    unet_head = unet_learn.model[1:]
    hooks = hook_outputs([base[i] for i in sfs_idxs])
    for block, hook in zip(unet_head[3:7], hooks):
        block.hook = hook
    heads = [cnn_learn.model[1:], unet_head]
    unet_learn.model = MultiTaskModel(base, heads, log_vars=log_vars).to(
        unet_learn.data.device)
    lg = unet_learn.layer_groups
    lg[2] = nn.Sequential(*list(lg[2]), *flatten_model(heads[0]),
                          unet_learn.model.log_vars)
    unet_learn.layer_groups = lg
    unet_learn.create_opt(slice(1e-3))
    return unet_learn
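A hedged sketch of how this factory might be called, following unet_learner's fastai v1 signature; `db` (a DataBunch) and the training schedule are assumptions:

learn = multi_task_unet_learner(db, models.resnet34, metrics=[accuracy])
learn.fit_one_cycle(5, slice(1e-3))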
Code Example #3
def load_model(data_bunch, model_type, model_name):
    """
    Function to create and load pretrained weights of convolutional learner
    """
    learn = cnn_learner(data_bunch, model_type, pretrained=False)
    learn.load(model_name)
    return learn
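A hedged one-liner showing the intended call; the checkpoint name is a placeholder:

learn = load_model(data_bunch, models.resnet34, 'stage-1')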
Code Example #4
def train_model() -> Learner:
    dls = create_dataloaders()
    learner = cnn_learner(dls, resnet34, metrics=[error_rate, accuracy])
    # Swap in a fresh set of dataloaders; the learner keeps the head it
    # built from the first set.
    new_dls = create_dataloaders()
    learner.dls = new_dls
    with learner.no_bar():
        learner.fine_tune(1)
    return learner
Code Example #5
def run():
    models = {
        'resnet34': mod.resnet34,
        'resnet50': mod.resnet50,
        'resnet101': mod.resnet101,
        'resnet152': mod.resnet152
    }

    db = load_data_classif(cfg.LABELS,
                           bs=8 * cfg.BATCH_SIZE,
                           train_size=cfg.TRAIN_SIZE)

    learner = cnn_learner(db,
                          models[cfg.MODEL],
                          pretrained=cfg.PRETRAINED,
                          wd=cfg.WD,
                          model_dir=cfg.MODELS_PATH,
                          metrics=[accuracy])

    save_name = f'clf_{cfg.MODEL}'
    save_name = f'{save_name}_{getNextFilePath(cfg.MODELS_PATH, save_name)}'

    learner = learner.clip_grad(1.)
    set_BN_momentum(learner.model)

    learner.fit_one_cycle(cfg.EPOCHS,
                          slice(cfg.LR),
                          callbacks=[
                              SaveModelCallback(learner,
                                                monitor='valid_loss',
                                                name=save_name),
                              AccumulateStep(learner, 64 // cfg.BATCH_SIZE),
                              LearnerTensorboardWriter(learner,
                                                       cfg.LOG,
                                                       save_name,
                                                       loss_iters=10,
                                                       hist_iters=100,
                                                       stats_iters=10)
                          ])

    learner.unfreeze()
    uf_save_name = 'uf_' + save_name

    learner.fit_one_cycle(cfg.EPOCHS,
                          slice(cfg.LR / 10),
                          callbacks=[
                              SaveModelCallback(learner,
                                                monitor='valid_loss',
                                                name=uf_save_name),
                              AccumulateStep(learner, 64 // cfg.BATCH_SIZE),
                              LearnerTensorboardWriter(learner,
                                                       cfg.LOG,
                                                       uf_save_name,
                                                       loss_iters=10,
                                                       hist_iters=100,
                                                       stats_iters=10)
                          ])
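Note the arithmetic behind the accumulation setup: each loader batch holds 8 * cfg.BATCH_SIZE samples and AccumulateStep steps the optimizer every 64 // cfg.BATCH_SIZE batches, so (assuming cfg.BATCH_SIZE divides 64) each weight update effectively sees 8 * 64 = 512 samples.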
Code Example #6
def train(imgs_root, model_dir):

    my_tfms = get_transforms()
    print(f"Transforms on Train set: {my_tfms[0]}")
    print(f"Transforms on Validation set: {my_tfms[1]}")

    np.random.seed(42)
    ### '/home/user/tmp/pycharm_project_310/1_detectron2/Furniture-Style-Classifier-master/Data'

    # imgs_root = '/home/user/tmp/pycharm_project_310/1_detectron2/Furniture-Style-Classifier-master/MyData'

    data = ImageDataBunch.from_folder(path=Path(imgs_root),
                                      train=".",
                                      valid_pct=0.2,
                                      ds_tfms=my_tfms,
                                      size=224,
                                      num_workers=4,
                                      bs=64).normalize(imagenet_stats)

    print(f"BatchSize: {data.batch_size}")
    print(f"Train Dataset size: {len(data.train_ds)}")
    print(f"Validataion Dataset size: {len(data.valid_ds)}")
    print(f"Classes: {data.classes}")
    print(f"Number of Classes : {data.c}")

    num_epochs = 5
    lr = 4.37E-03
    learn = cnn_learner(data,
                        models.resnet34,
                        metrics=error_rate,
                        pretrained=True,
                        true_wd=False,
                        train_bn=False)
    learn.fit(epochs=num_epochs, lr=lr)

    #### Model evaluation
    report = learn.interpret()
    matrix = report.confusion_matrix().tolist()

    print("confusion_matrix:\n{}".format(matrix))

    learn.model = learn.model.cpu()  ### convert to a CPU model

    # model_dir = os.path.join(os.getcwd(),"./models")
    # model_dir = '/home/user/tmp/pycharm_project_310/1_detectron2/ImageDetectionAPI/image_style_classifier/models/'

    weight_path = os.path.join(model_dir, 'resnet34_scene_detection')
    inference_path = os.path.join(model_dir, 'export.pkl')

    learn.save(weight_path)
    learn.export(file=Path(inference_path))

    if os.path.exists(inference_path):
        print("model save to :{}".format(model_dir))
Code Example #7
    def __init__(self, data, backbone=None, pretrained_path=None):
        
        super().__init__(data, backbone)

        backbone_cut = None
        backbone_split = None
        if self._backbone == models.mobilenet_v2:
            backbone_cut = -1
            backbone_split = _mobilenet_split

        if not self._check_backbone_support(self._backbone):
            raise Exception (f"Enter only compatible backbones from {', '.join(self.supported_backbones)}")

        self._code = feature_classifier_prf
        self.learn = cnn_learner(data, self._backbone, metrics=accuracy, cut=backbone_cut, split_on=backbone_split)

        self.learn.model = self.learn.model.to(self._device)

        _set_multigpu_callback(self)
        if pretrained_path is not None:
            self.load(pretrained_path)
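This signature matches arcgis.learn's FeatureClassifier, so a hedged instantiation sketch might look as follows; the chips path, batch size, and epoch count are all assumptions:

from arcgis.learn import prepare_data, FeatureClassifier
data = prepare_data('path/to/labeled_image_chips', batch_size=16)
clf = FeatureClassifier(data)
clf.fit(10)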
Code Example #8
def load_inf_model(model_file: str, data_path: str):
    """Creates an inference model from a pth file.

  model_file: name of the model, without directory and suffix. E.g.
      load_inf_model('bears') will try to load saved model from
      models/bears.pth
  data_path: directory that contains data used for training the model.
      To build a inference model, we only need the directory structure
      and some data in each directory.
  """
    bears = block.DataBlock(blocks=(ImageBlock, block.CategoryBlock),
                            get_items=transforms.get_image_files,
                            splitter=transforms.RandomSplitter(valid_pct=0.2,
                                                               seed=42),
                            get_y=transforms.parent_label,
                            item_tfms=Resize(224))
    dls = bears.dataloaders(data_path)
    # resnet18 must match the architecture used during training
    learn = cnn_learner(dls, resnet18)
    learn.load(model_file)
    return learn
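Hedged usage, following the docstring's own 'bears' example; the data directory and image path are placeholders. Since items are file paths here, predict accepts a path directly:

learn = load_inf_model('bears', 'data/bears')
pred, pred_idx, probs = learn.predict('data/bears/grizzly/001.jpg')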
Code Example #9
def load_model(inference=False):
    if inference:
        data = ImageDataBunch.load_empty(TRAIN_PATH)
    else:
        np.random.seed(1337)  # give consistency to the validation set
        data = ImageDataBunch.from_folder(TRAIN_PATH,
                                          train=".",
                                          valid_pct=0.1,
                                          ds_tfms=transform.get_transforms(),
                                          size=224,
                                          num_workers=4,
                                          bs=32).normalize(imagenet_stats)

        data.export()  # Save the classes used in training for inference

    learn = learner.cnn_learner(data,
                                models.resnet34,
                                metrics=metrics.error_rate)

    if inference:
        learn.load(MODEL_NAME)

    return learn, data
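A hedged sketch of the intended round trip; the epoch count is illustrative:

learn, data = load_model()           # training mode: builds and exports the DataBunch
learn.fit_one_cycle(4)
learn.save(MODEL_NAME)
inf_learn, _ = load_model(inference=True)  # rebuilds the learner from the empty DataBunch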
Code Example #10
                cv2.putText(
                    self.image,
                    LABEL_MAP[label_list[idx - 1]],
                    (X, Y - 14 * idx),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.45,
                    (80, 100, 50),
                    2,
                )
            print("Label :", label)


path = Path("Training")
tfm = get_transforms(do_flip=True, max_rotate=35.0, max_zoom=0.6, max_lighting=0.3, max_warp=0.2, p_affine=0.75, p_lighting=0.75)
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, ds_tfms=tfm, num_workers=4, size=224).normalize(imagenet_stats)
# Loading our model
learn = cnn_learner(data, models.resnet50, pretrained=False)
learn.load("stage-3")
cap = cv2.VideoCapture('testet.mp4')
Traffic = cv2.CascadeClassifier('second_2_5.xml')
count = 0  # frame counter; assumed initialization (undefined in the original excerpt)
while True:
    ret, img = cap.read()
    if img is None:
        break
    ret = count % 5  # process every fifth frame
    count += 1
    if ret == 0:
        H, W, C = img.shape
        gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
        Traffic_sign = Traffic.detectMultiScale(gray, scaleFactor=2, minNeighbors=5, minSize=(90, 90), maxSize=(120, 120))  # 1.05
        if len(Traffic_sign) < 1:
            print("NOTHING FOUND")
        elif len(Traffic_sign) < 2:
Code Example #11
import dill
import torch
from fastai.vision import (models, get_transforms, ImageDataBunch,
                           imagenet_stats, cnn_learner, accuracy, load_learner)
from PIL import ImageFile

#defaults.device = torch.device('cuda')
DATA_PATH = '/valohai/inputs/dataset/dataset/'
MODEL_PATH = '/valohai/outputs/'
# Data augmentation: create a list of flip, rotate, zoom, warp, lighting transforms...
tfms = get_transforms()
# Create databunch from imagenet style dataset in path with
# images resized 224x224 and batch size equal to 64
# and validation set about 30% of the dataset
data = ImageDataBunch.from_folder(DATA_PATH,
                                  ds_tfms=tfms,
                                  size=224,
                                  bs=64,
                                  valid_pct=0.3).normalize(imagenet_stats)
# Get a pretrained model (resnet34) with a custom head that is suitable for our data.
learn = cnn_learner(data, models.resnet34, metrics=[accuracy])
learn.model_dir = MODEL_PATH

ImageFile.LOAD_TRUNCATED_IMAGES = True
# Fit a model following the 1cycle policy with 50 epochs
learn.fit_one_cycle(50)

# save model parameters
learn.save('weights')
# Save the model architecture (pytorch form .pt)
torch.save(learn.model, MODEL_PATH + 'my_model.pt', pickle_module=dill)
learn.export(MODEL_PATH + 'export.pkl')
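Hedged follow-up, not in the original: the exported learner could later be reloaded with fastai v1's load_learner.

inf_learn = load_learner(MODEL_PATH, 'export.pkl')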
Code Example #12
    def __init__(self,
                 data,
                 grids=None,
                 zooms=[1.],
                 ratios=[[1., 1.]],
                 backbone=None,
                 drop=0.3,
                 bias=-4.,
                 focal_loss=False,
                 pretrained_path=None,
                 location_loss_factor=None,
                 ssd_version=2,
                 backend='pytorch'):

        super().__init__(data, backbone)

        self._backend = backend
        if self._backend == 'tensorflow':
            self._intialize_tensorflow(data, grids, zooms, ratios, backbone,
                                       drop, bias, pretrained_path,
                                       location_loss_factor)
        else:
            # assert (location_loss_factor is not None) or ((location_loss_factor > 0) and (location_loss_factor < 1)),
            if ssd_version not in [1, 2]:
                raise Exception("ssd_version can only be 1 or 2")

            if location_loss_factor is not None:
                if not ((location_loss_factor > 0) and
                        (location_loss_factor < 1)):
                    raise Exception(
                        '`location_loss_factor` should be greater than 0 and less than 1'
                    )
            self.location_loss_factor = location_loss_factor

            self._code = code
            self.ssd_version = ssd_version

            backbone_cut = None
            backbone_split = None

            if hasattr(self, '_orig_backbone'):
                self._backbone_ms = self._backbone
                self._backbone = self._orig_backbone
                _backbone_meta = cnn_config(self._orig_backbone)
                backbone_cut = _backbone_meta['cut']
                backbone_split = _backbone_meta['split']

            if backbone is None:
                self._backbone = models.resnet34
                backbone_name = 'res'
            elif type(backbone) is str:
                self._backbone = getattr(models, backbone)
                backbone_name = backbone[:3]
            else:
                self._backbone = backbone
                backbone_name = 'custom'

            if not self._check_backbone_support(self._backbone):
                raise Exception(
                    f"Enter only compatible backbones from {', '.join(self.supported_backbones)}"
                )

            if self._backbone == models.mobilenet_v2:
                backbone_cut = -1
                backbone_split = _mobilenet_split

            if ssd_version == 1:
                if grids is None:
                    grids = [4, 2, 1]

                self._create_anchors(grids, zooms, ratios)

                feature_sizes = model_sizes(create_body(self._backbone,
                                                        cut=backbone_cut),
                                            size=(data.chip_size,
                                                  data.chip_size))
                num_features = feature_sizes[-1][-1]
                num_channels = feature_sizes[-1][1]

                ssd_head = SSDHead(grids,
                                   self._anchors_per_cell,
                                   data.c,
                                   num_features=num_features,
                                   drop=drop,
                                   bias=bias,
                                   num_channels=num_channels)
            elif ssd_version == 2:

                # find bounding boxes height and width

                if grids is None:
                    logger.info("Computing optimal grid size...")
                    hw = data.height_width
                    hw = np.array(hw)

                    # find most suitable centroids for dataset
                    centroid = kmeans(hw, 1)
                    avg = avg_iou(hw, centroid)

                    for num_anchor in range(2, 5):
                        new_centroid = kmeans(hw, num_anchor)
                        new_avg = avg_iou(hw, new_centroid)
                        if (new_avg - avg) < 0.05:
                            break
                        avg = new_avg
                        centroid = new_centroid.copy()

                    # find grid size

                    grids = list(
                        map(
                            int,
                            map(
                                round, data.chip_size /
                                np.sort(np.max(centroid, axis=1)))))
                    grids = list(set(grids))
                    grids.sort(reverse=True)
                    if grids[-1] == 0:
                        grids[-1] = 1
                    grids = list(set(grids))

                self._create_anchors(grids, zooms, ratios)

                feature_sizes = model_sizes(create_body(self._backbone,
                                                        cut=backbone_cut),
                                            size=(data.chip_size,
                                                  data.chip_size))
                num_features = feature_sizes[-1][-1]
                num_channels = feature_sizes[-1][1]

                if grids[0] > 8 and abs(num_features - grids[0]
                                        ) > 4 and backbone_name == 'res':
                    num_features = feature_sizes[-2][-1]
                    num_channels = feature_sizes[-2][1]
                    backbone_cut = -3
                ssd_head = SSDHeadv2(grids,
                                     self._anchors_per_cell,
                                     data.c,
                                     num_features=num_features,
                                     drop=drop,
                                     bias=bias,
                                     num_channels=num_channels)

            else:
                raise Exception('SSDVersion can only be 1 or 2')

            if hasattr(self, '_backbone_ms'):
                self._orig_backbone = self._backbone
                self._backbone = self._backbone_ms

            self.learn = cnn_learner(data=data,
                                     base_arch=self._backbone,
                                     cut=backbone_cut,
                                     split_on=backbone_split,
                                     custom_head=ssd_head)
            self._arcgis_init_callback()  # make first conv weights learnable
            self.learn.model = self.learn.model.to(self._device)

            if focal_loss:
                self._loss_f = FocalLoss(data.c)
            else:
                self._loss_f = BCE_Loss(data.c)
            self.learn.loss_func = self._ssd_loss

            _set_multigpu_callback(self)
            if pretrained_path is not None:
                self.load(pretrained_path)
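This __init__ mirrors arcgis.learn's SingleShotDetector, so a hedged construction sketch might read as follows; the chips path and every parameter value are assumptions:

from arcgis.learn import prepare_data, SingleShotDetector
data = prepare_data('path/to/exported_chips', batch_size=16)
ssd = SingleShotDetector(data, backbone='resnet34', focal_loss=True)
ssd.fit(10)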
Code Example #13
# %%
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# %%
mnist_dls = ImageDataLoaders.from_folder(
    mnist_dir,
    train="training",
    valid="testing",
    device=device,
    batch_tfms=aug_transforms(mult=2, do_flip=False),
    item_tfms=Resize(224),
)

# %%
resnet_learner = cnn_learner(
    mnist_dls,
    resnet18,
    metrics=[accuracy,
             Precision(average="macro"),
             Recall(average="macro")],
)

# %%
with resnet_learner.no_bar():
    resnet_learner.fine_tune(1)

# %%
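A plausible next cell, hedged: validate returns the validation loss followed by the metrics in the order they were defined above.

# %%
results = resnet_learner.validate()
print(dict(zip(["loss", "accuracy", "precision", "recall"], results)))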
Code Example #14
File: starter.py Project: ussozi/PotHoleDetection
ll = sd.label_from_folder()

# specify data augmentation
tfms = get_transforms()
ll = ll.transform(tfms, size=256)#, resize_method=ResizeMethod.SQUISH)

# create databunch to pass to model and optimiser
data = ll.databunch(bs=32)

# inspect data
data.show_batch()

# create a learner object with ResNet18 model and adam optimiser (default)
# https://github.com/LiyuanLucasLiu/RAdam
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer?source=post_page-----2dc83f79a48d----------------------
learn = cnn_learner(data, resnet18, metrics=accuracy, ps=0.5, opt_func=Ranger)
learn = learn.mixup()

# find good learning rate
learn.lr_find()
learn.recorder.plot()

# save best model

learn.fit_one_cycle(10, 1e-2)
learn.save('pre')

# Two alternative ways to get validation predictions: with and without TTA
preds, y = learn.TTA()
preds, y = learn.get_preds()

accuracy(preds, y)