Exemplo n.º 1
0
def main(epochs=5, learning_rate=0.01):
    """Train a fastai CNN on MNIST_SAMPLE and log the model to MLflow.

    Args:
        epochs: number of training epochs.
        learning_rate: learning rate passed to ``Learner.fit``.
    """
    # Work around the "multiple OpenMP runtimes" abort on some platforms.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Fetch and extract the MNIST sample data set.
    mnist_path = untar_data(URLs.MNIST_SAMPLE)

    # Build the data bunch (random padding on the training set) and normalize.
    bunch = ImageDataBunch.from_folder(
        mnist_path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    bunch.normalize(imagenet_stats)

    # Create the CNN learner.
    learn = cnn_learner(bunch, models.resnet18, metrics=accuracy)

    # Train inside an MLflow run and log the fitted model as an artifact.
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, 'model')

    # List the artifact paths that were logged under 'model'.
    artifacts = []
    for item in MlflowClient().list_artifacts(run.info.run_id, 'model'):
        artifacts.append(item.path)
    print("artifacts: {}".format(artifacts))
Exemplo n.º 2
0
def main(epochs=5, learning_rate=0.01):
    """Train a CNN on MNIST_SAMPLE, log it to MLflow, then reload and score it.

    Args:
        epochs: number of training epochs.
        learning_rate: learning rate passed to ``Learner.fit``.
    """
    # Work around the "multiple OpenMP runtimes" abort on some platforms.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Fetch and extract the MNIST sample data set.
    mnist_path = untar_data(URLs.MNIST_SAMPLE)

    # Build the data bunch and normalize it.
    bunch = ImageDataBunch.from_folder(
        mnist_path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    bunch.normalize(imagenet_stats)

    # Create the CNN learner.
    learn = cnn_learner(bunch, models.resnet18, metrics=accuracy)

    # Train inside an MLflow run and log the fitted model.
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, "model")

    # Reload the logged model from the finished run for scoring.
    model_uri = "runs:/{}/model".format(run.info.run_id)
    loaded_model = mlflow.fastai.load_model(model_uri)

    predict_data = ...  # placeholder: supply real scoring data here
    loaded_model.predict(predict_data)
Exemplo n.º 3
0
def main(epochs=5, learning_rate=0.01):
    """Train a CNN on MNIST_SAMPLE under MLflow and print the default conda env.

    Args:
        epochs: number of training epochs.
        learning_rate: learning rate passed to ``Learner.fit``.
    """
    # Work around the "multiple OpenMP runtimes" abort on some platforms.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Fetch and extract the MNIST sample data set.
    mnist_path = untar_data(URLs.MNIST_SAMPLE)

    # Build the data bunch and normalize it.
    bunch = ImageDataBunch.from_folder(
        mnist_path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    bunch.normalize(imagenet_stats)

    # Create the CNN learner.
    learn = cnn_learner(bunch, models.resnet18, metrics=accuracy)

    # Train inside an MLflow run and log the fitted model.
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, "model")

    # Report the run id and the conda environment MLflow logged by default.
    print("run_id: {}".format(run.info.run_id))
    env = mlflow.fastai.get_default_conda_env()
    print("conda environment: {}".format(env))
def get_data_from_path(path_image: Path):
    """Build a labelled, transformed ImageDataBunch rooted at *path_image*.

    NOTE(review): relies on fastai v1's ``random_split_by_pct`` (renamed
    in later versions) and on a module-level ``get_float_labels`` helper.
    """
    bunch = (
        ImageDataBunch.from_folder(path_image)
        .random_split_by_pct()
        .label_from_func(get_float_labels)
        .transform()
    )
    # data.normalize(imagenet_stats)  # normalization deliberately left off
    return bunch
Exemplo n.º 5
0
    def getDataBunch(self, tfm = True, val_p = 0, sz = 224, b_sz = 8):
        """Build an ImageDataBunch from the RecFace image folder.

        Args:
            tfm: when True, apply the default fastai transforms
                (with horizontal flips disabled); otherwise no transforms.
            val_p: validation split percentage.
            sz: target image size.
            b_sz: batch size.

        Returns:
            The constructed ImageDataBunch.
        """
        # The original duplicated the whole from_folder call in two
        # branches that differed only in ds_tfms; compute it once instead.
        ds_tfms = get_transforms(do_flip=False) if tfm else None
        return ImageDataBunch.from_folder('/lib/Auth/RecFace/images/',
                                          ds_tfms=ds_tfms,
                                          valid_pct=val_p,
                                          size=sz,
                                          bs=b_sz)
Exemplo n.º 6
0
def create_data_bunch(path):
    """Create a normalized ImageDataBunch with a deterministic 20% split."""
    np.random.seed(42)  # fix the random validation split
    bunch = ImageDataBunch.from_folder(
        path,
        train=".",
        valid_pct=0.2,
        ds_tfms=get_transforms(),
        size=224,
        num_workers=4,
    )
    return bunch.normalize(imagenet_stats)
Exemplo n.º 7
0
def setup_data_loader() -> str:
    """Serialize a test ImageDataBunch to a base64-encoded string via dill."""
    import codecs
    import dill
    from fastai.vision import ImageDataBunch

    bunch = ImageDataBunch.from_folder(
        "test/images", train="training", valid="test", size=112)
    payload = dill.dumps(bunch)
    return codecs.encode(payload, "base64").decode()
Exemplo n.º 8
0
def main():
    """Score image tiles with a trained model and write the results as CSV.

    Relies on the module-level ``args`` namespace (argparse) and on helpers
    defined elsewhere in this module: ``get_tile_filename_info``,
    ``predict_tiles``, ``tile_output_to_array`` and ``model_type``.
    """
    # Set data transformations (augmentation is optional via args.ds_tfms)
    if args.ds_tfms:
        ds_tfms = get_transforms(do_flip=True,
                                 flip_vert=True,
                                 max_lighting=0.1,
                                 max_zoom=1.05,
                                 max_warp=0.1)
    else:
        ds_tfms = None

    # Load data; note the "valid" set here is the test directory
    data = ImageDataBunch.from_folder(args.data_path,
                                      train=args.train_dirname,
                                      valid=args.test_dirname,
                                      ds_tfms=ds_tfms,
                                      size=args.image_size,
                                      bs=args.batch_size).normalize()

    if args.verbose:
        print(len(data.valid_ds), "test images")

    # Get tile IDS, image IDs and image targets
    tile_ids, image_ids, image_targets = get_tile_filename_info(data)

    # Compute tile prediction scores using trained model
    tile_scores, tile_targets = predict_tiles(data,
                                              model_type[args.model],
                                              args.model_path,
                                              args.ps,
                                              args.wd,
                                              mixup=args.mixup)

    # Make tile predictions using prediction scores
    # (argmax over the class dimension of the score tensor)
    tile_preds = torch.argmax(tile_scores, 1)

    # Create output directory if it does not exist
    if not os.path.isdir(args.output_path):
        os.makedirs(args.output_path, exist_ok=True)

    # Create numpy array holding the tile output
    tile_output = tile_output_to_array(tile_ids, image_ids, tile_scores,
                                       tile_preds, tile_targets, image_targets)

    # Save tile IDs, image IDs, tile prediction scores, tile predictions and tile true labels to file
    np.savetxt(
        os.path.join(args.output_path, args.output_filename),
        tile_output,
        fmt=['%s', '%s', '%f', '%d', '%d', '%d'],
        delimiter=',',
        newline='\n',
        header='tile_id,image_id,tile_score,tile_pred,tile_target,image_target',
        comments='')

    if args.verbose:
        print("Completed execution.")
Exemplo n.º 9
0
def train(imgs_root, model_dir):
    """Train a resnet34 scene/style classifier and export it for inference.

    Args:
        imgs_root: dataset root with one subfolder per class.
        model_dir: directory where weights and the export.pkl are written.
    """
    my_tfms = get_transforms()
    print(f"Transforms on Train set: {my_tfms[0]}")
    print(f"Transforms on Validation set: {my_tfms[1]}")

    np.random.seed(42)  # deterministic 20% validation split
    ### '/home/user/tmp/pycharm_project_310/1_detectron2/Furniture-Style-Classifier-master/Data'

    # imgs_root = '/home/user/tmp/pycharm_project_310/1_detectron2/Furniture-Style-Classifier-master/MyData'

    data = ImageDataBunch.from_folder(path=Path(imgs_root),
                                      train=".",
                                      valid_pct=0.2,
                                      ds_tfms=my_tfms,
                                      size=224,
                                      num_workers=4,
                                      bs=64).normalize(imagenet_stats)

    print(f"BatchSize: {data.batch_size}")
    print(f"Train Dataset size: {len(data.train_ds)}")
    print(f"Validataion Dataset size: {len(data.valid_ds)}")
    print(f"Classes: {data.classes}")
    print(f"Number of Classes : {data.c}")

    num_epochs = 5
    lr = 4.37E-03
    learn = cnn_learner(data,
                        models.resnet34,
                        metrics=error_rate,
                        pretrained=True,
                        true_wd=False,
                        train_bn=False)
    learn.fit(epochs=num_epochs, lr=lr)

    #### Model evaluation
    report = learn.interpret()
    matrix = report.confusion_matrix().tolist()

    print("confusion_matrix:\n{}".format(matrix))

    learn.model = learn.model.cpu()  ### move the model to CPU before saving

    # model_dir = os.path.join(os.getcwd(),"./models")
    # model_dir = '/home/user/tmp/pycharm_project_310/1_detectron2/ImageDetectionAPI/image_style_classifier/models/'

    # Weights checkpoint plus a self-contained inference export.
    weight_path = os.path.join(model_dir, 'resnet34_scene_detection')
    inference_path = os.path.join(model_dir, 'export.pkl')

    learn.save(weight_path)
    learn.export(file=Path(inference_path))

    if os.path.exists(inference_path):
        print("model save to :{}".format(model_dir))
Exemplo n.º 10
0
def main(epochs):
    """Train a small MNIST CNN while reporting to a ClearML task."""
    Task.init(project_name="examples", task_name="fastai v1")

    mnist_path = untar_data(URLs.MNIST_SAMPLE)

    bunch = ImageDataBunch.from_folder(
        mnist_path, ds_tfms=(rand_pad(2, 28), []), bs=64, num_workers=0)
    bunch.normalize(imagenet_stats)

    learn = cnn_learner(bunch, models.resnet18, metrics=accuracy)

    # Evaluate once before training, then fit with the one-cycle policy.
    accuracy(*learn.get_preds())
    learn.fit_one_cycle(epochs, 0.01)
Exemplo n.º 11
0
    def load_w(self, location):
        """Fine-tune a pretrained ResNet50 on the SIPaKMeD dataset.

        Loads weights previously trained on the Herlev dataset from
        *location*, then continues training on the SIPaKMeD data while
        checkpointing on accuracy improvements.

        Args:
            location: path of the pretrained weights to load.
        """
        self.location = location
        # res = models.resnet50
        # hyperparameters
        batch_size = 10
        epochs = 50

        save_loc = 'resnet50model_trainedonHerlevsetandSipakmed' + str(
            epochs) + "batch" + str(batch_size)  #location to save the model

        ## Declaring path of dataset
        path_img = Path("/cluster/home/cantoniou/deep_project/sipakmedFormat")
        ## Loading data
        data = ImageDataBunch.from_folder(path=path_img,
                                          train='train',
                                          valid='val',
                                          ds_tfms=get_transforms(),
                                          size=224,
                                          bs=batch_size)  #, check_ext=False)
        ## Normalizing data based on Image net parameters
        #data.normalize(imagenet_stats)
        #normalize now according to the batch data and not imagenet
        data.normalize()  #defaults to batch 'stats'
        print(data.classes)
        len(data.classes), data.c  # NOTE(review): no-op expression, kept as-is

        #LOAD THE TRANSFER LEARNING MODEL
        ## To create a ResNET 50 with pretrained weights based on the new dataset
        trans_model = cnn_learner(
            data,
            models.resnet50,
            metrics=[accuracy, FBeta(average="weighted")])
        #print(trans_model) #check the architecture of the loaded model to make sure it was loaded with a head of 5 ---to match the data classes
        trans_model = trans_model.load(
            location
        )  #load the previous pretrained model weights form the harlev dataset

        print("Start training")

        #find best learning rate
        trans_model.lr_find()

        # Train the model, saving a checkpoint whenever accuracy improves.
        trans_model.fit_one_cycle(epochs,
                                  callbacks=[
                                      SaveModelCallback(trans_model,
                                                        every='improvement',
                                                        mode='max',
                                                        monitor='accuracy',
                                                        name=save_loc)
                                  ])
Exemplo n.º 12
0
def search(bot, update):
    """Classify the photo in a Telegram message and reply with the label.

    Downloads the highest-resolution photo attached to *update*, runs it
    through a fastai image classifier, and replies with the predicted
    class when the model is at least 90% confident.
    """
    photo_file = bot.get_file(update.message.photo[-1].file_id)
    photo_file.download('testing.jpeg')
    try:
        bs = 32
        path = "classes"

        np.random.seed(42)
        data = ImageDataBunch.from_folder(
            path,
            train='.',
            valid_pct=0.2,
            ds_tfms=get_transforms(),
            size=224,
            num_workers=4).normalize(imagenet_stats)

        # NOTE(review): rebuilding and re-exporting the learner on every
        # incoming message is expensive; consider loading it once at
        # module level instead.
        learn = cnn_learner(data, models.resnet34,
                            metrics=error_rate).load("stage-1")
        learn.export()
        learn = load_learner("classes")

        cat, tensor, probs = learn.predict(open_image("testing.jpeg"))

        # Convert the predicted index and its probability directly instead
        # of parsing the string representation of the tensors (the old
        # int(a.strip("tensor()")) approach breaks on multi-line reprs).
        idx = int(tensor)
        confidence = float(probs[idx])
        if confidence >= 0.9:
            update.message.reply_text(
                '`' + str(cat) + '`',
                parse_mode=ParseMode.MARKDOWN,
                reply_to_message_id=update.message.message_id)
        else:
            cat = "sry I am not sure "
            update.message.reply_text(
                '`' + str(cat) + '`',
                parse_mode=ParseMode.MARKDOWN,
                reply_to_message_id=update.message.message_id)

    except Exception as e:
        # Reply with the error text rather than the exception object.
        update.message.reply_text(str(e))
Exemplo n.º 13
0
def main():
    """Tune the model's decision threshold and save the resulting metrics.

    Relies on the module-level ``args`` namespace and on ``tune_thres``
    and ``model_type`` defined elsewhere in this module.
    """
    # Load data; the "valid" set here is the test directory
    data = ImageDataBunch.from_folder(
        args.data_path,
        train=args.train_dirname,
        valid=args.test_dirname,
        ds_tfms=get_transforms(do_flip=True, flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.1),
        size=args.image_size,
        bs=args.batch_size
    ).normalize()

    if args.verbose:
        print(len(data.valid_ds), "test images")

    # Compute F1, TPR and FPR for different thresholds and find optimal threshold
    optimal_thres, optimal_thres_idx, thres_metrics = tune_thres(
        data, model_type[args.model], args.model_path, args.ps, args.thres,
        tpr_lb=args.tpr_lb, fpr_ub=args.fpr_ub, verbose=args.verbose
    )

    # Save optimal threshold and associated F1 score, TPF and FPR to file
    np.savetxt(
        os.path.join(args.output_path, args.output_optimal_thres_filename),
        [[
            optimal_thres,
            thres_metrics['f1'][optimal_thres_idx],
            thres_metrics['tpr'][optimal_thres_idx],
            thres_metrics['fpr'][optimal_thres_idx]
        ]],
        fmt=['%f', '%f', '%f', '%f'],
        delimiter=',',
        newline='\n',
        header='thres,f1,tpr,fpr',
        comments=''
    )

    # Save thresholds, F1 score, TPR and FPR to file
    np.savetxt(
        os.path.join(args.output_path, args.output_thres_metrics_filename),
        thres_metrics,
        fmt=['%f', '%f', '%f', '%f'],
        delimiter=',',
        newline='\n',
        header='thres,f1,tpr,fpr',
        comments=''
    )

    if args.verbose:
        print("Completed execution.")
Exemplo n.º 14
0
def main():
    """Run a learning-rate range exploration and save the lr/loss curve.

    Relies on the module-level ``args`` namespace and on ``tune_lr`` and
    ``model_dict`` defined elsewhere in this module.
    """
    # Set data transformations (augmentation is optional via args.ds_tfms)
    if args.ds_tfms:
        ds_tfms = get_transforms(do_flip=True,
                                 flip_vert=True,
                                 max_lighting=0.1,
                                 max_zoom=1.05,
                                 max_warp=0.1)
    else:
        ds_tfms = None

    # Load data with separate train / validation / test directories
    data = ImageDataBunch.from_folder(args.data_path,
                                      train=args.train_dirname,
                                      valid=args.validation_dirname,
                                      test=args.test_dirname,
                                      ds_tfms=ds_tfms,
                                      size=args.image_size,
                                      bs=args.batch_size).normalize()

    if args.verbose:
        print(len(data.train_ds), "training images", len(data.valid_ds),
              "validation images and", len(data.test_ds), "test images")

    # Explore optimal learning rates over [start_lr, end_lr]
    lrs, losses = tune_lr(data,
                          model_dict[args.model],
                          args.ps,
                          args.wd,
                          args.start_lr,
                          args.end_lr,
                          args.num_lr_iters,
                          pretrained=args.pretrained,
                          mixup=args.mixup)

    # Create output directory if it does not exist
    if not os.path.isdir(args.output_path):
        os.makedirs(args.output_path, exist_ok=True)

    # Save learning rates and associated loss values as a two-column CSV
    np.savetxt(os.path.join(args.output_path, args.output_filename),
               np.column_stack((lrs, losses)),
               delimiter=',',
               newline='\n',
               header='lrs,losses',
               comments='')

    if args.verbose:
        print("Completed execution.")
def fine_tune_convnet(path: str):
    """
    Two-stage fine-tuning of a resnet34 image classifier.

    Path must be in the format of a FastAI data bunch, ie Train/Test/Valid dirs
    and then one subdirectory per class in each of those dirs.
    """
    bunch = ImageDataBunch.from_folder(
        path, ds_tfms=get_transforms(do_flip=False), size=64)
    learn = cnn_learner(bunch, models.resnet34, metrics=error_rate)

    # Stage 1: train the head only.
    learn.fit_one_cycle(5)
    learn.save('stage-1')

    # Stage 2: unfreeze and train the whole net with discriminative LRs.
    learn.unfreeze()
    learn.fit_one_cycle(1, max_lr=slice(1e-6, 1e-3))
    learn.save('stage-2')

    learn.export(file="two-stage-model.pkl")
Exemplo n.º 16
0
def main():
    """Fit a model with the one-cycle policy and optionally save loss curves.

    Relies on the module-level ``args`` namespace and on ``fit_model``,
    ``model_dict``, ``all_losses_to_array`` and ``get_all_losses_per_epoch``
    defined elsewhere in this module.
    """
    # Load data with separate train / validation / test directories
    data = ImageDataBunch.from_folder(args.data_path,
                                      train=args.train_dirname,
                                      valid=args.validation_dirname,
                                      test=args.test_dirname,
                                      ds_tfms=get_transforms(do_flip=True,
                                                             flip_vert=True,
                                                             max_lighting=0.1,
                                                             max_zoom=1.05,
                                                             max_warp=0.1),
                                      size=args.image_size,
                                      bs=args.batch_size).normalize()

    if args.verbose:
        print(len(data.train_ds), "training images", len(data.valid_ds),
              "validation images and", len(data.test_ds), "test images")

    # Create output directory if it does not exist
    if not Path(args.output_path).exists():
        Path(args.output_path).mkdir(parents=True, exist_ok=True)

    # Fit model using one-cycle policy; returns the fastai Recorder
    recorder = fit_model(data, model_dict[args.model], args.pretrained,
                         args.ps, args.cyc_len, args.lr_lower, args.lr_upper,
                         args.output_path, args.output_model_filename,
                         args.verbose)

    # Save loss value for each processed batch to file
    if args.save_loss:
        np.savetxt(Path(args.output_path).joinpath(
            args.output_training_loss_filename),
                   [l.item() for l in recorder.losses],
                   newline='\n',
                   header='losses',
                   comments='')

        # Save training and validation losses per epoch
        np.savetxt(Path(args.output_path).joinpath(args.output_loss_filename),
                   all_losses_to_array(get_all_losses_per_epoch(recorder)),
                   fmt=['%d', '%f', '%f'],
                   delimiter=',',
                   newline='\n',
                   header='nb_batches,train_losses,val_losses',
                   comments='')

    if args.verbose:
        print("Completed execution.")
Exemplo n.º 17
0
def train_model(model_name, dataset_name, arch, img_size, epochs):
    """Train a classifier on a named dataset, export it, and return metrics.

    Returns a dict with per-epoch metric values plus per-batch losses and
    learning rates, all converted to plain Python floats.
    """
    bunch = ImageDataBunch.from_folder(f'{BASE_DIR}datasets/{dataset_name}',
                                       valid_pct=0.2,
                                       ds_tfms=get_transforms(),
                                       size=img_size,
                                       num_workers=6,
                                       bs=BATCH_SIZE)
    learner = cnn_learner(bunch, model_mapping[arch],
                          metrics=[error_rate, accuracy])
    learner.fit_one_cycle(epochs)

    # Export the trained model next to the web app's static assets.
    export_path = os.path.join(os.getcwd(), '..', 'static', 'models',
                               model_name + '.pkl')
    learner.export(export_path)

    recorder = learner.recorder
    return {
        'metrics': [[m.item() for m in epoch] for epoch in recorder.metrics],
        'loss': [v.item() for v in recorder.losses],
        'lr': [v.item() for v in recorder.lrs],
    }
Exemplo n.º 18
0
def test_freeze_unfreeze_effnet():
    """Check that cnn_learner freezes an EfficientNet body by default.

    A frozen cnn_learner must expose fewer trainable parameters than a
    plain Learner wrapping the same pretrained network; after unfreeze()
    the counts must be equal.
    """
    this_tests(cnn_learner)

    def get_number_of_trainable_params(model: nn.Module):
        # Count only parameters that will receive gradients.
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    base_arch = EfficientNetB1
    path = untar_data(URLs.MNIST_TINY)
    data = ImageDataBunch.from_folder(path, size=64)
    data.c = 1000  # Set number of class to be 1000 to stay in line with the pretrained model.
    cnn_learn = cnn_learner(data, base_arch, pretrained=True)
    ref_learn = Learner(data, EfficientNet.from_pretrained("efficientnet-b1"))
    # By default the neural net in cnn learner is freezed.
    assert get_number_of_trainable_params(
        cnn_learn.model) < get_number_of_trainable_params(ref_learn.model)
    cnn_learn.unfreeze()
    assert get_number_of_trainable_params(
        cnn_learn.model) == get_number_of_trainable_params(ref_learn.model)
def main(epochs):
    """Train MNIST with a ClearML task and a TensorBoard writer callback."""
    Task.init(project_name="examples",
              task_name="fastai with tensorboard callback")

    mnist_path = untar_data(URLs.MNIST_SAMPLE)

    bunch = ImageDataBunch.from_folder(mnist_path,
                                       ds_tfms=(rand_pad(2, 28), []),
                                       bs=64,
                                       num_workers=0)
    bunch.normalize(imagenet_stats)

    learn = cnn_learner(bunch, models.resnet18, metrics=accuracy)

    # Stream training metrics to TensorBoard under data/tensorboard/project1.
    tboard_path = Path("data/tensorboard/project1")
    writer_fn = partial(LearnerTensorboardWriter, base_dir=tboard_path,
                        name="run0")
    learn.callback_fns.append(writer_fn)

    # Evaluate once up front, then train with the one-cycle policy.
    accuracy(*learn.get_preds())
    learn.fit_one_cycle(epochs, 0.01)
Exemplo n.º 20
0
def test_ImageDataBunch_init():
    """
    Exploratory test for understanding the various databunches.

    For ImageDataBunch.from_folder: src starts as an ImageList, but
    `src = src.label_from_folder(classes=classes)` changes the class to a
    LabelLists — i.e. the ImageList is capable of turning into a dataset.

    :return:
    """
    bunch = ImageDataBunch.from_folder(get_absolute_path('data'),
                                       valid_pct=0.5)

    for sample in bunch.train_ds:
        print(sample)
Exemplo n.º 21
0
def train_model(hyperparams):
    """Train a resnet model...

    Parameters
    ----------
    hyperparams : dict
        dict of hyperparams (keys: path, train, valid_pct, size,
        num_workers)

    Returns
    -------
    [type]
        trained pytorch model
    """
    # Build the normalized data bunch straight from the hyperparams dict.
    bunch = ImageDataBunch.from_folder(
        path=hyperparams["path"],
        train=hyperparams["train"],
        valid_pct=hyperparams["valid_pct"],
        ds_tfms=get_transforms(),
        size=hyperparams["size"],
        num_workers=hyperparams["num_workers"],
    ).normalize(imagenet_stats)

    # Create and train the learner for four one-cycle epochs.
    learn = cnn_learner(bunch, models.resnet34, metrics=error_rate)
    learn.fit_one_cycle(4)

    return learn
def image_learner(valid_pct=0.2,
                  size=224,
                  model=models.resnet34,
                  num_workers=8,
                  bs=32,
                  max_rotate=20,
                  max_zoom=1.3,
                  max_lighting=0.4,
                  max_warp=0.4,
                  p_affine=1,
                  p_lighting=1.):
    '''this function helps to prepare the data for training purposes'''
    np.random.seed(2)  # reproducible validation split

    # Configure the image-augmentation pipeline.
    aug_tfms = get_transforms(max_rotate=max_rotate,
                              max_zoom=max_zoom,
                              max_lighting=max_lighting,
                              max_warp=max_warp,
                              p_affine=p_affine,
                              p_lighting=p_lighting)

    # Build the normalized data bunch from the module-level data_directory.
    bunch = ImageDataBunch.from_folder(
        data_directory,
        train=".",
        valid_pct=valid_pct,
        bs=bs,
        ds_tfms=aug_tfms,
        size=size,
        num_workers=num_workers).normalize(imagenet_stats)

    print('the following images are the examples of prepared training data')
    bunch.show_batch(rows=3, figsize=(7, 8))
    print(bunch.classes, bunch.c, len(bunch.train_ds), len(bunch.valid_ds))

    # Instantiate the learner to pass down to other functions.
    return cnn_learner(bunch, model, metrics=error_rate)
Exemplo n.º 23
0
# Utility file that will connect to pytorch to evaluate models
# package this file with the models folder instead for global
# access my multiple apps. Also, this just makes more sense

from fastai.vision import create_cnn, ImageDataBunch, open_image, get_transforms, models
from fastai.metrics import accuracy
import torch.nn.functional as F
import torch

imageNum = 0  # presumably a counter for saved uploads — confirm against save_image

# Build the evaluation data bunch once at import time; bs=1 so single
# uploaded images can be scored individually.
data = ImageDataBunch.from_folder("datamodels/",
                                  ds_tfms=get_transforms(),
                                  test='test',
                                  size=224,
                                  bs=1)
# Create the CNN and load pretrained weights on the CPU for inference.
learn = create_cnn(data, models.resnet34, metrics=accuracy)
learn.load("model", device="cpu")
learn.precompute = False


def save_image(image):
    """
    Save an uploaded image for processing.

    Args:
        image: UploadedFile
            The image that is to be classified.

    Returns:
        str: image name
Exemplo n.º 24
0
print(model_save_dir)

# Free-form training note logged for this model version.
NOTE = f"""
Note for version {version} model {MODEL_NAME}:
Train for a long time
"""
print(NOTE)

data_path = '/home/qnkhuat/data/emotion_compilation_split'
# Augmentations: horizontal flips plus mild rotation/zoom jitter.
tfms = get_transforms(do_flip=True,
                      flip_vert=False,
                      max_rotate=20,
                      max_zoom=1.1,
                     )
# NOTE(review): the original comment says running the next line once
# raised an error in THCore (torch CUDA backend) — verify before relying
# on it.
data = (ImageDataBunch.from_folder(data_path,test='test',size=48,ds_tfms=tfms,bs=256).normalize(imagenet_stats))
print(data)



model = models.resnet18
#model = models.densenet121

learn = cnn_learner(data, model,callback_fns=[ShowGraph])
### THE DIRECTORY TO SAVE CHECKPOINTS
learn.path = Path(model_save_dir)
learn.model_dir = model_save_dir
learn.metrics = [accuracy]

Exemplo n.º 25
0
    for cuisine in cuisines:
        url_fpath = data_root / f'urls_{cuisine}.csv'
        dest_folder = img_root / cuisine

        print(f'{url_fpath} >> {dest_folder}')
        dest_folder.mkdir(parents=True, exist_ok=True)

        download_images(url_fpath, dest_folder, max_pics=20, max_workers=0)
        verify_images(dest_folder, delete=True, max_size=500)

# Setup dataloader: the downloaded cuisine images live under
# data_root/images, one subfolder per class.
np.random.seed(42)  # deterministic 20% validation split
data = ImageDataBunch.from_folder(data_root,
                                  train="images",
                                  valid_pct=0.2,
                                  ds_tfms=get_transforms(),
                                  size=224,
                                  num_workers=0).normalize(imagenet_stats)
print(f'Classes: {data.classes}, C: {data.c}')
print(f'No. Train Images: {len(data.train_ds)}')
print(f'No. Valid Images: {len(data.valid_ds)}')

# Show a sample batch when the flag is enabled.
if SHOW_IMAGES_FLAG:
    data.show_batch(rows=4, figsize=(7, 8))
    plt.show()

# Train model
if TRAIN_MODEL_FLAG:
    from fastai.vision import (cnn_learner, models, error_rate)
Exemplo n.º 26
0
class LearningRateSetter(LearnerCallback):
    """Callback that re-applies the learning rate at the start of each epoch."""

    def on_epoch_begin(self, **kwargs):
        # Delegates to the module-level set_learning_rate helper.
        set_learning_rate(self.learn)


@reloading
def print_model_statistics(model):
    """Hot-reloadable hook for printing per-parameter mean/stddev.

    Decorated with @reloading so the body can be edited and re-enabled
    while training is running; the statistics code below is deliberately
    left commented out until needed.
    """
    # Uncomment the following lines after during the training
    # to start printing statistics
    #
    # print('{: <28}  {: <7}  {: <7}'.format('NAME', ' MEAN', ' STDDEV'))
    # for name, param in model.named_parameters():
    #     mean = param.mean().item()
    #     std = param.std().item()
    #     print('{: <28}  {: 6.4f}  {: 6.4f}'.format(name, mean, std))
    pass


class ModelStatsPrinter(LearnerCallback):
    """Callback that prints model parameter statistics each epoch."""

    def on_epoch_begin(self, **kwargs):
        # Delegates to the hot-reloadable print_model_statistics above.
        print_model_statistics(self.learn.model)


# Train on the MNIST sample with both diagnostic callbacks attached.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = cnn_learner(data,
                    models.resnet18,
                    metrics=accuracy,
                    callback_fns=[ModelStatsPrinter, LearningRateSetter])
learn.fit(10)
Exemplo n.º 27
0
from fastai.vision import (ImageDataBunch, get_transforms, cnn_learner, models, imagenet_stats, accuracy)
from pathlib import Path 
from azureml.core.run import Run 
import numpy as np

# get the Azure ML run object
run = Run.get_context()

# get images
path = Path('data')
np.random.seed(2)  # reproducible validation split
data = ImageDataBunch.from_folder(path,
                                       train=".",
                                       valid_pct=0.2,
                                       ds_tfms=get_transforms(),
                                       size=224).normalize(imagenet_stats)

# build estimator based on ResNet 34
learn = cnn_learner(data, models.resnet34, metrics=accuracy)
learn.fit_one_cycle(2)

# do test time augmentation and get accuracy
acc = accuracy(*learn.TTA())


# log the accuracy to run
# np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the drop-in replacement.
run.log('Accuracy', float(acc))
print("Accuracy: ", float(acc))

# Save the model to the root. Note: this is not registering model
Exemplo n.º 28
0
    valid_names = get_names(waste_type, valid_ind)
    valid_source_files = [os.path.join(source_folder, name)
                          for name in valid_names]
    valid_dest = "data/valid/"+waste_type
    move_files(valid_source_files, valid_dest)

    test_names = get_names(waste_type, test_ind)
    test_source_files = [os.path.join(source_folder, name)
                         for name in test_names]

    move_files(test_source_files, "data/test")

print("Moved files")

# The data root is the "data" folder under the current working directory.
path = Path(os.getcwd()) / "data"

# Flips in both axes as augmentation; small batch size of 16.
tfms = get_transforms(do_flip=True, flip_vert=True)
data = ImageDataBunch.from_folder(path, test="test", ds_tfms=tfms, bs=16)

print("Gotten data")

learn = cnn_learner(data, models.resnet34, metrics=error_rate)

print("Generated CNN")

# Train for 20 one-cycle epochs, then persist the weights checkpoint.
learn.fit_one_cycle(20, max_lr=5.13e-03)
learn.save("model", return_path=True)

print("Trained")
Exemplo n.º 29
0
from fastai.vision import (
    ImageDataBunch,
    cnn_learner,
    get_image_files,
    get_transforms,
    imagenet_stats,
    models,
)
from pathlib import Path

# Config and data path
bs = 64
path = Path("./data")

# Load data
tfms = get_transforms(do_flip=False)
# Use the tfms computed above: the original called get_transforms() again
# here, silently discarding do_flip=False.
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=224,
                                  bs=bs).normalize(imagenet_stats)

# error_rate is not in the fastai.vision import list above; import it
# explicitly so the learner construction does not raise NameError.
from fastai.metrics import error_rate

# Train data in transfer learning using resnet34
learn = cnn_learner(data, models.resnet34, metrics=error_rate)

# Find the best learning rate
learn.lr_find()

# Retrain the whole model
learn.unfreeze()
learn.fit_one_cycle(4, max_lr=slice(1e-6, 1e-3))

# Export the trained model
learn.export('seefood_model.pkl')
Exemplo n.º 30
0
from fastai.vision import models, get_transforms, cnn_learner, ImageDataBunch, imagenet_stats, ClassificationInterpretation
from fastai.metrics import accuracy

PATH = 'DATA PATH'  # TODO: replace with the real dataset root before running

# Augmentations: vertical flips plus mild lighting/zoom/warp jitter.
tfms = get_transforms(flip_vert=True,
                      max_lighting=0.1,
                      max_zoom=1.05,
                      max_warp=0.1)
data = ImageDataBunch.from_folder(PATH,
                                  ds_tfms=tfms,
                                  bs=64,
                                  size=224,
                                  num_workers=4).normalize(imagenet_stats)

# Stage 1: train only the head of the pretrained resnet34.
model = cnn_learner(data, models.resnet34, metrics=accuracy, pretrained=True)
model.fit_one_cycle(5)

# Checkpoint, then unfreeze and inspect the learning-rate curve.
model.save('foodnotfoodv1')
model.unfreeze()
model.lr_find()
model.recorder.plot()

# Stage 2: fine-tune the whole network with discriminative learning rates.
model.fit_one_cycle(2, max_lr=slice(1e-5, 1e-4))

# Inspect the confusion matrix, then export for inference.
interp = ClassificationInterpretation.from_learner(model)
interp.plot_confusion_matrix()

model.export()