def cs_learner(data: fv.DataBunch, arch: Callable, instructor, td_c=1, bu_c=0, embedding=fv.embedding, lateral=laterals.ConvAddLateral, td_out_lateral=None, ppm=False, pretrained: bool = True, **learn_kwargs: Any) -> fv.Learner:
    """Build a Counter Stream learner from `data` and `arch`.

    The backbone comes from `arch` (optionally with pretrained weights), is
    wrapped in a `CounterStream` sized to the training images, and split so
    that `freeze()` only trains the top-down head.

    NOTE(review): `ppm` is accepted but never used in this body — confirm
    whether it should be forwarded to `CounterStream`.
    """
    backbone = fv.create_body(arch, pretrained)
    # Spatial size (h, w) taken from the first training batch.
    img_size = next(iter(data.train_dl))[0].shape[-2:]
    cs_net = CounterStream(
        backbone,
        instructor,
        td_c=td_c,
        bu_c=bu_c,
        img_size=img_size,
        embedding=embedding,
        lateral=lateral,
        td_out_lateral=td_out_lateral,
    )
    learn = fv.Learner(data, fv.to_device(cs_net, data.device), **learn_kwargs)
    # Split at the first top-down module so freeze() leaves the TD path trainable.
    learn.split([learn.model.td[0]])
    if pretrained:
        learn.freeze()
    return learn
def double_unet_learner(data: fv.DataBunch, arch: Callable, iterations=2, td_c=16, **learn_kwargs: Any) -> fv.Learner:
    """Build a DoubleUnet learner from `data` and `arch`.

    The backbone is always created without pretrained weights, so the whole
    model is Kaiming-initialized after construction.

    Args:
        data: DataBunch providing the training/validation loaders.
        arch: backbone architecture callable (e.g. a torchvision resnet).
        iterations: number of iterations the DoubleUnet runs.
        td_c: top-down channel count.
        **learn_kwargs: forwarded to `fv.Learner`.
    """
    # Fixed docstring: this builds a DoubleUnet, not a Counter Stream learner
    # (the old docstring was copy-pasted from `cs_learner`).
    body = fv.create_body(arch, pretrained=False)
    # Spatial size (h, w) taken from the first training batch.
    size = next(iter(data.train_dl))[0].shape[-2:]
    model = DoubleUnet(body, iterations=iterations, td_c=td_c, img_size=size)
    model = fv.to_device(model, data.device)
    learn = fv.Learner(data, model, **learn_kwargs)
    # No pretrained weights were loaded, so initialize everything explicitly.
    fv.apply_init(learn.model, nn.init.kaiming_normal_)
    return learn
def create_learner(data, model, run_parallel=True, mixed_precision=True, model_dir=Path(os.getcwd()) / '..' / 'models', **learn_kwargs) -> fvision.Learner:
    """Wrap `model` in a fastai Learner, optionally data-parallel and fp16.

    NOTE(review): the `model_dir` default is evaluated once, at import time,
    from the current working directory — confirm that is intended.
    """
    net = nn.DataParallel(model) if run_parallel else model
    learn = fvision.Learner(data, net, **learn_kwargs)
    learn.model_dir = model_dir
    if mixed_precision:
        learn.to_fp16()
    else:
        learn.to_fp32()
    return learn
def objective(trial):
    """Optuna objective: sample augmentation + CNN width/depth, return final metric."""
    # Whether to apply data augmentation is itself a hyper-parameter.
    apply_tfms = trial.suggest_categorical("apply_tfms", [True, False])
    tfms = None
    if apply_tfms:
        # MNIST is a hand-written digit dataset, so horizontal and vertical
        # flipping are disabled. Both flips would matter for CIFAR or ImageNet.
        tfms = vision.get_transforms(
            do_flip=False,
            flip_vert=False,
            max_rotate=trial.suggest_int("max_rotate", -45, 45),
            max_zoom=trial.suggest_float("max_zoom", 1, 2),
            p_affine=trial.suggest_discrete_uniform("p_affine", 0.1, 1.0, 0.1),
        )

    data = vision.ImageDataBunch.from_folder(path, bs=BATCHSIZE, ds_tfms=tfms)

    # Channel progression: 3 input channels, a sampled width per hidden
    # layer, and 2 output classes.
    n_layers = trial.suggest_int("n_layers", 2, 5)
    channels = [3]
    for i in range(n_layers):
        channels.append(trial.suggest_int("n_channels_{}".format(i), 3, 32))
    channels.append(2)

    learn = vision.Learner(
        data,
        vision.simple_cnn(channels),
        silent=True,
        metrics=[vision.accuracy],
        callback_fns=[
            partial(FastAIPruningCallback, trial=trial, monitor="valid_loss")
        ],
    )
    learn.fit(EPOCHS)
    # Last element of validate() is the monitored metric (accuracy).
    return learn.validate()[-1].item()
def part_learner(data, arch, obj_tree: ObjectTree, pretrained=False, sample_one=False, emb_op=torch.mul, **learn_kwargs):
    """Create a part-segmentation learner: CsNet + tree loss + Broden metrics."""
    net = CsNet(
        fv.create_body(arch, pretrained),
        obj_tree,
        sample_one=sample_one,
        emb_op=emb_op,
    )
    net = fv.to_device(net, device=data.device)
    learn = fv.Learner(data, net, loss_func=Loss(obj_tree), **learn_kwargs)

    broden = BrodenMetrics(
        learn,
        obj_tree=obj_tree,
        preds_func=obj_tree.cs_preds_func,
        restrict=False,
    )
    learn.callbacks.extend([broden, utils.AddTargetClbk()])

    # Split at the first top-down module so freeze() leaves the TD path trainable.
    learn.split([learn.model.td[0]])
    if pretrained:
        learn.freeze()
    return learn
# Notebook-style evaluation script: load the description table, rebuild the
# DataBunch and model, restore saved weights, and validate.
data_df = pd.read_feather(path + "desc.feather")
# NOTE(review): "label_*" is treated as a regex by str.contains — it means
# "label" followed by zero-or-more underscores (i.e. any column containing
# "label"), not a filename-style glob. Confirm the column naming matches.
label_columns = list(data_df.columns[data_df.columns.str.contains("label_*")])
# 1% random validation split; label columns treated as float targets.
data = (faiv.ImageList.from_df(data_df, path).split_by_rand_pct(0.01).label_from_df(
    cols=label_columns, label_cls=faiv.FloatList).databunch(bs=10))
# Shape of one input batch — presumably (bs, c, h, w), so shape[2:] is the
# spatial size passed to the model. TODO confirm.
shape = list(data.one_batch()[0].size())
model = models.SimpleClassifier(
    shape[2:],
    50,
    2,
    # 2,
    kernel=[3, 3],
    channel_progression=lambda x: x + 1,
    batch_normalization=True,
    conv_init=nn.init.zeros_,
    cut_channels=True,
)
learner = faiv.Learner(data, model)
# Restore previously saved weights named "model" from the learner's model dir.
learner.load("model")
#%%
learner.validate()
#%%
learner.recorder
#%%