def main(epochs=5, learning_rate=0.01):
    # Avoid OMP error and allow multiple OpenMP runtimes
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Download and untar the MNIST data set
    path = untar_data(URLs.MNIST_SAMPLE)

    # Prepare, transform, and normalize the data
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    data.normalize(imagenet_stats)

    # Train and fit the Learner model
    learn = cnn_learner(data, models.resnet18, metrics=accuracy)

    # Start MLflow session
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, "model")

    # Fetch the default conda environment
    print("run_id: {}".format(run.info.run_id))
    env = mlflow.fastai.get_default_conda_env()
    print("conda environment: {}".format(env))

def main(epochs=5, learning_rate=0.01):
    # Avoid OMP error and allow multiple OpenMP runtimes
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Download and untar the MNIST data set
    path = untar_data(URLs.MNIST_SAMPLE)

    # Prepare, transform, and normalize the data
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    data.normalize(imagenet_stats)

    # Train and fit the Learner model
    learn = cnn_learner(data, models.resnet18, metrics=accuracy)

    # Start MLflow session
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, 'model')

        # Fetch the logged model artifacts
        artifacts = [
            f.path for f in MlflowClient().list_artifacts(run.info.run_id, 'model')
        ]
        print("artifacts: {}".format(artifacts))

def main(epochs=5, learning_rate=0.01):
    # Avoid OMP error and allow multiple OpenMP runtimes
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Download and untar the MNIST data set
    path = vis.untar_data(vis.URLs.MNIST_SAMPLE)

    # Prepare, transform, and normalize the data
    data = vis.ImageDataBunch.from_folder(path, ds_tfms=(vis.rand_pad(2, 28), []), bs=64)
    data.normalize(vis.imagenet_stats)

    # Train and fit the Learner model
    learn = vis.cnn_learner(data, vis.models.resnet18, metrics=vis.accuracy)

    # Enable auto logging
    mlflow.fastai.autolog()

    # Start MLflow session
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)

    # Fetch the auto logged parameters, metrics, and artifacts
    print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))

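# The snippet above calls print_auto_logged_info(), which is not defined in these
# snippets. Below is a minimal sketch of such a helper, assuming MlflowClient is
# imported from mlflow.tracking; it simply prints the params, metrics, tags, and
# artifacts that autologging recorded for the run.
def print_auto_logged_info(r):
    tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
    artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
    print("run_id: {}".format(r.info.run_id))
    print("artifacts: {}".format(artifacts))
    print("params: {}".format(r.data.params))
    print("metrics: {}".format(r.data.metrics))
    print("tags: {}".format(tags))
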
def main(epochs=5, learning_rate=0.01):
    # Avoid OMP error and allow multiple OpenMP runtimes
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    warnings.filterwarnings("ignore")
    print(mlflow.__version__)

    # Download and untar the MNIST data set
    path = untar_data(URLs.MNIST_SAMPLE)

    # Prepare, transform, and normalize the data
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    data.normalize(imagenet_stats)

    # Create the CNN Learner model
    learn = cnn_learner(data, models.resnet18, metrics=accuracy)

    # Start MLflow session
    with mlflow.start_run() as run:
        learn.fit(epochs, learning_rate)
        mlflow.fastai.log_model(learn, "model")

    # Load the model for scoring
    model_uri = "runs:/{}/model".format(run.info.run_id)
    loaded_model = mlflow.fastai.load_model(model_uri)
    predict_data = ...
    loaded_model.predict(predict_data)

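# predict_data is left unspecified in the snippet above. The helper below is a
# hypothetical sketch (not part of the original) of one way to score the logged model
# with fastai v1, where Learner.predict() takes a single image item; it rebuilds a
# small data bunch just to obtain a sample image.
def score_example(run_id):
    path = untar_data(URLs.MNIST_SAMPLE)
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64)
    loaded_model = mlflow.fastai.load_model("runs:/{}/model".format(run_id))
    # Returns (predicted class, class index, class probabilities)
    pred_class, pred_idx, probs = loaded_model.predict(data.valid_ds[0][0])
    print(pred_class, probs)
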
def main(epochs): Task.init(project_name="examples", task_name="fastai v1") path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64, num_workers=0) data.normalize(imagenet_stats) learn = cnn_learner(data, models.resnet18, metrics=accuracy) accuracy(*learn.get_preds()) learn.fit_one_cycle(epochs, 0.01)
def prepare_cifar(config):
    dataset = build_dataset('cifar_10', val_size=config['val_size'])
    x_set, y_set = dataset.dataset('train')
    x_val, y_val = dataset.dataset('val')

    shape = (-1, 3, 32, 32)
    x_set = ((x_set - 128) / 128).reshape(shape)
    x_val = ((x_val - 128) / 128).reshape(shape)

    train_tfms = [*rand_pad(4, 32), flip_lr(p=0.5)]  # Transformation to augment images

    return x_set, y_set, x_val, y_val, train_tfms

def load_data():
    dataset = build_dataset('cifar_10', val_size=val_size)
    x_train, y_train = dataset.dataset('train')
    x_val, y_val = dataset.dataset('val')

    shape = (-1, 3, 32, 32)
    x_train = ((x_train - 128) / 128).reshape(shape)
    x_val = ((x_val - 128) / 128).reshape(shape)

    train_tfms = [*rand_pad(4, 32), flip_lr(p=0.5)]
    train_ds = ImageArrayDS(x_train, y_train, train_tfms)
    val_ds = ImageArrayDS(x_val, y_val)
    data = ImageDataBunch.create(train_ds, val_ds, bs=256)

    return data, x_train, y_train, x_val, y_val

def prepare_svhn(config):
    dataset = build_dataset('svhn', val_size=config['val_size'])
    x_set, y_set = dataset.dataset('train')
    x_val, y_val = dataset.dataset('val')

    # SVHN labels the digit "0" as class 10; remap it to 0
    y_set[y_set == 10] = 0
    y_val[y_val == 10] = 0

    shape = (-1, 32, 32, 3)
    x_set = ((x_set - 128) / 128).reshape(shape)
    x_val = ((x_val - 128) / 128).reshape(shape)

    # Move the channel axis to position 1 (NCHW layout)
    x_set = np.rollaxis(x_set, 3, 1)
    x_val = np.rollaxis(x_val, 3, 1)

    train_tfms = [*rand_pad(4, 32), flip_lr(p=0.5)]  # Transformation to augment images

    return x_set, y_set, x_val, y_val, train_tfms

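# A hedged usage sketch for prepare_cifar() / prepare_svhn() above, mirroring how
# load_data() wraps its arrays: the returned sets are fed into ImageArrayDS and
# combined into an ImageDataBunch. make_data_bunch and the example config value are
# assumptions for illustration, not part of the original code.
def make_data_bunch(config):
    x_set, y_set, x_val, y_val, train_tfms = prepare_cifar(config)
    train_ds = ImageArrayDS(x_set, y_set, train_tfms)
    val_ds = ImageArrayDS(x_val, y_val)
    return ImageDataBunch.create(train_ds, val_ds, bs=256)

# Example: data = make_data_bunch({'val_size': 10000})
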
def main(epochs): Task.init(project_name="examples", task_name="fastai with tensorboard callback") path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []), bs=64, num_workers=0) data.normalize(imagenet_stats) learn = cnn_learner(data, models.resnet18, metrics=accuracy) tboard_path = Path("data/tensorboard/project1") learn.callback_fns.append( partial(LearnerTensorboardWriter, base_dir=tboard_path, name="run0")) accuracy(*learn.get_preds()) learn.fit_one_cycle(epochs, 0.01)
def main():
    # Parse command-line arguments
    args = parse_args()

    # Download and untar the MNIST data set
    path = vis.untar_data(vis.URLs.MNIST_TINY)

    # Prepare, transform, and normalize the data
    data = vis.ImageDataBunch.from_folder(path, ds_tfms=(vis.rand_pad(2, 28), []), bs=64)
    data.normalize(vis.imagenet_stats)

    # Train and fit the Learner model
    learn = vis.cnn_learner(data, vis.models.resnet18, metrics=vis.accuracy)

    # Enable auto logging
    mlflow.fastai.autolog()

    # Train and fit with default or supplied command line arguments
    learn.fit(args.epochs, args.lr)

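# parse_args() is referenced in the two main() functions above and below but is not
# defined in these snippets. A minimal argparse-based sketch follows; the flag names
# and defaults (--epochs, --lr) are assumptions chosen to match how args.epochs and
# args.lr are used.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Train a fastai model with MLflow autologging")
    parser.add_argument("--epochs", type=int, default=5, help="number of training epochs")
    parser.add_argument("--lr", type=float, default=0.01, help="learning rate")
    return parser.parse_args()
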
def main():
    # Parse command-line arguments
    args = parse_args()

    # Set up MLflow tracking
    mlflow_tracking_uri = os.environ.get("MLFLOW_TRACKING_URI")
    mlflow.set_tracking_uri(mlflow_tracking_uri)
    expr_name = str(uuid.uuid1())
    s3_bucket = os.environ.get("AWS_S3_BUCKET")  # replace this value
    mlflow.create_experiment(expr_name, s3_bucket)
    mlflow.set_experiment(expr_name)

    # Experiment variables
    print("MLFlow Tracking Server URI: " + mlflow.get_tracking_uri())
    print("Artifact URI: " + mlflow.get_artifact_uri())  # should print out an S3 bucket path

    # Download and untar the MNIST data set
    path = vis.untar_data(vis.URLs.MNIST_TINY)

    # Prepare, transform, and normalize the data
    data = vis.ImageDataBunch.from_folder(path, ds_tfms=(vis.rand_pad(2, 28), []), bs=64)
    data.normalize(vis.imagenet_stats)

    # Train and fit the Learner model
    learn = vis.cnn_learner(data, vis.models.resnet18, metrics=vis.accuracy)

    # Enable auto logging
    mlflow.fastai.autolog()

    # Start MLflow session
    # with mlflow.start_run():

    # Train and fit with default or supplied command line arguments
    learn.fit(args.epochs, args.lr)

pool_size = 200

# Load data
dataset = build_dataset('cifar_10', val_size=val_size)
x_set, y_set = dataset.dataset('train')
x_val, y_val = dataset.dataset('val')

shape = (-1, 3, 32, 32)
x_set = ((x_set - 128) / 128).reshape(shape)
x_val = ((x_val - 128) / 128).reshape(shape)

# x_pool, x_train, y_pool, y_train = train_test_split(x_set, y_set, test_size=start_size, stratify=y_set)
x_train, y_train = x_set, y_set

train_tfms = [*rand_pad(4, 32), flip_lr(p=0.5)]
train_ds = ImageArrayDS(x_train, y_train, train_tfms)
val_ds = ImageArrayDS(x_val, y_val)
data = ImageDataBunch.create(train_ds, val_ds, bs=256)

loss_func = torch.nn.CrossEntropyLoss()

np.set_printoptions(threshold=sys.maxsize, suppress=True)

model = AnotherConv()
# model = resnet_masked(pretrained=True)
# model = resnet_linear(pretrained=True, dropout_rate=0.5, freeze=False)

# learner = Learner(data, model, metrics=accuracy, loss_func=loss_func)
# # model_path = "experiments/data/model.pt"

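# A hedged continuation of the script above, following its own commented-out Learner
# line: wrap the data and model in a fastai Learner and train. The epoch count and
# learning rate below are assumed values, not taken from the original.
learner = Learner(data, model, metrics=accuracy, loss_func=loss_func)
learner.fit_one_cycle(5, 1e-3)
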
# -*- coding: utf-8 -*-
'''
Handwritten digit recognition training
'''
import fastai
from fastai import vision

'''
Download the handwritten digit data set: data set URL, file name, save path.
(The download does not work here, so the data set was downloaded and extracted manually
and the local path is passed in directly.)
'''
# path = fastai.untar_data(fastai.URLs.MNIST_SAMPLE)
path = "/Users/rensike/Resources/datasets/fastai/mnist_sample"

'''
Build the data set: data path, data preprocessing transforms, batch size.
'''
data = vision.ImageDataBunch.from_folder(path, ds_tfms=(vision.rand_pad(2, 28), []), bs=64)

'''
Normalize the data.
'''
data.normalize(vision.imagenet_stats)

'''
Show the first item in the data set.
'''
img, label = data.train_ds[0]
print(label)
img.show()

'''
Create a learner: data set, model type, evaluation metric.
'''
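# The snippet above ends at the comment describing learner creation. A hedged
# continuation is sketched below, consistent with the other examples in this
# collection; it is an assumption, not code from the original file.
learn = vision.cnn_learner(data, vision.models.resnet18, metrics=vision.accuracy)
learn.fit_one_cycle(1, 0.01)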