def get_estimator(batch_size=4, epochs=25, model_dir=None):
    """Build a lung-segmentation estimator on the Montgomery chest X-ray dataset.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.

    Returns:
        A configured `fe.Estimator` ready for training.
    """
    # NOTE(review): the original default `model_dir=tempfile.mkdtemp()` was evaluated
    # once at import time, so every call shared the same directory. Create it lazily.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    csv_path, path = montgomery.load_data()
    # Preprocess once into TFRecords; 20% of the train csv is held out for validation.
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(grey_scale=True, inputs="image", parent_path=path, outputs="image"),
            ImageReader(grey_scale=True, inputs="mask_left", parent_path=path, outputs="mask_left"),
            ImageReader(grey_scale=True, inputs="mask_right", parent_path=path, outputs="mask_right"),
            # Left and right lung masks are merged into a single mask.
            CombineLeftRightMask(inputs=("mask_left", "mask_right")),
            Resize(target_size=(512, 512)),
            Reshape(shape=(512, 512, 1), outputs="mask"),
            Resize(inputs="image", target_size=(512, 512)),
            Reshape(shape=(512, 512, 1), outputs="image"),
        ],
        write_feature=["image", "mask"])
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            # Augmentation is applied to image and mask jointly, train mode only.
            Augmentation2D(inputs=["image", "mask"],
                           outputs=["image", "mask"],
                           mode="train",
                           rotation_range=10,
                           flip_left_right=True),
            Minmax(inputs="image", outputs="image"),
            Minmax(inputs="mask", outputs="mask")
        ])
    model = FEModel(model_def=lambda: UNet(input_size=(512, 512, 1)),
                    model_name="lungsegmentation",
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="pred_segment"),
        BinaryCrossentropy(y_true="mask", y_pred="pred_segment")
    ])
    traces = [
        Dice(true_key="mask", pred_key="pred_segment"),
        ModelSaver(model_name="lungsegmentation", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=epochs,
                             log_steps=20,
                             traces=traces)
    return estimator
def get_estimator(batch_size=32, epochs=25, model_dir=None):
    """Train a UNet to segment bird silhouettes on the CUB200 dataset.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.

    Returns:
        A configured `fe.Estimator`.
    """
    # NOTE(review): `model_dir=tempfile.mkdtemp()` as a signature default is evaluated
    # once at import time and then shared by every call; create the directory lazily.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(128, 128), keep_ratio=True, outputs='image'),
            # Annotations are stored in .mat files; SelectDictKey extracts the mask.
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((128, 128), keep_ratio=True),
            Reshape(shape=(128, 128, 1), outputs="annotation")
        ])
    # data pipeline
    pipeline = fe.Pipeline(batch_size=batch_size, data=writer, ops=Minmax(inputs='image', outputs='image'))
    # Network
    model = FEModel(model_def=UNet, model_name="unet_cub", optimizer=tf.optimizers.Adam())
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=model, outputs='mask_pred'),
        BinaryCrossentropy(y_true='annotation', y_pred='mask_pred')
    ])
    # estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        ModelSaver(model_name="unet_cub", save_dir=model_dir, save_best=True)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, traces=traces, epochs=epochs, log_steps=50)
    return estimator
def get_estimator(style_img_path=None,
                  data_path=None,
                  style_weight=5.0,
                  content_weight=1.0,
                  tv_weight=1e-4,
                  steps_per_epoch=None,
                  validation_steps=None,
                  model_dir=None):
    """Train a fast neural style-transfer network against a single style image.

    Args:
        style_img_path: path to the style reference image; a Kandinsky painting
            is downloaded when None.
        data_path: optional root of the content dataset.
        style_weight: weight of the style loss term.
        content_weight: weight of the content loss term.
        tv_weight: weight of the total-variation regularizer.
        steps_per_epoch: training steps per epoch (None = full pass).
        validation_steps: validation steps per epoch.
        model_dir: directory where models are saved; a fresh temporary directory
            is created when None.
    """
    # NOTE(review): `tempfile.mkdtemp()` as a default was evaluated at import time
    # and shared across calls; create the directory per call instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    train_csv, _, path = load_data(data_path, load_object=False)
    if style_img_path is None:
        style_img_path = tf.keras.utils.get_file(
            'kandinsky.jpg',
            'https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_'
            '-_Composition_7.jpg')
    style_img = cv2.imread(style_img_path)
    assert (style_img is not None), "Invalid style reference image"
    tfr_save_dir = os.path.join(path, 'tfrecords')
    # Normalize the style image from [0, 255] to [-1, 1].
    # NOTE(review): the original `(x / 127.5) / 127.5` divided twice, squashing the
    # image into roughly [0, 0.016] instead of the intended [-1, 1] range.
    style_img = (style_img.astype(np.float32) - 127.5) / 127.5
    style_img_t = tf.convert_to_tensor(np.expand_dims(style_img, axis=0))
    writer = RecordWriter(train_data=train_csv,
                          save_dir=tfr_save_dir,
                          ops=[
                              ImageReader(inputs="image", parent_path=path, outputs="image"),
                              Resize(inputs="image", target_size=(256, 256), outputs="image")
                          ])
    pipeline = fe.Pipeline(batch_size=4, data=writer, ops=[Rescale(inputs="image", outputs="image")])
    model = fe.build(model_def=styleTransferNet,
                     model_name="style_transfer_net",
                     loss_name="loss",
                     optimizer=tf.keras.optimizers.Adam(1e-3))
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs="image_out"),
        # The style tensor is constant, so it is injected via a lambda input.
        ExtractVGGFeatures(inputs=lambda: style_img_t, outputs="y_style"),
        ExtractVGGFeatures(inputs="image", outputs="y_content"),
        ExtractVGGFeatures(inputs="image_out", outputs="y_pred"),
        StyleContentLoss(style_weight=style_weight,
                         content_weight=content_weight,
                         tv_weight=tv_weight,
                         inputs=('y_pred', 'y_style', 'y_content', 'image_out'),
                         outputs='loss')
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=2,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps,
                             traces=ModelSaver(model_name="style_transfer_net", save_dir=model_dir))
    return estimator
def get_estimator(batch_size=4,
                  epochs=1000,
                  steps_per_epoch=1000,
                  validation_steps=None,
                  model_dir=None,
                  imagenet_path=None):
    """Train an SRResNet super-resolution generator on ImageNet patches.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        steps_per_epoch: training steps per epoch.
        validation_steps: unused here; kept for signature parity with callers.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs.
    """
    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    # NOTE(review): `model_dir` used to be overwritten with the dataset path right
    # before the ModelSaver was built, making the parameter dead; it is now honored.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    # Ensure ImageNet dataset is downloaded. Pass the folder containing train and val
    # subdirectories; currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet=imagenet_path)
    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "sr_tfrecords"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[ImageReader(inputs="lowres", outputs="lowres"),
             ImageReader(inputs="highres", outputs="highres")],
        compression="GZIP",
        write_feature=['lowres', 'highres'])
    pipeline = fe.Pipeline(
        max_shuffle_buffer_mb=3000,
        batch_size=batch_size,
        data=writer,
        ops=[
            LowresRescale(inputs='lowres', outputs='lowres'),
            Rescale(inputs='highres', outputs='highres'),
        ])
    # prepare model
    model = fe.build(model_def=lambda: get_generator(input_shape=(24, 24, 3)),
                     model_name="srresnet_gen",
                     optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                     loss_name="mse_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model, outputs='superres'),
        PixelMeanSquaredError(inputs=('superres', 'highres'), outputs="mse_loss")
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             steps_per_epoch=steps_per_epoch,
                             epochs=epochs,
                             traces=[
                                 ModelSaver(model_name="srresnet_gen", save_dir=model_dir, save_best=True),
                             ])
    return estimator
def get_estimator(batch_size=8, epochs=25, steps_per_epoch=None, validation_steps=None, model_dir=None):
    """Multi-task training (classification + segmentation) on CUB200 with ResUnet50.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        steps_per_epoch: training steps per epoch (None = full pass).
        validation_steps: validation steps per epoch.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.
    """
    # NOTE(review): `tempfile.mkdtemp()` as a signature default is evaluated once at
    # import time and shared across calls; create the directory lazily instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    # load CUB200 dataset.
    csv_path, path = cub200.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "tfrecords"),
        train_data=csv_path,
        validation_data=0.2,
        ops=[
            ImageReader(inputs='image', parent_path=path),
            Resize(target_size=(512, 512), keep_ratio=True, outputs='image'),
            MatReader(inputs='annotation', parent_path=path),
            SelectDictKey(),
            Resize((512, 512), keep_ratio=True),
            Reshape(shape=(512, 512, 1), outputs="annotation")
        ])
    #step 1, pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            # Image and mask are augmented jointly so they stay aligned.
            Augmentation2D(inputs=("image", "annotation"),
                           outputs=("image", "annotation"),
                           mode="train",
                           rotation_range=15.0,
                           zoom_range=[0.8, 1.2],
                           flip_left_right=True),
            Rescale(inputs='image', outputs='image')
        ])
    #step 2, network
    # Both models optimize the same uncertainty-weighted total loss, so they share
    # a single optimizer instance.
    opt = tf.optimizers.Adam(learning_rate=0.0001)
    resunet50 = fe.build(model_def=ResUnet50, model_name="resunet50", optimizer=opt, loss_name="total_loss")
    uncertainty = fe.build(model_def=UncertaintyLoss, model_name="uncertainty", optimizer=opt, loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='image', model=resunet50, outputs=["label_pred", "mask_pred"]),
        SparseCategoricalCrossentropy(inputs=["label", "label_pred"], outputs="cls_loss"),
        BinaryCrossentropy(inputs=["annotation", "mask_pred"], outputs="seg_loss"),
        # The uncertainty model learns how to weight the two task losses.
        ModelOp(inputs=("cls_loss", "seg_loss"), model=uncertainty, outputs="total_loss"),
        Loss(inputs="total_loss", outputs="total_loss")
    ])
    #step 3, estimator
    traces = [
        Dice(true_key="annotation", pred_key='mask_pred'),
        Accuracy(true_key="label", pred_key="label_pred"),
        ModelSaver(model_name="resunet50", save_dir=model_dir, save_best=True),
        LRController(model_name="resunet50", lr_schedule=CyclicLRSchedule())
    ]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             validation_steps=validation_steps)
    return estimator
def get_estimator(batch_size=128, epochs=15, model_dir=None):
    """Train a RetinaNet digit detector on the SVHN dataset.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.
    """
    # NOTE(review): `tempfile.mkdtemp()` as a signature default is evaluated once at
    # import time and shared across calls; create the directory lazily instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    # prepare data in disk
    train_csv, val_csv, path = svhn.load_data()
    writer = RecordWriter(
        save_dir=os.path.join(path, "FEdata"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            # Bounding boxes arrive as strings in the csv and must be parsed first.
            String2List(inputs=["label", "x1", "y1", "x2", "y2"],
                        outputs=["label", "x1", "y1", "x2", "y2"]),
            RelativeCoordinate(inputs=("image", "x1", "y1", "x2", "y2"),
                               outputs=("x1", "y1", "x2", "y2")),
            Resize(inputs="image", target_size=(64, 128), outputs="image"),
            GenerateTarget(inputs=("label", "x1", "y1", "x2", "y2"),
                           outputs=("target_cls", "target_loc"))
        ])
    # prepare pipeline
    pipeline = Pipeline(batch_size=batch_size,
                        data=writer,
                        ops=Minmax(inputs="image", outputs="image"),
                        read_feature=["image", "target_cls", "target_loc"])
    # prepare model
    model = FEModel(
        model_def=lambda: RetinaNet(input_shape=(64, 128, 3), num_classes=10),
        model_name="retinanet",
        optimizer=tf.optimizers.Adam(learning_rate=0.0001))
    network = Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["pred_cls", "pred_loc"]),
        # Box decoding is only needed for evaluation, not for the training loss.
        PredictBox(outputs=("cls_selected", "loc_selected", "valid_outputs"), mode="eval"),
        RetinaLoss(inputs=("target_cls", "target_loc", "pred_cls", "pred_loc"), outputs="loss"),
    ])
    # prepare estimator
    estimator = Estimator(network=network,
                          pipeline=pipeline,
                          epochs=epochs,
                          log_steps=20,
                          traces=ModelSaver(model_name="retinanet", save_dir=model_dir, save_best=True))
    return estimator
def get_estimator(batch_size=4,
                  epochs=200,
                  steps_per_epoch=1000,
                  validation_steps=None,
                  model_dir=None,
                  imagenet_path=None,
                  srresnet_model_path=None):
    """Train SRGAN: adversarially fine-tune a pretrained SRResNet generator.

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
        steps_per_epoch: training steps per epoch.
        validation_steps: unused here; kept for signature parity with callers.
        model_dir: directory where checkpoints are saved; a fresh temporary
            directory is created when None.
        imagenet_path: folder path of ImageNet dataset, containing train and val subdirs.
        srresnet_model_path: srresnet model weights; the srgan generator is
            initialized with these weights.
    """
    assert imagenet_path is not None, 'Pass valid folder path of Imagenet dataset'
    assert srresnet_model_path is not None, 'srresnet model is needed to initialize srgan generator model'
    # NOTE(review): `model_dir` used to be silently overwritten with the dataset path,
    # making the parameter dead; it is now honored (temp dir when left as None).
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    # Ensure ImageNet dataset is downloaded. Pass the folder containing train and val
    # subdirectories; currently the script doesn't download the ImageNet data.
    train_csv, val_csv, path = srgan.load_data(path_imgnet=imagenet_path)
    writer = fe.RecordWriter(save_dir=os.path.join(path, "sr_tfrecords"),
                             train_data=train_csv,
                             validation_data=val_csv,
                             ops=[
                                 ImageReader(inputs="lowres", outputs="lowres"),
                                 ImageReader(inputs="highres", outputs="highres")
                             ],
                             compression="GZIP",
                             write_feature=['lowres', 'highres'])
    pipeline = fe.Pipeline(max_shuffle_buffer_mb=3000,
                           batch_size=batch_size,
                           data=writer,
                           ops=[
                               LowresRescale(inputs='lowres', outputs='lowres'),
                               Rescale(inputs='highres', outputs='highres'),
                           ])
    # prepare model: the generator is loaded from the pretrained SRResNet weights.
    model_gen = fe.build(model_def=srresnet_model_path,
                         model_name="srgan_gen",
                         optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                         loss_name="mse_adv_loss",
                         custom_objects={'SubPixelConv2D': SubPixelConv2D})
    model_desc = fe.build(model_def=lambda: get_discriminator(input_shape=(96, 96, 3)),
                          model_name="srgan_desc",
                          optimizer=tf.optimizers.Adam(learning_rate=0.0001),
                          loss_name="desc_loss")
    network = fe.Network(ops=[
        ModelOp(inputs='lowres', model=model_gen, outputs='superres'),
        ModelOp(inputs='superres', model=model_desc, outputs='pred_fake'),
        ModelOp(inputs='highres', model=model_desc, outputs='pred_true'),
        DLoss(inputs=("pred_true", "pred_fake"), outputs=("desc_loss", "real_loss", "fake_loss")),
        GLoss(inputs=('superres', 'highres', 'pred_fake'),
              outputs=("mse_adv_loss", "mse_loss", "adv_loss"),
              vgg_content=True)
    ])
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        traces=[
            ModelSaver(model_name="srgan_gen", save_dir=model_dir, save_best=True),
            ModelSaver(model_name="srgan_desc", save_dir=model_dir, save_best=True),
            LRController(model_name="srgan_gen", lr_schedule=MyLRSchedule(schedule_mode='step')),
            LRController(model_name="srgan_desc", lr_schedule=MyLRSchedule(schedule_mode='step'))
        ])
    return estimator
def get_estimator(data_dir=None, save_dir=None):
    """Progressive-growing GAN on chest X-rays, growing from 4x4 up to 1024x1024.

    Args:
        data_dir: optional root directory of the dataset.
        save_dir: output directory for generated images and final weights;
            defaults to `~/fastestimator_results/NIH_CXR_PGGAN`.
    """
    train_csv, data_path = load_data(data_dir)
    imreader = ImageReader(inputs="x", parent_path=data_path, grey_scale=True)
    # Two record sets: one for the low-resolution stages, one for the 1024px stages.
    writer_128 = RecordWriter(save_dir=os.path.join(data_path, "tfrecord_128"),
                              train_data=train_csv,
                              ops=[imreader, ResizeRecord(target_size=(128, 128), outputs="x")])
    writer_1024 = RecordWriter(save_dir=os.path.join(data_path, "tfrecord_1024"),
                               train_data=train_csv,
                               ops=[imreader, ResizeRecord(target_size=(1024, 1024), outputs="x")])
    # Batch size shrinks as the working resolution doubles at each stage.
    batchsize_scheduler_128 = Scheduler({0: 128, 5: 64, 15: 32, 25: 16, 35: 8, 45: 4})
    batchsize_scheduler_1024 = Scheduler({55: 4, 65: 2, 75: 1})
    # Resize ops per stage; `None` means the images pass through at native size.
    resize_scheduler_128 = Scheduler({
        0: Resize(inputs="x", size=(4, 4), outputs="x"),
        5: Resize(inputs="x", size=(8, 8), outputs="x"),
        15: Resize(inputs="x", size=(16, 16), outputs="x"),
        25: Resize(inputs="x", size=(32, 32), outputs="x"),
        35: Resize(inputs="x", size=(64, 64), outputs="x"),
        45: None
    })
    resize_scheduler_1024 = Scheduler({
        55: Resize(inputs="x", size=(256, 256), outputs="x"),
        65: Resize(inputs="x", size=(512, 512), outputs="x"),
        75: None
    })
    lowres_op = CreateLowRes(inputs="x", outputs="x_lowres")
    rescale_x = Rescale(inputs="x", outputs="x")
    rescale_lowres = Rescale(inputs="x_lowres", outputs="x_lowres")
    pipeline_128 = fe.Pipeline(batch_size=batchsize_scheduler_128,
                               data=writer_128,
                               ops=[resize_scheduler_128, lowres_op, rescale_x, rescale_lowres])
    pipeline_1024 = fe.Pipeline(batch_size=batchsize_scheduler_1024,
                                data=writer_1024,
                                ops=[resize_scheduler_1024, lowres_op, rescale_x, rescale_lowres])
    pipeline_scheduler = Scheduler({0: pipeline_128, 55: pipeline_1024})
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.0, beta_2=0.99, epsilon=1e-8)
    # Blending factor for fading in new resolution blocks; ramped by AlphaController.
    fade_in_alpha = tf.Variable(initial_value=1.0, dtype='float32', trainable=False)
    d2, d3, d4, d5, d6, d7, d8, d9, d10 = fe.build(
        model_def=lambda: build_D(fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=["d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10"],
        optimizer=[optimizer] * 9,
        loss_name=["dloss"] * 9)
    g2, g3, g4, g5, g6, g7, g8, g9, g10, G = fe.build(
        model_def=lambda: build_G(fade_in_alpha=fade_in_alpha, target_resolution=10, num_channels=1),
        model_name=["g2", "g3", "g4", "g5", "g6", "g7", "g8", "g9", "g10", "G"],
        optimizer=[optimizer] * 10,
        loss_name=["gloss"] * 10)
    # At each stage epoch the network switches to the matching G/D pair.
    stage_epochs = (0, 5, 15, 25, 35, 45, 55, 65, 75)
    stage_gens = (g2, g3, g4, g5, g6, g7, g8, g9, g10)
    stage_discs = (d2, d3, d4, d5, d6, d7, d8, d9, d10)
    g_scheduler = Scheduler(
        {epoch: ModelOp(model=gen, outputs="x_fake") for epoch, gen in zip(stage_epochs, stage_gens)})
    fake_score_scheduler = Scheduler({
        epoch: ModelOp(inputs="x_fake", model=disc, outputs="fake_score")
        for epoch, disc in zip(stage_epochs, stage_discs)
    })
    real_score_scheduler = Scheduler(
        {epoch: ModelOp(model=disc, outputs="real_score") for epoch, disc in zip(stage_epochs, stage_discs)})
    interp_score_scheduler = Scheduler({
        epoch: ModelOp(inputs="x_interp", model=disc, outputs="interp_score", track_input=True)
        for epoch, disc in zip(stage_epochs, stage_discs)
    })
    network = fe.Network(ops=[
        RandomInput(inputs=lambda: 512),
        g_scheduler,
        fake_score_scheduler,
        ImageBlender(inputs=("x", "x_lowres"), alpha=fade_in_alpha),
        real_score_scheduler,
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        interp_score_scheduler,
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss")
    ])
    if save_dir is None:
        save_dir = os.path.join(str(Path.home()), 'fastestimator_results', 'NIH_CXR_PGGAN')
    os.makedirs(save_dir, exist_ok=True)
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline_scheduler,
        epochs=85,
        traces=[
            AlphaController(alpha=fade_in_alpha,
                            fade_start=[5, 15, 25, 35, 45, 55, 65, 75, 85],
                            duration=[5, 5, 5, 5, 5, 5, 5, 5, 5]),
            ResetOptimizer(reset_epochs=[5, 15, 25, 35, 45, 55, 65, 75], optimizer=optimizer),
            ImageSaving(epoch_model={4: g2, 14: g3, 24: g4, 34: g5, 44: g6, 54: g7, 64: g8, 74: g9, 84: G},
                        save_dir=save_dir,
                        num_channels=1),
            ModelSaving(epoch_model={84: G}, save_dir=save_dir)
        ])
    return estimator
def get_estimator(save_dir="/data/Xiaomeng/images"):
    """Progressive-growing GAN on 128x128 images, growing from 4x4 to 64x64.

    Args:
        save_dir: directory where intermediate generated images are saved.
            This path was previously hard-coded inside the function; it is now
            a parameter with the old value as default for compatibility.
    """
    train_csv, data_path = load_data()
    writer = RecordWriter(save_dir=os.path.join(data_path, "tfrecord"),
                          train_data=train_csv,
                          ops=[
                              ImageReader(inputs="x", parent_path=data_path),
                              ResizeRecord(target_size=(128, 128), outputs="x")
                          ])
    # We create a scheduler for batch_size with the epochs at which it will change
    # and corresponding values: batch shrinks as the generated resolution grows.
    batchsize_scheduler = Scheduler({0: 64, 5: 32, 15: 16, 25: 8, 35: 4})
    # We create a scheduler for the Resize ops; `None` at epoch 45 means images
    # pass through at native size.
    resize_scheduler = Scheduler({
        0: Resize(inputs="x", size=(4, 4), outputs="x"),
        5: Resize(inputs="x", size=(8, 8), outputs="x"),
        15: Resize(inputs="x", size=(16, 16), outputs="x"),
        25: Resize(inputs="x", size=(32, 32), outputs="x"),
        35: Resize(inputs="x", size=(64, 64), outputs="x"),
        45: None
    })
    # In Pipeline, we use the schedulers for batch_size and ops.
    pipeline = fe.Pipeline(batch_size=batchsize_scheduler,
                           data=writer,
                           ops=[
                               resize_scheduler,
                               CreateLowRes(inputs="x", outputs="x_lowres"),
                               Rescale(inputs="x", outputs="x"),
                               Rescale(inputs="x_lowres", outputs="x_lowres")
                           ])
    # One Adam instance per resolution stage; the G/D pair at each stage shares it.
    opt2, opt3, opt4, opt5, opt6, opt7 = [
        tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.0, beta_2=0.99, epsilon=1e-8) for _ in range(6)
    ]
    fade_in_alpha = tf.Variable(initial_value=1.0, dtype='float32', trainable=False)
    d2, d3, d4, d5, d6, d7 = fe.build(
        model_def=lambda: build_D(fade_in_alpha=fade_in_alpha, target_resolution=7),
        model_name=["d2", "d3", "d4", "d5", "d6", "d7"],
        optimizer=[opt2, opt3, opt4, opt5, opt6, opt7],
        loss_name=["dloss"] * 6)
    g2, g3, g4, g5, g6, g7, G = fe.build(
        model_def=lambda: build_G(fade_in_alpha=fade_in_alpha, target_resolution=7),
        model_name=["g2", "g3", "g4", "g5", "g6", "g7", "G"],
        optimizer=[opt2, opt3, opt4, opt5, opt6, opt7, opt7],
        loss_name=["gloss"] * 7)
    g_scheduler = Scheduler({
        0: ModelOp(model=g2, outputs="x_fake"),
        5: ModelOp(model=g3, outputs="x_fake"),
        15: ModelOp(model=g4, outputs="x_fake"),
        25: ModelOp(model=g5, outputs="x_fake"),
        35: ModelOp(model=g6, outputs="x_fake"),
        45: ModelOp(model=g7, outputs="x_fake"),
    })
    fake_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_fake", model=d2, outputs="fake_score"),
        5: ModelOp(inputs="x_fake", model=d3, outputs="fake_score"),
        15: ModelOp(inputs="x_fake", model=d4, outputs="fake_score"),
        25: ModelOp(inputs="x_fake", model=d5, outputs="fake_score"),
        35: ModelOp(inputs="x_fake", model=d6, outputs="fake_score"),
        45: ModelOp(inputs="x_fake", model=d7, outputs="fake_score")
    })
    real_score_scheduler = Scheduler({
        0: ModelOp(model=d2, outputs="real_score"),
        5: ModelOp(model=d3, outputs="real_score"),
        15: ModelOp(model=d4, outputs="real_score"),
        25: ModelOp(model=d5, outputs="real_score"),
        35: ModelOp(model=d6, outputs="real_score"),
        45: ModelOp(model=d7, outputs="real_score")
    })
    interp_score_scheduler = Scheduler({
        0: ModelOp(inputs="x_interp", model=d2, outputs="interp_score", track_input=True),
        5: ModelOp(inputs="x_interp", model=d3, outputs="interp_score", track_input=True),
        15: ModelOp(inputs="x_interp", model=d4, outputs="interp_score", track_input=True),
        25: ModelOp(inputs="x_interp", model=d5, outputs="interp_score", track_input=True),
        35: ModelOp(inputs="x_interp", model=d6, outputs="interp_score", track_input=True),
        45: ModelOp(inputs="x_interp", model=d7, outputs="interp_score", track_input=True)
    })
    network = fe.Network(ops=[
        RandomInput(inputs=lambda: 512),
        g_scheduler,
        fake_score_scheduler,
        ImageBlender(inputs=("x", "x_lowres"), alpha=fade_in_alpha),
        real_score_scheduler,
        Interpolate(inputs=("x_fake", "x"), outputs="x_interp"),
        interp_score_scheduler,
        GradientPenalty(inputs=("x_interp", "interp_score"), outputs="gp"),
        GLoss(inputs="fake_score", outputs="gloss"),
        DLoss(inputs=("real_score", "fake_score", "gp"), outputs="dloss")
    ])
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             epochs=55,
                             traces=[
                                 AlphaController(alpha=fade_in_alpha,
                                                 fade_start=[5, 15, 25, 35, 45, 55],
                                                 duration=[5, 5, 5, 5, 5, 5]),
                                 ImageSaving(epoch_model={
                                     4: "g2", 14: "g3", 24: "g4", 34: "g5", 44: "g6", 54: "g7"
                                 },
                                             save_dir=save_dir)
                             ])
    return estimator
def get_estimator(data_path=None, model_dir=None, batch_size=2):
    """Train RetinaNet for object detection on MSCOCO at 1024x1024 resolution.

    Args:
        data_path: optional root directory of the COCO dataset.
        model_dir: directory where the best model is saved; a fresh temporary
            directory is created when None.
        batch_size: number of examples per training batch.
    """
    # NOTE(review): `tempfile.mkdtemp()` as a signature default is evaluated once at
    # import time and shared across calls; create the directory lazily instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    #prepare dataset
    train_csv, val_csv, path = load_data(path=data_path)
    writer = fe.RecordWriter(
        save_dir=os.path.join(path, "retinanet_coco_1024"),
        train_data=train_csv,
        validation_data=val_csv,
        ops=[
            ImageReader(inputs="image", parent_path=path, outputs="image"),
            # Bounding-box columns arrive as strings in the csv; parse them first.
            String2List(inputs=["x1", "y1", "width", "height", "obj_label"],
                        outputs=["x1", "y1", "width", "height", "obj_label"]),
            ResizeImageAndBbox(target_size=(1024, 1024),
                               keep_ratio=True,
                               inputs=["image", "x1", "y1", "width", "height"],
                               outputs=["image", "x1", "y1", "width", "height"]),
            FlipImageAndBbox(inputs=["image", "x1", "y1", "width", "height", "obj_label", "id"],
                             outputs=["image", "x1", "y1", "width", "height", "obj_label", "id"]),
            GenerateTarget(inputs=("obj_label", "x1", "y1", "width", "height"),
                           outputs=("cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt"))
        ],
        expand_dims=True,
        compression="GZIP",
        write_feature=[
            "image", "id", "cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt", "obj_label", "x1", "y1", "width", "height"
        ])
    # prepare pipeline
    pipeline = fe.Pipeline(
        batch_size=batch_size,
        data=writer,
        ops=[
            Rescale(inputs="image", outputs="image"),
            # Pad variable-length box lists to a fixed shape so they can be batched.
            Pad(padded_shape=[2051],
                inputs=["x1_gt", "y1_gt", "w_gt", "h_gt", "obj_label", "x1", "y1", "width", "height"],
                outputs=["x1_gt", "y1_gt", "w_gt", "h_gt", "obj_label", "x1", "y1", "width", "height"])
        ])
    # prepare network
    model = fe.build(model_def=lambda: RetinaNet(input_shape=(1024, 1024, 3), num_classes=90),
                     model_name="retinanet",
                     optimizer=tf.optimizers.SGD(momentum=0.9),
                     loss_name="total_loss")
    network = fe.Network(ops=[
        ModelOp(inputs="image", model=model, outputs=["cls_pred", "loc_pred"]),
        RetinaLoss(inputs=("cls_gt", "x1_gt", "y1_gt", "w_gt", "h_gt", "cls_pred", "loc_pred"),
                   outputs=("total_loss", "focal_loss", "l1_loss")),
        # Box decoding is only needed for mAP evaluation, not for the training loss.
        PredictBox(inputs=["cls_pred", "loc_pred", "obj_label", "x1", "y1", "width", "height"],
                   outputs=("pred", "gt"),
                   mode="eval",
                   input_shape=(1024, 1024, 3))
    ])
    # prepare estimator
    estimator = fe.Estimator(
        network=network,
        pipeline=pipeline,
        epochs=7,
        traces=[
            MeanAvgPrecision(90, (1024, 1024, 3), 'pred', 'gt', output_name=("mAP", "AP50", "AP75")),
            ModelSaver(model_name="retinanet", save_dir=model_dir, save_best='mAP', save_best_mode='max'),
            LRController(model_name="retinanet", lr_schedule=MyLRSchedule(schedule_mode="step"))
        ])
    return estimator
def get_estimator(batch_size=128, epochs=100):
    """Domain-adversarial training (DANN) from MNIST (source) to USPS (target).

    Args:
        batch_size: number of examples per training batch.
        epochs: number of training epochs.
    """
    usps_train_csv, _, usps_parent_dir = usps.load_data()
    mnist_train_csv, _, mnist_parent_dir = mnist.load_data()
    # Rename the csv columns so source and target features remain distinguishable.
    mnist_frame = pd.read_csv(mnist_train_csv)
    mnist_frame.columns = ['source_img', 'source_label']
    mnist_frame.to_csv(mnist_train_csv, index=False)
    usps_frame = pd.read_csv(usps_train_csv)
    usps_frame.columns = ['target_img', 'target_label']
    usps_frame.to_csv(usps_train_csv, index=False)
    writer = fe.RecordWriter(
        save_dir=os.path.join(os.path.dirname(mnist_parent_dir), 'dann', 'tfr'),
        train_data=(usps_train_csv, mnist_train_csv),
        ops=(
            # One ops list per element of the train_data tuple.
            [
                ImageReader(inputs="target_img",
                            outputs="target_img",
                            parent_path=usps_parent_dir,
                            grey_scale=True)
            ],
            [
                ImageReader(inputs="source_img",
                            outputs="source_img",
                            parent_path=mnist_parent_dir,
                            grey_scale=True)
            ]))
    pipeline = fe.Pipeline(batch_size=batch_size,
                           data=writer,
                           ops=[
                               Resize(inputs="target_img", outputs="target_img", size=(28, 28)),
                               Resize(inputs="source_img", outputs="source_img", size=(28, 28)),
                               Minmax(inputs="target_img", outputs="target_img"),
                               Minmax(inputs="source_img", outputs="source_img")
                           ])
    # `alpha` feeds the domain predictor and is updated by GRLWeightController.
    alpha = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    img_shape = (28, 28, 1)
    feat_dim = 7 * 7 * 48
    feature_extractor = fe.build(model_def=lambda: build_feature_extractor(img_shape),
                                 model_name="feature_extractor",
                                 loss_name="fe_loss",
                                 optimizer=tf.keras.optimizers.Adam(1e-4))
    label_predictor = fe.build(model_def=lambda: build_label_predictor(feat_dim),
                               model_name="label_predictor",
                               loss_name="fe_loss",
                               optimizer=tf.keras.optimizers.Adam(1e-4))
    domain_predictor = fe.build(model_def=lambda: build_domain_predictor(feat_dim, alpha),
                                model_name="domain_predictor",
                                loss_name="fe_loss",
                                optimizer=tf.keras.optimizers.Adam(1e-4))
    network = fe.Network(ops=[
        ModelOp(inputs="source_img", outputs="src_feat", model=feature_extractor),
        ModelOp(inputs="target_img", outputs="tgt_feat", model=feature_extractor),
        ModelOp(inputs="src_feat", outputs="src_c_logit", model=label_predictor),
        ModelOp(inputs="src_feat", outputs="src_d_logit", model=domain_predictor),
        ModelOp(inputs="tgt_feat", outputs="tgt_d_logit", model=domain_predictor),
        FELoss(inputs=("src_c_logit", "source_label", "src_d_logit", "tgt_d_logit"), outputs="fe_loss")
    ])
    traces = [GRLWeightController(alpha=alpha)]
    return fe.Estimator(pipeline=pipeline, network=network, traces=traces, epochs=epochs)
def get_estimator(pretrained_fe_path, classifier_path, data_path=None):
    """ADDA: adversarially align USPS target features to a pretrained MNIST encoder.

    Args:
        pretrained_fe_path: path to the pretrained source feature-extractor weights.
        classifier_path: path to the pretrained source classifier weights
            (used for target-domain evaluation).
        data_path: optional root directory for the datasets.
    """
    assert os.path.exists(pretrained_fe_path), "Pretrained feature extractor is missing"
    assert os.path.exists(classifier_path), "Pretrained classifier is missing"
    usps_train_csv, usps_eval_csv, usps_parent_dir = usps.load_data(data_path)
    mnist_train_csv, mnist_eval_csv, mnist_parent_dir = mnist.load_data(data_path)
    tfr_path = os.path.join(os.path.dirname(usps_parent_dir), 'ADDA-tfrecords')
    os.makedirs(tfr_path, exist_ok=True)
    # Rename the csv columns so target (USPS) and source (MNIST) keys are distinct.
    column_fixes = [
        (usps_train_csv, ['target_img', 'target_label']),
        (usps_eval_csv, ['target_img', 'target_label']),
        (mnist_train_csv, ['source_img', 'source_label']),
        (mnist_eval_csv, ['source_img', 'source_label']),
    ]
    for csv_file, new_columns in column_fixes:
        frame = pd.read_csv(csv_file)
        frame.columns = new_columns
        frame.to_csv(csv_file, index=False)
    BATCH_SIZE = 128
    writer = RecordWriter(
        save_dir=tfr_path,
        train_data=(usps_train_csv, mnist_train_csv),
        ops=(
            # One ops list per element of the train_data tuple.
            [ImageReader(inputs="target_img", outputs="target_img", parent_path=usps_parent_dir, grey_scale=True)],
            [ImageReader(inputs="source_img", outputs="source_img", parent_path=mnist_parent_dir, grey_scale=True)]))
    pipeline = fe.Pipeline(
        batch_size=BATCH_SIZE,
        data=writer,
        ops=[
            Resize(inputs="target_img", outputs="target_img", size=(32, 32)),
            Resize(inputs="source_img", outputs="source_img", size=(32, 32)),
            Minmax(inputs="target_img", outputs="target_img"),
            Minmax(inputs="source_img", outputs="source_img")
        ])
    # Step2: Define Network
    feature_extractor = fe.build(model_def=build_feature_extractor,
                                 model_name="fe",
                                 loss_name="fe_loss",
                                 optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))
    discriminator = fe.build(model_def=build_discriminator,
                             model_name="disc",
                             loss_name="d_loss",
                             optimizer=tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9))
    network = fe.Network(ops=[
        ModelOp(inputs="target_img", outputs="target_feature", model=feature_extractor),
        ModelOp(inputs="target_feature", outputs="target_score", model=discriminator),
        # Source features come from the frozen pretrained extractor on disk.
        ExtractSourceFeature(model_path=pretrained_fe_path, inputs="source_img", outputs="source_feature"),
        ModelOp(inputs="source_feature", outputs="source_score", model=discriminator),
        DLoss(inputs=("source_score", "target_score"), outputs="d_loss"),
        FELoss(inputs="target_score", outputs="fe_loss")
    ])
    traces = [
        LoadPretrainedFE(model_name="fe", model_path=pretrained_fe_path),
        EvaluateTargetClassifier(model_name="fe", model_path=classifier_path)
    ]
    return fe.Estimator(pipeline=pipeline, network=network, traces=traces, epochs=100)
def get_estimator(weight=10.0, epochs=200, model_dir=None):
    """Train a CycleGAN for unpaired image translation between domains A and B.

    Args:
        weight: weight applied inside the generator losses (cycle/identity terms).
        epochs: number of training epochs.
        model_dir: directory for periodic generator checkpoints; a fresh temporary
            directory is created when None.
    """
    # NOTE(review): `tempfile.mkdtemp()` as a signature default is evaluated once at
    # import time and shared across calls; create the directory lazily instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    trainA_csv, trainB_csv, _, _, parent_path = load_data()
    tfr_save_dir = os.path.join(parent_path, 'FEdata')
    # Step 1: Define Pipeline
    writer = RecordWriter(
        train_data=(trainA_csv, trainB_csv),
        save_dir=tfr_save_dir,
        ops=([ImageReader(inputs="imgA", outputs="imgA", parent_path=parent_path)],
             [ImageReader(inputs="imgB", outputs="imgB", parent_path=parent_path)]))
    pipeline = fe.Pipeline(
        data=writer,
        batch_size=1,
        ops=[
            Myrescale(inputs="imgA", outputs="imgA"),
            RandomJitter(inputs="imgA", outputs="real_A"),
            Myrescale(inputs="imgB", outputs="imgB"),
            RandomJitter(inputs="imgB", outputs="real_B")
        ])
    # Step2: Define Network
    g_AtoB = FEModel(model_def=build_generator,
                     model_name="g_AtoB",
                     loss_name="g_AtoB_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))
    g_BtoA = FEModel(model_def=build_generator,
                     model_name="g_BtoA",
                     loss_name="g_BtoA_loss",
                     optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))
    d_A = FEModel(model_def=build_discriminator,
                  model_name="d_A",
                  loss_name="d_A_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))
    d_B = FEModel(model_def=build_discriminator,
                  model_name="d_B",
                  loss_name="d_B_loss",
                  optimizer=tf.keras.optimizers.Adam(2e-4, 0.5))
    network = fe.Network(ops=[
        ModelOp(inputs="real_A", model=g_AtoB, outputs="fake_B"),
        ModelOp(inputs="real_B", model=g_BtoA, outputs="fake_A"),
        ModelOp(inputs="real_A", model=d_A, outputs="d_real_A"),
        ModelOp(inputs="fake_A", model=d_A, outputs="d_fake_A"),
        ModelOp(inputs="real_B", model=d_B, outputs="d_real_B"),
        ModelOp(inputs="fake_B", model=d_B, outputs="d_fake_B"),
        # Identity ("same_*") and cycle ("cycled_*") outputs feed the generator losses.
        ModelOp(inputs="real_A", model=g_BtoA, outputs="same_A"),
        ModelOp(inputs="fake_B", model=g_BtoA, outputs="cycled_A"),
        ModelOp(inputs="real_B", model=g_AtoB, outputs="same_B"),
        ModelOp(inputs="fake_A", model=g_AtoB, outputs="cycled_B"),
        GLoss(inputs=("real_A", "d_fake_B", "cycled_A", "same_A"), weight=weight, outputs="g_AtoB_loss"),
        GLoss(inputs=("real_B", "d_fake_A", "cycled_B", "same_B"), weight=weight, outputs="g_BtoA_loss"),
        DLoss(inputs=("d_real_A", "d_fake_A"), outputs="d_A_loss"),
        DLoss(inputs=("d_real_B", "d_fake_B"), outputs="d_B_loss")
    ])
    # Step3: Define Estimator
    traces = [
        ModelSaver(model_name="g_AtoB", save_dir=model_dir, save_freq=10),
        ModelSaver(model_name="g_BtoA", save_dir=model_dir, save_freq=10)
    ]
    estimator = fe.Estimator(network=network, pipeline=pipeline, epochs=epochs, traces=traces)
    return estimator