def fastestimator_run():
    """Build a pipeline over a three-way BatchDataset and print the shape of each batch key."""
    positives, _ = mnist.load_data(image_key="x1", label_key="y1")
    negatives, _ = mnist.load_data(image_key="x2", label_key="y2")
    simulated, _ = mnist.load_data()
    simulated = NegativeImageSimulatedTube(simulated)
    # 10 + 10 + 2 samples drawn per batch from the three datasets respectively.
    combined = BatchDataset(datasets=(positives, negatives, simulated), num_samples=(10, 10, 2))
    pipeline = fe.Pipeline(train_data=combined)
    batch = pipeline.get_results()
    for name in batch:
        print(name)
        print(batch[name].shape)
def _build_estimator(model: Union[tf.keras.Model, torch.nn.Module], trace: Traceability, axis: int = -1):
    """Assemble a single-step MNIST estimator around `model` with `trace` attached.

    Args:
        model: A TensorFlow or PyTorch model to train.
        trace: The traceability trace under test.
        axis: Channel axis for ExpandDims (-1 for TF channels-last, 1 for torch channels-first).

    Returns:
        A configured `fe.Estimator` limited to one train step per epoch.
    """
    batch_size = 32
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis),
                                Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=[
                                 Accuracy(true_key="y", pred_key="y_pred"),
                                 LRScheduler(model=model,
                                             lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3)),
                                 trace
                             ],
                             max_train_steps_per_epoch=1,
                             max_eval_steps_per_epoch=None)
    # Record a sample input so the model's input spec can be reconstructed later.
    if axis == -1:
        fake_data = tf.ones(shape=(batch_size, 28, 28, 1))
    else:
        fake_data = torch.ones(size=(batch_size, 1, 28, 28))
    model.fe_input_spec = FeInputSpec(fake_data, model)
    return estimator
def test_mode_ds_id_interaction(self):
    """ds_id filtering must not resurrect an op that is already excluded in 'infer' mode."""
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=32,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", ds_id="ds_1")
    ])
    batch = pipeline.transform(data=train_data[0], mode="train")
    # Whether the ds_id matches ("ds_1") or not ("ds_2"), the loss op stays out of 'infer' mode.
    for dataset_id in ("ds_1", "ds_2"):
        result = network.transform(data=batch, mode="infer", ds_id=dataset_id)
        assert "ce" not in result
def create_pipeline():
    """Return an MNIST pipeline that expands dims into 'x1' and min-max normalizes back into 'x'."""
    train_data, eval_data = mnist.load_data()
    return fe.Pipeline(train_data=train_data,
                       eval_data=eval_data,
                       batch_size=32,
                       ops=[ExpandDims(inputs="x", outputs="x1"),
                            Minmax(inputs="x1", outputs="x")])
def get_estimator(epochs=2, batch_size=32, save_dir=tempfile.mkdtemp()):
    """Standard MNIST LeNet recipe with best-model saving and cosine LR decay.

    Args:
        epochs: Number of training epochs.
        batch_size: Batch size for all modes.
        save_dir: Directory where the best model is written (default created at import time).

    Returns:
        A configured `fe.Estimator`.
    """
    # step 1: data pipeline
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    # step 2: network
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3: estimator with metric, checkpointing, and LR schedule traces
    return fe.Estimator(pipeline=pipeline,
                        network=network,
                        epochs=epochs,
                        traces=[
                            Accuracy(true_key="y", pred_key="y_pred"),
                            BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
                            LRScheduler(model=model,
                                        lr_fn=lambda step: cosine_decay(step, cycle_length=3750, init_lr=1e-3))
                        ])
def get_estimator(epochs=50, batch_size=256, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    """GAN training recipe on MNIST: alternating generator/discriminator updates.

    Args:
        epochs: Number of training epochs.
        batch_size: Batch size for the train pipeline.
        max_train_steps_per_epoch: Optional cap on train steps per epoch.
        save_dir: Directory where the generator is periodically saved.

    Returns:
        A configured `fe.Estimator`.
    """
    train_data, _ = mnist.load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            ExpandDims(inputs="x", outputs="x"),
            # mean=1.0, std=1.0, max_pixel_value=127.5 — presumably rescales pixels to [-1, 1].
            Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
            # Fresh 100-dim standard-normal latent vector per sample.
            LambdaOp(fn=lambda: np.random.normal(size=[100]).astype('float32'), outputs="z")
        ])
    gen_model = fe.build(model_fn=generator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    disc_model = fe.build(model_fn=discriminator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
    network = fe.Network(ops=[
        ModelOp(model=gen_model, inputs="z", outputs="x_fake"),
        ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
        GLoss(inputs="fake_score", outputs="gloss"),
        UpdateOp(model=gen_model, loss_name="gloss"),
        ModelOp(model=disc_model, inputs="x", outputs="true_score"),
        DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
        UpdateOp(model=disc_model, loss_name="dloss")
    ])
    return fe.Estimator(pipeline=pipeline,
                        network=network,
                        epochs=epochs,
                        traces=ModelSaver(model=gen_model, save_dir=save_dir, frequency=5),
                        max_train_steps_per_epoch=max_train_steps_per_epoch)
def get_estimator(epochs=2, batch_size=32):
    """MNIST recipe combining cross-entropy with a feature-bank loss supplied by a trace."""
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        # Also expose the 'dense' intermediate layer's activations as "feature_vector".
        ModelOp(model=model, inputs="x", outputs=["y_pred", "feature_vector"], intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        CustomLoss(inputs=("feature_vector", "feature_selected"), outputs="feature_loss"),
        # Total loss is the plain sum of the two components.
        LambdaOp(fn=lambda x, y: x + y, inputs=("ce", "feature_loss"), outputs="total_loss"),
        UpdateOp(model=model, loss_name="total_loss")
    ])
    # step 3: MemoryBank produces "feature_selected" consumed by CustomLoss above.
    memory_trace = MemoryBank(inputs=("feature_vector", "y"), outputs="feature_selected")
    return fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=[memory_trace])
def get_estimator():
    """Train LeNet on a BatchDataset mixing two MNIST copies with simulated negatives (2:2:1 per batch)."""
    pos_real, _ = mnist.load_data()
    neg_real, _ = mnist.load_data()
    neg_sim, _ = mnist.load_data()
    neg_sim = NegativeImageSimulatedTube(neg_sim)
    merged = BatchDataset(datasets=(pos_real, neg_real, neg_sim), num_samples=(2, 2, 1))
    pipeline = fe.Pipeline(train_data=merged,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    return fe.Estimator(pipeline=pipeline, network=network, epochs=2)
def get_estimator(epochs=2, batch_size=32, max_train_steps_per_epoch=None, max_eval_steps_per_epoch=None,
                  save_dir=tempfile.mkdtemp()):
    """MNIST LeNet recipe that also extracts an intermediate embedding, takes its gradient
    w.r.t. the input, and logs the embedding to TensorBoard.

    Args:
        epochs: Number of training epochs.
        batch_size: Batch size for all modes.
        max_train_steps_per_epoch: Optional cap on train steps per epoch.
        max_eval_steps_per_epoch: Optional cap on eval steps per epoch.
        save_dir: Directory where the best model is saved (default created once at import time).

    Returns:
        A configured `fe.Estimator`.
    """
    # step 1
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)  # carve half of the eval split off as test data
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x"),
                               Minmax(inputs="x", outputs="x")
                           ],
                           num_process=0)  # single-process data loading
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    # Print indexed submodule names so the 'dense' intermediate layer can be identified by hand.
    print([f"{idx}: {x.name}" for idx, x in enumerate(model.submodules)])
    network = fe.Network(ops=[
        Watch(inputs="x"),  # watch "x" so gradients with respect to it can be computed below
        ModelOp(model=model, inputs="x", outputs=["y_pred", "embedding"], intermediate_layers='dense'),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        GradientOp(finals="embedding", inputs="x", outputs="grads"),
        UpdateOp(model=model, loss_name="ce")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
        Inspector(),
        BestModelSaver(model=model, save_dir=save_dir, metric="accuracy", save_best_mode="max"),
        LRScheduler(model=model, lr_fn=lambda step: cosine_decay(
            step, cycle_length=3750, init_lr=1e-3)),
        # Write the "embedding" tensor to TensorBoard, labeled by the ground-truth class "y".
        TensorBoard(log_dir="tf_logs", write_embeddings="embedding", embedding_labels="y")
    ]
    estimator = fe.Estimator(
        pipeline=pipeline,
        network=network,
        epochs=epochs,
        traces=traces,
        max_train_steps_per_epoch=max_train_steps_per_epoch,
        max_eval_steps_per_epoch=max_eval_steps_per_epoch)
    return estimator
def test_pytorch_weight_decay_vs_l2(self):
    """Verify SGD's built-in weight_decay and an explicit L2Regularizaton op yield matching weights
    after one training step (same data, same lr, beta = self.beta)."""
    # Get Data
    train_data, _ = mnist.load_data()
    t_d = train_data.split(128)
    # Initializing models
    pytorch_wd = fe.build(model_fn=MyNet_torch,
                          optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01, weight_decay=self.beta))
    pytorch_l2 = fe.build(model_fn=MyNet_torch,
                          optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
    # Initialize pipeline
    pipeline = fe.Pipeline(train_data=t_d,
                           batch_size=128,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=0),
                                Minmax(inputs="x", outputs="x")])
    # Define the two pytorch networks
    network_weight_decay = fe.Network(ops=[
        ModelOp(model=pytorch_wd, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=pytorch_wd, loss_name="ce")
    ])
    network_l2 = fe.Network(ops=[
        ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
        UpdateOp(model=pytorch_l2, loss_name="l2")
    ])
    # defining traces
    traces = [Accuracy(true_key="y", pred_key="y_pred")]
    # Setting up estimators — a single train step each so the comparison is exact.
    estimator_wd = fe.Estimator(pipeline=pipeline,
                                network=network_weight_decay,
                                epochs=1,
                                traces=traces,
                                train_steps_per_epoch=1)
    estimator_l2 = fe.Estimator(pipeline=pipeline,
                                network=network_l2,
                                epochs=1,
                                traces=traces,
                                train_steps_per_epoch=1)
    # Training
    print('********************************Pytorch weight decay training************************************')
    estimator_wd.fit()
    print()
    print('********************************Pytorch L2 Regularization training************************************')
    estimator_l2.fit()
    # testing weights
    count = 0
    for wt, l2 in zip(pytorch_wd.parameters(), pytorch_l2.parameters()):
        # Parameter tensors count as equal when the summed absolute difference is below 1e-6.
        if ((wt - l2).abs()).sum() < torch.tensor(10**-6):
            count += 1
    # NOTE(review): assumes MyNet_torch exposes exactly 6 parameter tensors — confirm against the model.
    self.assertTrue(count == 6)
def setUpClass(cls):
    """Build a shared pipeline and network once for every test in this class."""
    # Only the train split is used; discard the eval split instead of binding an unused name.
    train_data, _ = mnist.load_data()
    cls.pipeline = fe.Pipeline(train_data=train_data)
    model = fe.build(model_fn=LeNetTf, optimizer_fn="adam")
    cls.network = fe.Network(ops=[
        ModelOp(model=model, inputs="x_out", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
def test_tf_model_end_to_end_gradient(self):
    """Explicit GradientOp + UpdateOp(gradients=...) should train a TF model end-to-end without error."""
    train_data, _ = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           batch_size=4,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet_tf, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        # Compute gradients explicitly, then hand them to the update op by key.
        GradientOp(model=model, finals="ce", outputs="gradients"),
        UpdateOp(model=model, gradients="gradients", loss_name="ce")
    ])
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=2,
                             max_train_steps_per_epoch=10)
    estimator.fit()
def get_estimator():
    """Train LeNet for two epochs on MNIST wrapped in a simulated negative-image dataset."""
    dataset, _ = mnist.load_data()
    dataset = NegativeImageSimulatedTube(dataset)
    pipeline = fe.Pipeline(train_data=dataset,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    return fe.Estimator(pipeline=pipeline, network=network, epochs=2)
def get_estimator(epochs=2, batch_size=32):
    """MNIST LeNet recipe with a DebugOp inserted on the train-mode loss."""
    train_data, eval_data = mnist.load_data()
    test_data = eval_data.split(0.5)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        # Reads and re-emits "ce" during training only — presumably for inspection; verify DebugOp's docs.
        DebugOp(inputs="ce", outputs="ce", mode="train"),
        UpdateOp(model=model, loss_name="ce")
    ])
    return fe.Estimator(pipeline=pipeline, network=network, epochs=epochs)
def get_estimator(batch_size=100, epochs=20, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):
    """Variational-autoencoder training recipe on binarized image data.

    Args:
        batch_size: Batch size for the train pipeline.
        epochs: Number of training epochs.
        max_train_steps_per_epoch: Optional cap on train steps per epoch.
        save_dir: Directory where the best encoder/decoder weights are saved.

    Returns:
        A configured `fe.Estimator`.
    """
    train_data, _ = load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           batch_size=batch_size,
                           ops=[
                               ExpandDims(inputs="x", outputs="x", axis=0),
                               Minmax(inputs="x", outputs="x"),
                               # Threshold normalized pixels into {0, 1}.
                               Binarize(inputs="x", outputs="x", threshold=0.5),
                           ])
    encode_model = fe.build(model_fn=EncoderNet, optimizer_fn="adam", model_name="encoder")
    decode_model = fe.build(model_fn=DecoderNet, optimizer_fn="adam", model_name="decoder")
    network = fe.Network(ops=[
        ModelOp(model=encode_model, inputs="x", outputs="meanlogvar"),
        # Split the encoder output into posterior mean and log-variance.
        SplitOp(inputs="meanlogvar", outputs=("mean", "logvar")),
        ReparameterizepOp(inputs=("mean", "logvar"), outputs="z"),
        ModelOp(model=decode_model, inputs="z", outputs="x_logit"),
        CrossEntropy(inputs=("x_logit", "x"), outputs="cross_entropy"),
        CVAELoss(inputs=("cross_entropy", "mean", "logvar", "z"), outputs="loss"),
        # Both halves of the autoencoder are updated from the same loss.
        UpdateOp(model=encode_model, loss_name="loss"),
        UpdateOp(model=decode_model, loss_name="loss"),
    ])
    savers = [
        BestModelSaver(model=encode_model, save_dir=save_dir),
        BestModelSaver(model=decode_model, save_dir=save_dir)
    ]
    return fe.Estimator(pipeline=pipeline,
                        network=network,
                        epochs=epochs,
                        traces=savers,
                        max_train_steps_per_epoch=max_train_steps_per_epoch)
def get_estimator():
    """MNIST LeNet recipe demonstrating gradient accumulation via merge_grad."""
    # step 1
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data,
                           batch_size=32,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    # step 2
    model = fe.build(model_fn=LeNet, optimizer_fn="adam")
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        # merge_grad=4: accumulate gradients over 4 steps before applying an update.
        UpdateOp(model=model, loss_name="ce", merge_grad=4)
    ])
    # step 3
    return fe.Estimator(pipeline=pipeline, network=network, epochs=2)
def create_estimator_for_arc(self, model, use_eval, axis):
    """Build a short two-epoch estimator wired with an ARC learning-rate scheduler.

    Args:
        model: The model to train.
        use_eval: Whether to attach the eval split to the pipeline.
        axis: Channel axis for ExpandDims.

    Returns:
        A configured `fe.Estimator` capped at 10 train steps per epoch.
    """
    train_data, eval_data = mnist.load_data()
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=eval_data if use_eval else None,
                           batch_size=8,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=axis),
                                Minmax(inputs="x", outputs="x")])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    return fe.Estimator(pipeline=pipeline,
                        network=network,
                        epochs=2,
                        traces=LRScheduler(model=model, lr_fn=ARC(1)),
                        max_train_steps_per_epoch=10)
def setUpClass(cls):
    """Cache the MNIST train split once for the whole test class."""
    cls.train_data = mnist.load_data()[0]
def test_pytorch_l2_vs_tensorflow_l2(self):
    """Verify the L2Regularizaton op drives PyTorch and TensorFlow models to matching weights
    after one identical SGD step (same split, same lr, same beta)."""
    # Get Data
    train_data, eval_data = mnist.load_data()
    t_d = train_data.split(128)
    # Initializing Pytorch model
    pytorch_l2 = fe.build(model_fn=MyNet_torch,
                          optimizer_fn=lambda x: torch.optim.SGD(params=x, lr=0.01))
    # Initialize Pytorch pipeline (axis=0: torch uses channels-first layout)
    pipeline = fe.Pipeline(train_data=t_d,
                           eval_data=eval_data,
                           batch_size=128,
                           ops=[ExpandDims(inputs="x", outputs="x", axis=0),
                                Minmax(inputs="x", outputs="x")])
    # Initialize Pytorch Network
    network_l2 = fe.Network(ops=[
        ModelOp(model=pytorch_l2, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        L2Regularizaton(inputs="ce", outputs="l2", model=pytorch_l2, beta=self.beta),
        UpdateOp(model=pytorch_l2, loss_name="l2")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred")
    ]
    # Initialize Pytorch estimator — a single train step so the weight comparison is exact.
    estimator_l2 = fe.Estimator(pipeline=pipeline,
                                network=network_l2,
                                epochs=1,
                                traces=traces,
                                train_steps_per_epoch=1,
                                monitor_names=["ce", "l2"])
    print('********************************Pytorch L2 Regularization training************************************')
    estimator_l2.fit()
    # Converting Pytorch weights to numpy
    torch_wt = []
    for _, param in pytorch_l2.named_parameters():
        if param.requires_grad:
            torch_wt.append(param.detach().numpy())
    # step 1: rebuild the pipeline without axis=0 (TF uses channels-last)
    pipeline = fe.Pipeline(train_data=t_d,
                           eval_data=eval_data,
                           batch_size=128,
                           ops=[ExpandDims(inputs="x", outputs="x"),
                                Minmax(inputs="x", outputs="x")])
    # step 2
    model_tf = fe.build(model_fn=MyNet_tf,
                        optimizer_fn=lambda: tf.optimizers.SGD(learning_rate=0.01))
    network = fe.Network(ops=[
        ModelOp(model=model_tf, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
        L2Regularizaton(inputs="ce", outputs="l2", model=model_tf, beta=self.beta),
        UpdateOp(model=model_tf, loss_name="l2")
    ])
    # step 3
    traces = [
        Accuracy(true_key="y", pred_key="y_pred")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=1,
                             traces=traces,
                             train_steps_per_epoch=1,
                             monitor_names=["ce", "l2"])
    print('*******************************Tensorflow L2 Regularization training***********************************')
    estimator.fit()
    # Converting TF weights to numpy
    tf_wt = []
    for layer in model_tf.layers:
        for w in layer.trainable_variables:
            tf_wt.append(w.numpy())
    # testing weights
    count = 0
    for tf_t, tr in zip(tf_wt, torch_wt):
        # Compare each TF tensor against the transposed torch tensor with a 1e-5 tolerance on the L1 difference.
        if np.sum(np.abs(tf_t - np.transpose(tr))) < (10**-5):
            count += 1
    # NOTE(review): assumes both models expose exactly 6 trainable tensors in matching order — confirm.
    self.assertTrue(count == 6)