def deep_iv_fit(x, z, t, y, epochs=100, hidden=[128, 64, 32]):
    from deepiv.models import Treatment, Response
    import deepiv.architectures as architectures
    import deepiv.densities as densities
    from keras.layers import Input, Dense, Concatenate

    n = z.shape[0]
    # Heuristic dropout schedule: more dropout for small samples, capped at 0.5.
    dropout_rate = min(1000. / (1000. + n), 0.5)
    batch_size = 100
    act = "relu"
    n_components = 10

    # Build and fit treatment model: a mixture-of-Gaussians density network for p(t | z, x).
    instruments = Input(shape=(z.shape[1],), name="instruments")
    features = Input(shape=(x.shape[1],), name="features")
    treatment_input = Concatenate(axis=1)([instruments, features])

    est_treat = architectures.feed_forward_net(treatment_input,
                                               lambda x: densities.mixture_of_gaussian_output(x, n_components),
                                               hidden_layers=hidden,
                                               dropout_rate=dropout_rate,
                                               l2=0.0001,
                                               activations=act)

    treatment_model = Treatment(inputs=[instruments, features], outputs=est_treat)
    treatment_model.compile('adam', loss="mixture_of_gaussians", n_components=n_components)
    treatment_model.fit([z, x], t, epochs=epochs, batch_size=batch_size)

    # Build and fit response model: predicts y from (x, t), trained with
    # treatments sampled from the fitted treatment density.
    treatment = Input(shape=(t.shape[1],), name="treatment")
    response_input = Concatenate(axis=1)([features, treatment])

    est_response = architectures.feed_forward_net(response_input, Dense(1),
                                                  activations=act,
                                                  hidden_layers=hidden,
                                                  l2=0.001,
                                                  dropout_rate=dropout_rate)

    response_model = Response(treatment=treatment_model,
                              inputs=[features, treatment],
                              outputs=est_response)
    response_model.compile('adam', loss='mse')
    response_model.fit([z, x], y, epochs=epochs, verbose=1,
                       batch_size=batch_size, samples_per_batch=2)
    return response_model
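# Usage sketch (illustrative only, not part of the library): fit DeepIV on a
# small synthetic problem. The data-generating process below is an assumption
# made purely to show the expected array shapes (n samples x columns); it is
# not taken from the DeepIV paper or package.
import numpy as np

n = 5000
x = np.random.normal(size=(n, 3))                                # observed features
z = np.random.normal(size=(n, 2))                                # instruments
t = z[:, :1] + 0.5 * x[:, :1] + np.random.normal(size=(n, 1))    # endogenous treatment
y = 2.0 * t + x[:, :1] + np.random.normal(size=(n, 1))           # outcome

response_model = deep_iv_fit(x, z, t, y, epochs=30)

# The fitted Response model is a Keras model over [features, treatment], so
# counterfactual predictions come from ordinary predict() calls.
y_hat = response_model.predict([x, t])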
def conv_embedding(images, output, other_features=[], dropout_rate=0.1,
                   embedding_dropout=0.1, embedding_l2=0.05, constrain_norm=True):
    import deepiv.architectures as architectures
    from keras.layers import Dense, Concatenate

    print("Building conv net")
    # Convolutional embedding of the image input into a 64-dimensional vector.
    x_embedding = architectures.convnet(images, Dense(64, activation='linear'),
                                        dropout_rate=embedding_dropout,
                                        activations='relu',
                                        l2_rate=embedding_l2,
                                        constrain_norm=constrain_norm)

    # Optionally concatenate any additional (non-image) features.
    if len(other_features) > 0:
        embedd = Concatenate(axis=1)([x_embedding] + other_features)
    else:
        embedd = x_embedding

    # Small feed-forward head on top of the combined embedding.
    out = architectures.feed_forward_net(embedd, output,
                                         hidden_layers=[32],
                                         dropout_rate=dropout_rate,
                                         activations='relu',
                                         constrain_norm=constrain_norm)
    return out
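# Usage sketch (illustrative only): wire conv_embedding into a Treatment model
# when the observed features are images, in the spirit of the DeepIV MNIST
# demand simulation. The flattened 28x28 input shape, the instrument dimension,
# and the hyperparameters are assumptions; adjust them to whatever your version
# of architectures.convnet expects.
from deepiv.models import Treatment
import deepiv.densities as densities
from keras.layers import Input

n_components = 10
image_features = Input(shape=(28 * 28,), name="image_features")
instruments = Input(shape=(2,), name="instruments")

est_treat = conv_embedding(image_features,
                           lambda h: densities.mixture_of_gaussian_output(h, n_components),
                           other_features=[instruments])

treatment_model = Treatment(inputs=[instruments, image_features], outputs=est_treat)
treatment_model.compile('adam', loss="mixture_of_gaussians", n_components=n_components)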
Response:{y}".format(**{'x':x.shape, 'z':z.shape, 't':t.shape, 'y':y.shape})) # Build and fit treatment model instruments = Input(shape=(z.shape[1],), name="instruments") features = Input(shape=(x.shape[1],), name="features") treatment_input = Concatenate(axis=1)([instruments, features]) hidden = [128, 64, 32] act = "relu" n_components = 10 est_treat = architectures.feed_forward_net(treatment_input, lambda x: densities.mixture_of_gaussian_output(x, n_components), hidden_layers=hidden, dropout_rate=dropout_rate, l2=0.0001, activations=act) treatment_model = Treatment(inputs=[instruments, features], outputs=est_treat) treatment_model.compile('adam', loss="mixture_of_gaussians", n_components=n_components) treatment_model.fit([z, x], t, epochs=epochs, batch_size=batch_size) # Build and fit response model treatment = Input(shape=(t.shape[1],), name="treatment") response_input = Concatenate(axis=1)([features, treatment]) est_response = architectures.feed_forward_net(response_input, Dense(1),