Example #1
def autoencoder(encoder_model, decoder_model):
    decoder_output = decoder_model(encoder_model.output)

    model = Model(inputs=encoder_model.input, outputs=decoder_output)
    opt = optimizers.Adam()
    model.compile(loss='mse', optimizer=opt)
    return model
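A minimal usage sketch for the helper above, assuming a hypothetical pair of
dense encoder/decoder sub-models (all names, shapes and layer sizes below are
illustrative, not from the original source):

# hypothetical encoder/decoder wiring for autoencoder()
from tensorflow.keras import Model, layers
from tensorflow.keras.layers import Input

enc_in = Input(shape=(784,))
encoder_model = Model(enc_in, layers.Dense(32, activation='relu')(enc_in))

dec_in = Input(shape=(32,))
decoder_model = Model(dec_in, layers.Dense(784, activation='sigmoid')(dec_in))

ae = autoencoder(encoder_model, decoder_model)  # compiled with MSE and Adam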
Example #2
def discriminator(im_shape, relu_before_bn=True):
    conv2d = lambda x, filters: layers.Conv2D(
        filters, (5, 5), strides=(2, 2), padding='same',
        kernel_initializer=initializers.RandomNormal(stddev=0.02))(x)
    flatten = lambda x: layers.Flatten()(x)
    lrelu = lambda x: layers.LeakyReLU(alpha=0.2)(x)
    norm = lambda x: layers.BatchNormalization()(x)
    if relu_before_bn:
        block = lambda x, filters: norm(lrelu(conv2d(x, filters)))
    else:
        block = lambda x, filters: lrelu(norm(conv2d(x, filters)))

    inputs = Input(shape=im_shape)
    l1 = block(inputs, 128)
    l2 = block(l1, 256)
    l3 = block(l2, 512)
    l4 = flatten(block(l3, 1024))
    d_out = layers.Dense(1, activation='sigmoid')(l4)

    d_model = Model(inputs=inputs, outputs=d_out)
    opt = optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    d_model.compile(loss='binary_crossentropy',
                    optimizer=opt,
                    metrics=['accuracy'])
    return d_model
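For context, a hedged usage sketch (the 64x64 RGB input shape is an assumption,
not from the original source):

# hypothetical call; im_shape must match your image data
d_model = discriminator(im_shape=(64, 64, 3))
d_model.summary()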
Example #3
def test_pidon_operator_on_spherical_pde():
    set_random_seed(0)

    diff_eq = DiffusionEquation(3)
    mesh = Mesh(
        [(1., 11.), (0., 2 * np.pi), (.25 * np.pi, .75 * np.pi)],
        [2., np.pi / 5., np.pi / 4],
        CoordinateSystem.SPHERICAL)
    bcs = [
        (DirichletBoundaryCondition(
            lambda x, t: np.ones((len(x), 1)), is_static=True),
         DirichletBoundaryCondition(
             lambda x, t: np.full((len(x), 1), 1. / 11.), is_static=True)),
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True),
         NeumannBoundaryCondition(
             lambda x, t: np.zeros((len(x), 1)), is_static=True)),
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True),
         NeumannBoundaryCondition(
             lambda x, t: np.zeros((len(x), 1)), is_static=True))
    ]
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = ContinuousInitialCondition(cp, lambda x: 1. / x[:, :1])
    t_interval = (0., .5)
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)

    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=20,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[30, 30],
            trunk_hidden_layer_sizes=[30, 30],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=2e-5),
            epochs=3,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    for i in range(2):
        assert np.all(
            training_loss_history[i + 1].weighted_total_loss.numpy() <
            training_loss_history[i].weighted_total_loss.numpy())

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (500, 6, 11, 3, 1)
Example #4
def discriminator(n_cat):
    conv2d = lambda x, filters, strides: layers.Conv2D(
        filters, (4, 4), strides=strides, padding='same')(x)
    leakyrelu = lambda x: layers.LeakyReLU(alpha=0.2)(x)
    norm = lambda x: layers.BatchNormalization()(x)
    dense = lambda x, nodes: layers.Dense(nodes)(x)
    flatten = lambda x: layers.Flatten()(x)

    inputs = Input(shape=(64, 64, 3))
    l1 = leakyrelu(conv2d(inputs, 64, (2, 2)))
    l2 = norm(leakyrelu(conv2d(l1, 128, (2, 2))))
    l3 = norm(leakyrelu(conv2d(l2, 256, (2, 2))))
    l4 = norm(leakyrelu(conv2d(l3, 256, (1, 1))))
    l5 = norm(leakyrelu(conv2d(l4, 256, (1, 1))))
    l6 = norm(leakyrelu(dense(flatten(l5), 1024)))
    d_out = layers.Dense(1, activation='sigmoid')(l6)

    # Continuous latent variables are not implemented here, to keep things
    # simple; only the categorical code is modeled.
    aux1 = leakyrelu(norm(dense(l6, 128)))
    # softmax so the auxiliary head emits a categorical distribution over the
    # n_cat codes, matching the categorical cross-entropy used to train it
    q_out = layers.Dense(n_cat, activation='softmax')(aux1)
    d_model = Model(inputs=inputs, outputs=d_out)
    opt = optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

    q_model = Model(inputs=inputs, outputs=q_out)
    return d_model, q_model
Example #5
def gan(g_model, d_model):
    d_model.trainable = False
    d_output = d_model(g_model.output)

    gan_model = Model(inputs=g_model.input, outputs=d_output)
    opt = optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    gan_model.compile(loss='binary_crossentropy', optimizer=opt)
    return gan_model
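Note the design choice: d_model.trainable = False is set before the combined
model is compiled, so updates through the GAN model only move the generator's
weights; the discriminator keeps learning through its own separately compiled
model. A rough sketch of the alternating update, using hypothetical helper and
variable names that are not from the original source:

# hypothetical alternating GAN update; real_images and latent_dim are assumed
import numpy as np

def train_step(g_model, d_model, gan_model, real_images, latent_dim):
    n = len(real_images)
    noise = np.random.randn(n, latent_dim)
    fake_images = g_model.predict(noise, verbose=0)
    # the discriminator sees real batches as 1 and generated batches as 0
    d_model.train_on_batch(real_images, np.ones((n, 1)))
    d_model.train_on_batch(fake_images, np.zeros((n, 1)))
    # the generator is trained through the frozen discriminator, flipped labels
    gan_model.train_on_batch(noise, np.ones((n, 1)))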
Example #6
def test_pidon_operator_on_pde_with_t_and_x_dependent_rhs():
    class TestDiffEq(DifferentialEquation):

        def __init__(self):
            super(TestDiffEq, self).__init__(2, 1)

        @property
        def symbolic_equation_system(self) -> SymbolicEquationSystem:
            return SymbolicEquationSystem([
                self.symbols.t / 100. *
                (self.symbols.x[0] + self.symbols.x[1]) ** 2
            ])

    diff_eq = TestDiffEq()
    mesh = Mesh([(-1., 1.), (0., 2.)], [2., 1.])
    bcs = [
        (NeumannBoundaryCondition(lambda x, t: np.zeros((len(x), 1))),
         NeumannBoundaryCondition(lambda x, t: np.zeros((len(x), 1))))
    ] * 2
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = ContinuousInitialCondition(cp, lambda x: np.zeros((len(x), 1)))
    t_interval = (0., 1.)
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .05, True)

    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=20,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[30, 30],
            trunk_hidden_layer_sizes=[30, 30],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=2e-5),
            epochs=3,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    for i in range(2):
        assert np.all(
            training_loss_history[i + 1].weighted_total_loss.numpy() <
            training_loss_history[i].weighted_total_loss.numpy())

    solution = pidon.solve(ivp)
    assert solution.d_t == .05
    assert solution.discrete_y().shape == (20, 2, 3, 1)
Example #7
def test_pidon_operator_on_pde_system():
    set_random_seed(0)

    diff_eq = NavierStokesEquation()
    mesh = Mesh([(-2.5, 2.5), (0., 4.)], [1., 1.])
    ic_function = vectorize_ic_function(lambda x: [
        2. * x[0] - 4.,
        2. * x[0] ** 2 + 3. * x[1] - x[0] * x[1] ** 2,
        4. * x[0] - x[1] ** 2,
        2. * x[0] * x[1] - 3.
    ])
    bcs = [
        (DirichletBoundaryCondition(
            lambda x, t: ic_function(x),
            is_static=True),
         DirichletBoundaryCondition(
             lambda x, t: ic_function(x),
             is_static=True))
    ] * 2
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = ContinuousInitialCondition(cp, ic_function)
    t_interval = (0., .5)
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)

    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=20,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[20, 20],
            trunk_hidden_layer_sizes=[20, 20],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=1e-5),
            epochs=3,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    for i in range(2):
        assert np.all(
            training_loss_history[i + 1].weighted_total_loss.numpy() <
            training_loss_history[i].weighted_total_loss.numpy())

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (500, 6, 5, 4)
Example #8
def gan(g_model, d_model, q_model):
    d_model.trainable = False
    d_output = d_model(g_model.output)
    q_output = q_model(g_model.output)

    model = Model(inputs=g_model.input, outputs=[d_output, q_output])

    opt = optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss=['binary_crossentropy', 'categorical_crossentropy'], optimizer=opt)
    return model
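The loss list maps positionally onto the model's outputs: binary cross-entropy
for d_output and categorical cross-entropy for q_output. For the categorical
loss to be well-posed, the auxiliary (Q) head should emit a probability
distribution over the n_cat codes, as the softmax on q_out in Example #4 does.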
Example #9
def prepare_model():

    model = create_unet()
    loss = losses.SparseCategoricalCrossentropy(from_logits=True)
    dice = metric_dice()
    optimizer = optimizers.Adam(learning_rate=2e-4)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', dice])

    return model
Example #10
def test_pidon_operator_in_ar_mode_on_pde():
    set_random_seed(0)

    diff_eq = WaveEquation(1)
    mesh = Mesh([(0., 1.)], (.2,))
    bcs = [
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 2)), is_static=True),
         NeumannBoundaryCondition(
             lambda x, t: np.zeros((len(x), 2)), is_static=True)),
    ]
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    t_interval = (0., 1.)
    ic = BetaInitialCondition(cp, [(3.5, 3.5), (3.5, 3.5)])
    ivp = InitialValueProblem(cp, t_interval, ic)

    training_y_0_functions = [
        BetaInitialCondition(cp, [(p, p), (p, p)]).y_0
        for p in [2., 3., 4., 5.]
    ]
    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .25, False, auto_regression_mode=True)

    assert pidon.auto_regression_mode

    pidon.train(
        cp,
        (0., .25),
        training_data_args=DataArgs(
            y_0_functions=training_y_0_functions,
            n_domain_points=50,
            n_boundary_points=20,
            n_batches=2
        ),
        model_args=ModelArgs(
            latent_output_size=50,
            branch_hidden_layer_sizes=[50, 50],
            trunk_hidden_layer_sizes=[50, 50],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=1e-4),
            epochs=2,
            ic_loss_weight=10.,
            verbose=False
        )
    )

    sol = pidon.solve(ivp)
    assert np.allclose(sol.t_coordinates, [.25, .5, .75, 1.])
    assert sol.discrete_y().shape == (4, 5, 2)
Example #11
def build_model(input_shape,
                learning_rate,
                loss="sparse_categorical_crossentropy"):
    # build network
    model = keras.Sequential()

    # conv1
    model.add(
        keras.layers.Conv2D(64, (3, 3),
                            activation='relu',
                            input_shape=input_shape,
                            kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2),
                                        padding='same'))

    # conv2
    model.add(
        keras.layers.Conv2D(32, (3, 3),
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2),
                                        padding='same'))
    # conv3
    model.add(
        keras.layers.Conv2D(32, (2, 2),
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((2, 2), strides=(2, 2),
                                        padding='same'))

    # flatten the output and feed it to the dense output layers
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(64, activation="relu"))
    model.add(keras.layers.Dropout(0.3))

    # softmax classifier
    model.add(keras.layers.Dense(16, activation='softmax'))

    optimiser = optimizers.Adam(learning_rate=learning_rate)

    # compile model
    model.compile(optimizer=optimiser, loss=loss, metrics=["accuracy"])

    model.summary()

    return model
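A hedged usage sketch for build_model (the MFCC-style input shape and the
learning rate below are illustrative assumptions, not from the original
source):

# hypothetical call; input_shape must match your feature arrays
model = build_model(input_shape=(130, 13, 1), learning_rate=1e-4)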
Example #12
def generate_3D_VGG(L2_constant,
                    dropout,
                    learning_rate,
                    kernel_initializer='glorot_uniform',
                    model_summary=True):
    # --- Define kwargs dictionary
    kwargs = {
        'kernel_size': (3, 3, 3),
        'padding': 'same',
        'kernel_regularizer': keras.regularizers.l2(L2_constant),
        'kernel_initializer': kernel_initializer
    }

    # --- Define block components
    conv = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    norm = lambda x: layers.BatchNormalization()(x)
    # --- Define stride-1, stride-2 blocks
    conv1 = lambda filters, x: relu(norm(conv(x, filters, strides=1)))
    conv2 = lambda filters, x: relu(norm(conv(x, filters, strides=(2, 2, 2))))

    # --- Architecture
    inputs = Input(shape=(64, 64, 64, 3))
    l1 = conv2(16, conv1(16, inputs))
    l2 = conv2(32, conv1(32, l1))
    l3 = conv2(64, conv1(64, l2))
    l4 = conv2(128, conv1(128, l3))
    f0 = layers.Flatten()(l4)
    h1 = relu(layers.Dense(128)(f0))
    h2 = relu(norm(h1))
    h2 = layers.Dropout(dropout)(h2)
    logits = layers.Dense(2)(h2)

    model = Model(inputs=inputs, outputs=logits)

    if model_summary:
        model.summary()

    # --- Define a sparse categorical cross-entropy loss
    loss = losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = optimizers.Adam(learning_rate=learning_rate)

    # --- Compile model
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    return model
Example #13
def build_cnn():
    """Builds a convolutional neural net (cnn) using tensorflow

    Args:

    Returns:
      model: The model for the cnn

    """
    drop_out = 0.25

    model = keras.models.Sequential()
    model.add(
        tf.keras.layers.Conv2D(32,
                               kernel_size=5,
                               padding="valid",
                               activation='relu'))
    model.add(tf.keras.layers.Dropout(drop_out))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))

    model.add(
        tf.keras.layers.Conv2D(64,
                               kernel_size=4,
                               padding="valid",
                               activation='relu'))
    model.add(tf.keras.layers.Dropout(drop_out))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))

    model.add(
        tf.keras.layers.Conv2D(64,
                               kernel_size=3,
                               padding="valid",
                               activation='relu'))
    model.add(tf.keras.layers.Dropout(drop_out))
    model.add(tf.keras.layers.BatchNormalization(momentum=0.8))

    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(256, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

    model.compile(optimizer=tf_optimizers.Adam(learning_rate=8e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #14
def define_optimizer(opt_name, opt_kwargs):

    if opt_name == 'sgd':
        optimizer = optimizers.SGD(**opt_kwargs)
    elif opt_name == 'adam':
        optimizer = optimizers.Adam(**opt_kwargs)
    elif opt_name == 'adagrad':
        optimizer = optimizers.Adagrad(**opt_kwargs)
    elif opt_name == 'rmsprop':
        optimizer = optimizers.RMSprop(**opt_kwargs)
    elif opt_name == 'adadelta':
        optimizer = optimizers.Adadelta(**opt_kwargs)
    elif opt_name == 'nadam':
        optimizer = optimizers.Nadam(**opt_kwargs)
    elif opt_name == 'adamax':
        optimizer = optimizers.Adamax(**opt_kwargs)
    else:
        raise ValueError(f"Unknown or unsupported optimizer: {opt_name}.")
    return optimizer
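A quick usage sketch for the factory above (the kwargs shown are illustrative;
any keyword accepted by the chosen optimizer class can be passed through):

# hypothetical call
opt = define_optimizer('adam', {'learning_rate': 1e-3, 'beta_1': 0.9})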
Example #15
def test_auto_regression_operator_on_pde():
    set_random_seed(0)

    diff_eq = WaveEquation(2)
    mesh = Mesh([(-5., 5.), (-5., 5.)], [1., 1.])
    bcs = [(DirichletBoundaryCondition(lambda x, t: np.zeros((len(x), 2)),
                                       is_static=True),
            DirichletBoundaryCondition(lambda x, t: np.zeros((len(x), 2)),
                                       is_static=True))] * 2
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = GaussianInitialCondition(
        cp, [(np.array([0., 2.5]), np.array([[.1, 0.], [0., .1]]))] * 2,
        [3., .0])
    ivp = InitialValueProblem(cp, (0., 10.), ic)

    oracle = FDMOperator(RK4(), ThreePointCentralDifferenceMethod(), .1)
    ref_solution = oracle.solve(ivp)

    ml_op = AutoRegressionOperator(2.5, True)
    ml_op.train(
        ivp, oracle,
        SKLearnKerasRegressor(
            DeepONet([
                np.prod(cp.y_shape(True)).item(), 100, 50,
                diff_eq.y_dimension * 10
            ], [1 + diff_eq.x_dimension, 50, 50, diff_eq.y_dimension * 10],
                     diff_eq.y_dimension),
            optimizer=optimizers.Adam(
                learning_rate=optimizers.schedules.ExponentialDecay(
                    1e-2, decay_steps=500, decay_rate=.95)),
            batch_size=968,
            epochs=500,
        ), 20, lambda t, y: y + np.random.normal(0., t / 75., size=y.shape))
    ml_solution = ml_op.solve(ivp)

    assert ml_solution.vertex_oriented
    assert ml_solution.d_t == 2.5
    assert ml_solution.discrete_y().shape == (4, 11, 11, 2)

    diff = ref_solution.diff([ml_solution])
    assert np.all(diff.matching_time_points == np.linspace(2.5, 10., 4))
    assert np.max(np.abs(diff.differences[0])) < .5
Example #16
def retinanet_resnet50_3d(inputs,
                          K,
                          A,
                          filter_ratio=1,
                          n=2,
                          include_fc_layer=False,
                          shared_weights=False,
                          tahn=False,
                          lr=2e-4,
                          feature_maps=('c3', 'c4', 'c5')):
    """Generates retinanet with resnet backbone. Can specify if classification and regression networks share weights"""
    r_model = resnet50_3d(inputs=inputs['dat'],
                          filter_ratio=filter_ratio,
                          n=n,
                          include_fc_layer=include_fc_layer,
                          kernal1=(1, 1, 1),
                          kernal3=(1, 3, 3),
                          kernal7=(1, 7, 7))
    backbone_output = [
        r_model.get_layer(layer_name).output
        for layer_name in ['c3-output', 'c4-output', 'c5-output']
    ]
    fp_out = feature_pyramid_3d(inputs=backbone_output,
                                filter_ratio=filter_ratio)
    logits = class_and_reg_subnets(feature_pyramid=fp_out,
                                   K=K,
                                   A=A,
                                   filter_ratio=filter_ratio,
                                   shared_weights=shared_weights,
                                   tahn=tahn,
                                   feature_maps=feature_maps)
    preds = LogisticEndpoint1()(logits, inputs)
    model = Model(inputs=inputs, outputs=preds)
    model.compile(
        optimizer=optimizers.Adam(learning_rate=lr),
        experimental_run_tf_function=False,
    )
    return model
Example #17
    def __init__(self, num_layers, state_space, policy_controller,
                 reg_param=0.001,
                 discount_factor=0.99,
                 exploration=0.8,
                 ):
        self.num_layers = num_layers
        self.state_space = state_space
        self.policy_controller = policy_controller
        self.reg_strength = reg_param
        self.discount_factor = discount_factor
        self.exploration = exploration
        self.state_size = self.state_space.size
        self.reward_buffer = []
        self.state_buffer = []
        self.labels = []
        self.global_step = tf.Variable(0)
        starter_learning_rate = 0.1
        learning_rate = keras.optimizers.schedules.ExponentialDecay(
            starter_learning_rate,
            decay_steps=50,
            decay_rate=0.95,
            staircase=True)
        self.optimizer = optimizers.Adam(learning_rate=learning_rate)
Example #18
train_generator = train_datagen.flow_from_directory(
    train_dir,
    #  resize all images to 227x227
    target_size=(227, 227),
    #  batch size
    batch_size=16,
    class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir, target_size=(227, 227), batch_size=16, class_mode='binary')

#  initialize the model (model() is a model-building function defined elsewhere)
model = model()
#  configure the model for training (optimizer, loss function, metrics)
model.compile(optimizer=optimizers.Adam(learning_rate=1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
#  print information about each layer
model.summary()
#  callback that saves the model after every epoch
model_checkpoint = ModelCheckpoint(
    'model_AlexNet_adam.hdf5',  # path where the model is saved
    monitor='loss',  # quantity to monitor
    verbose=1,  # verbosity: 0 = silent, 1 = progress bar, 2 = one line per epoch
    save_best_only=True)  # if True, the best model is never overwritten
#  capture the return value in history to plot loss/accuracy curves
history = model.fit(train_generator,
                    steps_per_epoch=313,
                    epochs=200,
                    callbacks=[model_checkpoint],
Example #19
def unet(inputs,
         filter_ratio=1,
         logits_num=2,
         num_layers=6,
         class_num=1,
         _3d=False,
         compile=False,
         lr=2e-4):
    # --- Define kwargs dictionary
    if _3d:
        kwargs = {'kernel_size': (3, 3, 3), 'padding': 'same'}
    else:
        kwargs = {'kernel_size': (1, 3, 3), 'padding': 'same'}

    # --- Define lambda functions
    conv = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs)(x)
    norm = lambda x: layers.BatchNormalization()(x)
    relu = lambda x: layers.LeakyReLU()(x)
    tran = lambda x, filters, strides: layers.Conv3DTranspose(
        filters=filters, strides=strides, **kwargs)(x)

    # --- Define stride-1, stride-2 blocks
    conv1 = lambda filters, x: relu(norm(conv(x, filters, strides=1)))
    conv2 = lambda filters, x: relu(norm(conv(x, filters, strides=(1, 2, 2))))
    tran2 = lambda filters, x: relu(norm(tran(x, filters, strides=(1, 2, 2))))

    # --- Define simple layers
    c_layer = lambda filters, x: conv1(filters, conv2(filters, x))
    e_layer = lambda filters1, filters2, x: tran2(filters1, conv1(filters2, x))

    contracting_layers = []
    for i in range(num_layers):  # 0,1,2,3,4,5
        if i == 0:
            contracting_layers.append(conv1(int(4 * filter_ratio), inputs))
        else:
            contracting_layers.append(
                c_layer(int(8 * filter_ratio) * i, contracting_layers[i - 1]))
    expanding_layers = []
    for j in reversed(range(num_layers - 1)):  # 4,3,2,1,0
        if j == num_layers - 2:
            expanding_layers.append(
                tran2(int(8 * filter_ratio) * j, contracting_layers[j + 1]))
        else:
            expanding_layers.append(
                e_layer(
                    int(8 * filter_ratio) * j if j != 0 else int(4 *
                                                                 filter_ratio),
                    int(8 * filter_ratio) * (j + 1),
                    expanding_layers[-1] + contracting_layers[j + 1]))
    last_layer = conv1(
        int(4 * filter_ratio),
        conv1(int(4 * filter_ratio),
              expanding_layers[-1] + contracting_layers[0]))

    # --- Create logits
    logits = {}
    for k in range(class_num):
        logits[f'zones{k}'] = layers.Conv3D(filters=logits_num,
                                            name=f'zones{k}',
                                            **kwargs)(last_layer)

    # --- Create model
    model = Model(inputs=inputs, outputs=logits)
    if compile:
        model.compile(
            optimizer=optimizers.Adam(learning_rate=lr),
            loss={
                i: keras.losses.SparseCategoricalCrossentropy(from_logits=True)
                for i in model.output_names
            },
            metrics={i: custom.dsc(cls=1)
                     for i in model.output_names},
            # TODO: Check if leaving this parameter out affects model training.
            experimental_run_tf_function=False,
        )
    return model
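A hedged usage sketch for unet (the 5D input shape below is an illustrative
assumption; with the default _3d=False the (1, 3, 3) kernels convolve only over
the last two spatial dimensions):

# hypothetical call; shape is (slices, height, width, channels)
inputs = Input(shape=(1, 256, 256, 1))
model = unet(inputs, filter_ratio=1, class_num=1)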
Example #20
y_str = numpy.loadtxt("poco_1.prn", skiprows=11, usecols=8, dtype=str)
label_encoder = preprocessing.LabelEncoder()
label_encoder.fit(list(set(y_str)))
y = label_encoder.transform(y_str)
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.33, random_state=42)

#Building, saving initial values of kernel/bias, compiling and training model
print("Prunable network:")
nn = create_neural_network_prunable()
nn.build(x_train.shape)
for i in nn.layers:
    i.save_kernel()
    i.save_bias()
nn.summary()
nn.compile(optimizer=optimizers.Adam(learning_rate=0.0001),
           loss=losses.BinaryCrossentropy(),
           metrics=[metrics.BinaryAccuracy()])
print("Before pruning:")
nn.fit(x_train,
       y_train,
       epochs=10,
       batch_size=64,
       validation_data=(x_train, y_train))
loss, accuracy = nn.evaluate(x_test, y_test, verbose=0)
print("Loss:", loss)
print("Accuracy:", accuracy)

#Creating tensor of weights
l1, l2 = [], []
for layer in nn.layers:
Example #21
Test_Y = np_utils.to_categorical(Data_Test_Y)

# split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.30,
                                                    random_state=30,
                                                    shuffle=True)

# create the model
model = Sequential()
model.add(LSTM(200, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.3))
model.add(Dense(y.shape[1], activation='softmax'))

adam = optimizers.Adam(learning_rate=0.0001)  # use the Adam optimizer
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# train the model
history = model.fit(X_train,
                    y_train,
                    epochs=500,
                    batch_size=200,
                    validation_split=0.3,
                    verbose=1,
                    shuffle=True)
# save model
model.save(filepath="../checkpoints/LSTM_FB15K237", save_format='tf')
Example #22
    dnn_feature_columns = fixlen_feature_columns
    linear_feature_columns = fixlen_feature_columns

    feature_names = get_feature_names(linear_feature_columns +
                                      dnn_feature_columns)

    # 3.generate input data for model
    train_model_input = {name: train[name] for name in feature_names}
    test_model_input = {name: test[name] for name in feature_names}

    # 4.Define Model,train,predict and evaluate
    model = DeepFM(linear_feature_columns,
                   dnn_feature_columns,
                   task='binary',
                   dnn_dropout=dropout)
    optimizer = optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer, "binary_crossentropy", metrics=METRICS)

    log_dir = prefix_dir + 'dfm_' + data_type + '_' + str(epochs)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    logs = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

    history = model.fit(train_model_input,
                        train[target].values,
                        batch_size=256,
                        epochs=epochs,
                        verbose=2,
                        validation_split=0.2,
                        callbacks=[logs])
Example #23
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.optimizers as optimizers
import numpy as np

# TEMPORARY STUFF UNRELATED TO MIDIS
print(tf.__version__)

mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = np.expand_dims(x_train, 3)
x_test = np.expand_dims(x_test, 3)
print(x_train.shape)

model = tf.keras.models.Sequential([
    layers.Conv2D(16, (5, 5), input_shape=(28, 28, 1)),
    layers.Dropout(0.03),
    layers.Activation('relu'),
    layers.Flatten(),
    layers.Dense(10, activation=tf.nn.softmax),
    layers.Dropout(0.03),
])
model.compile(optimizer=optimizers.Adam(learning_rate=0.005),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5, batch_size=128)
model.evaluate(x_test, y_test)
Example #24
# NOTE: this fragment references keras, layers, losses and optimizers without
# showing its imports; the ones below are assumptions for self-containment.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, losses, optimizers

model = keras.Sequential()
model.add(layers.Input([28, 28]))
model.add(layers.Reshape([28, 28, 1]))
model.add(layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='SAME'))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='VALID'))
model.add(layers.Conv2D(filters=64, kernel_size=3, strides=2, padding='SAME'))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='VALID'))
model.add(layers.Flatten())
model.add(layers.Dense(300, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

model.summary()

loss_object = losses.SparseCategoricalCrossentropy()
optimizer = optimizers.Adam()
train_loss = tf.metrics.Mean(name='train_loss')
train_acc = tf.metrics.SparseCategoricalAccuracy(name='train_acc')
test_loss = tf.metrics.Mean(name='test_loss')
test_acc = tf.metrics.SparseCategoricalAccuracy(name='test_acc')


@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
Example #25
#f0 = layers.Reshape((1, 1, 1, bf.shape[2] * bf.shape[3] * bf.shape[4]))(bf)

# --- Create logits
logits = {}
#logits['lbl'] = layers.AveragePooling3D(pool_size=(1, bf.shape[2], bf.shape[3]), padding='same', name='lbl')(bf)
logits['lbl'] = layers.Conv3D(filters=1,
                              kernel_size=(1, 1, 1),
                              activation='sigmoid',
                              name='lbl')(f0)

# --- Create model
model = Model(inputs=inputs, outputs=logits)

# --- Compile model
model.compile(
    optimizer=optimizers.Adam(learning_rate=5e-5),
    #loss=losses.Huber(delta=0.042),
    loss=losses.MeanAbsoluteError(),
    metrics=['mse', 'mae', 'mape'],
    experimental_run_tf_function=False)

# --- Load data into memory for faster training
client.load_data_in_memory()

# --- TensorBoard
#tensor_board = TensorBoard(log_dir='./graph', histogram_freq=0, write_graph=True, write_images=True)

# --- Learning rate scheduler
lr_scheduler = callbacks.LearningRateScheduler(lambda epoch, lr: lr * 0.996)

# --- csv Callback
Example #26
training_y_0_functions = [
    BetaInitialCondition(cp, [(p, p)]).y_0 for p in np.arange(1.2, 6., .2)
]
pidon.train(cp,
            t_interval,
            training_data_args=DataArgs(y_0_functions=training_y_0_functions,
                                        n_domain_points=500,
                                        n_boundary_points=100,
                                        n_batches=1),
            model_args=ModelArgs(
                latent_output_size=50,
                branch_hidden_layer_sizes=[50] * 7,
                trunk_hidden_layer_sizes=[50] * 7,
            ),
            optimization_args=OptimizationArgs(
                optimizer=optimizers.Adam(
                    learning_rate=optimizers.schedules.ExponentialDecay(
                        2e-3, decay_steps=25, decay_rate=.98)),
                epochs=5000,
                ic_loss_weight=10.,
            ))

for p in [2., 3.5, 5.]:
    ic = BetaInitialCondition(cp, [(p, p)])
    ivp = InitialValueProblem(cp, t_interval, ic)

    fdm_solution = fdm.solve(ivp)
    for i, plot in enumerate(fdm_solution.generate_plots()):
        plot.save('diff_1d_fdm_{:.1f}_{}'.format(p, i)).close()

    pidon_solution = pidon.solve(ivp)
    for i, plot in enumerate(pidon_solution.generate_plots()):
Example #27
    def build_origin(self,
                     print_summary=False,
                     num_classes=5,
                     image_size=(352, 640, 3)):

        input_tensor = keras.layers.Input(image_size)
        conv_0 = self.build_conv2D_block(input_tensor,
                                         filters=24,
                                         kernel_size=1,
                                         strides=1)
        # conv stage 1
        conv_1 = self.build_conv2D_block(conv_0,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)
        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 1
        pool1 = MaxPooling2D()(conv_1)
        # conv stage 2
        conv_2 = self.build_conv2D_block(pool1,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)
        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 2
        pool2 = MaxPooling2D()(conv_2)
        # conv stage 3
        conv_3 = self.build_conv2D_block(pool2,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 3
        pool3 = MaxPooling2D()(conv_3)
        # conv stage 4
        conv_4 = self.build_conv2D_block(pool3,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)
        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)

        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)
        # pool4 = MaxPooling2D()(conv_4)
        ### add dilated convolution ###
        # conv stage 5_1
        conv_5 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)

        # added part of SCNN #
        conv_6_4 = self.build_conv2D_block(conv_5,
                                           filters=1024,
                                           kernel_size=3,
                                           strides=1,
                                           dilation_rate=4)
        conv_6_5 = self.build_conv2D_block(conv_6_4,
                                           filters=128,
                                           kernel_size=1,
                                           strides=1)  # 8 x 36 x 100 x 128

        # add message passing #
        # top to down #

        feature_list_new = self.space_cnn_part(conv_6_5)

        #######################
        dropout_output = Dropout(0.9)(feature_list_new)
        conv_output = K.resize_images(
            dropout_output,
            height_factor=self.IMG_HEIGHT // dropout_output.shape[1],
            width_factor=self.IMG_WIDTH // dropout_output.shape[2],
            data_format="channels_last")
        ret_prob_output = Conv2D(filters=num_classes,
                                 kernel_size=1,
                                 activation='softmax',
                                 name='ctg_out_1')(conv_output)

        ### add lane existence prediction branch ###
        # spatial softmax #
        features = ret_prob_output  # N x H x W x C
        softmax = Activation('softmax')(features)
        avg_pool = AvgPool2D(strides=2)(softmax)
        _, H, W, C = avg_pool.get_shape().as_list()
        reshape_output = tf.reshape(avg_pool, [-1, H * W * C])
        fc_output = Dense(128)(reshape_output)
        relu_output = ReLU(max_value=6)(fc_output)
        existence_output = Dense(4, name='ctg_out_2')(relu_output)

        self.model = Model(inputs=input_tensor,
                           outputs=[ret_prob_output, existence_output])
        # print(self.model.summary())
        adam = optimizers.Adam(learning_rate=0.001)  # defined but unused below
        sgd = optimizers.SGD(learning_rate=0.001)

        if num_classes == 1:
            self.model.compile(optimizer=sgd,
                               loss="binary_crossentropy",
                               metrics=['accuracy'])
        else:
            self.model.compile(optimizer=sgd,
                               loss={
                                   'ctg_out_1': 'categorical_crossentropy',
                                   'ctg_out_2': 'binary_crossentropy'
                               },
                               loss_weights={
                                   'ctg_out_1': 1.,
                                   'ctg_out_2': 0.2,
                               },
                               metrics=['accuracy', 'mse'])
Example #28
    def build(self,
              print_summary=False,
              num_classes=5,
              image_size=(352, 640, 3)):

        input_tensor = keras.layers.Input(image_size)
        conv_0 = self.build_conv2D_block(input_tensor,
                                         filters=24,
                                         kernel_size=1,
                                         strides=1)
        conv_0 = self.build_conv2D_block(conv_0,
                                         filters=24,
                                         kernel_size=3,
                                         strides=1)

        conv_0 = self.build_conv2D_block(conv_0,
                                         filters=24,
                                         kernel_size=3,
                                         strides=1)
        conv_0 = self.build_conv2D_block(conv_0,
                                         filters=24,
                                         kernel_size=3,
                                         strides=1)

        # first conv layer
        conv_1 = self.build_conv2D_block(conv_0,
                                         filters=48,
                                         kernel_size=3,
                                         strides=2)
        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=48,
                                         kernel_size=3,
                                         strides=1)

        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=48,
                                         kernel_size=3,
                                         strides=1)
        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=48,
                                         kernel_size=3,
                                         strides=1)
        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=48,
                                         kernel_size=3,
                                         strides=1)
        # second conv layer
        conv_2 = self.build_conv2D_block(conv_1,
                                         filters=64,
                                         kernel_size=3,
                                         strides=2)
        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)

        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)
        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)
        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)
        # third conv layer
        conv_3 = self.build_conv2D_block(conv_2,
                                         filters=96,
                                         kernel_size=3,
                                         strides=2)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=96,
                                         kernel_size=3,
                                         strides=1)

        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=96,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=96,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=96,
                                         kernel_size=3,
                                         strides=1)
        # fourth conv layer
        conv_4 = self.build_conv2D_block(conv_3,
                                         filters=128,
                                         kernel_size=3,
                                         strides=2)
        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)

        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)
        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)
        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)
        # fifth conv layer
        conv_5 = self.build_conv2D_block(conv_4,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=1)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=1)

        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=1)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=1)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=1)
        # added part of SCNN #
        conv_6_4 = self.build_conv2D_block(conv_5,
                                           filters=256,
                                           kernel_size=3,
                                           strides=1,
                                           dilation_rate=1)
        conv_6_5 = self.build_conv2D_block(conv_6_4,
                                           filters=128,
                                           kernel_size=1,
                                           strides=1)  # 8 x 36 x 100 x 128

        scnn_part = self.space_cnn_part(conv_6_5)

        #######################
        # conv2d_deconv5_1 = self.build_conv2D_block(conv_5,filters = 196,kernel_size=3,strides=1)
        # conv2d_deconv4   = self.build_conv2Dtranspose_block(conv2d_deconv5_1, filters=128, kernel_size=4, strides=2)

        Concat_concat4 = concatenate([scnn_part, conv_4], axis=-1)

        conv2d_deconv4_1 = self.build_conv2D_block(Concat_concat4,
                                                   filters=96,
                                                   kernel_size=3,
                                                   strides=1)
        conv2d_deconv3 = self.build_conv2Dtranspose_block(conv2d_deconv4_1,
                                                          filters=96,
                                                          kernel_size=4,
                                                          strides=2)

        Concat_concat3 = concatenate([conv2d_deconv3, conv_3], axis=-1)

        conv2d_deconv3_1 = self.build_conv2D_block(Concat_concat3,
                                                   filters=64,
                                                   kernel_size=3,
                                                   strides=1)
        conv2d_deconv2 = self.build_conv2Dtranspose_block(conv2d_deconv3_1,
                                                          filters=64,
                                                          kernel_size=4,
                                                          strides=2)

        Concat_concat2 = concatenate([conv2d_deconv2, conv_2], axis=-1)

        conv2d_deconv2_1 = self.build_conv2D_block(Concat_concat2,
                                                   filters=32,
                                                   kernel_size=3,
                                                   strides=1)
        conv2d_deconv1 = self.build_conv2Dtranspose_block(conv2d_deconv2_1,
                                                          filters=32,
                                                          kernel_size=4,
                                                          strides=2)

        Concat_concat1 = concatenate([conv2d_deconv1, conv_1], axis=-1)

        conv2d_deconv1_1 = self.build_conv2D_block(Concat_concat1,
                                                   filters=16,
                                                   kernel_size=3,
                                                   strides=1)
        conv2d_deconv0 = self.build_conv2Dtranspose_block(conv2d_deconv1_1,
                                                          filters=128,
                                                          kernel_size=4,
                                                          strides=2)

        if num_classes == 1:
            ret_prob_output = Conv2DTranspose(filters=num_classes,
                                              kernel_size=1,
                                              strides=1,
                                              activation='sigmoid',
                                              padding='same',
                                              name='ctg_out_1')(conv2d_deconv0)
        else:
            ret_prob_output = Conv2DTranspose(filters=num_classes,
                                              kernel_size=1,
                                              strides=1,
                                              activation='softmax',
                                              padding='same',
                                              name='ctg_out_1')(conv2d_deconv0)

        ### add lane existence prediction branch ###
        # spatial softmax #
        # features = ret_prob_output  # N x H x W x C
        # softmax = Activation('softmax')(features)
        # avg_pool = AvgPool2D(strides=2)(softmax)
        features = self.build_conv2D_block(ret_prob_output,
                                           filters=num_classes,
                                           kernel_size=1,
                                           strides=2)
        _, H, W, C = features.get_shape().as_list()
        reshape_output = tf.reshape(features, [-1, H * W * C])
        fc_output = Dense(128)(reshape_output)
        relu_output = Activation('relu')(fc_output)
        existence_output = Dense(4)(relu_output)
        existence_output = Activation('softmax',
                                      name='ctg_out_2')(existence_output)
        self.model = Model(inputs=input_tensor,
                           outputs=[ret_prob_output, existence_output])
        # print(self.model.summary())
        adam = optimizers.Adam(learning_rate=0.001)  # defined but unused below
        sgd = optimizers.SGD(learning_rate=0.001)

        if num_classes == 1:
            self.model.compile(optimizer=sgd,
                               loss="binary_crossentropy",
                               metrics=['accuracy'])
        else:
            self.model.compile(optimizer=sgd,
                               loss=self.my_loss_error,
                               metrics=['accuracy'])
Example #29
if __name__ == '__main__':
    train_dataset = tf.data.Dataset.list_files(PATH + 'train/*.jpg')
    train_dataset = train_dataset.map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_dataset = train_dataset.shuffle(BUFFER_SIZE)
    train_dataset = train_dataset.batch(BATCH_SIZE)

    test_dataset = tf.data.Dataset.list_files(PATH + 'test/*.jpg')
    test_dataset = test_dataset.map(load_image_test)
    test_dataset = test_dataset.batch(BATCH_SIZE)

    generator = Generator(output_channels=OUTPUT_CHANNELS, name='generator')
    discriminator = Discriminator()
    generator_loss = GeneratorLoss()
    discriminator_loss = DiscriminatorLoss()
    generator_optimizer = optimizers.Adam(learning_rate=1e-4, beta_1=0.5)
    discriminator_optimizer = optimizers.Adam(learning_rate=1e-4, beta_1=0.5)

    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator=generator,
        discriminator=discriminator)

    log_dir = "logs/"
    summary_writer = tf.summary.create_file_writer(
        log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    @tf.function
Example #30
        n_ic_repeats=3
    ),
    test_data_args=DataArgs(
        y_0_functions=test_y_0_functions,
        n_domain_points=50,
        n_batches=1
    ),
    model_args=ModelArgs(
        latent_output_size=100,
        branch_hidden_layer_sizes=[100, 100, 100, 100, 100],
        trunk_hidden_layer_sizes=[100, 100, 100, 100, 100],
    ),
    optimization_args=OptimizationArgs(
        optimizer=optimizers.Adam(
            learning_rate=optimizers.schedules.ExponentialDecay(
                1e-3, decay_steps=30, decay_rate=.97
            )
        ),
        epochs=2000,
    ),
    secondary_optimization_args=SecondaryOptimizationArgs(
        max_iterations=500
    )
)

for y_0 in [.7, 1., 1.3]:
    ic = ContinuousInitialCondition(cp, lambda _: np.array([y_0]))
    ivp = InitialValueProblem(cp, t_interval, ic)

    fdm_solution = fdm.solve(ivp)
    for i, plot in enumerate(fdm_solution.generate_plots()):