Example #1
def test_make_hotshot_network_small_dataset():
    nn = make_hotshot_network(
        peptide_length=3,
        n_amino_acids=2,
        activation="relu",
        init="lecun_uniform",
        loss="mse",
        layer_sizes=[4],
        optimizer=RMSprop(lr=0.05, rho=0.9, epsilon=1e-6),
        batch_normalization=False)
    X_binary = np.array([
        [True, False, True, False, True, False],
        [True, False, True, False, False, True],
        [True, False, False, True, True, False],
        [True, False, False, True, False, True],
        [False, True, True, False, True, False],
        [False, True, True, False, False, True],
    ], dtype=bool)
    Y = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0])
    nn.fit(X_binary, Y, nb_epoch=20)
    Y_pred = nn.predict(X_binary)
    print(Y)
    print(Y_pred)
    for (Y_i, Y_pred_i) in zip(Y, Y_pred):
        if Y_i:
            assert Y_pred_i >= 0.6, "Expected higher value than %f" % Y_pred_i
        else:
            assert Y_pred_i <= 0.4, "Expected lower value than %f" % Y_pred_i
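
The 6-column X_binary above is consistent with the hotshot network's 1-of-k input: peptide_length=3 positions times n_amino_acids=2 flags per position. As a rough illustration only (the helper name and index alphabet below are hypothetical, not part of the library), such a matrix could be built like this:

import numpy as np

def encode_one_hot(peptides, n_amino_acids=2):
    # Hypothetical helper: turn index-encoded peptides into a flat
    # 1-of-k boolean matrix with peptide_length * n_amino_acids columns.
    peptides = np.asarray(peptides)
    n_samples, peptide_length = peptides.shape
    X = np.zeros((n_samples, peptide_length * n_amino_acids), dtype=bool)
    for i, peptide in enumerate(peptides):
        for j, amino_acid_index in enumerate(peptide):
            X[i, j * n_amino_acids + amino_acid_index] = True
    return X

# encode_one_hot([[0, 0, 0]]) gives [[True, False, True, False, True, False]],
# which matches the first row of X_binary above.
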
Example #2
def test_make_hotshot_network_small_dataset():
    nn = make_hotshot_network(peptide_length=3,
                              n_amino_acids=2,
                              activation="relu",
                              init="lecun_uniform",
                              loss="mse",
                              layer_sizes=[4],
                              optimizer=RMSprop(lr=0.05, rho=0.9,
                                                epsilon=1e-6),
                              batch_normalization=False)
    X_binary = np.array([
        [True, False, True, False, True, False],
        [True, False, True, False, False, True],
        [True, False, False, True, True, False],
        [True, False, False, True, False, True],
        [False, True, True, False, True, False],
        [False, True, True, False, False, True],
    ], dtype=bool)
    Y = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0])
    nn.fit(X_binary, Y, nb_epoch=200)
    Y_pred = nn.predict(X_binary)
    print(Y)
    print(Y_pred)
    for (Y_i, Y_pred_i) in zip(Y, Y_pred):
        if Y_i:
            assert Y_pred_i >= 0.6, "Expected higher value than %f" % Y_pred_i
        else:
            assert Y_pred_i <= 0.4, "Expected lower value than %f" % Y_pred_i
Example #3
def make_model(
        config,
        peptide_length=9):
    """
    If we're using a learned vector embedding for amino acids,
    generate a network that expects index inputs;
    otherwise assume a 1-of-k binary encoding.
    """
    print("===")
    print(config)
    if config.embedding_size:
        return make_embedding_network(
            peptide_length=peptide_length,
            embedding_input_dim=20,
            embedding_output_dim=config.embedding_size,
            layer_sizes=[config.hidden_layer_size],
            activation=config.activation,
            init=config.init,
            loss=config.loss,
            dropout_probability=config.dropout_probability,
            learning_rate=config.learning_rate,
            optimizer=config.optimizer)
    else:
        return make_hotshot_network(
            peptide_length=peptide_length,
            layer_sizes=[config.hidden_layer_size],
            activation=config.activation,
            init=config.init,
            loss=config.loss,
            dropout_probability=config.dropout_probability,
            learning_rate=config.learning_rate,
            optimizer=config.optimizer)
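
make_model only reads a handful of attributes off config (embedding_size, hidden_layer_size, activation, init, loss, dropout_probability, learning_rate, optimizer), so any object exposing those fields will do. A minimal sketch of calling it with a namedtuple-style config follows; the container and the field values are illustrative, not taken from the original tests:

from collections import namedtuple

# Hypothetical config container; the fields mirror the attributes
# make_model reads above, and the values are illustrative only.
ModelConfig = namedtuple("ModelConfig", [
    "embedding_size", "hidden_layer_size", "activation", "init",
    "loss", "dropout_probability", "learning_rate", "optimizer"])

config = ModelConfig(
    embedding_size=32,   # a falsy value selects the hotshot (1-of-k) branch instead
    hidden_layer_size=64,
    activation="relu",
    init="lecun_uniform",
    loss="mse",
    dropout_probability=0.1,
    learning_rate=0.001,
    optimizer="rmsprop")

model = make_model(config, peptide_length=9)
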
Example #4
def test_make_hotshot_network_properties():
    layer_sizes = [3, 4]
    nn = make_hotshot_network(peptide_length=3,
                              n_amino_acids=2,
                              activation="relu",
                              init="lecun_uniform",
                              loss=mse,
                              layer_sizes=layer_sizes,
                              batch_normalization=False,
                              optimizer=RMSprop(lr=0.7, rho=0.9, epsilon=1e-6))
    eq_(nn.layers[0].input_dim, 6)
    eq_(nn.loss, mse)
    assert np.allclose(nn.optimizer.lr.eval(), 0.7)
    print(nn.layers)
    # since the hotshot network doesn't have an embedding layer + flatten
    # we expect two fewer total layers than the embedding network.
    eq_(len(nn.layers), 2 * (1 + len(layer_sizes)))
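
The 2 * (1 + len(layer_sizes)) check reflects how, in this old-style Keras API, each Dense layer is added together with a separate Activation layer: one Dense + Activation pair per hidden size, plus a final Dense + Activation pair for the scalar output. Below is a minimal sketch of the kind of stack that count implies, written against the Keras 1.x Sequential API; it illustrates the arithmetic and is not the library's actual make_hotshot_network implementation:

from keras.models import Sequential
from keras.layers.core import Activation, Dense

# peptide_length=3 * n_amino_acids=2 -> 6 binary input features.
layer_sizes = [3, 4]
model = Sequential()
model.add(Dense(output_dim=layer_sizes[0], input_dim=6, init="lecun_uniform"))
model.add(Activation("relu"))
model.add(Dense(output_dim=layer_sizes[1], init="lecun_uniform"))
model.add(Activation("relu"))
model.add(Dense(output_dim=1, init="lecun_uniform"))
model.add(Activation("sigmoid"))

# Two layers (Dense + Activation) per hidden size, plus the output pair.
assert len(model.layers) == 2 * (1 + len(layer_sizes))
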