Code example #1 — file: net_seg_random.py (project: Cysu/dlearn)
def train_model(dataset):
    """Train a segmentation network from random initialization.

    Architecture: two conv-pool stages followed by two fully connected
    layers; the final sigmoid layer emits a 37*17-dimensional mask.

    Parameters
    ----------
    dataset : dataset object consumed by ``sgd.train``.

    Returns
    -------
    The trained ``NeuralNet`` model.
    """
    X = T.tensor4()  # input images; assumes (batch, 3, 160, 80) — matches input_shape below
    S = T.tensor3()  # target masks; flattened to (batch, 37*17) in the cost

    layers = []
    # Conv stage 1: 3 -> 32 feature maps, 5x5 filters, 2x2 pooling.
    layers.append(ConvPoolLayer(
        input=X,
        input_shape=(3, 160, 80),
        filter_shape=(32, 3, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False
    ))

    # Conv stage 2: 32 -> 64 feature maps.
    layers.append(ConvPoolLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False
    ))

    # Hidden fully connected layer on the flattened conv features.
    layers.append(FullConnLayer(
        input=layers[-1].output.flatten(2),
        input_shape=np.prod(layers[-1].output_shape),
        output_shape=1024,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh
    ))

    # Output layer: sigmoid over the 37x17 mask.
    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=37 * 17,
        dropout_input=layers[-1].dropout_output,
        active_func=actfuncs.sigmoid
    ))

    model = NeuralNet(layers, X, layers[-1].output)
    model.target = S

    # Weighted L2 between prediction and target mask plus L2 weight decay.
    # The dropout path drives the training cost; the deterministic output
    # is used for the reported error.
    model.cost = costfuncs.weighted_norm2(
        layers[-1].dropout_output, S.flatten(2), 1.0) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.weighted_norm2(
        layers[-1].output, S.flatten(2), 1.0)

    sgd.train(model, dataset, lr=1e-2, momentum=0.9,
              batch_size=100, n_epochs=300,
              epoch_waiting=10)

    return model
Code example #2 — file: net_seg_handcrafted.py (project: Cysu/dlearn)
def train_model(dataset):
    """Train a segmentation network on hand-crafted feature vectors.

    A two-layer MLP: 2784-dim input features -> 1024 tanh hidden units ->
    37*17 sigmoid outputs forming the predicted mask.

    Parameters
    ----------
    dataset : dataset object consumed by ``sgd.train``.

    Returns
    -------
    The trained ``NeuralNet`` model.
    """
    X = T.matrix()   # hand-crafted features; assumes 2784 dims per sample
    S = T.tensor3()  # target masks; flattened to (batch, 37*17) in the cost

    layers = []

    # Hidden layer with dropout on its output.
    layers.append(FullConnLayer(
        input=X,
        input_shape=2784,
        output_shape=1024,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh
    ))

    # Output layer: sigmoid over the 37x17 mask.
    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=37 * 17,
        dropout_input=layers[-1].dropout_output,
        active_func=actfuncs.sigmoid
    ))

    model = NeuralNet(layers, X, layers[-1].output)
    model.target = S

    # Weighted L2 between prediction and target mask plus L2 weight decay.
    # The dropout path drives the training cost; the deterministic output
    # is used for the reported error.
    model.cost = costfuncs.weighted_norm2(
        layers[-1].dropout_output, S.flatten(2), 1.0) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.weighted_norm2(
        layers[-1].output, S.flatten(2), 1.0)

    # never_stop=True: keep training past the early-stopping criterion.
    sgd.train(model, dataset, lr=1e-2, momentum=0.9,
              batch_size=100, n_epochs=300,
              epoch_waiting=10, never_stop=True)

    return model
Code example #3 — file: mnist_lenet5.py (project: Cysu/dlearn)
def train_model(dataset):
    """Train a LeNet-5 style convolutional classifier (28x28 gray inputs,
    10-way softmax output) and return the fitted model."""
    X = T.tensor4()   # input images, single channel
    Y = T.lvector()   # integer class labels

    # First conv-pool stage: 1 -> 32 maps, 5x5 filters, 2x2 pooling.
    conv1 = ConvPoolLayer(
        input=X,
        input_shape=(1, 28, 28),
        filter_shape=(32, 1, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh
    )

    # Second conv-pool stage: 32 -> 64 maps; flatten for the MLP head.
    conv2 = ConvPoolLayer(
        input=conv1.output,
        input_shape=conv1.output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=True
    )

    # Fully connected hidden layer.
    hidden = FullConnLayer(
        input=conv2.output,
        input_shape=conv2.output_shape,
        output_shape=512,
        active_func=actfuncs.tanh
    )

    # Softmax output over the 10 classes.
    output = FullConnLayer(
        input=hidden.output,
        input_shape=hidden.output_shape,
        output_shape=10,
        active_func=actfuncs.softmax
    )

    model = NeuralNet([conv1, conv2, hidden, output], X, output.output)
    model.target = Y
    model.cost = costfuncs.neglog(output.output, Y)
    model.error = costfuncs.miscls_rate(output.output, Y)

    sgd.train(model, dataset, lr=0.1, momentum=0.9,
              batch_size=500, n_epochs=200,
              lr_decr=1.0)

    return model
Code example #4 — file: net_attr.py (project: Cysu/dlearn)
def train_model(dataset, use_scpool):
    """Train the attribute network, optionally pooling the conv features
    under a ground-truth shape mask (shape-constrained pooling).

    Parameters
    ----------
    dataset : dataset object consumed by ``sgd.train``.
    use_scpool : bool-like; if true, average conv features over the masked
        region instead of flattening them.

    Returns
    -------
    The trained ``NeuralNet`` model.
    """
    def shape_constrained_pooling(F, S):
        # Keep only feature responses where the mask equals exactly 1, then
        # average over the surviving (non-zero) spatial positions.
        masked_F = T.switch(T.eq(S, 1), F, 0)
        s = masked_F.sum(axis=[2, 3])
        Z = T.neq(masked_F, 0).sum(axis=[2, 3])
        return s / Z

    X = T.tensor4()  # input images; assumes (batch, 3, 160, 80)
    A = T.matrix()   # binary attribute targets, 11 per sample
    S = T.tensor3()  # shape masks, broadcast over feature maps via dimshuffle

    layers = []
    # Conv stage 1: 3 -> 32 maps, 5x5 filters, 2x2 pooling, bias fixed at 0.
    layers.append(ConvPoolLayer(
        input=X,
        input_shape=(3, 160, 80),
        filter_shape=(32, 3, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
        b=0.0
    ))

    # Conv stage 2: 32 -> 64 maps.
    layers.append(ConvPoolLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
        b=0.0
    ))

    F = layers[-1].output
    if use_scpool:
        # Pool per feature map, restricted to the mask region.
        F = shape_constrained_pooling(F, S.dimshuffle(0, 'x', 1, 2))
    else:
        F = F.flatten(2)

    # NOTE(review): input_shape here is output_shape[0] (the channel count),
    # which matches the pooled path; for the flatten path the true size would
    # be prod(output_shape) — confirm whether use_scpool is ever False.
    layers.append(FullConnLayer(
        input=F,
        input_shape=layers[-1].output_shape[0],
        output_shape=32,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh
    ))

    # Output layer: 11 sigmoid attribute predictions.
    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=11,
        dropout_input=layers[-1].dropout_output,
        active_func=actfuncs.sigmoid
    ))

    # The model takes both the image and its shape mask as inputs.
    model = NeuralNet(layers, [X, S], layers[-1].output)
    model.target = A
    # Binary cross-entropy on the dropout path plus L2 weight decay;
    # binary error rate on the deterministic path.
    model.cost = costfuncs.binxent(layers[-1].dropout_output, A) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.binerr(layers[-1].output, A)

    sgd.train(model, dataset, lr=1e-2, momentum=0.9,
              batch_size=100, n_epochs=300,
              epoch_waiting=10)

    return model
Code example #5 — file: net_attr_random.py (project: Cysu/dlearn)
def train_model(dataset):
    """Train the attribute network from random initialization, without any
    shape-mask pooling.

    Two conv-pool stages (the second flattened) feed a small MLP that
    predicts 11 binary attributes via sigmoid outputs.

    Parameters
    ----------
    dataset : dataset object consumed by ``sgd.train``.

    Returns
    -------
    The trained ``NeuralNet`` model.
    """
    X = T.tensor4()  # input images; assumes (batch, 3, 160, 80)
    A = T.matrix()   # binary attribute targets, 11 per sample

    layers = []
    # Conv stage 1: 3 -> 32 maps, 5x5 filters, 2x2 pooling.
    layers.append(ConvPoolLayer(
        input=X,
        input_shape=(3, 160, 80),
        filter_shape=(32, 3, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
    ))

    # Conv stage 2: 32 -> 64 maps, flattened for the MLP head.
    layers.append(ConvPoolLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=True,
    ))

    # Hidden layer with dropout on its output.
    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=32,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh
    ))

    # Output layer: 11 sigmoid attribute predictions.
    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=11,
        dropout_input=layers[-1].dropout_output,
        active_func=actfuncs.sigmoid
    ))

    model = NeuralNet(layers, X, layers[-1].output)
    model.target = A
    # Binary cross-entropy on the dropout path plus L2 weight decay;
    # binary error rate on the deterministic path.
    model.cost = costfuncs.binxent(layers[-1].dropout_output, A) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.binerr(layers[-1].output, A)

    sgd.train(model, dataset, lr=1e-2, momentum=0.9,
              batch_size=100, n_epochs=300,
              epoch_waiting=10)

    return model
Code example #6 — file: net_latent.py (project: Cysu/dlearn)
def train_model(dataset, attr_model, seg_model):
    """Fine-tune a joint network whose segmentation branch predicts a soft
    mask that gates the conv features before attribute prediction.

    The feature and attribute layers are initialized from ``attr_model`` and
    the segmentation layers from ``seg_model`` (both pretrained).

    Parameters
    ----------
    dataset : dataset object consumed by ``sgd.train``.
    attr_model : pretrained attribute model; its ``blocks[i]._W``/``_b``
        seed the conv and attribute layers.
    seg_model : pretrained segmentation model; its ``blocks[i]._W``/``_b``
        seed the segmentation layers.

    Returns
    -------
    The trained ``NeuralNet`` model.
    """
    def shape_constrained_pooling(fmaps):
        # Sum over spatial positions, normalized by the total |tanh|
        # response rather than the raw pixel count.
        s = fmaps.sum(axis=[2, 3])
        Z = abs(actfuncs.tanh(fmaps)).sum(axis=[2, 3])
        return s / Z

    X = T.tensor4()  # input images; assumes (batch, 3, 160, 80)
    A = T.matrix()   # binary attribute targets, 11 per sample

    # Shared convolutional feature extractor, seeded from attr_model.
    feature_layers = []
    feature_layers.append(
        ConvPoolLayer(
            input=X,
            input_shape=(3, 160, 80),
            filter_shape=(32, 3, 5, 5),
            pool_shape=(2, 2),
            active_func=actfuncs.tanh,
            flatten=False,
            W=attr_model.blocks[0]._W,
            b=0.0,
        )
    )

    feature_layers.append(
        ConvPoolLayer(
            input=feature_layers[-1].output,
            input_shape=feature_layers[-1].output_shape,
            filter_shape=(64, 32, 5, 5),
            pool_shape=(2, 2),
            active_func=actfuncs.tanh,
            flatten=False,
            W=attr_model.blocks[1]._W,
            b=0.0,
        )
    )

    # Segmentation branch, seeded from seg_model, predicting a 37x17 mask.
    seg_layers = []
    seg_layers.append(
        FullConnLayer(
            input=feature_layers[-1].output.flatten(2),
            input_shape=np.prod(feature_layers[-1].output_shape),
            output_shape=1024,
            dropout_ratio=0.1,
            active_func=actfuncs.tanh,
            W=seg_model.blocks[2]._W,
            b=seg_model.blocks[2]._b,
        )
    )

    seg_layers.append(
        FullConnLayer(
            input=seg_layers[-1].output,
            input_shape=seg_layers[-1].output_shape,
            output_shape=37 * 17,
            dropout_input=seg_layers[-1].dropout_output,
            active_func=actfuncs.sigmoid,
            W=seg_model.blocks[3]._W,
            b=seg_model.blocks[3]._b,
        )
    )

    # Predicted soft mask: zero out weak responses (< 0.1), reshape to the
    # spatial mask, and add a broadcastable channel axis.
    S = seg_layers[-1].output
    S = S * (S >= 0.1)
    S = S.reshape((S.shape[0], 37, 17))
    S = S.dimshuffle(0, "x", 1, 2)

    # Same mask computed along the dropout path, for the training cost.
    S_dropout = seg_layers[-1].dropout_output
    S_dropout = S_dropout * (S_dropout >= 0.1)
    S_dropout = S_dropout.reshape((S_dropout.shape[0], 37, 17))
    S_dropout = S_dropout.dimshuffle(0, "x", 1, 2)

    # Attribute branch: conv features gated by the predicted mask, pooled,
    # then classified. Seeded from attr_model.
    attr_layers = []
    attr_layers.append(
        FullConnLayer(
            input=shape_constrained_pooling(feature_layers[-1].output * S),
            input_shape=feature_layers[-1].output_shape,
            output_shape=64,
            dropout_input=shape_constrained_pooling(
                feature_layers[-1].dropout_output * S_dropout),
            dropout_ratio=0.1,
            active_func=actfuncs.tanh,
            W=attr_model.blocks[2]._W,
            b=attr_model.blocks[2]._b,
        )
    )

    attr_layers.append(
        FullConnLayer(
            input=attr_layers[-1].output,
            input_shape=attr_layers[-1].output_shape,
            output_shape=11,
            dropout_input=attr_layers[-1].dropout_output,
            active_func=actfuncs.sigmoid,
            W=attr_model.blocks[3]._W,
            b=attr_model.blocks[3]._b,
        )
    )

    model = NeuralNet(feature_layers + seg_layers + attr_layers,
                      X, attr_layers[-1].output)
    model.target = A

    # Binary cross-entropy on the dropout path plus L2 weight decay;
    # binary error rate on the deterministic path.
    model.cost = costfuncs.binxent(attr_layers[-1].dropout_output, A) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.binerr(attr_layers[-1].output, A)

    # Lower learning rate than the from-scratch models: fine-tuning.
    sgd.train(model, dataset, lr=1e-3, momentum=0.9,
              batch_size=100, n_epochs=300, epoch_waiting=10)

    return model