示例#1
0
 def feature_extraction():
     """Build the body-part feature extraction sub-network.

     Splits the input into one (2, 80, 30) chunk per body group, runs a
     two-stage conv/pool column on each chunk, then merges the columns
     with a Maxout composition layer.
     """
     n_groups = len(bodyconf.groups)
     part_shape = (2, 80, 30)
     splitter = DecompLayer([part_shape] * n_groups)
     columns = MultiwayNeuralNet([
         NeuralNet([
             ConvPoolLayer((64, 2, 5, 5), (2, 2), part_shape, af.tanh,
                           False),
             ConvPoolLayer((64, 64, 5, 5), (2, 2), None, af.tanh, True),
         ])
         for __ in xrange(n_groups)
     ])
     merger = CompLayer(strategy='Maxout')
     return NeuralNet([splitter, columns, merger])
示例#2
0
def show_cmc(model, X, indices, samples):
    print "Computing cmc ..."

    result = cachem.load('cmc')

    if result is not None: return result

    import theano
    import theano.tensor as T
    from reid.models.layers import CompLayer
    from reid.utils import cmc

    x = T.matrix()
    y, thr = model.get_output(x)
    thr.append(y)
    comp = CompLayer()
    y, thr = comp.get_output(thr)

    f = theano.function(inputs=[x], outputs=y)

    train_pids = samples[3]

    gX, gY, pX, pY = [], [], [], []
    for i, (pid, vid) in enumerate(indices):
        if pid in train_pids: continue
        if vid == 0:
            gX.append(i)
            gY.append(pid)
        else:
            pX.append(i)
            pY.append(pid)

    def compute_distance(i, j):
        y = f(numpy.hstack(
            (X[gX[i]:gX[i] + 1, :], X[pX[j]:pX[j] + 1, :]))).ravel()
        return -y[-1]

    return cmc.count_lazy(compute_distance, gY, pY, 100, 1)
示例#3
0
def show_cmc(model, X, indices, samples):
    """Compute and cache the CMC curve for *model* on a gallery/probe split.

    Args:
        model: Trained model exposing ``get_output``; the pairwise distance
            is taken from the last element of its composed output vector.
        X: Feature matrix, one row per sample; a gallery row and a probe row
            are horizontally stacked before being fed to the model.
        indices: Iterable of (pid, vid) pairs, one per row of X; vid == 0
            marks a gallery view, anything else a probe view.
        samples: Tuple whose fourth element (``samples[3]``) holds the
            training person ids to exclude from evaluation.

    Returns:
        The result of ``cmc.count_lazy`` (cached under the key ``'cmc'``).
    """
    print "Computing cmc ..."

    result = cachem.load('cmc')

    # Reuse the cached curve when available.
    if result is not None: return result

    import theano
    import theano.tensor as T
    from reid.models.layers import CompLayer
    from reid.utils import cmc

    x = T.matrix()
    y, thr = model.get_output(x)
    thr.append(y)
    comp = CompLayer()
    # Concatenate all through-outputs (including y) into a single vector.
    y, thr = comp.get_output(thr)

    f = theano.function(inputs=[x], outputs=y)

    train_pids = samples[3]

    # Split non-training samples into gallery (vid == 0) and probe lists.
    gX, gY, pX, pY = [], [], [], []
    for i, (pid, vid) in enumerate(indices):
        if pid in train_pids: continue
        if vid == 0:
            gX.append(i)
            gY.append(pid)
        else:
            pX.append(i)
            pY.append(pid)

    def compute_distance(i, j):
        # Negate so that a higher model score ranks as a smaller distance.
        y = f(numpy.hstack((X[gX[i]:gX[i]+1, :], X[pX[j]:pX[j]+1, :]))).ravel()
        return -y[-1]

    return cmc.count_lazy(compute_distance, gY, pY, 100, 1)
示例#4
0
def compute_result(model, dataset, data):
    """Compute output value of data samples by using the trained model

    Args:
        model: A NeuralNet model
        dataset: A Dataset object returned by ``create_dataset``
        data: A list of pedestrian tuples returned by ``decomp_body``

    Returns:
        A tuple (train, valid, test) where each is three matrices
        (image_tensor4D, output_matrix, target_matrix)
    """
    cached = cachem.load('result')
    if cached is not None:
        return cached

    import theano
    import theano.tensor as T
    from reid.models.layers import CompLayer
    from reid.models.neural_net import NeuralNet

    # Append a composition layer so the model emits one flat vector.
    net = NeuralNet([model, CompLayer()])
    x = T.matrix()
    f = theano.function(inputs=[x], outputs=net.get_output(x))

    def compute_output(X):
        # Feed samples one row at a time and stack the flattened outputs.
        rows = [f(X[i:i + 1, :]).ravel() for i in xrange(X.shape[0])]
        return numpy.asarray(rows)

    images = numpy.asarray([p[0] for p in data])

    def build_split(ind, shared_x, shared_y):
        # (image_tensor4D, output_matrix, target_matrix) for one split.
        return (images[ind],
                compute_output(shared_x.get_value(borrow=True)),
                shared_y.get_value(borrow=True))

    train = build_split(dataset.train_ind, dataset.train_x, dataset.train_y)
    valid = build_split(dataset.valid_ind, dataset.valid_x, dataset.valid_y)
    test = build_split(dataset.test_ind, dataset.test_x, dataset.test_y)

    return (train, valid, test)
示例#5
0
def compute_attr(model, dataset, batch_size=100):
    """Compute the attributes matrix

    Args:
        model: The deep neural net model
        dataset: The dataset returned by ``preprocess``
        batch_size: Number of samples pushed through the model per batch

    Returns:
        The dataset ``(gA, gY, pA, pY)`` (cached under the key ``'attr'``)
    """

    data = cachem.load('attr')

    if data is None:
        import theano
        import theano.tensor as T
        from reid.models.layers import CompLayer
        from reid.models.neural_net import NeuralNet

        # Append a composition layer so the model emits one flat vector.
        model = NeuralNet([model, CompLayer()])

        def compute(X):
            x = T.matrix('x')
            i = T.lscalar('i')

            func = theano.function(
                inputs=[i],
                outputs=model.get_output(x),
                givens={x: X[i * batch_size:(i + 1) * batch_size]})

            n_samples = X.get_value(borrow=True).shape[0]
            # Ceiling division.  The previous ``n / batch_size + 1`` produced
            # a spurious empty batch whenever n_samples was an exact multiple
            # of batch_size; max(1, ...) preserves the old n_samples == 0
            # behaviour (a single empty batch).
            n_batches = max(1, (n_samples + batch_size - 1) // batch_size)
            return numpy.vstack([func(j) for j in xrange(n_batches)])

        gX, gY, pX, pY = dataset
        gA = compute(theano.shared(gX, borrow=True))
        pA = compute(theano.shared(pX, borrow=True))

        data = (gA, gY, pA, pY)

    return data
示例#6
0
def compute_result(model, batch_dir, images, samples):
    """Compute output for dataset

    Args:
        model: Deep model
        dataset: Dataset X = X1_X2, Y = A1_A2_(0/1)
        images: [img]
        samples: (train, vaid, test) each is [(i, j, 0/1)]

    Returns:
        (train, valid, test) where each is
        (img_list_1, img_list_2 output_matrix, target_matrix)
    """

    print "Computing result ..."

    result = cachem.load('result')

    if result is not None: return result

    import glob
    import cPickle
    import theano
    import theano.tensor as T
    from reid.models.layers import CompLayer

    x = T.matrix()
    y, thr = model.get_output(x)
    thr.append(y)
    comp = CompLayer()
    y, thr = comp.get_output(thr)

    f = theano.function(inputs=[x], outputs=y)

    def compute_output(X):
        outputs = [f(X[i:i + 1, :]).ravel() for i in xrange(X.shape[0])]
        return numpy.asarray(outputs)

    train_files = glob.glob(os.path.join(batch_dir, 'train_*.pkl'))
    valid_files = glob.glob(os.path.join(batch_dir, 'valid_*.pkl'))
    test_files = glob.glob(os.path.join(batch_dir, 'test_*.pkl'))

    train_files.sort()
    valid_files.sort()
    test_files.sort()

    # Setup parameters
    n_train_batches = len(train_files)
    n_valid_batches = len(valid_files)
    n_test_batches = len(test_files)

    for i in xrange(n_train_batches):
        with open(train_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    train = ([images[i] for i, __, __ in samples[0]],
             [images[j] for __, j, __ in samples[0]], cum_Y, cum_T)

    for i in xrange(n_valid_batches):
        with open(valid_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    valid = ([images[i] for i, __, __ in samples[1]],
             [images[j] for __, j, __ in samples[1]], cum_Y, cum_T)

    for i in xrange(n_test_batches):
        with open(test_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    test = ([images[i] for i, __, __ in samples[2]],
            [images[j] for __, j, __ in samples[2]], cum_Y, cum_T)

    return (train, valid, test)
示例#7
0
def train_model(batch_dir):
    """Train deep model

    Builds the combined feature-extraction, attribute-classification and
    person re-identification network and trains it with batched SGD.

    Args:
        batch_dir: Directory of pickled training batches consumed by
            ``sgd.train_batch``

    Returns:
        The trained deep model (cached under the key ``'model'``)
    """

    print "Training ..."

    model = cachem.load('model')

    # Reuse a previously trained model when cached.
    if model is not None: return model

    import reid.models.active_functions as af
    import reid.models.cost_functions as cf
    from reid.models.layers import ConvPoolLayer, FullConnLayer, IdentityLayer, FilterParingLayer
    from reid.models.layers import CompLayer, DecompLayer, CloneLayer
    from reid.models.neural_net import NeuralNet, MultiwayNeuralNet
    from reid.models.evaluate import Evaluator
    from reid.optimization import sgd

    # One output group per attribute; unival groups are single-label
    # (softmax below), multival groups are multi-label (sigmoid below).
    output_sizes = [len(grp) for grp in attrconf.unival + attrconf.multival]
    target_sizes = [1] * len(attrconf.unival) + [
        len(grp) for grp in attrconf.multival
    ]

    # Feature extraction module
    def feature_extraction():
        # One (2, 80, 30) conv/pool column per body group, merged by Maxout.
        decomp = DecompLayer([(2, 80, 30)] * len(bodyconf.groups))
        column = MultiwayNeuralNet([
            NeuralNet([
                ConvPoolLayer((64, 2, 5, 5), (2, 2), (2, 80, 30), af.tanh,
                              False),
                ConvPoolLayer((64, 64, 5, 5), (2, 2), None, af.tanh, True)
            ]) for __ in xrange(len(bodyconf.groups))
        ])
        comp = CompLayer(strategy='Maxout')
        return NeuralNet([decomp, column, comp])

    # The SAME extractor instance is used for both images of a pair, so the
    # two branches share weights.
    fe = feature_extraction()
    feat_module = NeuralNet([
        DecompLayer([(2 * 80 * 30 * len(bodyconf.groups), )] * 2),
        MultiwayNeuralNet([fe, fe]),
        CompLayer()
    ])

    # Attribute classification module
    def attribute_classification():
        # 4352 input units — presumably the per-image feature size emitted by
        # feat_module; TODO(review) confirm against the conv output dims.
        fcl_1 = FullConnLayer(4352, 1024, af.tanh)
        fcl_2 = FullConnLayer(1024, 104)
        decomp = DecompLayer(
            [(sz,) for sz in output_sizes],
            [af.softmax] * len(attrconf.unival) + \
            [af.sigmoid] * len(attrconf.multival)
        )
        return NeuralNet([fcl_1, fcl_2, decomp], through=True)

    # ``ac`` is shared by both branches, so attribute weights are tied too.
    ac = NeuralNet([attribute_classification(), CompLayer()])
    attr_module = NeuralNet([
        DecompLayer([(4352, )] * 2),
        MultiwayNeuralNet([ac, ac]),
        CompLayer()
    ])

    # Person re-identification module
    def person_reidentification():
        fp = FilterParingLayer((64, 17, 4), 4, (2, 2), True)
        fcl_1 = FullConnLayer(1088, 256, af.tanh)
        return NeuralNet([fp, fcl_1])

    reid_module = person_reidentification()

    # Combine them together
    model = NeuralNet([
        feat_module,
        CloneLayer(2),
        MultiwayNeuralNet([attr_module, reid_module]),
        CompLayer(),
        FullConnLayer(104 + 104 + 256, 256, af.tanh),
        FullConnLayer(256, 2, af.softmax)
    ])

    # Fine-tuning
    def reid_cost(output, target):
        # The re-id loss is up-weighted 6x relative to each attribute loss.
        return 6.0 * cf.mean_negative_loglikelihood(output, target)

    def reid_error(output, target):
        # NOTE(review): identical to reid_cost — the "error" is the scaled
        # loglikelihood rather than a misclassification rate; confirm this
        # is intentional.
        return 6.0 * cf.mean_negative_loglikelihood(output, target)

    def target_adapter():
        # Split the flat target vector into (attrs_1, attrs_2, same/diff
        # flag), then split each attribute vector per attribute group.
        d1 = DecompLayer([(sum(target_sizes), ), (sum(target_sizes), ), (1, )])
        d2 = DecompLayer([(sz, ) for sz in target_sizes])
        return NeuralNet([d1, MultiwayNeuralNet([d2, d2, IdentityLayer()])])

    # Per-branch costs: one list per attribute branch plus the re-id cost.
    cost_func = [
        [cf.mean_negative_loglikelihood] * len(attrconf.unival) + \
            [cf.mean_binary_cross_entropy] * len(attrconf.multival),
        [cf.mean_negative_loglikelihood] * len(attrconf.unival) + \
            [cf.mean_binary_cross_entropy] * len(attrconf.multival),
        reid_cost
    ]
    error_func = [
        [cf.mean_number_misclassified] * len(attrconf.unival) + \
            [cf.mean_zeroone_error_rate] * len(attrconf.multival),
        [cf.mean_number_misclassified] * len(attrconf.unival) + \
            [cf.mean_zeroone_error_rate] * len(attrconf.multival),
        reid_error
    ]

    evaluator = Evaluator(model,
                          cost_func,
                          error_func,
                          target_adapter(),
                          regularize=1e-3)

    sgd.train_batch(evaluator,
                    batch_dir,
                    learning_rate=1e-4,
                    momentum=0.9,
                    batch_size=300,
                    n_epoch=100,
                    learning_rate_decr=1.0,
                    patience_incr=1.5)

    return model
示例#8
0
def train_model(dataset):
    """Train deep model

    This function will build up a deep neural network and train it using given
    dataset.

    Args:
        dataset: A Dataset object returned by ``create_dataset``

    Returns:
        The trained deep model.
    """

    model = cachem.load('model')
    if model is not None:
        return model

    import reid.models.active_functions as actfuncs
    import reid.models.cost_functions as costfuncs
    from reid.models.layers import ConvPoolLayer, FullConnLayer
    from reid.models.layers import CompLayer, DecompLayer
    from reid.models.neural_net import NeuralNet, MultiwayNeuralNet
    from reid.models.evaluate import Evaluator
    from reid.optimization import sgd

    n_groups = len(bodyconf.groups)
    output_sizes = [len(grp) for grp in attrconf.unival + attrconf.multival]
    target_sizes = [1] * len(attrconf.unival) + \
                   [len(grp) for grp in attrconf.multival]

    # Build up model: one conv/pool column per body group, merged by Maxout,
    # followed by two fully-connected layers and a per-attribute decomposition.
    input_decomp = DecompLayer([(3, 80, 30)] * n_groups)

    columns = MultiwayNeuralNet([
        NeuralNet([
            ConvPoolLayer((64, 3, 3, 3), (2, 2), (3, 80, 30),
                          actfuncs.tanh, False),
            ConvPoolLayer((64, 64, 3, 3), (2, 2), None, actfuncs.tanh, True)
        ]) for __ in xrange(n_groups)
    ])

    feature_comp = CompLayer(strategy='Maxout')

    classify_1 = FullConnLayer(6912, 99)
    classify_2 = FullConnLayer(99, 99)

    # Softmax for single-label groups, sigmoid for multi-label groups.
    attr_decomp = DecompLayer(
        [(sz, ) for sz in output_sizes],
        [actfuncs.softmax] * len(attrconf.unival) +
        [actfuncs.sigmoid] * len(attrconf.multival))

    model = NeuralNet([input_decomp, columns, feature_comp,
                       classify_1, classify_2, attr_decomp])

    # Build up adapter splitting the target vector per attribute group.
    adapter = DecompLayer([(sz, ) for sz in target_sizes])

    # Build up evaluator.
    cost_functions = \
        [costfuncs.mean_negative_loglikelihood] * len(attrconf.unival) + \
        [costfuncs.mean_binary_cross_entropy] * len(attrconf.multival)

    error_functions = \
        [costfuncs.mean_number_misclassified] * len(attrconf.unival) + \
        [costfuncs.mean_zeroone_error_rate] * len(attrconf.multival)

    evaluator = Evaluator(model, cost_functions, error_functions, adapter,
                          regularize=1e-3)

    # Train the feature extraction model.
    sgd.train(evaluator, dataset,
              learning_rate=1e-3, momentum=0.9,
              batch_size=300, n_epoch=200,
              learning_rate_decr=1.0, patience_incr=1.5)

    return model
示例#9
0
# Toy fixture: one 4-D input row (X), two candidate 4-D targets (Y, Z), and
# fixed 2x2 weight matrices for the two parallel fully-connected layers.
X = numpy.asarray([[2, 1, 1, 3]], dtype=numpy.float32)
Y = numpy.asarray([[6, 9, 10, 4]], dtype=numpy.float32)
Z = numpy.asarray([[5, 8, 10, 6]], dtype=numpy.float32)
W1 = theano.shared(numpy.asarray([[1, 2], [3, 4]], dtype=numpy.float32),
                   borrow=True)
W2 = theano.shared(numpy.asarray([[4, 3], [2, 1]], dtype=numpy.float32),
                   borrow=True)

# Build up model
# Split the 4-D input into two 2-D halves, run each half through its own
# fixed-weight layer, re-concatenate, then split into (1,) and (3,) task
# outputs.
decomp = DecompLayer([(2, ), (2, )])

columns = MultiwayNeuralNet(
    [FullConnLayer(2, 2, W=W1),
     FullConnLayer(2, 2, W=W2)])

comp = CompLayer()

multitask = DecompLayer([(1, ), (3, )])

model = NeuralNet([decomp, columns, comp, multitask])

# Build up the target value adapter (same split as the multitask outputs)
adapter = DecompLayer([(1, ), (3, )])

# Build up evaluator with per-task mse cost and error
evaluator = Evaluator(model, [mse, mse], [mse, mse], adapter)

# Compute the expression by using the model
x = T.matrix('x')
y = T.matrix('y')
示例#10
-26
def compute_result(model, batch_dir, images, samples):
    """Compute output for dataset

    Args:
        model: Deep model
        batch_dir: Directory holding the pickled train/valid/test batches
        images: [img]
        samples: (train, valid, test) each is [(i, j, 0/1)]

    Returns:
        (train, valid, test) where each is
        (img_list_1, img_list_2, output_matrix, target_matrix)
    """

    print "Computing result ..."

    result = cachem.load('result')

    # Reuse the cached result when available.
    if result is not None: return result

    import glob
    import cPickle
    import theano
    import theano.tensor as T
    from reid.models.layers import CompLayer

    x = T.matrix()
    y, thr = model.get_output(x)
    thr.append(y)
    comp = CompLayer()
    # Concatenate all through-outputs (including y) into a single vector.
    y, thr = comp.get_output(thr)

    f = theano.function(inputs=[x], outputs=y)

    def compute_output(X):
        # Feed samples one row at a time and stack the flattened outputs.
        outputs = [f(X[i:i+1, :]).ravel() for i in xrange(X.shape[0])]
        return numpy.asarray(outputs)

    train_files = glob.glob(os.path.join(batch_dir, 'train_*.pkl'))
    valid_files = glob.glob(os.path.join(batch_dir, 'valid_*.pkl'))
    test_files = glob.glob(os.path.join(batch_dir, 'test_*.pkl'))

    # Sort so batches are concatenated in a deterministic order.
    train_files.sort()
    valid_files.sort()
    test_files.sort()    

    # Setup parameters
    n_train_batches = len(train_files)
    n_valid_batches = len(valid_files)
    n_test_batches = len(test_files)

    # NOTE(review): the loads below rebind ``T`` (the theano.tensor alias) to
    # each batch's target matrix, and ``cum_T``/``cum_Y`` are undefined when a
    # split has zero batch files.
    for i in xrange(n_train_batches):
        with open(train_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    # Resolve each split's (i, j) image-index pairs into image lists.
    train = ([images[i] for i, __, __ in samples[0]],
             [images[j] for __, j, __ in samples[0]],
             cum_Y, cum_T)

    for i in xrange(n_valid_batches):
        with open(valid_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    valid = ([images[i] for i, __, __ in samples[1]],
             [images[j] for __, j, __ in samples[1]],
             cum_Y, cum_T)

    for i in xrange(n_test_batches):
        with open(test_files[i], 'rb') as fid:
            X, T = cPickle.load(fid)
            Y = compute_output(X)
            if i == 0:
                cum_T = T
                cum_Y = Y
            else:
                cum_T = numpy.vstack((cum_T, T))
                cum_Y = numpy.vstack((cum_Y, Y))

    test = ([images[i] for i, __, __ in samples[2]],
            [images[j] for __, j, __ in samples[2]],
            cum_Y, cum_T)

    return (train, valid, test)