Example #1
def _dense_module(network, settings):
    # 1x1 bottleneck and 3x3 convolution, each followed by normalization.
    compactify = lambda network: _normalized_convolution(
        network, (1, 1), 16, (1, 1), (0, 0))
    convolve = lambda network: _normalized_convolution(network, (3, 3), 16,
                                                       (1, 1), (1, 1))
    cache = [network]  # keep every layer's output for dense connectivity
    for index in range(settings['N_LAYERS']):
        # concatenate the cached outputs selected by CONNECT along channels
        network = layers.concatenate(X=settings['CONNECT'](cache),
                                     axis=1,
                                     n_inputs=len(cache))
        if _DEBUG: print(output_shape(network, data=(10, 3, 32, 32)))
        network = compactify(network)
        network = convolve(network)
        cache.append(network)
    return network
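
For orientation, a minimal usage sketch. The settings keys mirror the lookups above; the dense, return-the-whole-cache choice for 'CONNECT' is an assumption, as is the surrounding layers module.

# Hypothetical usage: 'CONNECT' selects which cached outputs to concatenate;
# returning the whole cache gives DenseNet-style connectivity, matching the
# n_inputs=len(cache) argument above.
settings = {
    'N_LAYERS': 4,
    'CONNECT': lambda cache: tuple(cache),
}
network = layers.variable('data')
network = _dense_module(network, settings)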
Example #2
def batch_convolution(X,
                      kernel_shape,
                      n_filters,
                      stride,
                      pad,
                      data_shape,
                      weight=None,
                      bias=None,
                      **kwargs):
    # Give every sample in the batch its own filter bank: slice the batch
    # axis, convolve each sample separately, then re-stack the results.
    global _batch_convolution_count
    prefix = 'batch_convolution%d' % _batch_convolution_count
    N, C, H, W = output_shape(X, data=data_shape)
    filter_in, filter_out = C, n_filters

    # unpack the flat weight into N per-sample banks of shape
    # (filter_out, filter_in) + kernel_shape
    weight = reshape(weight, (-1, filter_in) + kernel_shape)
    weights = slice_channels(X=weight, n_outputs=N, axis=0)
    bias = variable('%s_bias' % prefix) if bias is None else bias

    networks = slice_channels(X=X, n_outputs=N, axis=0)
    networks = tuple(
        convolution(X=networks[i],
                    kernel_shape=kernel_shape,
                    n_filters=n_filters,
                    stride=stride,
                    pad=pad,
                    weight=weights[i],
                    bias=bias) for i in range(N))
    network = concatenate(X=networks, n_inputs=N, axis=0)

    _batch_convolution_count += 1
    return network
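
To make the per-sample weight layout concrete, a hedged shape trace using the shapes from the __main__ block in Example #5 below (those shapes are the only assumption):

# X      : (64, 3, 32, 32)  -> sliced into 64 inputs of shape (1, 3, 32, 32)
# weight : (64, 48, 3, 3)   -> reshaped to (-1, 3, 3, 3) = (1024, 3, 3, 3)
#          -> sliced into 64 per-sample banks of shape (16, 3, 3, 3)
# each convolution maps (1, 3, 32, 32) to (1, 16, 32, 32); concatenating
# over axis 0 restores the batch: (64, 16, 32, 32)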
Example #3
def _break_graph(operations, name, data_shape):
    # Rebuild a sequential graph while cutting it at the node called `name`:
    # everything downstream of the cut reads from a fresh '<name>_data'
    # variable of matching shape, and the cut node itself is also returned.
    replaced = None  # stays None if no node is named `name`
    network = layers.variable('data')
    for operation in operations:
        if network.name == name:
            replaced = network
            shape = output_shape(replaced, data=data_shape)
            network = operation(layers.variable('%s_data' % name, shape=shape))
        else:
            network = operation(network)
    return replaced, network
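
A hedged usage sketch: the two-convolution chain and the node name are invented, and passing `id` to name a convolution node is an assumption (by analogy with the softmax_loss call in Example #4).

# Hypothetical: cut the chain at the node named 'conv0'; the second
# convolution then reads from a fresh 'conv0_data' variable instead.
operations = (
    lambda net: layers.convolution(X=net, n_filters=16, kernel_shape=(3, 3),
                                   stride=(1, 1), pad=(1, 1), id='conv0'),
    lambda net: layers.convolution(X=net, n_filters=16, kernel_shape=(3, 3),
                                   stride=(1, 1), pad=(1, 1), id='conv1'),
)
replaced, network = _break_graph(operations, 'conv0',
                                 data_shape=(10, 3, 32, 32))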
Example #4
      # thread the running network in positionally when positional arguments
      # exist, otherwise pass it as the X keyword
      if args: args = (network,) + args
      else: kwargs['X'] = network
      network = getattr(layers, module_settings['operator'])(*args, **kwargs)
  network = layers.pooling(X=network, mode='average', global_pool=True,
                           kernel_shape=(1, 1), stride=(1, 1), pad=(1, 1))
  network = layers.flatten(network)
  network = layers.fully_connected(X=network, n_hidden_units=10)
  network = layers.softmax_loss(prediction=network, normalization='batch', id='softmax')
  return network

if __name__ == '__main__':
  settings = (
    {
      'operator' : 'convolution',
      'kwargs' : {'n_filters' : 16, 'kernel_shape' : (3, 3), 'stride' : (1, 1), 'pad' : (1, 1)},
    },
    {
      'operator' : 'pooling',
      'kwargs' : {'mode' : 'maximum', 'kernel_shape' : (2, 2), 'stride' : (2, 2), 'pad' : (0, 0)},
    },
    {
      'operator' : 'constant_attention_module',
      'settings' : {
        'convolution_settings' : {'n_filters' : 16, 'kernel_shape' : (3, 3), 'stride' : (1, 1), 'pad' : (1, 1)},
        'n_layers' : 3,
        'weight_sharing' : True,
      },
    }
  )
  network = constant_attention_network(settings)
  print(output_shape(network, data=(10000, 16, 32, 32)))
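
The builder itself is truncated above. Below is a hedged, simplified reconstruction of the missing outer loop, mirroring the tail that is shown; the args/kwargs extraction is guessed, and entries carrying a 'settings' key (such as 'constant_attention_module') would need their own dispatch, omitted here.

# Hypothetical reconstruction; handles plain operator entries only.
def constant_attention_network(settings):
    network = layers.variable('data')
    for module_settings in settings:
        args = tuple(module_settings.get('args', ()))
        kwargs = dict(module_settings.get('kwargs', {}))
        if args: args = (network,) + args
        else: kwargs['X'] = network
        network = getattr(layers, module_settings['operator'])(*args, **kwargs)
    network = layers.pooling(X=network, mode='average', global_pool=True,
                             kernel_shape=(1, 1), stride=(1, 1), pad=(1, 1))
    network = layers.flatten(network)
    network = layers.fully_connected(X=network, n_hidden_units=10)
    return layers.softmax_loss(prediction=network, normalization='batch',
                               id='softmax')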
Example #5
def batch_convolution(X,
                      kernel_shape,
                      n_filters,
                      stride,
                      pad,
                      data_shape,
                      weight=None,
                      bias=None,
                      **kwargs):
    global _batch_convolution_count
    prefix = 'batch_convolution%d' % _batch_convolution_count
    N, C, H, W = output_shape(X, data=data_shape)
    filter_in, filter_out = C, n_filters

    weight = reshape(weight, (-1, filter_in) + kernel_shape)
    weights = slice_channels(X=weight, n_outputs=N, axis=0)
    bias = variable('%s_bias' % prefix) if bias is None else bias

    networks = slice_channels(X=X, n_outputs=N, axis=0)
    networks = tuple(
        convolution(X=networks[i],
                    kernel_shape=kernel_shape,
                    n_filters=n_filters,
                    stride=stride,
                    pad=pad,
                    weight=weights[i],
                    bias=bias) for i in range(N))
    network = concatenate(X=networks, n_inputs=N, axis=0)

    _batch_convolution_count += 1
    return network


if __name__ == '__main__':
    X_SHAPE = (64, 3, 32, 32)
    network = variable('data')
    weight = variable('weight', shape=(64, 48, 3, 3))
    network = batch_convolution(X=network, kernel_shape=(3, 3), n_filters=16,
                                stride=(1, 1), pad=(1, 1), weight=weight,
                                data_shape=X_SHAPE)
    print(output_shape(network, data=X_SHAPE, weight=(64, 48, 3, 3)))
Example #6
    parameters = ({'weight': None, 'bias': None},
                  {'weight': None, 'bias': None},
                  {'weight': None, 'bias': None})
    KERNEL_SHAPES = ((3, 3, 3 * 16),) + ((3, 3, 16 * 16),) * 2
    for time in range(T):
        # run the extractor with the current (initially None, later
        # generated) parameters, then classify and accumulate the loss
        network = _extract_representations(X, parameters, batch_size)
        prediction = layers.pooling(X=network,
                                    mode='average',
                                    global_pool=True,
                                    kernel_shape=(1, 1),
                                    stride=(1, 1),
                                    pad=(0, 0))
        prediction = layers.flatten(prediction)
        prediction = layers.fully_connected(X=prediction, n_hidden_units=10)
        loss += layers.softmax_loss(prediction=prediction, label=label)
        # hypernetwork step: regenerate the convolution weights for the next
        # timestep from the network's own representation
        for index, weight in enumerate(
                _generate_parameters(network, KERNEL_SHAPES)):
            parameters[index]['weight'] = weight
    return loss


if __name__ == '__main__':
    print(output_shape(recurrent_hypernetwork(3, 64), data=(64, 3, 32, 32)))
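
The helpers _extract_representations and _generate_parameters are not shown. As a rough illustration of the weight-feedback contract only (every layer choice here is a guess, not the original code), each generated weight could be a flat vector of height * width * channels units, which batch_convolution's reshape would then unpack per sample:

# Hypothetical sketch: squeeze the current representation into one flat
# weight vector per kernel shape in KERNEL_SHAPES.
def _generate_parameters(network, kernel_shapes):
    for height, width, channels in kernel_shapes:
        weight = layers.pooling(X=network, mode='average', global_pool=True,
                                kernel_shape=(1, 1), stride=(1, 1), pad=(0, 0))
        weight = layers.flatten(weight)
        weight = layers.fully_connected(
            X=weight, n_hidden_units=height * width * channels)
        yield weight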