def make_network(env):
    with env.create_network() as net:
        # Feature extractor: two conv + BN + ReLU + 2x2-pooling blocks, built
        # inside a data-parallel controller (dpc).
        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 28, 28, 1
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = O.conv2d('conv1', _, 16, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn1', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool1', _, kernel=2)
                _ = O.conv2d('conv2', _, 32, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn2', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool2', _, kernel=2)
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        # Classifier head on top of the shared feature.
        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 64)
        _ = O.fc('fc2', _, 10)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            # Training phase only: cross-entropy loss plus accuracy/error summaries.
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
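
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original description): the helper below
# traces the tensor shapes produced by the MNIST network above, assuming that
# 'SAME' stride-1 convolutions preserve the spatial size and that pooling2d
# with kernel=2 uses stride 2. Pure Python; no framework required.
# ---------------------------------------------------------------------------
def _same_conv_shape(h, w, c_in, c_out):
    # 'SAME' padding with stride 1 keeps the spatial resolution.
    return h, w, c_out


def _pool_shape(h, w, c, kernel, stride=None):
    stride = stride or kernel
    return h // stride, w // stride, c


def trace_mnist_shapes():
    shape = (28, 28, 1)                           # img placeholder
    shape = _same_conv_shape(*shape, c_out=16)    # conv1 + bn1 + relu
    shape = _pool_shape(*shape, kernel=2)         # pool1 -> 14 x 14 x 16
    shape = _same_conv_shape(*shape, c_out=32)    # conv2 + bn2 + relu
    shape = _pool_shape(*shape, kernel=2)         # pool2 -> 7 x 7 x 32
    return shape


if __name__ == '__main__':
    # Expected: (7, 7, 32); the fc head then maps this feature to 64 and to 10 logits.
    print(trace_mnist_shapes())
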
import functools


def make_network(env):
    with env.create_network() as net:
        nr_classes = get_env('dataset.nr_classes')

        # Pre-bind the nonlinearity so every conv2d call below is conv + BN + ReLU.
        conv_bn_relu = functools.partial(O.conv2d, nonlin=O.bn_relu)
        conv2d = conv_bn_relu

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = conv2d('conv1.1', _, 16, (3, 3), padding='SAME')
                _ = conv2d('conv1.2', _, 16, (3, 3), padding='SAME')
                _ = O.pooling2d('pool1', _, kernel=3, stride=2)
                _ = conv2d('conv2.1', _, 32, (3, 3), padding='SAME')
                _ = conv2d('conv2.2', _, 32, (3, 3), padding='SAME')
                _ = O.pooling2d('pool2', _, kernel=3, stride=2)
                _ = conv2d('conv3.1', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.2', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.3', _, 64, (3, 3), padding='VALID')
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        # Classifier head on top of the shared feature.
        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 128, nonlin=O.relu)
        _ = O.fc('fc2', _, 64, nonlin=O.relu)
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            # Training phase only: cross-entropy loss plus accuracy/error summaries.
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
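
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original description): the CIFAR
# description above builds conv_bn_relu by pre-binding nonlin=O.bn_relu with
# functools.partial. The toy code below demonstrates the same pre-binding
# pattern with a stand-in conv function (toy_conv2d is hypothetical and only
# records how it was called).
# ---------------------------------------------------------------------------
import functools


def toy_conv2d(name, inp, channels, kernel, padding='SAME', nonlin=None):
    # Stand-in for O.conv2d: returns a description of the layer it would build.
    desc = '{}: conv {}x{}x{} ({})'.format(name, kernel[0], kernel[1], channels, padding)
    if nonlin is not None:
        desc += ' + ' + nonlin
    return desc


# Mirrors: conv_bn_relu = functools.partial(O.conv2d, nonlin=O.bn_relu)
toy_conv_bn_relu = functools.partial(toy_conv2d, nonlin='bn_relu')

if __name__ == '__main__':
    print(toy_conv_bn_relu('conv1.1', None, 16, (3, 3), padding='SAME'))
    # -> conv1.1: conv 3x3x16 (SAME) + bn_relu
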
import functools


def make_network(env):
    with env.create_network() as net:
        n = 2  # number of residual blocks per stage
        nr_classes = get_env('dataset.nr_classes')

        conv2d = functools.partial(O.conv2d, kernel=3, use_bias=False, padding='SAME')
        conv_bn_relu = functools.partial(conv2d, nonlin=O.bn_relu)

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def residual(name, x, first=False, inc_dim=False):
                # Pre-activation residual block; inc_dim doubles the channels
                # and halves the spatial resolution.
                in_channel = x.static_shape[3]
                out_channel = in_channel
                stride = 1
                if inc_dim:
                    out_channel = in_channel * 2
                    stride = 2
                with env.variable_scope(name):
                    _ = x if first else O.bn_relu(x)
                    _ = conv_bn_relu('conv1', _, out_channel, stride=stride)
                    _ = conv2d('conv2', _, out_channel)
                    if inc_dim:
                        # Shortcut branch: downsample, then zero-pad the channel axis.
                        x = O.pooling2d('pool', x, kernel=2)
                        x = O.pad(x, [[0, 0], [0, 0], [0, 0],
                                      [in_channel // 2, in_channel // 2]])
                    _ = _ + x
                return _

            def forward(img):
                _ = img / 128.0 - 1.0  # normalize to roughly [-1, 1]
                _ = conv_bn_relu('conv0', _, 16)
                _ = residual('res1.0', _, first=True)
                for i in range(1, n):
                    _ = residual('res1.{}'.format(i), _)
                _ = residual('res2.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res2.{}'.format(i), _)
                _ = residual('res3.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res3.{}'.format(i), _)
                _ = O.batch_norm('bn_last', _)
                _ = O.relu(_)
                _ = _.mean(axis=[1, 2])  # global average pooling
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        # Linear classifier on top of the globally pooled feature.
        _ = dpc.outputs['feature']
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            # Training phase only: cross-entropy loss plus accuracy/error summaries.
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
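
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original description): when a residual
# block above doubles the channel count (inc_dim=True), the shortcut branch is
# downsampled by a kernel-2 pooling and zero-padded from C to 2C channels
# (in_channel // 2 zeros on each side of the channel axis). The NumPy code
# below reproduces that shortcut on an NHWC array so the resulting shape can
# be checked without the framework; average pooling is used here as a
# stand-in for O.pooling2d.
# ---------------------------------------------------------------------------
import numpy as np


def shortcut_inc_dim(x):
    # x: NHWC feature map; returns an (N, H/2, W/2, 2C) array that matches the
    # shape produced by the stride-2 conv branch of the block above.
    n, h, w, c = x.shape
    # 2x2 pooling with stride 2 via block-reshape + mean.
    pooled = x.reshape(n, h // 2, 2, w // 2, 2, c).mean(axis=(2, 4))
    # Zero-pad the channel axis by c // 2 on each side, as in the O.pad call.
    return np.pad(pooled, [(0, 0), (0, 0), (0, 0), (c // 2, c // 2)])


if __name__ == '__main__':
    x = np.random.randn(8, 32, 32, 16).astype('float32')
    print(shortcut_inc_dim(x).shape)  # -> (8, 16, 16, 32)
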